OLD | NEW |
(Empty) | |
| 1 """scons.Node.FS |
| 2 |
| 3 File system nodes. |
| 4 |
| 5 These Nodes represent the canonical external objects that people think |
| 6 of when they think of building software: files and directories. |
| 7 |
| 8 This holds a "default_fs" variable that should be initialized with an FS |
| 9 that can be used by scripts or modules looking for the canonical default. |
| 10 |
| 11 """ |
| 12 |
| 13 # |
| 14 # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The S
Cons Foundation |
| 15 # |
| 16 # Permission is hereby granted, free of charge, to any person obtaining |
| 17 # a copy of this software and associated documentation files (the |
| 18 # "Software"), to deal in the Software without restriction, including |
| 19 # without limitation the rights to use, copy, modify, merge, publish, |
| 20 # distribute, sublicense, and/or sell copies of the Software, and to |
| 21 # permit persons to whom the Software is furnished to do so, subject to |
| 22 # the following conditions: |
| 23 # |
| 24 # The above copyright notice and this permission notice shall be included |
| 25 # in all copies or substantial portions of the Software. |
| 26 # |
| 27 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY |
| 28 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE |
| 29 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
| 30 # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE |
| 31 # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION |
| 32 # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION |
| 33 # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 34 |
# Revision stamp substituted by the SCons release packaging.
__revision__ = "src/engine/SCons/Node/FS.py 5134 2010/08/16 23:02:40 bdeegan"
| 36 |
| 37 import fnmatch |
| 38 import os |
| 39 import re |
| 40 import shutil |
| 41 import stat |
| 42 import sys |
| 43 import time |
| 44 import codecs |
| 45 |
| 46 import SCons.Action |
| 47 from SCons.Debug import logInstanceCreation |
| 48 import SCons.Errors |
| 49 import SCons.Memoize |
| 50 import SCons.Node |
| 51 import SCons.Node.Alias |
| 52 import SCons.Subst |
| 53 import SCons.Util |
| 54 import SCons.Warnings |
| 55 |
| 56 from SCons.Debug import Trace |
| 57 |
# Module-level switch for storing build information.
# NOTE(review): consumed elsewhere in this module/package; presumably
# cleared by a command-line option -- confirm against the callers.
do_store_info = True
| 59 |
| 60 |
class EntryProxyAttributeError(AttributeError):
    """
    An AttributeError subclass that remembers which EntryProxy and
    attribute name were involved, so the error message can identify
    the underlying Entry that triggered the exception.
    """
    def __init__(self, entry_proxy, attribute):
        AttributeError.__init__(self)
        self.entry_proxy = entry_proxy
        self.attribute = attribute

    def __str__(self):
        entry = self.entry_proxy.get()
        return "%s instance %s has no attribute %s" % (
            entry.__class__.__name__,
            repr(entry.name),
            repr(self.attribute))
| 76 |
# The max_drift value: by default, use a cached signature value for
# any file that's been untouched for more than two days.
# (2 days * 24 hours * 60 minutes * 60 seconds)
default_max_drift = 2*24*60*60
| 80 |
| 81 # |
| 82 # We stringify these file system Nodes a lot. Turning a file system Node |
| 83 # into a string is non-trivial, because the final string representation |
| 84 # can depend on a lot of factors: whether it's a derived target or not, |
| 85 # whether it's linked to a repository or source directory, and whether |
| 86 # there's duplication going on. The normal technique for optimizing |
| 87 # calculations like this is to memoize (cache) the string value, so you |
| 88 # only have to do the calculation once. |
| 89 # |
| 90 # A number of the above factors, however, can be set after we've already |
| 91 # been asked to return a string for a Node, because a Repository() or |
| 92 # VariantDir() call or the like may not occur until later in SConscript |
| 93 # files. So this variable controls whether we bother trying to save |
| 94 # string values for Nodes. The wrapper interface can set this whenever |
| 95 # they're done mucking with Repository and VariantDir and the other stuff, |
| 96 # to let this module know it can start returning saved string values |
| 97 # for Nodes. |
| 98 # |
# See the discussion above: while None/false, Node string values are
# recomputed on every str(); once set true (via save_strings(), called
# by the wrapper interface after Repository/VariantDir setup is done)
# the values are memoized.
Save_Strings = None

def save_strings(val):
    """Enable (truthy val) or disable memoization of Node string values."""
    global Save_Strings
    Save_Strings = val
| 104 |
| 105 # |
| 106 # Avoid unnecessary function calls by recording a Boolean value that |
| 107 # tells us whether or not os.path.splitdrive() actually does anything |
| 108 # on this system, and therefore whether we need to bother calling it |
| 109 # when looking up path names in various methods below. |
| 110 # |
| 111 |
do_splitdrive = None

def initialize_do_splitdrive():
    """Record whether os.path.splitdrive() does anything on this platform.

    On systems without drive letters it always returns an empty drive,
    so callers can skip it entirely when this flag is false.
    """
    global do_splitdrive
    drive = os.path.splitdrive('X:/foo')[0]
    do_splitdrive = bool(drive)

initialize_do_splitdrive()
| 120 |
| 121 # |
| 122 |
needs_normpath_check = None

def initialize_normpath_check():
    """
    Compile and store the regular expression that flags path names
    needing os.path.normpath() treatment.

    The unit tests call this to re-initialize the pattern when testing
    behavior with different values of os.sep.
    """
    global needs_normpath_check
    if os.sep == '/':
        sep_part = '/'
    else:
        sep_part = '[/%s]' % re.escape(os.sep)
    needs_normpath_check = re.compile(r'.*' + sep_part + r'|\.$|\.\.$')

initialize_normpath_check()
| 140 |
| 141 # |
| 142 # SCons.Action objects for interacting with the outside world. |
| 143 # |
| 144 # The Node.FS methods in this module should use these actions to |
| 145 # create and/or remove files and directories; they should *not* use |
| 146 # os.{link,symlink,unlink,mkdir}(), etc., directly. |
| 147 # |
| 148 # Using these SCons.Action objects ensures that descriptions of these |
| 149 # external activities are properly displayed, that the displays are |
| 150 # suppressed when the -s (silent) option is used, and (most importantly) |
# the actions are disabled when the -n option is used, in which case
| 152 # there should be *no* changes to the external file system(s)... |
| 153 # |
| 154 |
| 155 if hasattr(os, 'link'): |
| 156 def _hardlink_func(fs, src, dst): |
| 157 # If the source is a symlink, we can't just hard-link to it |
| 158 # because a relative symlink may point somewhere completely |
| 159 # different. We must disambiguate the symlink and then |
| 160 # hard-link the final destination file. |
| 161 while fs.islink(src): |
| 162 link = fs.readlink(src) |
| 163 if not os.path.isabs(link): |
| 164 src = link |
| 165 else: |
| 166 src = os.path.join(os.path.dirname(src), link) |
| 167 fs.link(src, dst) |
| 168 else: |
| 169 _hardlink_func = None |
| 170 |
if hasattr(os, 'symlink'):
    def _softlink_func(fs, src, dst):
        """Create dst as a symbolic link pointing at src."""
        fs.symlink(src, dst)
else:
    # Platform has no symlinks; set_duplicate() drops this style.
    _softlink_func = None
| 176 |
| 177 def _copy_func(fs, src, dest): |
| 178 shutil.copy2(src, dest) |
| 179 st = fs.stat(src) |
| 180 fs.chmod(dest, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE) |
| 181 |
| 182 |
# The duplication styles accepted by set_duplicate(); each hyphenated
# component names a link function to try, in order.
Valid_Duplicates = ['hard-soft-copy', 'soft-hard-copy',
                    'hard-copy', 'soft-copy', 'copy']

Link_Funcs = [] # contains the callables of the specified duplication style
| 187 |
def set_duplicate(duplicate):
    """Fill in Link_Funcs for the given duplication style.

    Raises SCons.Errors.InternalError when the style is not one of
    Valid_Duplicates.  Styles whose implementation is unavailable on
    this platform (a None entry in the mapping) are silently dropped.
    """
    # The mapping from component names to the underlying implementations
    # lives inside this function, not at module level, so that os.link
    # and os.symlink can be remapped for testing purposes.
    link_dict = {
        'hard' : _hardlink_func,
        'soft' : _softlink_func,
        'copy' : _copy_func
    }

    if duplicate not in Valid_Duplicates:
        raise SCons.Errors.InternalError("The argument of set_duplicate "
                                         "should be in Valid_Duplicates")
    global Link_Funcs
    Link_Funcs = [link_dict[component]
                  for component in duplicate.split('-')
                  if link_dict[component]]
| 210 |
def LinkFunc(target, source, env):
    """Duplicate source[0] as target[0] using the configured Link_Funcs.

    Absolute paths are used because relative paths cause problems with
    symbolic links.  That may be a problem for people who want to move
    their soft-linked src-trees around; they should use the 'hard-copy'
    mode instead, since softlinks cannot be used for that.
    """
    src = source[0].abspath
    dest = target[0].abspath
    destdir = os.path.split(dest)[0]
    if destdir and not target[0].fs.isdir(destdir):
        os.makedirs(destdir)
    if not Link_Funcs:
        # Nothing configured yet; fall back to the default order.
        set_duplicate('hard-soft-copy')
    fs = source[0].fs
    # Try each duplication function in turn, stopping at the first
    # success.  An OSError indicates something like a permissions
    # problem or an attempt to symlink across file-system boundaries;
    # an IOError indicates something like the file not existing.  In
    # either case keep trying additional functions in the list, and
    # only let the exception from the last one (copy) propagate.
    for link_func in Link_Funcs:
        try:
            link_func(fs, src, dest)
        except (IOError, OSError):
            if link_func == Link_Funcs[-1]:
                raise
        else:
            break
    return 0
| 242 |
# Action wrapper for LinkFunc with no display string.
Link = SCons.Action.Action(LinkFunc, None)
def LocalString(target, source, env):
    """Display string for LocalCopy actions."""
    return 'Local copy of %s from %s' % (target[0], source[0])

# Same duplication behavior as Link, but announced via LocalString.
LocalCopy = SCons.Action.Action(LinkFunc, LocalString)
| 248 |
def UnlinkFunc(target, source, env):
    """Action function: remove target[0] from the file system."""
    t = target[0]
    t.fs.unlink(t.abspath)
    return 0

Unlink = SCons.Action.Action(UnlinkFunc, None)
| 255 |
def MkdirFunc(target, source, env):
    """Action function: create the directory for target[0] if needed."""
    t = target[0]
    if not t.exists():
        t.fs.mkdir(t.abspath)
    return 0

Mkdir = SCons.Action.Action(MkdirFunc, None, presub=None)
| 263 |
MkdirBuilder = None

def get_MkdirBuilder():
    """Return the shared Builder used to create directory nodes,
    constructing it on first use (imports are deferred to avoid
    module-load cycles)."""
    global MkdirBuilder
    if MkdirBuilder is None:
        import SCons.Builder
        import SCons.Defaults
        # "env" will get filled in by Executor.get_build_env()
        # calling SCons.Defaults.DefaultEnvironment() when necessary.
        MkdirBuilder = SCons.Builder.Builder(
            action = Mkdir,
            env = None,
            explain = None,
            is_explicit = None,
            target_scanner = SCons.Defaults.DirEntryScanner,
            name = "MkdirBuilder")
    return MkdirBuilder
| 280 |
class _Null(object):
    # Sentinel class: its single instance marks "no value supplied".
    pass

# Shared sentinel instance, distinct from None.
_null = _Null()
| 285 |
DefaultSCCSBuilder = None
DefaultRCSBuilder = None

def get_DefaultSCCSBuilder():
    """Return the lazily-created default Builder for SCCS check-outs."""
    global DefaultSCCSBuilder
    if DefaultSCCSBuilder is None:
        import SCons.Builder
        # "env" will get filled in by Executor.get_build_env()
        # calling SCons.Defaults.DefaultEnvironment() when necessary.
        act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR')
        DefaultSCCSBuilder = SCons.Builder.Builder(
            action = act,
            env = None,
            name = "DefaultSCCSBuilder")
    return DefaultSCCSBuilder
| 300 |
def get_DefaultRCSBuilder():
    """Return the lazily-created default Builder for RCS check-outs."""
    global DefaultRCSBuilder
    if DefaultRCSBuilder is None:
        import SCons.Builder
        # "env" will get filled in by Executor.get_build_env()
        # calling SCons.Defaults.DefaultEnvironment() when necessary.
        act = SCons.Action.Action('$RCS_COCOM', '$RCS_COCOMSTR')
        DefaultRCSBuilder = SCons.Builder.Builder(
            action = act,
            env = None,
            name = "DefaultRCSBuilder")
    return DefaultRCSBuilder
| 312 |
| 313 # Cygwin's os.path.normcase pretends it's on a case-sensitive filesystem. |
| 314 _is_cygwin = sys.platform == "cygwin" |
| 315 if os.path.normcase("TeSt") == os.path.normpath("TeSt") and not _is_cygwin: |
| 316 def _my_normcase(x): |
| 317 return x |
| 318 else: |
| 319 def _my_normcase(x): |
| 320 return x.upper() |
| 321 |
| 322 |
| 323 |
class DiskChecker(object):
    """A switchable on-disk consistency check.

    Holds an active implementation ('do') and a no-op implementation
    ('ignore'); calling the instance dispatches to whichever was
    selected last via set().  Checks start out enabled.
    """
    def __init__(self, type, do, ignore):
        self.type = type
        self.do = do
        self.ignore = ignore
        # Enabled by default.
        self.func = do

    def __call__(self, *args, **kw):
        return self.func(*args, **kw)

    def set(self, list):
        # Enable this check only when its type name appears in the list.
        self.func = self.do if self.type in list else self.ignore
| 337 |
def do_diskcheck_match(node, predicate, errorfmt):
    """Evaluate predicate() for node and raise TypeError(errorfmt %
    node.abspath) when it holds.

    Calling predicate() may have cached a None value from stat() in
    node._memo; that value is discarded here so it doesn't interfere
    with later attempts to build this Node as we walk the DAG.  (This
    reaches into an interface that doesn't really belong to us, but
    it's all about performance, so for now we just document the
    dependency.)
    """
    matched = predicate()
    try:
        memo = node._memo
        if memo['stat'] is None:
            del memo['stat']
    except (AttributeError, KeyError):
        pass
    if matched:
        raise TypeError(errorfmt % node.abspath)
| 353 |
def ignore_diskcheck_match(node, predicate, errorfmt):
    """No-op stand-in for do_diskcheck_match() when 'match' checks are off."""
    pass
| 356 |
def do_diskcheck_rcs(node, name):
    """Return whether the node's RCS directory holds a version file
    (name + ',v') for name; None when there is no RCS directory.

    The RCS subdirectory Node (or None) is cached on the node as
    node.rcs_dir on first use.
    """
    try:
        rcs_dir = node.rcs_dir
    except AttributeError:
        rcs_dir = node.Dir('RCS') if node.entry_exists_on_disk('RCS') else None
        node.rcs_dir = rcs_dir
    return rcs_dir.entry_exists_on_disk(name + ',v') if rcs_dir else None
| 369 |
def ignore_diskcheck_rcs(node, name):
    """No-op stand-in for do_diskcheck_rcs() when 'rcs' checks are off."""
    return None
| 372 |
def do_diskcheck_sccs(node, name):
    """Return whether the node's SCCS directory holds a version file
    ('s.' + name) for name; None when there is no SCCS directory.

    The SCCS subdirectory Node (or None) is cached on the node as
    node.sccs_dir on first use.
    """
    try:
        sccs_dir = node.sccs_dir
    except AttributeError:
        sccs_dir = node.Dir('SCCS') if node.entry_exists_on_disk('SCCS') else None
        node.sccs_dir = sccs_dir
    return sccs_dir.entry_exists_on_disk('s.' + name) if sccs_dir else None
| 385 |
def ignore_diskcheck_sccs(node, name):
    """No-op stand-in for do_diskcheck_sccs() when 'sccs' checks are off."""
    return None
| 388 |
# The toggleable disk checkers; set_diskcheck() enables/disables each
# by its type name ('match', 'rcs', 'sccs').
diskcheck_match = DiskChecker('match', do_diskcheck_match, ignore_diskcheck_match)
diskcheck_rcs = DiskChecker('rcs', do_diskcheck_rcs, ignore_diskcheck_rcs)
diskcheck_sccs = DiskChecker('sccs', do_diskcheck_sccs, ignore_diskcheck_sccs)

diskcheckers = [
    diskcheck_match,
    diskcheck_rcs,
    diskcheck_sccs,
]
| 398 |
def set_diskcheck(list):
    """Enable exactly the disk checkers whose type names appear in list."""
    for dc in diskcheckers:
        dc.set(list)

def diskcheck_types():
    """Return the type names of all known disk checkers."""
    return [dc.type for dc in diskcheckers]
| 405 |
| 406 |
| 407 |
class EntryProxy(SCons.Util.Proxy):
    """Proxy for a Node.FS entry that implements the "special"
    substitution attributes (e.g. ${TARGET.abspath}, ${SOURCE.posix},
    ${TARGET.filebase}) on top of the proxied entry's own attributes."""

    __str__ = SCons.Util.Delegate('__str__')

    def __get_abspath(self):
        entry = self.get()
        return SCons.Subst.SpecialAttrWrapper(entry.get_abspath(),
                                              entry.name + "_abspath")

    def __get_filebase(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[0],
                                              name + "_filebase")

    def __get_suffix(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(name)[1],
                                              name + "_suffix")

    def __get_file(self):
        name = self.get().name
        return SCons.Subst.SpecialAttrWrapper(name, name + "_file")

    def __get_base_path(self):
        """Return the file's directory and file name, with the
        suffix stripped."""
        entry = self.get()
        return SCons.Subst.SpecialAttrWrapper(SCons.Util.splitext(entry.get_path())[0],
                                              entry.name + "_base")

    def __get_posix_path(self):
        """Return the path with / as the path separator,
        regardless of platform."""
        if os.sep == '/':
            return self
        else:
            entry = self.get()
            r = entry.get_path().replace(os.sep, '/')
            return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_posix")

    def __get_windows_path(self):
        """Return the path with \\ as the path separator,
        regardless of platform."""
        if os.sep == '\\':
            return self
        else:
            entry = self.get()
            r = entry.get_path().replace(os.sep, '\\')
            return SCons.Subst.SpecialAttrWrapper(r, entry.name + "_windows")

    def __get_srcnode(self):
        return EntryProxy(self.get().srcnode())

    def __get_srcdir(self):
        """Returns the directory containing the source node linked to this
        node via VariantDir(), or the directory of this node if not linked."""
        return EntryProxy(self.get().srcnode().dir)

    def __get_rsrcnode(self):
        return EntryProxy(self.get().srcnode().rfile())

    def __get_rsrcdir(self):
        """Returns the directory containing the source node linked to this
        node via VariantDir(), or the directory of this node if not linked."""
        return EntryProxy(self.get().srcnode().rfile().dir)

    def __get_dir(self):
        return EntryProxy(self.get().dir)

    # Map of special attribute names to the accessor methods above;
    # consulted first by __getattr__().
    dictSpecialAttrs = { "base"     : __get_base_path,
                         "posix"    : __get_posix_path,
                         "windows"  : __get_windows_path,
                         "win32"    : __get_windows_path,
                         "srcpath"  : __get_srcnode,
                         "srcdir"   : __get_srcdir,
                         "dir"      : __get_dir,
                         "abspath"  : __get_abspath,
                         "filebase" : __get_filebase,
                         "suffix"   : __get_suffix,
                         "file"     : __get_file,
                         "rsrcpath" : __get_rsrcnode,
                         "rsrcdir"  : __get_rsrcdir,
                       }

    def __getattr__(self, name):
        # This is how we implement the "special" attributes
        # such as base, posix, srcdir, etc.
        try:
            attr_function = self.dictSpecialAttrs[name]
        except KeyError:
            try:
                attr = SCons.Util.Proxy.__getattr__(self, name)
            except AttributeError:
                # BUGFIX: this clause used the Python-2-only form
                # "except AttributeError, e:", a SyntaxError under
                # Python 3 (and the bound name was never used).
                #
                # Raise our own AttributeError subclass with an
                # overridden __str__() method that identifies the
                # name of the entry that caused the exception.
                raise EntryProxyAttributeError(self, name)
            return attr
        else:
            return attr_function(self)
| 508 |
class Base(SCons.Node.Node):
    """A generic class for file system entries.  This class is for
    when we don't know yet whether the entry being looked up is a file
    or a directory.  Instances of this class can morph into either
    Dir or File objects by a later, more precise lookup.

    Note: this class does not define __cmp__ and __hash__ for
    efficiency reasons.  SCons does a lot of comparing of
    Node.FS.{Base,Entry,File,Dir} objects, so those operations must be
    as fast as possible, which means we want to use Python's built-in
    object identity comparisons.
    """

    # Per-class list of SCons.Memoize counters for the memoized methods.
    memoizer_counters = []

    def __init__(self, name, directory, fs):
        """Initialize a generic Node.FS.Base object.

        Call the superclass initialization, take care of setting up
        our relative and absolute paths, identify our parent
        directory, and indicate that this node should use
        signatures."""
        if __debug__: logInstanceCreation(self, 'Node.FS.Base')
        SCons.Node.Node.__init__(self)

        # Filenames and paths are probably reused and are intern'ed to
        # save some memory.
        self.name = SCons.Util.silent_intern(name)
        self.suffix = SCons.Util.silent_intern(SCons.Util.splitext(name)[1])
        self.fs = fs

        assert directory, "A directory must be provided"

        # Absolute and (variant-independent) "linked" absolute paths,
        # as computed by the parent directory.
        self.abspath = SCons.Util.silent_intern(directory.entry_abspath(name))
        self.labspath = SCons.Util.silent_intern(directory.entry_labspath(name))
        # path/tpath are relative; avoid a leading './' for the top dir.
        if directory.path == '.':
            self.path = SCons.Util.silent_intern(name)
        else:
            self.path = SCons.Util.silent_intern(directory.entry_path(name))
        if directory.tpath == '.':
            self.tpath = SCons.Util.silent_intern(name)
        else:
            self.tpath = SCons.Util.silent_intern(directory.entry_tpath(name))
        self.path_elements = directory.path_elements + [self]

        self.dir = directory
        self.cwd = None # will hold the SConscript directory for target nodes
        self.duplicate = directory.duplicate

    def str_for_display(self):
        """Return the string representation wrapped in double quotes."""
        return '"' + self.__str__() + '"'

    def must_be_same(self, klass):
        """
        This node, which already existed, is being looked up as the
        specified klass.  Raise an exception if it isn't.
        """
        if isinstance(self, klass) or klass is Entry:
            return
        raise TypeError("Tried to lookup %s '%s' as a %s." %\
                        (self.__class__.__name__, self.path, klass.__name__))

    def get_dir(self):
        """Return this node's parent directory Node."""
        return self.dir

    def get_suffix(self):
        """Return the file suffix computed at initialization time."""
        return self.suffix

    def rfile(self):
        """Return the repository file for this node; the base class
        has no repository notion and returns self."""
        return self

    def __str__(self):
        """A Node.FS.Base object's string representation is its path
        name."""
        global Save_Strings
        if Save_Strings:
            return self._save_str()
        return self._get_str()

    memoizer_counters.append(SCons.Memoize.CountValue('_save_str'))

    def _save_str(self):
        """Memoized variant of _get_str(): interns and caches the result."""
        try:
            return self._memo['_save_str']
        except KeyError:
            pass
        result = sys.intern(self._get_str())
        self._memo['_save_str'] = result
        return result

    def _get_str(self):
        """Compute the string representation, preferring the source
        node's path when this node is neither duplicated nor derived
        and exists only in the source tree."""
        global Save_Strings
        if self.duplicate or self.is_derived():
            return self.get_path()
        srcnode = self.srcnode()
        if srcnode.stat() is None and self.stat() is not None:
            result = self.get_path()
        else:
            result = srcnode.get_path()
        if not Save_Strings:
            # We're not at the point where we're saving the string
            # representations of FS Nodes (because we haven't finished
            # reading the SConscript files and need to have str() return
            # things relative to them).  That also means we can't yet
            # cache values returned (or not returned) by stat(), since
            # Python code in the SConscript files might still create
            # or otherwise affect the on-disk file.  So get rid of the
            # values that the underlying stat() method saved.
            try: del self._memo['stat']
            except KeyError: pass
            if self is not srcnode:
                try: del srcnode._memo['stat']
                except KeyError: pass
        return result

    rstr = __str__

    memoizer_counters.append(SCons.Memoize.CountValue('stat'))

    def stat(self):
        """Return the memoized os.stat() result for our absolute path,
        or None if the stat failed (e.g. the path doesn't exist)."""
        try: return self._memo['stat']
        except KeyError: pass
        try: result = self.fs.stat(self.abspath)
        except os.error: result = None
        self._memo['stat'] = result
        return result

    def exists(self):
        """Return whether the node currently exists on disk."""
        return self.stat() is not None

    def rexists(self):
        """Return whether the node (or its repository file) exists."""
        return self.rfile().exists()

    def getmtime(self):
        """Return the modification time, or None if the node doesn't exist."""
        st = self.stat()
        if st: return st[stat.ST_MTIME]
        else: return None

    def getsize(self):
        """Return the size in bytes, or None if the node doesn't exist."""
        st = self.stat()
        if st: return st[stat.ST_SIZE]
        else: return None

    def isdir(self):
        """Return whether the on-disk entry is a directory."""
        st = self.stat()
        return st is not None and stat.S_ISDIR(st[stat.ST_MODE])

    def isfile(self):
        """Return whether the on-disk entry is a regular file."""
        st = self.stat()
        return st is not None and stat.S_ISREG(st[stat.ST_MODE])

    if hasattr(os, 'symlink'):
        def islink(self):
            """Return whether the on-disk entry is a symbolic link."""
            try: st = self.fs.lstat(self.abspath)
            except os.error: return 0
            return stat.S_ISLNK(st[stat.ST_MODE])
    else:
        def islink(self):
            """This platform has no symlinks; always false."""
            return 0 # no symlinks

    def is_under(self, dir):
        """Return whether this node is dir itself or lies beneath it."""
        if self is dir:
            return 1
        else:
            return self.dir.is_under(dir)

    def set_local(self):
        # Set the node's _local flag.
        # NOTE(review): presumably marks the node to be built locally
        # rather than fetched from a Repository -- confirm with callers.
        self._local = 1

    def srcnode(self):
        """If this node is in a build path, return the node
        corresponding to its source file.  Otherwise, return
        ourself.
        """
        srcdir_list = self.dir.srcdir_list()
        if srcdir_list:
            srcnode = srcdir_list[0].Entry(self.name)
            srcnode.must_be_same(self.__class__)
            return srcnode
        return self

    def get_path(self, dir=None):
        """Return path relative to the current working directory of the
        Node.FS.Base object that owns us."""
        if not dir:
            dir = self.fs.getcwd()
        if self == dir:
            return '.'
        path_elems = self.path_elements
        # If dir is one of our ancestors, drop everything up to and
        # including it so the result is relative to dir.
        try: i = path_elems.index(dir)
        except ValueError: pass
        else: path_elems = path_elems[i+1:]
        path_elems = [n.name for n in path_elems]
        return os.sep.join(path_elems)

    def set_src_builder(self, builder):
        """Set the source code builder for this node."""
        self.sbuilder = builder
        if not self.has_builder():
            self.builder_set(builder)

    def src_builder(self):
        """Fetch the source code builder for this node.

        If there isn't one, we cache the source code builder specified
        for the directory (which in turn will cache the value from its
        parent directory, and so on up to the file system root).
        """
        try:
            scb = self.sbuilder
        except AttributeError:
            scb = self.dir.src_builder()
            self.sbuilder = scb
        return scb

    def get_abspath(self):
        """Get the absolute path of the file."""
        return self.abspath

    def for_signature(self):
        # Return just our name.  Even an absolute path would not work,
        # because that can change thanks to symlinks or remapped network
        # paths.
        return self.name

    def get_subst_proxy(self):
        """Return (and cache on first use) the EntryProxy used for
        construction-variable substitution."""
        try:
            return self._proxy
        except AttributeError:
            ret = EntryProxy(self)
            self._proxy = ret
            return ret

    def target_from_source(self, prefix, suffix, splitext=SCons.Util.splitext):
        """
        Generates a target entry that corresponds to this entry (usually
        a source file) with the specified prefix and suffix.

        Note that this method can be overridden dynamically for generated
        files that need different behavior.  See Tool/swig.py for
        an example.
        """
        return self.dir.Entry(prefix + splitext(self.name)[0] + suffix)

    def _Rfindalldirs_key(self, pathlist):
        """Memoization key for Rfindalldirs(): the path list itself."""
        return pathlist

    memoizer_counters.append(SCons.Memoize.CountDict('Rfindalldirs', _Rfindalldirs_key))

    def Rfindalldirs(self, pathlist):
        """
        Return all of the directories for a given path list, including
        corresponding "backing" directories in any repositories.

        The Node lookups are relative to this Node (typically a
        directory), so memoizing result saves cycles from looking
        up the same path for each target in a given directory.
        """
        try:
            memo_dict = self._memo['Rfindalldirs']
        except KeyError:
            memo_dict = {}
            self._memo['Rfindalldirs'] = memo_dict
        else:
            try:
                return memo_dict[pathlist]
            except KeyError:
                pass

        create_dir_relative_to_self = self.Dir
        result = []
        for path in pathlist:
            if isinstance(path, SCons.Node.Node):
                # Already a Node; pass it through untouched.
                result.append(path)
            else:
                dir = create_dir_relative_to_self(path)
                result.extend(dir.get_all_rdirs())

        memo_dict[pathlist] = result

        return result

    def RDirs(self, pathlist):
        """Search for a list of directories in the Repository list."""
        cwd = self.cwd or self.fs._cwd
        return cwd.Rfindalldirs(pathlist)

    memoizer_counters.append(SCons.Memoize.CountValue('rentry'))

    def rentry(self):
        """Return the first repository entry backing this node
        (memoized); self when the node exists locally or no backing
        entry is found on disk."""
        try:
            return self._memo['rentry']
        except KeyError:
            pass
        result = self
        if not self.exists():
            norm_name = _my_normcase(self.name)
            for dir in self.dir.get_all_rdirs():
                try:
                    node = dir.entries[norm_name]
                except KeyError:
                    if dir.entry_exists_on_disk(self.name):
                        result = dir.Entry(self.name)
                        break
        self._memo['rentry'] = result
        return result

    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        """Base nodes contain no entries to glob; Dir overrides this."""
        return []
| 819 |
| 820 class Entry(Base): |
| 821 """This is the class for generic Node.FS entries--that is, things |
| 822 that could be a File or a Dir, but we're just not sure yet. |
| 823 Consequently, the methods in this class really exist just to |
| 824 transform their associated object into the right class when the |
| 825 time comes, and then call the same-named method in the transformed |
| 826 class.""" |
| 827 |
    def diskcheck_match(self):
        """Generic entries have no on-disk type to validate; no-op."""
        pass
| 830 |
    def disambiguate(self, must_exist=None):
        """
        Morph this generic Entry into a Dir or a File, based on what
        (if anything) exists on disk at this path, and return self.

        When nothing is on disk here, fall back to the source
        directory; when that fails too, raise UserError if must_exist
        is true, otherwise default to File.
        """
        if self.isdir():
            self.__class__ = Dir
            self._morph()
        elif self.isfile():
            self.__class__ = File
            self._morph()
            self.clear()
        else:
            # There was nothing on-disk at this location, so look in
            # the src directory.
            #
            # We can't just use self.srcnode() straight away because
            # that would create an actual Node for this file in the src
            # directory, and there might not be one.  Instead, use the
            # dir_on_disk() method to see if there's something on-disk
            # with that name, in which case we can go ahead and call
            # self.srcnode() to create the right type of entry.
            srcdir = self.dir.srcnode()
            if srcdir != self.dir and \
               srcdir.entry_exists_on_disk(self.name) and \
               self.srcnode().isdir():
                self.__class__ = Dir
                self._morph()
            elif must_exist:
                msg = "No such file or directory: '%s'" % self.abspath
                raise SCons.Errors.UserError(msg)
            else:
                self.__class__ = File
                self._morph()
                self.clear()
        return self
| 865 |
    def rfile(self):
        """We're a generic Entry, but the caller is actually looking for
        a File at this point, so morph into one."""
        self.__class__ = File
        self._morph()
        self.clear()
        # Delegate to the File implementation now that we are one.
        return File.rfile(self)
| 873 |
    def scanner_key(self):
        """Scanners for entries are selected by file suffix."""
        return self.get_suffix()
| 876 |
    def get_contents(self):
        """Fetch the contents of the entry.  Returns the exact binary
        contents of the file."""
        try:
            self = self.disambiguate(must_exist=1)
        except SCons.Errors.UserError:
            # There was nothing on disk with which to disambiguate
            # this entry.  Leave it as an Entry, but return a null
            # string so calls to get_contents() in emitters and the
            # like (e.g. in qt.py) don't have to disambiguate by hand
            # or catch the exception.
            return ''
        else:
            # Re-dispatch: self is now a Dir or File, so this calls
            # the morphed class's get_contents(), not this method.
            return self.get_contents()
| 891 |
    def get_text_contents(self):
        """Fetch the decoded text contents of a Unicode encoded Entry.

        Since this should return the text contents from the file
        system, we check to see into what sort of subclass we should
        morph this Entry."""
        try:
            self = self.disambiguate(must_exist=1)
        except SCons.Errors.UserError:
            # There was nothing on disk with which to disambiguate
            # this entry.  Leave it as an Entry, but return a null
            # string so calls to get_text_contents() in emitters and
            # the like (e.g. in qt.py) don't have to disambiguate by
            # hand or catch the exception.
            return ''
        else:
            # Re-dispatch to the morphed class's implementation.
            return self.get_text_contents()
| 909 |
| 910 def must_be_same(self, klass): |
| 911 """Called to make sure a Node is a Dir. Since we're an |
| 912 Entry, we can morph into one.""" |
| 913 if self.__class__ is not klass: |
| 914 self.__class__ = klass |
| 915 self._morph() |
| 916 self.clear() |
| 917 |
    # The following methods can get called before the Taskmaster has
    # had a chance to call disambiguate() directly to see if this Entry
    # should really be a Dir or a File.  We therefore use these to call
    # disambiguate() transparently (from our caller's point of view).
    #
    # Right now, this minimal set of methods has been derived by just
    # looking at some of the methods that will obviously be called early
    # in any of the various Taskmasters' calling sequences, and then
    # empirically figuring out which additional methods are necessary
    # to make various tests pass.

    def exists(self):
        """Return if the Entry exists.  Check the file system to see
        what we should turn into first.  Assume a file if there's no
        directory."""
        return self.disambiguate().exists()

    def rel_path(self, other):
        """Resolve to Dir or File, then delegate the relative-path
        computation.  If disambiguate() could not settle on a subclass
        (we're still an Entry), give up loudly."""
        d = self.disambiguate()
        if d.__class__ is Entry:
            raise Exception("rel_path() could not disambiguate File/Dir")
        return d.rel_path(other)

    def new_ninfo(self):
        # Delegate so the node-info object comes from the resolved
        # Dir/File class, not from generic Entry.
        return self.disambiguate().new_ninfo()

    def changed_since_last_build(self, target, prev_ni):
        # Resolve our real type first; Dir and File implement this
        # differently.
        return self.disambiguate().changed_since_last_build(target, prev_ni)

    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        # Delegate globbing after resolving to our real type.
        return self.disambiguate()._glob1(pattern, ondisk, source, strings)

    def get_subst_proxy(self):
        # Resolve to Dir or File before handing out a substitution proxy.
        return self.disambiguate().get_subst_proxy()
| 952 |
# This is for later so we can differentiate between Entry the class and Entry
# the method of the FS class.  (FS.Entry shadows the class name inside FS.)
_classEntry = Entry
| 956 |
| 957 |
class LocalFS(object):
    """Thin wrappers around os / os.path / shutil operations on the
    local file system."""

    if SCons.Memoize.use_memoizer:
        __metaclass__ = SCons.Memoize.Memoized_Metaclass

    # This class implements an abstraction layer for operations involving
    # a local file system.  Essentially, this wraps any function in
    # the os, os.path or shutil modules that we use to actually go do
    # anything with or to the local file system.
    #
    # Note that there's a very good chance we'll refactor this part of
    # the architecture in some way as we really implement the interface(s)
    # for remote file system Nodes.  For example, the right architecture
    # might be to have this be a subclass instead of a base class.
    # Nevertheless, we're using this as a first step in that direction.
    #
    # We're not using chdir() yet because the calling subclass method
    # needs to use os.chdir() directly to avoid recursion.  Will we
    # really need this one?
    #def chdir(self, path):
    #    return os.chdir(path)
    def chmod(self, path, mode):
        return os.chmod(path, mode)
    def copy(self, src, dst):
        return shutil.copy(src, dst)
    def copy2(self, src, dst):
        # copy2() also preserves file metadata (mtime etc.).
        return shutil.copy2(src, dst)
    def exists(self, path):
        return os.path.exists(path)
    def getmtime(self, path):
        return os.path.getmtime(path)
    def getsize(self, path):
        return os.path.getsize(path)
    def isdir(self, path):
        return os.path.isdir(path)
    def isfile(self, path):
        return os.path.isfile(path)
    def link(self, src, dst):
        return os.link(src, dst)
    def lstat(self, path):
        return os.lstat(path)
    def listdir(self, path):
        return os.listdir(path)
    def makedirs(self, path):
        return os.makedirs(path)
    def mkdir(self, path):
        return os.mkdir(path)
    def rename(self, old, new):
        return os.rename(old, new)
    def stat(self, path):
        return os.stat(path)
    def symlink(self, src, dst):
        return os.symlink(src, dst)
    def open(self, path):
        return open(path)
    def unlink(self, path):
        return os.unlink(path)

    # On platforms without symlink support, islink()/readlink() degrade
    # to "nothing is a link" rather than raising AttributeError.
    if hasattr(os, 'symlink'):
        def islink(self, path):
            return os.path.islink(path)
    else:
        def islink(self, path):
            return 0                    # no symlinks

    if hasattr(os, 'readlink'):
        def readlink(self, file):
            return os.readlink(file)
    else:
        def readlink(self, file):
            return ''
| 1029 |
| 1030 |
| 1031 #class RemoteFS: |
| 1032 # # Skeleton for the obvious methods we might need from the |
| 1033 # # abstraction layer for a remote filesystem. |
| 1034 # def upload(self, local_src, remote_dst): |
| 1035 # pass |
| 1036 # def download(self, remote_src, local_dst): |
| 1037 # pass |
| 1038 |
| 1039 |
class FS(LocalFS):
    """The file system interface: factory and lookup for Entry/Dir/File
    Nodes, rooted at the directory holding the top-level build file."""

    memoizer_counters = []

    def __init__(self, path = None):
        """Initialize the Node.FS subsystem.

        The supplied path is the top of the source tree, where we
        expect to find the top-level build file.  If no path is
        supplied, the current directory is the default.

        The path argument must be a valid absolute path.
        """
        if __debug__: logInstanceCreation(self, 'Node.FS')

        self._memo = {}

        self.Root = {}
        self.SConstruct_dir = None
        self.max_drift = default_max_drift

        self.Top = None
        if path is None:
            self.pathTop = os.getcwd()
        else:
            self.pathTop = path
        self.defaultDrive = _my_normcase(os.path.splitdrive(self.pathTop)[0])

        self.Top = self.Dir(self.pathTop)
        self.Top.path = '.'
        self.Top.tpath = '.'
        self._cwd = self.Top

        # Give the NodeInfo classes a back-reference to this FS so
        # their str_to_node() lookups can find the tree.
        DirNodeInfo.fs = self
        FileNodeInfo.fs = self

    def set_SConstruct_dir(self, dir):
        """Record the directory containing the SConstruct file."""
        self.SConstruct_dir = dir

    def get_max_drift(self):
        """Return the allowed timestamp drift for content signatures."""
        return self.max_drift

    def set_max_drift(self, max_drift):
        self.max_drift = max_drift

    def getcwd(self):
        """Return the current working directory Node used for lookups."""
        return self._cwd

    def chdir(self, dir, change_os_dir=0):
        """Change the current working directory for lookups.
        If change_os_dir is true, we will also change the "real" cwd
        to match.
        """
        curr=self._cwd
        try:
            if dir is not None:
                self._cwd = dir
                if change_os_dir:
                    os.chdir(dir.abspath)
        except OSError:
            # Restore the lookup cwd if the real chdir failed.
            self._cwd = curr
            raise

    def get_root(self, drive):
        """
        Returns the root directory for the specified drive, creating
        it if necessary.
        """
        drive = _my_normcase(drive)
        try:
            return self.Root[drive]
        except KeyError:
            root = RootDir(drive, self)
            self.Root[drive] = root
            # Alias '' and the default drive to the same root so both
            # spellings resolve to one Node.
            if not drive:
                self.Root[self.defaultDrive] = root
            elif drive == self.defaultDrive:
                self.Root[''] = root
            return root

    def _lookup(self, p, directory, fsclass, create=1):
        """
        The generic entry point for Node lookup with user-supplied data.

        This translates arbitrary input into a canonical Node.FS object
        of the specified fsclass.  The general approach for strings is
        to turn it into a fully normalized absolute path and then call
        the root directory's lookup_abs() method for the heavy lifting.

        If the path name begins with '#', it is unconditionally
        interpreted relative to the top-level directory of this FS.  '#'
        is treated as a synonym for the top-level SConstruct directory,
        much like '~' is treated as a synonym for the user's home
        directory in a UNIX shell.  So both '#foo' and '#/foo' refer
        to the 'foo' subdirectory underneath the top-level SConstruct
        directory.

        If the path name is relative, then the path is looked up relative
        to the specified directory, or the current directory (self._cwd,
        typically the SConscript directory) if the specified directory
        is None.
        """
        if isinstance(p, Base):
            # It's already a Node.FS object.  Make sure it's the right
            # class and return.
            p.must_be_same(fsclass)
            return p
        # str(p) in case it's something like a proxy object
        p = str(p)

        initial_hash = (p[0:1] == '#')
        if initial_hash:
            # There was an initial '#', so we strip it and override
            # whatever directory they may have specified with the
            # top-level SConstruct directory.
            p = p[1:]
            directory = self.Top

        if directory and not isinstance(directory, Dir):
            directory = self.Dir(directory)

        if do_splitdrive:
            drive, p = os.path.splitdrive(p)
        else:
            drive = ''
        if drive and not p:
            # This causes a naked drive letter to be treated as a synonym
            # for the root directory on that drive.
            p = os.sep
        absolute = os.path.isabs(p)

        needs_normpath = needs_normpath_check.match(p)

        if initial_hash or not absolute:
            # This is a relative lookup, either to the top-level
            # SConstruct directory (because of the initial '#') or to
            # the current directory (the path name is not absolute).
            # Add the string to the appropriate directory lookup path,
            # after which the whole thing gets normalized.
            if not directory:
                directory = self._cwd
            if p:
                p = directory.labspath + '/' + p
            else:
                p = directory.labspath

        if needs_normpath:
            p = os.path.normpath(p)

        if drive or absolute:
            root = self.get_root(drive)
        else:
            if not directory:
                directory = self._cwd
            root = directory.root

        # Internal lookup paths always use '/' separators.
        if os.sep != '/':
            p = p.replace(os.sep, '/')
        return root._lookup_abs(p, fsclass, create)

    def Entry(self, name, directory = None, create = 1):
        """Look up or create a generic Entry node with the specified name.
        If the name is a relative path (begins with ./, ../, or a file
        name), then it is looked up relative to the supplied directory
        node, or to the top level directory of the FS (supplied at
        construction time) if no directory is supplied.
        """
        return self._lookup(name, directory, Entry, create)

    def File(self, name, directory = None, create = 1):
        """Look up or create a File node with the specified name.  If
        the name is a relative path (begins with ./, ../, or a file name),
        then it is looked up relative to the supplied directory node,
        or to the top level directory of the FS (supplied at construction
        time) if no directory is supplied.

        This method will raise TypeError if a directory is found at the
        specified path.
        """
        return self._lookup(name, directory, File, create)

    def Dir(self, name, directory = None, create = True):
        """Look up or create a Dir node with the specified name.  If
        the name is a relative path (begins with ./, ../, or a file name),
        then it is looked up relative to the supplied directory node,
        or to the top level directory of the FS (supplied at construction
        time) if no directory is supplied.

        This method will raise TypeError if a normal file is found at the
        specified path.
        """
        return self._lookup(name, directory, Dir, create)

    def VariantDir(self, variant_dir, src_dir, duplicate=1):
        """Link the supplied variant directory to the source directory
        for purposes of building files."""

        if not isinstance(src_dir, SCons.Node.Node):
            src_dir = self.Dir(src_dir)
        if not isinstance(variant_dir, SCons.Node.Node):
            variant_dir = self.Dir(variant_dir)
        if src_dir.is_under(variant_dir):
            raise SCons.Errors.UserError("Source directory cannot be under variant directory.")
        if variant_dir.srcdir:
            if variant_dir.srcdir == src_dir:
                return # We already did this.
            raise SCons.Errors.UserError("'%s' already has a source directory: '%s'."%(variant_dir, variant_dir.srcdir))
        variant_dir.link(src_dir, duplicate)

    def Repository(self, *dirs):
        """Specify Repository directories to search."""
        for d in dirs:
            if not isinstance(d, SCons.Node.Node):
                d = self.Dir(d)
            self.Top.addRepository(d)

    def variant_dir_target_climb(self, orig, dir, tail):
        """Create targets in corresponding variant directories

        Climb the directory tree, and look up path names
        relative to any linked variant directories we find.

        Even though this loops and walks up the tree, we don't memoize
        the return value because this is really only used to process
        the command-line targets.
        """
        targets = []
        message = None
        fmt = "building associated VariantDir targets: %s"
        start_dir = dir
        while dir:
            for bd in dir.variant_dirs:
                if start_dir.is_under(bd):
                    # If already in the build-dir location, don't reflect
                    return [orig], fmt % str(orig)
                p = os.path.join(bd.path, *tail)
                targets.append(self.Entry(p))
            tail = [dir.name] + tail
            dir = dir.up()
        if targets:
            message = fmt % ' '.join(map(str, targets))
        return targets, message

    def Glob(self, pathname, ondisk=True, source=True, strings=False, cwd=None):
        """
        Expand a glob pattern relative to cwd (default: the current
        lookup directory).

        This is mainly a shim layer that delegates to Dir.glob().
        """
        if cwd is None:
            cwd = self.getcwd()
        return cwd.glob(pathname, ondisk, source, strings)
| 1292 |
class DirNodeInfo(SCons.Node.NodeInfoBase):
    """Stored node-state (ninfo) class for Dir nodes."""
    current_version_id = 1

    # This should get reset by the FS initialization.
    fs = None

    def str_to_node(self, s):
        """Convert a stored path string back into a Node, looking it
        up relative to the top of the FS tree (honoring a drive prefix
        on platforms that split drives)."""
        fs = self.fs
        root = fs.Top.root
        if do_splitdrive:
            drive, s = os.path.splitdrive(s)
            if drive:
                root = fs.get_root(drive)
        if not os.path.isabs(s):
            s = fs.Top.labspath + '/' + s
        return root._lookup_abs(s, Entry)
| 1309 |
class DirBuildInfo(SCons.Node.BuildInfoBase):
    """Stored build-state (binfo) class for Dir nodes."""
    # Bump when the stored format changes incompatibly.
    current_version_id = 1
| 1312 |
glob_magic_check = re.compile('[*?[]')

def has_glob_magic(s):
    """Return True if *s* contains any glob wildcard character
    (``*``, ``?`` or ``[``)."""
    return bool(glob_magic_check.search(s))
| 1317 |
class Dir(Base):
    """A class for directories in a file system.
    """

    memoizer_counters = []

    # Per-class ninfo/binfo types used by the Node state machinery.
    NodeInfo = DirNodeInfo
    BuildInfo = DirBuildInfo
| 1326 |
    def __init__(self, name, directory, fs):
        if __debug__: logInstanceCreation(self, 'Node.FS.Dir')
        Base.__init__(self, name, directory, fs)
        self._morph()

    def _morph(self):
        """Turn a file system Node (either a freshly initialized directory
        object or a separate Entry object) into a proper directory object.

        Set up this directory's entries and hook it into the file
        system tree.  Specify that directories (this Node) don't use
        signatures for calculating whether they're current.
        """

        self.repositories = []
        self.srcdir = None

        # In-memory entry table; '.' and '..' mirror their on-disk
        # meanings so lookups can walk the tree without hitting disk.
        self.entries = {}
        self.entries['.'] = self
        self.entries['..'] = self.dir
        self.cwd = self
        self.searched = 0
        self._sconsign = None
        self.variant_dirs = []
        self.root = self.dir.root

        # Don't just reset the executor, replace its action list,
        # because it might have some pre-or post-actions that need to
        # be preserved.
        self.builder = get_MkdirBuilder()
        self.get_executor().set_action_list(self.builder.action)
| 1358 |
    def diskcheck_match(self):
        # NOTE: intentionally calls the module-level diskcheck_match()
        # handler that this method shares a name with; it complains if
        # a regular file exists where this directory is expected.
        diskcheck_match(self, self.isfile,
                        "File %s found where directory expected.")

    def __clearRepositoryCache(self, duplicate=None):
        """Called when we change the repository(ies) for a directory.
        This clears any cached information that is invalidated by changing
        the repository."""

        for node in self.entries.values():
            if node != self.dir:
                if node != self and isinstance(node, Dir):
                    # Recurse into subdirectories (name mangling makes
                    # this resolve to Dir._Dir__clearRepositoryCache).
                    node.__clearRepositoryCache(duplicate)
                else:
                    node.clear()
                    try:
                        del node._srcreps
                    except AttributeError:
                        pass
                    if duplicate is not None:
                        node.duplicate=duplicate

    def __resetDuplicate(self, node):
        # Re-derive a child node's duplicate setting from its
        # directory's, unless the node is this directory itself.
        if node != self:
            node.duplicate = node.get_dir().duplicate
| 1384 |
    # Convenience wrappers that delegate to the owning FS object's
    # factory methods, with this directory as the lookup base.

    def Entry(self, name):
        """
        Looks up or creates an entry node named 'name' relative to
        this directory.
        """
        return self.fs.Entry(name, self)

    def Dir(self, name, create=True):
        """
        Looks up or creates a directory node named 'name' relative to
        this directory.
        """
        return self.fs.Dir(name, self, create)

    def File(self, name):
        """
        Looks up or creates a file node named 'name' relative to
        this directory.
        """
        return self.fs.File(name, self)
| 1405 |
    def _lookup_rel(self, name, klass, create=1):
        """
        Looks up a *normalized* relative path name, relative to this
        directory.

        This method is intended for use by internal lookups with
        already-normalized path data.  For general-purpose lookups,
        use the Entry(), Dir() and File() methods above.

        This method does *no* input checking and will die or give
        incorrect results if it's passed a non-normalized path name (e.g.,
        a path containing '..'), an absolute path name, a top-relative
        ('#foo') path name, or any kind of object.
        """
        name = self.entry_labspath(name)
        return self.root._lookup_abs(name, klass, create)

    def link(self, srcdir, duplicate):
        """Set this directory as the variant directory for the
        supplied source directory."""
        self.srcdir = srcdir
        self.duplicate = duplicate
        # Changing the source directory invalidates cached repository
        # information below this point.
        self.__clearRepositoryCache(duplicate)
        srcdir.variant_dirs.append(self)
| 1430 |
    def getRepositories(self):
        """Returns a list of repositories for this directory.

        A non-duplicating variant dir also searches its source
        directory's repository equivalents."""
        if self.srcdir and not self.duplicate:
            return self.srcdir.get_all_rdirs() + self.repositories
        return self.repositories

    memoizer_counters.append(SCons.Memoize.CountValue('get_all_rdirs'))

    def get_all_rdirs(self):
        """Return this directory plus every repository equivalent,
        collected by walking up the tree.  Memoized; a fresh list copy
        is returned each call so callers can't mutate the cache."""
        try:
            return list(self._memo['get_all_rdirs'])
        except KeyError:
            pass

        result = [self]
        fname = '.'
        dir = self
        while dir:
            for rep in dir.getRepositories():
                result.append(rep.Dir(fname))
            # Accumulate the path from the original directory back down
            # from each ancestor we visit.
            if fname == '.':
                fname = dir.name
            else:
                fname = dir.name + os.sep + fname
            dir = dir.up()

        self._memo['get_all_rdirs'] = list(result)

        return result

    def addRepository(self, dir):
        # Ignore self-repositories and duplicates.
        if dir != self and not dir in self.repositories:
            self.repositories.append(dir)
            # Repository targets are reported relative to the top.
            dir.tpath = '.'
            self.__clearRepositoryCache()
| 1467 |
    def up(self):
        """Return this directory's parent (the '..' entry)."""
        return self.entries['..']

    def _rel_path_key(self, other):
        # Memoization key for rel_path().
        return str(other)

    memoizer_counters.append(SCons.Memoize.CountDict('rel_path', _rel_path_key))

    def rel_path(self, other):
        """Return a path to "other" relative to this directory.
        """

        # This complicated and expensive method, which constructs relative
        # paths between arbitrary Node.FS objects, is no longer used
        # by SCons itself.  It was introduced to store dependency paths
        # in .sconsign files relative to the target, but that ended up
        # being significantly inefficient.
        #
        # We're continuing to support the method because some SConstruct
        # files out there started using it when it was available, and
        # we're all about backwards compatibility..

        try:
            memo_dict = self._memo['rel_path']
        except KeyError:
            memo_dict = {}
            self._memo['rel_path'] = memo_dict
        else:
            try:
                return memo_dict[other]
            except KeyError:
                pass

        if self is other:
            result = '.'

        elif not other in self.path_elements:
            # "other" is not an ancestor of us: build the path to its
            # directory recursively, then append its name.
            try:
                other_dir = other.get_dir()
            except AttributeError:
                result = str(other)
            else:
                if other_dir is None:
                    result = other.name
                else:
                    dir_rel_path = self.rel_path(other_dir)
                    if dir_rel_path == '.':
                        result = other.name
                    else:
                        result = dir_rel_path + os.sep + other.name
        else:
            # "other" is one of our ancestors: climb with '..' entries,
            # then descend through the remaining path elements.
            i = self.path_elements.index(other) + 1

            path_elems = ['..'] * (len(self.path_elements) - i) \
                         + [n.name for n in other.path_elements[i:]]

            result = os.sep.join(path_elems)

        memo_dict[other] = result

        return result
| 1529 |
    def get_env_scanner(self, env, kw={}):
        # Directories are always scanned for their entries, regardless
        # of the construction environment.
        import SCons.Defaults
        return SCons.Defaults.DirEntryScanner

    def get_target_scanner(self):
        import SCons.Defaults
        return SCons.Defaults.DirEntryScanner

    def get_found_includes(self, env, scanner, path):
        """Return this directory's implicit dependencies.

        We don't bother caching the results because the scan typically
        shouldn't be requested more than once (as opposed to scanning
        .h file contents, which can be requested as many times as the
        files is #included by other files).
        """
        if not scanner:
            return []
        # Clear cached info for this Dir.  If we already visited this
        # directory on our walk down the tree (because we didn't know at
        # that point it was being used as the source for another Node)
        # then we may have calculated build signature before realizing
        # we had to scan the disk.  Now that we have to, though, we need
        # to invalidate the old calculated signature so that any node
        # dependent on our directory structure gets one that includes
        # info about everything on disk.
        self.clear()
        return scanner(self, env, path)
| 1558 |
| 1559 # |
| 1560 # Taskmaster interface subsystem |
| 1561 # |
| 1562 |
| 1563 def prepare(self): |
| 1564 pass |
| 1565 |
| 1566 def build(self, **kw): |
| 1567 """A null "builder" for directories.""" |
| 1568 global MkdirBuilder |
| 1569 if self.builder is not MkdirBuilder: |
| 1570 SCons.Node.Node.build(self, **kw) |
| 1571 |
| 1572 # |
| 1573 # |
| 1574 # |
| 1575 |
| 1576 def _create(self): |
| 1577 """Create this directory, silently and without worrying about |
| 1578 whether the builder is the default or not.""" |
| 1579 listDirs = [] |
| 1580 parent = self |
| 1581 while parent: |
| 1582 if parent.exists(): |
| 1583 break |
| 1584 listDirs.append(parent) |
| 1585 p = parent.up() |
| 1586 if p is None: |
| 1587 # Don't use while: - else: for this condition because |
| 1588 # if so, then parent is None and has no .path attribute. |
| 1589 raise SCons.Errors.StopError(parent.path) |
| 1590 parent = p |
| 1591 listDirs.reverse() |
| 1592 for dirnode in listDirs: |
| 1593 try: |
| 1594 # Don't call dirnode.build(), call the base Node method |
| 1595 # directly because we definitely *must* create this |
| 1596 # directory. The dirnode.build() method will suppress |
| 1597 # the build if it's the default builder. |
| 1598 SCons.Node.Node.build(dirnode) |
| 1599 dirnode.get_executor().nullify() |
| 1600 # The build() action may or may not have actually |
| 1601 # created the directory, depending on whether the -n |
| 1602 # option was used or not. Delete the _exists and |
| 1603 # _rexists attributes so they can be reevaluated. |
| 1604 dirnode.clear() |
| 1605 except OSError: |
| 1606 pass |
| 1607 |
    def multiple_side_effect_has_builder(self):
        # The default MkdirBuilder doesn't count as a "real" builder
        # for side-effect accounting.
        global MkdirBuilder
        return self.builder is not MkdirBuilder and self.has_builder()

    def alter_targets(self):
        """Return any corresponding targets in a variant directory.
        """
        return self.fs.variant_dir_target_climb(self, self, [])

    def scanner_key(self):
        """A directory does not get scanned."""
        return None
| 1620 |
    def get_text_contents(self):
        """We already emit things in text, so just return the binary
        version."""
        return self.get_contents()
| 1625 |
| 1626 def get_contents(self): |
| 1627 """Return content signatures and names of all our children |
| 1628 separated by new-lines. Ensure that the nodes are sorted.""" |
| 1629 contents = [] |
| 1630 for node in sorted(self.children(), key=lambda t: t.name): |
| 1631 contents.append('%s %s\n' % (node.get_csig(), node.name)) |
| 1632 return ''.join(contents) |
| 1633 |
    def get_csig(self):
        """Compute the content signature for Directory nodes.  In
        general, this is not needed and the content signature is not
        stored in the DirNodeInfo.  However, if get_contents on a Dir
        node is called which has a child directory, the child
        directory should return the hash of its contents."""
        contents = self.get_contents()
        return SCons.Util.MD5signature(contents)

    def do_duplicate(self, src):
        # Nothing to do: duplicating an entry into a variant dir is a
        # no-op when the entry is a directory.
        pass

    # Directories decide "changed" from Node state, not file content.
    changed_since_last_build = SCons.Node.Node.state_has_changed
| 1647 |
| 1648 def is_up_to_date(self): |
| 1649 """If any child is not up-to-date, then this directory isn't, |
| 1650 either.""" |
| 1651 if self.builder is not MkdirBuilder and not self.exists(): |
| 1652 return 0 |
| 1653 up_to_date = SCons.Node.up_to_date |
| 1654 for kid in self.children(): |
| 1655 if kid.get_state() > up_to_date: |
| 1656 return 0 |
| 1657 return 1 |
| 1658 |
    def rdir(self):
        """Return the first repository equivalent of this directory
        that exists on disk, falling back to self."""
        if not self.exists():
            norm_name = _my_normcase(self.name)
            for dir in self.dir.get_all_rdirs():
                try: node = dir.entries[norm_name]
                except KeyError: node = dir.dir_on_disk(self.name)
                if node and node.exists() and \
                    (isinstance(dir, Dir) or isinstance(dir, Entry)):
                        return node
        return self

    def sconsign(self):
        """Return the .sconsign file info for this directory,
        creating it first if necessary."""
        if not self._sconsign:
            # Imported here to avoid a circular import at module load.
            import SCons.SConsign
            self._sconsign = SCons.SConsign.ForDirectory(self)
        return self._sconsign

    def srcnode(self):
        """Dir has a special need for srcnode()...if we
        have a srcdir attribute set, then that *is* our srcnode."""
        if self.srcdir:
            return self.srcdir
        return Base.srcnode(self)
| 1684 |
| 1685 def get_timestamp(self): |
| 1686 """Return the latest timestamp from among our children""" |
| 1687 stamp = 0 |
| 1688 for kid in self.children(): |
| 1689 if kid.get_timestamp() > stamp: |
| 1690 stamp = kid.get_timestamp() |
| 1691 return stamp |
| 1692 |
| 1693 def entry_abspath(self, name): |
| 1694 return self.abspath + os.sep + name |
| 1695 |
| 1696 def entry_labspath(self, name): |
| 1697 return self.labspath + '/' + name |
| 1698 |
| 1699 def entry_path(self, name): |
| 1700 return self.path + os.sep + name |
| 1701 |
| 1702 def entry_tpath(self, name): |
| 1703 return self.tpath + os.sep + name |
| 1704 |
    def entry_exists_on_disk(self, name):
        """Return whether an on-disk entry with the given name exists
        in this directory.

        The first call populates self.on_disk_entries from a single
        os.listdir(); subsequent calls hit only that cache (plus a
        Windows-only direct probe for 8.3 names)."""
        try:
            d = self.on_disk_entries
        except AttributeError:
            d = {}
            try:
                entries = os.listdir(self.abspath)
            except OSError:
                # Directory unreadable or missing: cache an empty table.
                pass
            else:
                for entry in map(_my_normcase, entries):
                    d[entry] = True
            self.on_disk_entries = d
        if sys.platform == 'win32':
            name = _my_normcase(name)
            result = d.get(name)
            if result is None:
                # Belt-and-suspenders for Windows:  check directly for
                # 8.3 file names that don't show up in os.listdir().
                result = os.path.exists(self.abspath + os.sep + name)
                d[name] = result
            return result
        else:
            return name in d
| 1729 |
    memoizer_counters.append(SCons.Memoize.CountValue('srcdir_list'))

    def srcdir_list(self):
        """Return the source directories (from VariantDir links found
        while walking up the tree) corresponding to this directory.
        Memoized."""
        try:
            return self._memo['srcdir_list']
        except KeyError:
            pass

        result = []

        dirname = '.'
        dir = self
        while dir:
            if dir.srcdir:
                # Map our relative position under 'dir' into its srcdir.
                result.append(dir.srcdir.Dir(dirname))
            dirname = dir.name + os.sep + dirname
            dir = dir.up()

        self._memo['srcdir_list'] = result

        return result
| 1751 |
    def srcdir_duplicate(self, name):
        """If 'name' exists on disk in one of our source directories,
        return its node -- duplicated into this directory first when
        duplication is enabled.  Returns None when not found."""
        for dir in self.srcdir_list():
            if self.is_under(dir):
                # We shouldn't source from something in the build path;
                # variant_dir is probably under src_dir, in which case
                # we are reflecting.
                break
            if dir.entry_exists_on_disk(name):
                srcnode = dir.Entry(name).disambiguate()
                if self.duplicate:
                    node = self.Entry(name).disambiguate()
                    node.do_duplicate(srcnode)
                    return node
                else:
                    return srcnode
        return None

    def _srcdir_find_file_key(self, filename):
        # Memoization key for srcdir_find_file().
        return filename

    memoizer_counters.append(SCons.Memoize.CountDict('srcdir_find_file', _srcdir_find_file_key))
| 1773 |
    def srcdir_find_file(self, filename):
        """Locate 'filename' in this directory's repository chain or,
        failing that, in its source directories' repository chains.
        Returns a (node, directory) tuple, or (None, None) when the
        file can't be found anywhere.  Memoized."""
        try:
            memo_dict = self._memo['srcdir_find_file']
        except KeyError:
            memo_dict = {}
            self._memo['srcdir_find_file'] = memo_dict
        else:
            try:
                return memo_dict[filename]
            except KeyError:
                pass

        def func(node):
            # Accept only File/Entry nodes that are derived or exist.
            if (isinstance(node, File) or isinstance(node, Entry)) and \
               (node.is_derived() or node.exists()):
                return node
            return None

        norm_name = _my_normcase(filename)

        for rdir in self.get_all_rdirs():
            try: node = rdir.entries[norm_name]
            except KeyError: node = rdir.file_on_disk(filename)
            else: node = func(node)
            if node:
                result = (node, self)
                memo_dict[filename] = result
                return result

        for srcdir in self.srcdir_list():
            for rdir in srcdir.get_all_rdirs():
                try: node = rdir.entries[norm_name]
                except KeyError: node = rdir.file_on_disk(filename)
                else: node = func(node)
                if node:
                    # Found in a source dir: the node we hand back lives
                    # in *this* directory, paired with the srcdir found.
                    result = (File(filename, self, self.fs), srcdir)
                    memo_dict[filename] = result
                    return result

        result = (None, None)
        memo_dict[filename] = result
        return result
| 1816 |
| 1817 def dir_on_disk(self, name): |
| 1818 if self.entry_exists_on_disk(name): |
| 1819 try: return self.Dir(name) |
| 1820 except TypeError: pass |
| 1821 node = self.srcdir_duplicate(name) |
| 1822 if isinstance(node, File): |
| 1823 return None |
| 1824 return node |
| 1825 |
| 1826 def file_on_disk(self, name): |
| 1827 if self.entry_exists_on_disk(name) or \ |
| 1828 diskcheck_rcs(self, name) or \ |
| 1829 diskcheck_sccs(self, name): |
| 1830 try: return self.File(name) |
| 1831 except TypeError: pass |
| 1832 node = self.srcdir_duplicate(name) |
| 1833 if isinstance(node, Dir): |
| 1834 return None |
| 1835 return node |
| 1836 |
| 1837 def walk(self, func, arg): |
| 1838 """ |
| 1839 Walk this directory tree by calling the specified function |
| 1840 for each directory in the tree. |
| 1841 |
| 1842 This behaves like the os.path.walk() function, but for in-memory |
| 1843 Node.FS.Dir objects. The function takes the same arguments as |
| 1844 the functions passed to os.path.walk(): |
| 1845 |
| 1846 func(arg, dirname, fnames) |
| 1847 |
| 1848 Except that "dirname" will actually be the directory *Node*, |
| 1849 not the string. The '.' and '..' entries are excluded from |
| 1850 fnames. The fnames list may be modified in-place to filter the |
| 1851 subdirectories visited or otherwise impose a specific order. |
| 1852 The "arg" argument is always passed to func() and may be used |
| 1853 in any way (or ignored, passing None is common). |
| 1854 """ |
| 1855 entries = self.entries |
| 1856 names = list(entries.keys()) |
| 1857 names.remove('.') |
| 1858 names.remove('..') |
| 1859 func(arg, self, names) |
| 1860 for dirname in [n for n in names if isinstance(entries[n], Dir)]: |
| 1861 entries[dirname].walk(func, arg) |
| 1862 |
| 1863 def glob(self, pathname, ondisk=True, source=False, strings=False): |
| 1864 """ |
| 1865 Returns a list of Nodes (or strings) matching a specified |
| 1866 pathname pattern. |
| 1867 |
| 1868 Pathname patterns follow UNIX shell semantics: * matches |
| 1869 any-length strings of any characters, ? matches any character, |
| 1870 and [] can enclose lists or ranges of characters. Matches do |
| 1871 not span directory separators. |
| 1872 |
| 1873 The matches take into account Repositories, returning local |
| 1874 Nodes if a corresponding entry exists in a Repository (either |
| 1875 an in-memory Node or something on disk). |
| 1876 |
| 1877 By defafult, the glob() function matches entries that exist |
| 1878 on-disk, in addition to in-memory Nodes. Setting the "ondisk" |
| 1879 argument to False (or some other non-true value) causes the glob() |
| 1880 function to only match in-memory Nodes. The default behavior is |
| 1881 to return both the on-disk and in-memory Nodes. |
| 1882 |
| 1883 The "source" argument, when true, specifies that corresponding |
| 1884 source Nodes must be returned if you're globbing in a build |
| 1885 directory (initialized with VariantDir()). The default behavior |
| 1886 is to return Nodes local to the VariantDir(). |
| 1887 |
| 1888 The "strings" argument, when true, returns the matches as strings, |
| 1889 not Nodes. The strings are path names relative to this directory. |
| 1890 |
| 1891 The underlying algorithm is adapted from the glob.glob() function |
| 1892 in the Python library (but heavily modified), and uses fnmatch() |
| 1893 under the covers. |
| 1894 """ |
| 1895 dirname, basename = os.path.split(pathname) |
| 1896 if not dirname: |
| 1897 return sorted(self._glob1(basename, ondisk, source, strings), |
| 1898 key=lambda t: str(t)) |
| 1899 if has_glob_magic(dirname): |
| 1900 list = self.glob(dirname, ondisk, source, strings=False) |
| 1901 else: |
| 1902 list = [self.Dir(dirname, create=True)] |
| 1903 result = [] |
| 1904 for dir in list: |
| 1905 r = dir._glob1(basename, ondisk, source, strings) |
| 1906 if strings: |
| 1907 r = [os.path.join(str(dir), x) for x in r] |
| 1908 result.extend(r) |
| 1909 return sorted(result, key=lambda a: str(a)) |
| 1910 |
    def _glob1(self, pattern, ondisk=True, source=False, strings=False):
        """
        Globs for and returns a list of entry names matching a single
        pattern in this directory.

        This searches any repositories and source directories for
        corresponding entries and returns a Node (or string) relative
        to the current directory if an entry is found anywhere.

        TODO: handle pattern with no wildcard
        """
        # Search this directory's repository chain plus every source
        # directory's repository chain.
        search_dir_list = self.get_all_rdirs()
        for srcdir in self.srcdir_list():
            search_dir_list.extend(srcdir.get_all_rdirs())

        selfEntry = self.Entry
        names = []
        for dir in search_dir_list:
            # We use the .name attribute from the Node because the keys of
            # the dir.entries dictionary are normalized (that is, all upper
            # case) on case-insensitive systems like Windows.
            node_names = [ v.name for k, v in dir.entries.items()
                           if k not in ('.', '..') ]
            names.extend(node_names)
            if not strings:
                # Make sure the working directory (self) actually has
                # entries for all Nodes in repositories or variant dirs.
                for name in node_names: selfEntry(name)
            if ondisk:
                try:
                    disk_names = os.listdir(dir.abspath)
                except os.error:
                    # Directory doesn't exist (or isn't readable); skip it.
                    continue
                names.extend(disk_names)
                if not strings:
                    # We're going to return corresponding Nodes in
                    # the local directory, so we need to make sure
                    # those Nodes exist.  We only want to create
                    # Nodes for the entries that will match the
                    # specified pattern, though, which means we
                    # need to filter the list here, even though
                    # the overall list will also be filtered later,
                    # after we exit this loop.
                    if pattern[0] != '.':
                        #disk_names = [ d for d in disk_names if d[0] != '.' ]
                        disk_names = [x for x in disk_names if x[0] != '.']
                    disk_names = fnmatch.filter(disk_names, pattern)
                    dirEntry = dir.Entry
                    for name in disk_names:
                        # Add './' before disk filename so that '#' at
                        # beginning of filename isn't interpreted.
                        name = './' + name
                        node = dirEntry(name).disambiguate()
                        n = selfEntry(name)
                        if n.__class__ != node.__class__:
                            n.__class__ = node.__class__
                            n._morph()

        # De-duplicate names collected across the search directories.
        names = set(names)
        if pattern[0] != '.':
            #names = [ n for n in names if n[0] != '.' ]
            names = [x for x in names if x[0] != '.']
        names = fnmatch.filter(names, pattern)

        if strings:
            return names

        #return [ self.entries[_my_normcase(n)] for n in names ]
        return [self.entries[_my_normcase(n)] for n in names]
| 1980 |
class RootDir(Dir):
    """A class for the root directory of a file system.

    This is the same as a Dir class, except that the path separator
    ('/' or '\\') is actually part of the name, so we don't need to
    add a separator when creating the path names of entries within
    this directory.
    """
    def __init__(self, name, fs):
        if __debug__: logInstanceCreation(self, 'Node.FS.RootDir')
        # We're going to be our own parent directory (".." entry and .dir
        # attribute) so we have to set up some values so Base.__init__()
        # won't gag when it calls some of our methods.
        self.abspath = ''
        self.labspath = ''
        self.path = ''
        self.tpath = ''
        self.path_elements = []
        self.duplicate = 0
        self.root = self
        Base.__init__(self, name, self, fs)

        # Now set our paths to what we really want them to be: the
        # initial drive letter (the name) plus the directory separator,
        # except for the "lookup abspath," which does not have the
        # drive letter.
        self.abspath = name + os.sep
        self.labspath = ''
        self.path = name + os.sep
        self.tpath = name + os.sep
        self._morph()

        # Cache of every Node under this root, keyed by normalized
        # absolute path; see _lookup_abs().
        self._lookupDict = {}

        # The // and os.sep + os.sep entries are necessary because
        # os.path.normpath() seems to preserve double slashes at the
        # beginning of a path (presumably for UNC path names), but
        # collapses triple slashes to a single slash.
        self._lookupDict[''] = self
        self._lookupDict['/'] = self
        self._lookupDict['//'] = self
        self._lookupDict[os.sep] = self
        self._lookupDict[os.sep + os.sep] = self

    def must_be_same(self, klass):
        # A root directory may always be treated as a plain Dir.
        if klass is Dir:
            return
        Base.must_be_same(self, klass)

    def _lookup_abs(self, p, klass, create=1):
        """
        Fast (?) lookup of a *normalized* absolute path.

        This method is intended for use by internal lookups with
        already-normalized path data.  For general-purpose lookups,
        use the FS.Entry(), FS.Dir() or FS.File() methods.

        The caller is responsible for making sure we're passed a
        normalized absolute path; we merely let Python's dictionary look
        up and return the One True Node.FS object for the path.

        If a Node for the specified "p" doesn't already exist, and
        "create" is specified, the Node may be created after recursive
        invocation to find or create the parent directory or directories.
        """
        k = _my_normcase(p)
        try:
            result = self._lookupDict[k]
        except KeyError:
            if not create:
                msg = "No such file or directory: '%s' in '%s' (and create is False)" % (p, str(self))
                raise SCons.Errors.UserError(msg)
            # There is no Node for this path name, and we're allowed
            # to create it.
            dir_name, file_name = os.path.split(p)
            dir_node = self._lookup_abs(dir_name, Dir)
            result = klass(file_name, dir_node, self.fs)

            # Double-check on disk (as configured) that the Node we
            # created matches whatever is out there in the real world.
            result.diskcheck_match()

            self._lookupDict[k] = result
            dir_node.entries[_my_normcase(file_name)] = result
            dir_node.implicit = None
        else:
            # There is already a Node for this path name.  Allow it to
            # complain if we were looking for an inappropriate type.
            result.must_be_same(klass)
        return result

    def __str__(self):
        return self.abspath

    def entry_abspath(self, name):
        # No separator: it's already part of our name.
        return self.abspath + name

    def entry_labspath(self, name):
        return '/' + name

    def entry_path(self, name):
        return self.path + name

    def entry_tpath(self, name):
        return self.tpath + name

    def is_under(self, dir):
        # A root is only under itself.
        if self is dir:
            return 1
        else:
            return 0

    def up(self):
        # A root has no parent directory.
        return None

    def get_dir(self):
        return None

    def src_builder(self):
        return _null
| 2101 |
class FileNodeInfo(SCons.Node.NodeInfoBase):
    """Signature info (content signature, timestamp, size) stored for a
    File node in the .sconsign file."""
    current_version_id = 1

    field_list = ['csig', 'timestamp', 'size']

    # This should get reset by the FS initialization.
    fs = None

    def str_to_node(self, s):
        """Convert a path string from a .sconsign file back to a Node.

        Stored paths are relative to the top-level SConstruct directory
        (absolute if they lie outside it).
        """
        top = self.fs.Top
        root = top.root
        if do_splitdrive:
            # Windows: an explicit drive letter selects a different root.
            drive, s = os.path.splitdrive(s)
            if drive:
                root = self.fs.get_root(drive)
        if not os.path.isabs(s):
            s = top.labspath + '/' + s
        return root._lookup_abs(s, Entry)
| 2120 |
class FileBuildInfo(SCons.Node.BuildInfoBase):
    """Build information (sources, depends, implicits and their
    signatures) stored for a File node in the .sconsign file."""
    current_version_id = 1

    def convert_to_sconsign(self):
        """
        Converts this FileBuildInfo object for writing to a .sconsign file

        This replaces each Node in our various dependency lists with its
        usual string representation: relative to the top-level SConstruct
        directory, or an absolute path if it's outside.
        """
        if os.sep == '/':
            # POSIX: Node paths are already in the stored form.
            node_to_str = str
        else:
            # Normalize the platform separator to '/' for portability
            # of the .sconsign file.
            def node_to_str(n):
                try:
                    s = n.path
                except AttributeError:
                    s = str(n)
                else:
                    s = s.replace(os.sep, '/')
                return s
        for attr in ['bsources', 'bdepends', 'bimplicit']:
            try:
                val = getattr(self, attr)
            except AttributeError:
                pass
            else:
                setattr(self, attr, list(map(node_to_str, val)))
    def convert_from_sconsign(self, dir, name):
        """
        Converts a newly-read FileBuildInfo object for in-SCons use

        For normal up-to-date checking, we don't have any conversion to
        perform--but we're leaving this method here to make that clear.
        """
        pass
    def prepare_dependencies(self):
        """
        Prepares a FileBuildInfo object for explaining what changed

        The bsources, bdepends and bimplicit lists have all been
        stored on disk as paths relative to the top-level SConstruct
        directory.  Convert the strings to actual Nodes (for use by the
        --debug=explain code and --implicit-cache).
        """
        attrs = [
            ('bsources', 'bsourcesigs'),
            ('bdepends', 'bdependsigs'),
            ('bimplicit', 'bimplicitsigs'),
        ]
        for (nattr, sattr) in attrs:
            try:
                strings = getattr(self, nattr)
                nodeinfos = getattr(self, sattr)
            except AttributeError:
                continue
            nodes = []
            for s, ni in zip(strings, nodeinfos):
                if not isinstance(s, SCons.Node.Node):
                    # Each NodeInfo knows how to turn its stored
                    # path string back into a Node.
                    s = ni.str_to_node(s)
                nodes.append(s)
            setattr(self, nattr, nodes)
    def format(self, names=0):
        """Return a multi-line string of dependencies and their stored
        signatures, for --debug=explain output."""
        result = []
        bkids = self.bsources + self.bdepends + self.bimplicit
        bkidsigs = self.bsourcesigs + self.bdependsigs + self.bimplicitsigs
        for bkid, bkidsig in zip(bkids, bkidsigs):
            result.append(str(bkid) + ': ' +
                          ' '.join(bkidsig.format(names=names)))
        result.append('%s [%s]' % (self.bactsig, self.bact))
        return '\n'.join(result)
| 2193 |
class File(Base):
    """A class for files in a file system.
    """

    memoizer_counters = []

    NodeInfo = FileNodeInfo
    BuildInfo = FileBuildInfo

    # Chunk size for incremental MD5 file signatures, in KB (it is
    # multiplied by 1024 at the point of use in get_content_hash()).
    md5_chunksize = 64
| 2204 |
    def diskcheck_match(self):
        # Complain (per the global diskcheck setting) if a *directory*
        # exists on disk where this node expects a file.
        diskcheck_match(self, self.isdir,
                        "Directory %s found where file expected.")
| 2208 |
    def __init__(self, name, directory, fs):
        """Initialize a File node named 'name' under the Dir node
        'directory' in file system 'fs'."""
        if __debug__: logInstanceCreation(self, 'Node.FS.File')
        Base.__init__(self, name, directory, fs)
        self._morph()
| 2213 |
| 2214 def Entry(self, name): |
| 2215 """Create an entry node named 'name' relative to |
| 2216 the directory of this file.""" |
| 2217 return self.dir.Entry(name) |
| 2218 |
| 2219 def Dir(self, name, create=True): |
| 2220 """Create a directory node named 'name' relative to |
| 2221 the directory of this file.""" |
| 2222 return self.dir.Dir(name, create=create) |
| 2223 |
| 2224 def Dirs(self, pathlist): |
| 2225 """Create a list of directories relative to the SConscript |
| 2226 directory of this file.""" |
| 2227 return [self.Dir(p) for p in pathlist] |
| 2228 |
| 2229 def File(self, name): |
| 2230 """Create a file node named 'name' relative to |
| 2231 the directory of this file.""" |
| 2232 return self.dir.File(name) |
| 2233 |
| 2234 #def generate_build_dict(self): |
| 2235 # """Return an appropriate dictionary of values for building |
| 2236 # this File.""" |
| 2237 # return {'Dir' : self.Dir, |
| 2238 # 'File' : self.File, |
| 2239 # 'RDirs' : self.RDirs} |
| 2240 |
    def _morph(self):
        """Turn a file system node into a File object.

        Initializes per-scanner path caching and the _local flag, and
        wires up the target-side change decider if a builder is set.
        """
        self.scanner_paths = {}
        if not hasattr(self, '_local'):
            self._local = 0

        # If there was already a Builder set on this entry, then
        # we need to make sure we call the target-decider function,
        # not the source-decider.  Reaching in and doing this by hand
        # is a little bogus.  We'd prefer to handle this by adding
        # an Entry.builder_set() method that disambiguates like the
        # other methods, but that starts running into problems with the
        # fragile way we initialize Dir Nodes with their Mkdir builders,
        # yet still allow them to be overridden by the user.  Since it's
        # not clear right now how to fix that, stick with what works
        # until it becomes clear...
        if self.has_builder():
            self.changed_since_last_build = self.decide_target
| 2259 |
| 2260 def scanner_key(self): |
| 2261 return self.get_suffix() |
| 2262 |
    def get_contents(self):
        """Return the raw byte contents of this file (following it into
        a Repository via rfile()), or '' if it doesn't exist anywhere."""
        if not self.rexists():
            return ''
        fname = self.rfile().abspath
        try:
            contents = open(fname, "rb").read()
        except EnvironmentError, e:
            # Annotate the exception with the file name if the OS
            # layer didn't supply one.
            if not e.filename:
                e.filename = fname
            raise
        return contents
| 2274 |
| 2275 # This attempts to figure out what the encoding of the text is |
| 2276 # based upon the BOM bytes, and then decodes the contents so that |
| 2277 # it's a valid python string. |
| 2278 def get_text_contents(self): |
| 2279 contents = self.get_contents() |
| 2280 # The behavior of various decode() methods and functions |
| 2281 # w.r.t. the initial BOM bytes is different for different |
| 2282 # encodings and/or Python versions. ('utf-8' does not strip |
| 2283 # them, but has a 'utf-8-sig' which does; 'utf-16' seems to |
| 2284 # strip them; etc.) Just sidestep all the complication by |
| 2285 # explicitly stripping the BOM before we decode(). |
| 2286 if contents.startswith(codecs.BOM_UTF8): |
| 2287 return contents[len(codecs.BOM_UTF8):].decode('utf-8') |
| 2288 if contents.startswith(codecs.BOM_UTF16_LE): |
| 2289 return contents[len(codecs.BOM_UTF16_LE):].decode('utf-16-le') |
| 2290 if contents.startswith(codecs.BOM_UTF16_BE): |
| 2291 return contents[len(codecs.BOM_UTF16_BE):].decode('utf-16-be') |
| 2292 return contents |
| 2293 |
    def get_content_hash(self):
        """
        Compute and return the MD5 hash for this file.

        Nonexistent files hash as the signature of the empty string.
        The file is read in md5_chunksize-KB chunks rather than all at
        once.
        """
        if not self.rexists():
            return SCons.Util.MD5signature('')
        fname = self.rfile().abspath
        try:
            cs = SCons.Util.MD5filesignature(fname,
                chunksize=SCons.Node.FS.File.md5_chunksize*1024)
        except EnvironmentError, e:
            # Annotate the exception with the file name if missing.
            if not e.filename:
                e.filename = fname
            raise
        return cs
| 2309 |
| 2310 |
| 2311 memoizer_counters.append(SCons.Memoize.CountValue('get_size')) |
| 2312 |
| 2313 def get_size(self): |
| 2314 try: |
| 2315 return self._memo['get_size'] |
| 2316 except KeyError: |
| 2317 pass |
| 2318 |
| 2319 if self.rexists(): |
| 2320 size = self.rfile().getsize() |
| 2321 else: |
| 2322 size = 0 |
| 2323 |
| 2324 self._memo['get_size'] = size |
| 2325 |
| 2326 return size |
| 2327 |
| 2328 memoizer_counters.append(SCons.Memoize.CountValue('get_timestamp')) |
| 2329 |
| 2330 def get_timestamp(self): |
| 2331 try: |
| 2332 return self._memo['get_timestamp'] |
| 2333 except KeyError: |
| 2334 pass |
| 2335 |
| 2336 if self.rexists(): |
| 2337 timestamp = self.rfile().getmtime() |
| 2338 else: |
| 2339 timestamp = 0 |
| 2340 |
| 2341 self._memo['get_timestamp'] = timestamp |
| 2342 |
| 2343 return timestamp |
| 2344 |
| 2345 def store_info(self): |
| 2346 # Merge our build information into the already-stored entry. |
| 2347 # This accomodates "chained builds" where a file that's a target |
| 2348 # in one build (SConstruct file) is a source in a different build. |
| 2349 # See test/chained-build.py for the use case. |
| 2350 if do_store_info: |
| 2351 self.dir.sconsign().store_info(self.name, self) |
| 2352 |
    # Old-format .sconsign attributes that convert_old_entry() copies
    # over to the new entry unchanged.
    convert_copy_attrs = [
        'bsources',
        'bimplicit',
        'bdepends',
        'bact',
        'bactsig',
        'ninfo',
    ]


    # Old-format "signature" list attributes that convert_old_entry()
    # must translate into lists of NodeInfo objects.
    convert_sig_attrs = [
        'bsourcesigs',
        'bimplicitsigs',
        'bdependsigs',
    ]
| 2368 |
    def convert_old_entry(self, old_entry):
        """Convert a pre-"Big Signature Refactoring" .sconsign entry
        into a new-format SConsignEntry, salvaging what information we
        can.  Returns the new entry."""
        # Convert a .sconsign entry from before the Big Signature
        # Refactoring, doing what we can to convert its information
        # to the new .sconsign entry format.
        #
        # The old format looked essentially like this:
        #
        #   BuildInfo
        #       .ninfo (NodeInfo)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bsources
        #       .bsourcesigs ("signature" list)
        #       .bdepends
        #       .bdependsigs ("signature" list)
        #       .bimplicit
        #       .bimplicitsigs ("signature" list)
        #       .bact
        #       .bactsig
        #
        # The new format looks like this:
        #
        #   .ninfo (NodeInfo)
        #       .bsig
        #       .csig
        #       .timestamp
        #       .size
        #   .binfo (BuildInfo)
        #       .bsources
        #       .bsourcesigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bdepends
        #       .bdependsigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bimplicit
        #       .bimplicitsigs (NodeInfo list)
        #           .bsig
        #           .csig
        #           .timestamp
        #           .size
        #       .bact
        #       .bactsig
        #
        # The basic idea of the new structure is that a NodeInfo always
        # holds all available information about the state of a given Node
        # at a certain point in time.  The various .b*sigs lists can just
        # be a list of pointers to the .ninfo attributes of the different
        # dependent nodes, without any copying of information until it's
        # time to pickle it for writing out to a .sconsign file.
        #
        # The complicating issue is that the *old* format only stored one
        # "signature" per dependency, based on however the *last* build
        # was configured.  We don't know from just looking at it whether
        # it was a build signature, a content signature, or a timestamp
        # "signature".  Since we no longer use build signatures, the
        # best we can do is look at the length and if it's thirty two,
        # assume that it was (or might have been) a content signature.
        # If it was actually a build signature, then it will cause a
        # rebuild anyway when it doesn't match the new content signature,
        # but that's probably the best we can do.
        import SCons.SConsign
        new_entry = SCons.SConsign.SConsignEntry()
        new_entry.binfo = self.new_binfo()
        binfo = new_entry.binfo
        for attr in self.convert_copy_attrs:
            try:
                value = getattr(old_entry, attr)
            except AttributeError:
                continue
            setattr(binfo, attr, value)
            delattr(old_entry, attr)
        for attr in self.convert_sig_attrs:
            try:
                sig_list = getattr(old_entry, attr)
            except AttributeError:
                continue
            value = []
            for sig in sig_list:
                ninfo = self.new_ninfo()
                # Thirty-two characters: assume an MD5 content signature;
                # anything else is treated as a timestamp.
                if len(sig) == 32:
                    ninfo.csig = sig
                else:
                    ninfo.timestamp = sig
                value.append(ninfo)
            setattr(binfo, attr, value)
            delattr(old_entry, attr)
        return new_entry
| 2464 |
    memoizer_counters.append(SCons.Memoize.CountValue('get_stored_info'))

    def get_stored_info(self):
        """Return the SConsignEntry stored for this file, synthesizing
        an empty one if none is stored (or the sconsign database can't
        be read).  Old-format entries are converted on the fly.  The
        result is memoized."""
        try:
            return self._memo['get_stored_info']
        except KeyError:
            pass

        try:
            sconsign_entry = self.dir.sconsign().get_entry(self.name)
        except (KeyError, EnvironmentError):
            # No stored entry (or unreadable database): start fresh.
            import SCons.SConsign
            sconsign_entry = SCons.SConsign.SConsignEntry()
            sconsign_entry.binfo = self.new_binfo()
            sconsign_entry.ninfo = self.new_ninfo()
        else:
            if isinstance(sconsign_entry, FileBuildInfo):
                # This is a .sconsign file from before the Big Signature
                # Refactoring; convert it as best we can.
                sconsign_entry = self.convert_old_entry(sconsign_entry)
            try:
                # Build signatures are obsolete; drop any stored one.
                delattr(sconsign_entry.ninfo, 'bsig')
            except AttributeError:
                pass

        self._memo['get_stored_info'] = sconsign_entry

        return sconsign_entry
| 2493 |
| 2494 def get_stored_implicit(self): |
| 2495 binfo = self.get_stored_info().binfo |
| 2496 binfo.prepare_dependencies() |
| 2497 try: return binfo.bimplicit |
| 2498 except AttributeError: return None |
| 2499 |
| 2500 def rel_path(self, other): |
| 2501 return self.dir.rel_path(other) |
| 2502 |
    def _get_found_includes_key(self, env, scanner, path):
        # Memoization key for get_found_includes(): env and scanner by
        # identity, plus the search path.
        return (id(env), id(scanner), path)

    memoizer_counters.append(SCons.Memoize.CountDict('get_found_includes', _get_found_includes_key))
| 2507 |
    def get_found_includes(self, env, scanner, path):
        """Return the included implicit dependencies in this file.
        Cache results so we only scan the file once per path
        regardless of how many times this information is requested.

        env -- construction environment passed through to the scanner.
        scanner -- Scanner to run over this file; may be None, in
            which case no dependencies are found.
        path -- search path for the scan (presumably a tuple of
            directory nodes, since it is used in a memoization key).
        """
        memo_key = (id(env), id(scanner), path)
        try:
            memo_dict = self._memo['get_found_includes']
        except KeyError:
            memo_dict = {}
            self._memo['get_found_includes'] = memo_dict
        else:
            try:
                return memo_dict[memo_key]
            except KeyError:
                pass

        if scanner:
            # result = [n.disambiguate() for n in scanner(self, env, path)]
            result = scanner(self, env, path)
            # Resolve each returned Entry to a concrete File/Dir node.
            result = [N.disambiguate() for N in result]
        else:
            result = []

        memo_dict[memo_key] = result

        return result
| 2535 |
| 2536 def _createDir(self): |
| 2537 # ensure that the directories for this node are |
| 2538 # created. |
| 2539 self.dir._create() |
| 2540 |
| 2541 def push_to_cache(self): |
| 2542 """Try to push the node into a cache |
| 2543 """ |
| 2544 # This should get called before the Nodes' .built() method is |
| 2545 # called, which would clear the build signature if the file has |
| 2546 # a source scanner. |
| 2547 # |
| 2548 # We have to clear the local memoized values *before* we push |
| 2549 # the node to cache so that the memoization of the self.exists() |
| 2550 # return value doesn't interfere. |
| 2551 if self.nocache: |
| 2552 return |
| 2553 self.clear_memoized_values() |
| 2554 if self.exists(): |
| 2555 self.get_build_env().get_CacheDir().push(self) |
| 2556 |
| 2557 def retrieve_from_cache(self): |
| 2558 """Try to retrieve the node's content from a cache |
| 2559 |
| 2560 This method is called from multiple threads in a parallel build, |
| 2561 so only do thread safe stuff here. Do thread unsafe stuff in |
| 2562 built(). |
| 2563 |
| 2564 Returns true iff the node was successfully retrieved. |
| 2565 """ |
| 2566 if self.nocache: |
| 2567 return None |
| 2568 if not self.is_derived(): |
| 2569 return None |
| 2570 return self.get_build_env().get_CacheDir().retrieve(self) |
| 2571 |
    def visited(self):
        """Called when this node is visited (whether or not it was
        built): refresh its NodeInfo (csig/timestamp/size) and store
        the result in the .sconsign file."""
        if self.exists():
            # Honor --cache-force by pushing existing files to the cache.
            self.get_build_env().get_CacheDir().push_if_forced(self)

        ninfo = self.get_ninfo()

        csig = self.get_max_drift_csig()
        if csig:
            ninfo.csig = csig

        ninfo.timestamp = self.get_timestamp()
        ninfo.size = self.get_size()

        if not self.has_builder():
            # This is a source file, but it might have been a target file
            # in another build that included more of the DAG.  Copy
            # any build information that's stored in the .sconsign file
            # into our binfo object so it doesn't get lost.
            old = self.get_stored_info()
            self.get_binfo().__dict__.update(old.binfo.__dict__)

        self.store_info()
| 2594 |
    def find_src_builder(self):
        """Return a source code builder (from the directory's
        src_builder, or a default SCCS/RCS builder if the corresponding
        control files exist on disk) for this file, or None.

        As a side effect, sets the found builder on this node if it
        doesn't already have one.  Files that exist (here or in a
        Repository) need no source builder and return None.
        """
        if self.rexists():
            return None
        scb = self.dir.src_builder()
        if scb is _null:
            # No directory-level src_builder; fall back to version
            # control builders keyed off on-disk SCCS/RCS files.
            if diskcheck_sccs(self.dir, self.name):
                scb = get_DefaultSCCSBuilder()
            elif diskcheck_rcs(self.dir, self.name):
                scb = get_DefaultRCSBuilder()
            else:
                scb = None
        if scb is not None:
            try:
                b = self.builder
            except AttributeError:
                b = None
            if b is None:
                self.builder_set(scb)
        return scb
| 2614 |
| 2615 def has_src_builder(self): |
| 2616 """Return whether this Node has a source builder or not. |
| 2617 |
| 2618 If this Node doesn't have an explicit source code builder, this |
| 2619 is where we figure out, on the fly, if there's a transparent |
| 2620 source code builder for it. |
| 2621 |
| 2622 Note that if we found a source builder, we also set the |
| 2623 self.builder attribute, so that all of the methods that actually |
| 2624 *build* this file don't have to do anything different. |
| 2625 """ |
| 2626 try: |
| 2627 scb = self.sbuilder |
| 2628 except AttributeError: |
| 2629 scb = self.sbuilder = self.find_src_builder() |
| 2630 return scb is not None |
| 2631 |
| 2632 def alter_targets(self): |
| 2633 """Return any corresponding targets in a variant directory. |
| 2634 """ |
| 2635 if self.is_derived(): |
| 2636 return [], None |
| 2637 return self.fs.variant_dir_target_climb(self, self.dir, [self.name]) |
| 2638 |
| 2639 def _rmv_existing(self): |
| 2640 self.clear_memoized_values() |
| 2641 e = Unlink(self, [], None) |
| 2642 if isinstance(e, SCons.Errors.BuildError): |
| 2643 raise e |
| 2644 |
| 2645 # |
| 2646 # Taskmaster interface subsystem |
| 2647 # |
| 2648 |
| 2649 def make_ready(self): |
| 2650 self.has_src_builder() |
| 2651 self.get_binfo() |
| 2652 |
    def prepare(self):
        """Prepare for this file to be created.

        Removes any stale, non-precious existing copy of a derived
        file; otherwise makes sure the containing directory exists.
        """
        SCons.Node.Node.prepare(self)

        if self.get_state() != SCons.Node.up_to_date:
            if self.exists():
                if self.is_derived() and not self.precious:
                    self._rmv_existing()
            else:
                try:
                    self._createDir()
                except SCons.Errors.StopError, drive:
                    # Re-raise with a message naming the missing drive
                    # and the target we couldn't prepare.
                    desc = "No drive `%s' for target `%s'." % (drive, self)
                    raise SCons.Errors.StopError(desc)
| 2667 |
| 2668 # |
| 2669 # |
| 2670 # |
| 2671 |
| 2672 def remove(self): |
| 2673 """Remove this file.""" |
| 2674 if self.exists() or self.islink(): |
| 2675 self.fs.unlink(self.path) |
| 2676 return 1 |
| 2677 return None |
| 2678 |
| 2679 def do_duplicate(self, src): |
| 2680 self._createDir() |
| 2681 Unlink(self, None, None) |
| 2682 e = Link(self, src, None) |
| 2683 if isinstance(e, SCons.Errors.BuildError): |
| 2684 desc = "Cannot duplicate `%s' in `%s': %s." % (src.path, self.dir.pa
th, e.errstr) |
| 2685 raise SCons.Errors.StopError(desc) |
| 2686 self.linked = 1 |
| 2687 # The Link() action may or may not have actually |
| 2688 # created the file, depending on whether the -n |
| 2689 # option was used or not. Delete the _exists and |
| 2690 # _rexists attributes so they can be reevaluated. |
| 2691 self.clear() |
| 2692 |
| 2693 memoizer_counters.append(SCons.Memoize.CountValue('exists')) |
| 2694 |
    def exists(self):
        """Return whether this file exists, with variant-dir
        duplication as a side effect.

        If this node lives in a variant directory with duplication
        enabled and is not derived, the source file is copied in (or
        a stale copy is removed) before the existence check.  The
        result is memoized in self._memo['exists'].
        """
        try:
            return self._memo['exists']
        except KeyError:
            pass
        # Duplicate from source path if we are set up to do this.
        if self.duplicate and not self.is_derived() and not self.linked:
            src = self.srcnode()
            if src is not self:
                # At this point, src is meant to be copied in a variant directory.
                src = src.rfile()
                if src.abspath != self.abspath:
                    if src.exists():
                        self.do_duplicate(src)
                        # Can't return 1 here because the duplication might
                        # not actually occur if the -n option is being used.
                    else:
                        # The source file does not exist.  Make sure no old
                        # copy remains in the variant directory.
                        if Base.exists(self) or self.islink():
                            self.fs.unlink(self.path)
                        # Return None explicitly because the Base.exists() call
                        # above will have cached its value if the file existed.
                        self._memo['exists'] = None
                        return None
        result = Base.exists(self)
        self._memo['exists'] = result
        return result
| 2723 |
| 2724 # |
| 2725 # SIGNATURE SUBSYSTEM |
| 2726 # |
| 2727 |
| 2728 def get_max_drift_csig(self): |
| 2729 """ |
| 2730 Returns the content signature currently stored for this node |
| 2731 if it's been unmodified longer than the max_drift value, or the |
| 2732 max_drift value is 0. Returns None otherwise. |
| 2733 """ |
| 2734 old = self.get_stored_info() |
| 2735 mtime = self.get_timestamp() |
| 2736 |
| 2737 max_drift = self.fs.max_drift |
| 2738 if max_drift > 0: |
| 2739 if (time.time() - mtime) > max_drift: |
| 2740 try: |
| 2741 n = old.ninfo |
| 2742 if n.timestamp and n.csig and n.timestamp == mtime: |
| 2743 return n.csig |
| 2744 except AttributeError: |
| 2745 pass |
| 2746 elif max_drift == 0: |
| 2747 try: |
| 2748 return old.ninfo.csig |
| 2749 except AttributeError: |
| 2750 pass |
| 2751 |
| 2752 return None |
| 2753 |
    def get_csig(self):
        """
        Generate a node's content signature, the digested signature
        of its content.

        node - the node
        cache - alternate node to use for the signature cache
        returns - the content signature
        """
        ninfo = self.get_ninfo()
        try:
            # Already computed for this build: reuse it.
            return ninfo.csig
        except AttributeError:
            pass

        # A previously stored signature may still be trustworthy
        # (see get_max_drift_csig()).
        csig = self.get_max_drift_csig()
        if csig is None:

            try:
                if self.get_size() < SCons.Node.FS.File.md5_chunksize:
                    contents = self.get_contents()
                else:
                    # Large file: hash it in chunks rather than
                    # reading the whole contents into memory.
                    csig = self.get_content_hash()
            except IOError:
                # This can happen if there's actually a directory on-disk,
                # which can be the case if they've disabled disk checks,
                # or if an action with a File target actually happens to
                # create a same-named directory by mistake.
                csig = ''
            else:
                if not csig:
                    # Small-file path: hash the in-memory contents.
                    csig = SCons.Util.MD5signature(contents)

        ninfo.csig = csig

        return csig
| 2790 |
| 2791 # |
| 2792 # DECISION SUBSYSTEM |
| 2793 # |
| 2794 |
    def builder_set(self, builder):
        # Attaching a Builder makes this node a target, so switch the
        # change-decision function from decide_source to decide_target.
        SCons.Node.Node.builder_set(self, builder)
        self.changed_since_last_build = self.decide_target
| 2798 |
| 2799 def changed_content(self, target, prev_ni): |
| 2800 cur_csig = self.get_csig() |
| 2801 try: |
| 2802 return cur_csig != prev_ni.csig |
| 2803 except AttributeError: |
| 2804 return 1 |
| 2805 |
    def changed_state(self, target, prev_ni):
        # State-based decision: the node counts as changed unless it
        # has already been marked up to date this run.
        return self.state != SCons.Node.up_to_date
| 2808 |
| 2809 def changed_timestamp_then_content(self, target, prev_ni): |
| 2810 if not self.changed_timestamp_match(target, prev_ni): |
| 2811 try: |
| 2812 self.get_ninfo().csig = prev_ni.csig |
| 2813 except AttributeError: |
| 2814 pass |
| 2815 return False |
| 2816 return self.changed_content(target, prev_ni) |
| 2817 |
| 2818 def changed_timestamp_newer(self, target, prev_ni): |
| 2819 try: |
| 2820 return self.get_timestamp() > target.get_timestamp() |
| 2821 except AttributeError: |
| 2822 return 1 |
| 2823 |
| 2824 def changed_timestamp_match(self, target, prev_ni): |
| 2825 try: |
| 2826 return self.get_timestamp() != prev_ni.timestamp |
| 2827 except AttributeError: |
| 2828 return 1 |
| 2829 |
    def decide_source(self, target, prev_ni):
        # Delegate the "has this source file changed?" decision to the
        # target's construction environment.
        return target.get_build_env().decide_source(self, target, prev_ni)

    def decide_target(self, target, prev_ni):
        # Delegate the "has this target file changed?" decision to the
        # target's construction environment.
        return target.get_build_env().decide_target(self, target, prev_ni)

    # Initialize this Node's decider function to decide_source() because
    # every file is a source file until it has a Builder attached...
    # (builder_set() swaps it to decide_target.)
    changed_since_last_build = decide_source
| 2839 |
| 2840 def is_up_to_date(self): |
| 2841 T = 0 |
| 2842 if T: Trace('is_up_to_date(%s):' % self) |
| 2843 if not self.exists(): |
| 2844 if T: Trace(' not self.exists():') |
| 2845 # The file doesn't exist locally... |
| 2846 r = self.rfile() |
| 2847 if r != self: |
| 2848 # ...but there is one in a Repository... |
| 2849 if not self.changed(r): |
| 2850 if T: Trace(' changed(%s):' % r) |
| 2851 # ...and it's even up-to-date... |
| 2852 if self._local: |
| 2853 # ...and they'd like a local copy. |
| 2854 e = LocalCopy(self, r, None) |
| 2855 if isinstance(e, SCons.Errors.BuildError): |
| 2856 raise |
| 2857 self.store_info() |
| 2858 if T: Trace(' 1\n') |
| 2859 return 1 |
| 2860 self.changed() |
| 2861 if T: Trace(' None\n') |
| 2862 return None |
| 2863 else: |
| 2864 r = self.changed() |
| 2865 if T: Trace(' self.exists(): %s\n' % r) |
| 2866 return not r |
| 2867 |
| 2868 memoizer_counters.append(SCons.Memoize.CountValue('rfile')) |
| 2869 |
    def rfile(self):
        """Return the corresponding Repository node for this file, or
        self if the file exists locally or no suitable repository file
        is found.  The result is memoized in self._memo['rfile'].
        """
        try:
            return self._memo['rfile']
        except KeyError:
            pass
        result = self
        if not self.exists():
            # Search every repository directory corresponding to our
            # directory for an existing, usable node of the same name.
            norm_name = _my_normcase(self.name)
            for dir in self.dir.get_all_rdirs():
                try: node = dir.entries[norm_name]
                except KeyError: node = dir.file_on_disk(self.name)
                if node and node.exists() and \
                   (isinstance(node, File) or isinstance(node, Entry) \
                    or not node.is_derived()):
                    result = node
                    # Copy over our local attributes to the repository
                    # Node so we identify shared object files in the
                    # repository and don't assume they're static.
                    #
                    # This isn't perfect; the attribute would ideally
                    # be attached to the object in the repository in
                    # case it was built statically in the repository
                    # and we changed it to shared locally, but that's
                    # rarely the case and would only occur if you
                    # intentionally used the same suffix for both
                    # shared and static objects anyway.  So this
                    # should work well in practice.
                    result.attributes = self.attributes
                    break
        self._memo['rfile'] = result
        return result
| 2901 |
| 2902 def rstr(self): |
| 2903 return str(self.rfile()) |
| 2904 |
| 2905 def get_cachedir_csig(self): |
| 2906 """ |
| 2907 Fetch a Node's content signature for purposes of computing |
| 2908 another Node's cachesig. |
| 2909 |
| 2910 This is a wrapper around the normal get_csig() method that handles |
| 2911 the somewhat obscure case of using CacheDir with the -n option. |
| 2912 Any files that don't exist would normally be "built" by fetching |
| 2913 them from the cache, but the normal get_csig() method will try |
| 2914 to open up the local file, which doesn't exist because the -n |
| 2915 option meant we didn't actually pull the file from cachedir. |
| 2916 But since the file *does* actually exist in the cachedir, we |
| 2917 can use its contents for the csig. |
| 2918 """ |
| 2919 try: |
| 2920 return self.cachedir_csig |
| 2921 except AttributeError: |
| 2922 pass |
| 2923 |
| 2924 cachedir, cachefile = self.get_build_env().get_CacheDir().cachepath(self
) |
| 2925 if not self.exists() and cachefile and os.path.exists(cachefile): |
| 2926 self.cachedir_csig = SCons.Util.MD5filesignature(cachefile, \ |
| 2927 SCons.Node.FS.File.md5_chunksize * 1024) |
| 2928 else: |
| 2929 self.cachedir_csig = self.get_csig() |
| 2930 return self.cachedir_csig |
| 2931 |
| 2932 def get_cachedir_bsig(self): |
| 2933 try: |
| 2934 return self.cachesig |
| 2935 except AttributeError: |
| 2936 pass |
| 2937 |
| 2938 # Add the path to the cache signature, because multiple |
| 2939 # targets built by the same action will all have the same |
| 2940 # build signature, and we have to differentiate them somehow. |
| 2941 children = self.children() |
| 2942 executor = self.get_executor() |
| 2943 # sigs = [n.get_cachedir_csig() for n in children] |
| 2944 sigs = [n.get_cachedir_csig() for n in children] |
| 2945 sigs.append(SCons.Util.MD5signature(executor.get_contents())) |
| 2946 sigs.append(self.path) |
| 2947 result = self.cachesig = SCons.Util.MD5collect(sigs) |
| 2948 return result |
| 2949 |
| 2950 |
| 2951 default_fs = None |
| 2952 |
def get_default_fs():
    """Return the canonical default FS object, creating it on first use.

    Uses an explicit identity check against None instead of truth-value
    testing, so the cached FS object can never be accidentally
    re-created by evaluating as false.
    """
    global default_fs
    if default_fs is None:
        default_fs = FS()
    return default_fs
| 2958 |
class FileFinder(object):
    """
    Memoizing finder of files along a list of directory Nodes.
    """
    if SCons.Memoize.use_memoizer:
        __metaclass__ = SCons.Memoize.Memoized_Metaclass

    memoizer_counters = []

    def __init__(self):
        # Per-instance memoization dictionary used by find_file().
        self._memo = {}

    def filedir_lookup(self, p, fd=None):
        """
        A helper method for find_file() that looks up a directory for
        a file we're trying to find.  This only creates the Dir Node if
        it exists on-disk, since if the directory doesn't exist we know
        we won't find any files in it...  :-)

        It would be more compact to just use this as a nested function
        with a default keyword argument (see the commented-out version
        below), but that doesn't work unless you have nested scopes,
        so we define it here just so this will work under Python 1.5.2.
        """
        if fd is None:
            fd = self.default_filedir
        dir, name = os.path.split(fd)
        drive, d = os.path.splitdrive(dir)
        if not name and d[:1] in ('/', os.sep):
            # fd was an absolute root path: return the root Dir node.
            #return p.fs.get_root(drive).dir_on_disk(name)
            return p.fs.get_root(drive)
        if dir:
            # Recurse on the parent path first so lookup proceeds
            # top-down from the starting node p.
            p = self.filedir_lookup(p, dir)
            if not p:
                return None
        norm_name = _my_normcase(name)
        try:
            node = p.entries[norm_name]
        except KeyError:
            # Not already in memory: create the Dir node only if the
            # directory actually exists on disk.
            return p.dir_on_disk(name)
        if isinstance(node, Dir):
            return node
        if isinstance(node, Entry):
            node.must_be_same(Dir)
            return node
        # Some other node type (e.g. a File) occupies this name, so
        # it can't serve as a directory.
        return None

    def _find_file_key(self, filename, paths, verbose=None):
        # Memoization key for find_file(); `verbose` doesn't affect
        # the result, so it's deliberately excluded.
        return (filename, paths)

    memoizer_counters.append(SCons.Memoize.CountDict('find_file', _find_file_key))

    def find_file(self, filename, paths, verbose=None):
        """
        find_file(str, [Dir()]) -> [nodes]

        filename - a filename to find
        paths - a list of directory path *nodes* to search in.  Can be
                represented as a list, a tuple, or a callable that is
                called with no arguments and returns the list or tuple.

        returns - the node created from the found file.

        Find a node corresponding to either a derived file or a file
        that exists already.

        Only the first file found is returned, and none is returned
        if no file is found.
        """
        memo_key = self._find_file_key(filename, paths)
        try:
            memo_dict = self._memo['find_file']
        except KeyError:
            memo_dict = {}
            self._memo['find_file'] = memo_dict
        else:
            try:
                return memo_dict[memo_key]
            except KeyError:
                pass

        # Normalize `verbose` into a callable that writes a prefixed
        # message to stdout.
        if verbose and not callable(verbose):
            if not SCons.Util.is_String(verbose):
                verbose = "find_file"
            _verbose = u' %s: ' % verbose
            verbose = lambda s: sys.stdout.write(_verbose + s)

        filedir, filename = os.path.split(filename)
        if filedir:
            # More compact code that we can't use until we drop
            # support for Python 1.5.2:
            #
            #def filedir_lookup(p, fd=filedir):
            #    """
            #    A helper function that looks up a directory for a file
            #    we're trying to find.  This only creates the Dir Node
            #    if it exists on-disk, since if the directory doesn't
            #    exist we know we won't find any files in it...  :-)
            #    """
            #    dir, name = os.path.split(fd)
            #    if dir:
            #        p = filedir_lookup(p, dir)
            #        if not p:
            #            return None
            #    norm_name = _my_normcase(name)
            #    try:
            #        node = p.entries[norm_name]
            #    except KeyError:
            #        return p.dir_on_disk(name)
            #    if isinstance(node, Dir):
            #        return node
            #    if isinstance(node, Entry):
            #        node.must_be_same(Dir)
            #        return node
            #    if isinstance(node, Dir) or isinstance(node, Entry):
            #        return node
            #    return None
            #paths = [_f for _f in map(filedir_lookup, paths) if _f]

            # Resolve the file's directory relative to each search
            # path, dropping paths where it doesn't exist.
            self.default_filedir = filedir
            paths = [_f for _f in map(self.filedir_lookup, paths) if _f]

        result = None
        for dir in paths:
            if verbose:
                verbose("looking for '%s' in '%s' ...\n" % (filename, dir))
            node, d = dir.srcdir_find_file(filename)
            if node:
                if verbose:
                    verbose("... FOUND '%s' in '%s'\n" % (filename, d))
                result = node
                break

        memo_dict[memo_key] = result

        return result
| 3094 |
| 3095 find_file = FileFinder().find_file |
| 3096 |
| 3097 |
def invalidate_node_memos(targets):
    """
    Invalidate the memoized values of all Nodes (files or directories)
    that are associated with the given entries.  Has been added to
    clear the cache of nodes affected by a direct execution of an
    action (e.g. Delete/Copy/Chmod).  Existing Node caches become
    inconsistent if the action is run through Execute().  The argument
    `targets` can be a single Node object or filename, or a sequence
    of Nodes/filenames.
    """
    from traceback import extract_stack

    # Only actions run in an SConscript via Execute() leave Node
    # caches inconsistent, so flush only when Execute() appears in
    # the stack trace.  XXX Inspecting the stack trace this way is a
    # very dirty hack and should be replaced by a more sensible
    # solution.
    execute_found = False
    for frame in extract_stack():
        if frame[2] == 'Execute' and frame[0][-14:] == 'Environment.py':
            execute_found = True
            break
    if not execute_found:
        # No Execute() on the stack: nothing to invalidate.
        return

    if not SCons.Util.is_List(targets):
        targets = [targets]

    for entry in targets:
        # A Node object can be cleared directly; a filename must be
        # looked up first.  XXX The lookup creates Node objects even
        # for filenames that had no pre-existing Node.
        try:
            entry.clear_memoized_values()
        except AttributeError:
            node = get_default_fs().Entry(entry)
            if node:
                node.clear_memoized_values()
| 3137 |
| 3138 # Local Variables: |
| 3139 # tab-width:4 |
| 3140 # indent-tabs-mode:nil |
| 3141 # End: |
| 3142 # vim: set expandtab tabstop=4 shiftwidth=4: |
OLD | NEW |