Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(94)

Side by Side Diff: lib/build/import_crawler.dart

Issue 978453003: add option to ImportCrawler to start from an already parsed document (Closed) Base URL: git@github.com:dart-lang/web-components.git@master
Patch Set: Created 5 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « CHANGELOG.md ('k') | pubspec.yaml » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 library web_components.build.import_crawler; 4 library web_components.build.import_crawler;
5 5
6 import 'dart:async'; 6 import 'dart:async';
7 import 'dart:collection' show LinkedHashMap; 7 import 'dart:collection' show LinkedHashMap;
8 import 'package:code_transformers/assets.dart'; 8 import 'package:code_transformers/assets.dart';
9 import 'package:code_transformers/messages/build_logger.dart'; 9 import 'package:code_transformers/messages/build_logger.dart';
10 import 'package:barback/barback.dart'; 10 import 'package:barback/barback.dart';
(...skipping 12 matching lines...) Expand all
23 ImportData(this.document, this.element); 23 ImportData(this.document, this.element);
24 } 24 }
25 25
26 /// A crawler for html imports. 26 /// A crawler for html imports.
27 class ImportCrawler { 27 class ImportCrawler {
28 // Can be either an AggregateTransform or Transform. 28 // Can be either an AggregateTransform or Transform.
29 final _transform; 29 final _transform;
30 final BuildLogger _logger; 30 final BuildLogger _logger;
31 final AssetId _primaryInputId; 31 final AssetId _primaryInputId;
32 32
33 ImportCrawler(this._transform, this._primaryInputId, this._logger); 33 // Optional parsed document for the primary id if available.
34 final Document _primaryDocument;
35
36 ImportCrawler(this._transform, this._primaryInputId, this._logger,
37 {Document primaryDocument})
38 : _primaryDocument = primaryDocument;
34 39
35 /// Returns a post-ordered map of [AssetId]s to [ImportData]. The [AssetId]s 40 /// Returns a post-ordered map of [AssetId]s to [ImportData]. The [AssetId]s
36 /// represent an asset which was discovered via an html import, and the 41 /// represent an asset which was discovered via an html import, and the
37 /// [ImportData] represents the [Document] where it was discovered and the 42 /// [ImportData] represents the [Document] where it was discovered and the
38 /// html import [Element] itself. 43 /// html import [Element] itself.
39 Future<LinkedHashMap<AssetId, ImportData>> crawlImports() { 44 Future<LinkedHashMap<AssetId, ImportData>> crawlImports() {
40 var documents = new LinkedHashMap<AssetId, ImportData>(); 45 var documents = new LinkedHashMap<AssetId, ImportData>();
41 var seen = new Set<AssetId>(); 46 var seen = new Set<AssetId>();
42 47
43 Future doCrawl(AssetId assetId, [Element import]) { 48 Future doCrawl(AssetId assetId, [Element import, Document document]) {
44 if (seen.contains(assetId)) return null; 49 if (seen.contains(assetId)) return null;
45 seen.add(assetId); 50 seen.add(assetId);
46 51
47 return _transform.readInputAsString(assetId).then((html) { 52 Future crawlImports(Document document) {
48 var document = parseHtml(html, assetId.path);
49
50 var imports = document.querySelectorAll('link[rel="import"]'); 53 var imports = document.querySelectorAll('link[rel="import"]');
51 var done = 54 var done =
52 Future.forEach(imports, (i) => doCrawl(_importId(assetId, i), i)); 55 Future.forEach(imports, (i) => doCrawl(_importId(assetId, i), i));
53 56
54 // Add this document after its dependencies. 57 // Add this document after its dependencies.
55 return done.then((_) { 58 return done.then((_) {
56 documents[assetId] = new ImportData(document, import); 59 documents[assetId] = new ImportData(document, import);
57 }); 60 });
58 }).catchError((error) { 61 }
59 var span; 62
60 if (import != null) span = import.sourceSpan; 63 if (document != null) {
61 _logger.error(inlineImportFail.create({'error': error}), span: span); 64 return crawlImports(document);
62 }); 65 } else {
66 return _transform.readInputAsString(assetId).then((html) {
67 return crawlImports(parseHtml(html, assetId.path));
68 }).catchError((error) {
69 var span;
70 if (import != null) span = import.sourceSpan;
71 _logger.error(inlineImportFail.create({'error': error}), span: span);
72 });
73 }
63 } 74 }
64 75
65 return doCrawl(_primaryInputId).then((_) => documents); 76 return
77 doCrawl(_primaryInputId, null, _primaryDocument).then((_) => documents);
66 } 78 }
67 79
68 AssetId _importId(AssetId source, Element import) { 80 AssetId _importId(AssetId source, Element import) {
69 var url = import.attributes['href']; 81 var url = import.attributes['href'];
70 return uriToAssetId(source, url, _transform.logger, import.sourceSpan); 82 return uriToAssetId(source, url, _transform.logger, import.sourceSpan);
71 } 83 }
72 } 84 }
OLDNEW
« no previous file with comments | « CHANGELOG.md ('k') | pubspec.yaml » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698