Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(403)

Side by Side Diff: native_client_sdk/src/doc/doxygen/doxy_cleanup.py

Issue 136033007: [NaCl SDK Docs] Simplify PPAPI documentation generation. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 6 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 #!/usr/bin/python 1 #!/usr/bin/python
2 2
3 # Copyright (c) 2011 The Chromium Authors. All rights reserved. 3 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be 4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file. 5 # found in the LICENSE file.
6 6
7 '''This utility cleans up the html files as emitted by doxygen so 7 '''This utility cleans up the html files as emitted by doxygen so
8 that they are suitable for publication on a Google documentation site. 8 that they are suitable for publication on a Google documentation site.
9 ''' 9 '''
10 10
11 import glob
11 import optparse 12 import optparse
12 import os 13 import os
13 import re 14 import re
14 import shutil 15 import shutil
15 import string
16 import sys 16 import sys
17 try: 17 try:
18 from BeautifulSoup import BeautifulSoup, Tag 18 from BeautifulSoup import BeautifulSoup, Tag
19 except (ImportError, NotImplementedError): 19 except (ImportError, NotImplementedError):
20 print ("This tool requires the BeautifulSoup package " 20 print ("This tool requires the BeautifulSoup package "
21 "(see http://www.crummy.com/software/BeautifulSoup/).\n" 21 "(see http://www.crummy.com/software/BeautifulSoup/).\n"
22 "Make sure that the file BeautifulSoup.py is either in this directory " 22 "Make sure that the file BeautifulSoup.py is either in this directory "
23 "or is available in your PYTHON_PATH") 23 "or is available in your PYTHON_PATH")
24 raise 24 raise
25 25
26 26
def Trace(msg):
  '''Write msg to stderr, but only when Trace.verbose has been enabled.'''
  if not Trace.verbose:
    return
  sys.stderr.write('%s\n' % msg)

# Cheap module-wide verbosity flag, stored as a function attribute so no
# separate global is needed.  main() flips this on for --verbose.
Trace.verbose = False
32
33
# Doxygen output that is not wanted in the published documentation
# (index pages, navigation images, stylesheets, dependency maps, ...).
# Each entry is a glob pattern expanded relative to the output directory;
# every match is deleted by main().
FILES_TO_REMOVE = [
  '*.css',
  '*.map',
  '*.md5',
  'annotated.html',
  'bc_s.png',
  'classes.html',
  'closed.png',
  'doxygen.png',
  'files.html',
  'functions*.html',
  'globals_0x*.html',
  'globals_enum.html',
  'globals_eval.html',
  'globals_func.html',
  'globals.html',
  'globals_type.html',
  'globals_vars.html',
  'graph_legend.html',
  'graph_legend.png',
  'hierarchy.html',
  'index_8dox.html',
  'index.html',
  'modules.html',
  'namespacemembers_func.html',
  'namespacemembers.html',
  'namespaces.html',
  'nav_f.png',
  'nav_h.png',
  'open.png',
  'tab_a.png',
  'tab_b.png',
  'tab_h.png',
  'tab_s.png',
]
69
70
class HTMLFixer(object):
  '''This class cleans up the html strings as produced by Doxygen
  '''

  def __init__(self, html):
    '''Parse the given html string into a BeautifulSoup tree.

    The Fix* methods mutate self.soup in place; str(self) serializes the
    result back to HTML.
    '''
    self.soup = BeautifulSoup(html)
34 def FixTableHeadings(self): 78 def FixTableHeadings(self):
35 '''Fixes the doxygen table headings. 79 '''Fixes the doxygen table headings.
36 80
(...skipping 22 matching lines...) Expand all
59 if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']: 103 if tag.td and tag.td.h2 and tag.td.h2.a and tag.td.h2.a['name']:
60 #tag['id'] = tag.td.h2.a['name'] 104 #tag['id'] = tag.td.h2.a['name']
61 tag.string = tag.td.h2.a.next 105 tag.string = tag.td.h2.a.next
62 tag.name = 'h2' 106 tag.name = 'h2'
63 table_headers.append(tag) 107 table_headers.append(tag)
64 108
65 # reverse the list so that earlier tags don't delete later tags 109 # reverse the list so that earlier tags don't delete later tags
66 table_headers.reverse() 110 table_headers.reverse()
67 # Split up tables that have multiple table header (th) rows 111 # Split up tables that have multiple table header (th) rows
68 for tag in table_headers: 112 for tag in table_headers:
69 print "Header tag: %s is %s" % (tag.name, tag.string.strip()) 113 Trace("Header tag: %s is %s" % (tag.name, tag.string.strip()))
70 # Is this a heading in the middle of a table? 114 # Is this a heading in the middle of a table?
71 if tag.findPreviousSibling('tr') and tag.parent.name == 'table': 115 if tag.findPreviousSibling('tr') and tag.parent.name == 'table':
72 print "Splitting Table named %s" % tag.string.strip() 116 Trace("Splitting Table named %s" % tag.string.strip())
73 table = tag.parent 117 table = tag.parent
74 table_parent = table.parent 118 table_parent = table.parent
75 table_index = table_parent.contents.index(table) 119 table_index = table_parent.contents.index(table)
76 new_table = Tag(self.soup, name='table', attrs=table.attrs) 120 new_table = Tag(self.soup, name='table', attrs=table.attrs)
77 table_parent.insert(table_index + 1, new_table) 121 table_parent.insert(table_index + 1, new_table)
78 tag_index = table.contents.index(tag) 122 tag_index = table.contents.index(tag)
79 for index, row in enumerate(table.contents[tag_index:]): 123 for index, row in enumerate(table.contents[tag_index:]):
80 new_table.insert(index, row) 124 new_table.insert(index, row)
81 # Now move the <h2> tag to be in front of the <table> tag 125 # Now move the <h2> tag to be in front of the <table> tag
82 assert tag.parent.name == 'table' 126 assert tag.parent.name == 'table'
(...skipping 10 matching lines...) Expand all
93 [tag.extract() for tag in header_tags] 137 [tag.extract() for tag in header_tags]
94 138
  def FixAll(self):
    '''Apply every available cleanup pass to the parsed document.'''
    self.FixTableHeadings()
    self.RemoveTopHeadings()
98 142
  def __str__(self):
    '''Serialize the (possibly fixed-up) soup back to an HTML string.'''
    return str(self.soup)
101 145
102 146
def main(argv):
  '''Main entry for the doxy_cleanup utility.

  doxy_cleanup cleans up the html files generated by doxygen.

  Args:
    argv: Command-line arguments (excluding the program name).  Expects one
        positional argument: the doxygen output directory to clean up.

  Returns:
    0 on success.
  '''
  parser = optparse.OptionParser(usage='Usage: %prog [options] directory')
  parser.add_option('-v', '--verbose', help='verbose output.',
                    action='store_true')
  # Name the positional args `args`, not `files`: the os.walk() loop below
  # also binds a `files` variable, which previously shadowed this one.
  options, args = parser.parse_args(argv)

  if len(args) != 1:
    parser.error('Expected one directory')

  if options.verbose:
    Trace.verbose = True

  root_dir = args[0]
  html_dir = os.path.join(root_dir, 'html')

  # Doxygen puts all files in an 'html' directory.
  # First, move all files from that directory to root_dir.
  for filename in glob.glob(os.path.join(html_dir, '*')):
    Trace('Moving %s -> %s' % (filename, root_dir))
    shutil.move(filename, root_dir)

  # Now remove the (now-empty) 'html' directory.
  Trace('Removing %s' % html_dir)
  os.rmdir(html_dir)

  # Then remove unneeded files.
  for wildcard in FILES_TO_REMOVE:
    Trace('Removing "%s":' % wildcard)
    path = os.path.join(root_dir, wildcard)
    for filename in glob.glob(path):
      Trace('  Removing "%s"' % filename)
      os.remove(filename)

  # Now, fix the HTML files we've kept.
  Trace('Fixing HTML files...')
  for root, _, filenames in os.walk(root_dir):
    for basename in filenames:
      if os.path.splitext(basename)[1] != '.html':
        Trace('Skipping %s' % basename)
        continue

      filename = os.path.join(root, basename)
      Trace('Processing "%s"...' % filename)
      try:
        with open(filename) as f:
          html = f.read()

        fixer = HTMLFixer(html)
        fixer.FixAll()
        with open(filename, 'w') as f:
          f.write(str(fixer))
      except Exception:
        # Report which file failed, then propagate so the run fails loudly.
        # (Was a bare `except:`, which would also intercept KeyboardInterrupt
        # before the handler in the __main__ guard.)
        sys.stderr.write("Error while processing %s\n" % filename)
        raise

  return 0
140 208
if __name__ == '__main__':
  # Catch Ctrl-C so an interrupted run exits with status 1 and a short
  # message instead of dumping a traceback.
  try:
    rtn = main(sys.argv[1:])
  except KeyboardInterrupt:
    sys.stderr.write('%s: interrupted\n' % os.path.basename(__file__))
    rtn = 1
  sys.exit(rtn)
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698