| OLD | NEW |
| 1 # Copyright 2013 the V8 project authors. All rights reserved. | 1 # Copyright 2013 the V8 project authors. All rights reserved. |
| 2 # Redistribution and use in source and binary forms, with or without | 2 # Redistribution and use in source and binary forms, with or without |
| 3 # modification, are permitted provided that the following conditions are | 3 # modification, are permitted provided that the following conditions are |
| 4 # met: | 4 # met: |
| 5 # | 5 # |
| 6 # * Redistributions of source code must retain the above copyright | 6 # * Redistributions of source code must retain the above copyright |
| 7 # notice, this list of conditions and the following disclaimer. | 7 # notice, this list of conditions and the following disclaimer. |
| 8 # * Redistributions in binary form must reproduce the above | 8 # * Redistributions in binary form must reproduce the above |
| 9 # copyright notice, this list of conditions and the following | 9 # copyright notice, this list of conditions and the following |
| 10 # disclaimer in the documentation and/or other materials provided | 10 # disclaimer in the documentation and/or other materials provided |
| 11 # with the distribution. | 11 # with the distribution. |
| 12 # * Neither the name of Google Inc. nor the names of its | 12 # * Neither the name of Google Inc. nor the names of its |
| 13 # contributors may be used to endorse or promote products derived | 13 # contributors may be used to endorse or promote products derived |
| 14 # from this software without specific prior written permission. | 14 # from this software without specific prior written permission. |
| 15 # | 15 # |
| 16 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 import argparse | 28 import argparse |
| 29 import subprocess | 29 import subprocess |
| 30 import select |
| 30 import sys | 31 import sys |
| 31 import time | 32 import time |
| 32 import logging | 33 import logging |
| 33 | 34 |
| 34 class ProcessRunner: | 35 class ProcessRunner: |
| 35 | 36 |
| 36 def __init__(self, files, args): | 37 def __init__(self, files, args): |
| 37 self.files = files | 38 self.files = files |
| 38 self.process_map = {} | 39 self.process_map = {} |
| 39 self.complete_processes = {} | 40 self.complete_processes = {} |
| 40 self.left_path = args.left_path | 41 self.left_path = args.left_path |
| 41 self.right_path = args.right_path | 42 self.right_path = args.right_path |
| 42 self.max_process_count = args.parallel_process_count | 43 self.max_process_count = args.parallel_process_count |
| 44 self.buffer_size = 16*1024 |
| 43 self.args = ['--break-after-illegal'] | 45 self.args = ['--break-after-illegal'] |
| 44 if args.use_harmony: | 46 if args.use_harmony: |
| 45 self.args.append('--use-harmony') | 47 self.args.append('--use-harmony') |
| 46 self.args.append('--%s' % args.encoding) | 48 self.args.append('--%s' % args.encoding) |
| 47 if self.right_path: | 49 if self.right_path: |
| 48 self.args.append('--print-tokens') | 50 self.args.append('--print-tokens-for-compare') |
| 49 | 51 |
| 50 def build_process_map(self): | 52 def build_process_map(self): |
| 51 process_map = self.process_map | 53 process_map = self.process_map |
| 52 for i, f in enumerate(self.files): | 54 for i, f in enumerate(self.files): |
| 53 process_map[2 * i] = { | 55 def data(path, cmp_id): |
| 54 'file': f, 'path' : self.left_path, 'type' : 'left' } | 56 return {'file': f, 'path' : path, 'cmp_id' : cmp_id, 'buffer' : [] } |
| 57 process_map[2 * i] = data(self.left_path, 2 * i + 1) |
| 55 if self.right_path: | 58 if self.right_path: |
| 56 process_map[2 * i + 1] = { | 59 process_map[2 * i + 1] = data(self.right_path, 2 * i) |
| 57 'file': f, 'path' : self.right, 'type' : 'right' } | 60 |
| 61 def read_running_processes(self, running_processes): |
| 62 if not self.right_path: |
| 63 return |
| 64 stdouts = { |
| 65 self.process_map[i]['process'].stdout : self.process_map[i]['buffer'] |
| 66 for i in running_processes } |
| 67 while True: |
| 68 ready = select.select(stdouts.iterkeys(), [], [], 0)[0] |
| 69 if not ready: |
| 70 return |
| 71 did_something = False |
| 72 for fd in ready: |
| 73 c = fd.read(self.buffer_size) |
| 74 if c == "": |
| 75 continue |
| 76 did_something = True |
| 77 stdouts[fd].append(c) |
| 78 if not did_something: |
| 79 break |
| 58 | 80 |
| 59 def wait_processes(self, running_processes): | 81 def wait_processes(self, running_processes): |
| 60 complete_ids = [] | 82 complete_ids = [] |
| 61 while True: | 83 while True: |
| 84 self.read_running_processes(running_processes) |
| 62 for i in running_processes: | 85 for i in running_processes: |
| 63 data = self.process_map[i] | 86 data = self.process_map[i] |
| 64 response = data['process'].poll() | 87 response = data['process'].poll() |
| 65 if response == None: | 88 if response == None: |
| 66 continue | 89 continue |
| 67 self.complete_processes[i] = data | 90 self.complete_processes[i] = data |
| 68 complete_ids.append(i) | 91 complete_ids.append(i) |
| 69 if complete_ids: | 92 if complete_ids: |
| 70 break | 93 break |
| 71 time.sleep(0.001) | 94 time.sleep(0.001) |
| 72 for i in complete_ids: | 95 for i in complete_ids: |
| 73 running_processes.remove(i) | 96 running_processes.remove(i) |
| 74 del self.process_map[i] | 97 del self.process_map[i] |
| 75 | 98 |
| 99 @staticmethod |
| 100 def crashed(data): |
| 101 return data['process'].returncode != 0 |
| 102 |
| 103 @staticmethod |
| 104 def buffer_contents(data): |
| 105 data['buffer'].append(data['process'].stdout.read()) |
| 106 return ''.join(data['buffer']) |
| 107 |
| 108 def compare_results(self, left, right): |
| 109 f = left['file'] |
| 110 assert f == right['file'] |
| 111 logging.info('checking results for %s' % f) |
| 112 if self.crashed(left) or self.crashed(right): |
| 113 print "%s failed" % f |
| 114 return |
| 115 if left['path'] == self.right_path: |
| 116 left, right = right, left |
| 117 left_data = self.buffer_contents(left) |
| 118 right_data = self.buffer_contents(right) |
| 119 if left_data != right_data: |
| 120 # TODO(dcarney): analyse differences |
| 121 print "%s failed" % f |
| 122 return |
| 123 print "%s succeeded" % f |
| 124 |
| 76 def process_complete_processes(self): | 125 def process_complete_processes(self): |
| 77 complete_processes = self.complete_processes | 126 complete_processes = self.complete_processes |
| 78 complete_ids = [] | 127 complete_ids = [] |
| 79 for i, data in complete_processes.iteritems(): | 128 for i, data in complete_processes.iteritems(): |
| 80 p = data['process'] | |
| 81 if not self.right_path: | 129 if not self.right_path: |
| 82 if p.returncode: | 130 assert not i in complete_ids |
| 131 if self.crashed(data): |
| 83 print "%s failed" % data['file'] | 132 print "%s failed" % data['file'] |
| 84 else: | 133 else: |
| 85 print "%s succeeded" % data['file'] | 134 print "%s succeeded" % data['file'] |
| 86 complete_ids.append(i) | 135 complete_ids.append(i) |
| 87 else: | 136 else: |
| 88 # TODO(dcarney): perform compare | 137 if i in complete_ids: |
| 89 pass | 138 continue |
| 139 cmp_id = data['cmp_id'] |
| 140 if not cmp_id in complete_processes: |
| 141 continue |
| 142 complete_ids.append(i) |
| 143 complete_ids.append(cmp_id) |
| 144 self.compare_results(data, complete_processes[cmp_id]) |
| 90 # clear processed data | 145 # clear processed data |
| 91 for i in complete_ids: | 146 for i in complete_ids: |
| 92 del complete_processes[i] | 147 del complete_processes[i] |
| 93 | 148 |
| 94 def run(self): | 149 def run(self): |
| 95 assert not self.process_map | 150 assert not self.process_map |
| 96 self.build_process_map() | 151 self.build_process_map() |
| 97 process_map = self.process_map | 152 process_map = self.process_map |
| 98 complete_processes = self.complete_processes | 153 complete_processes = self.complete_processes |
| 99 running_processes = set() | 154 running_processes = set() |
| 100 with open('/dev/null', 'w') as dev_null: | 155 with open('/dev/null', 'w') as dev_null: |
| 101 while True: | 156 while True: |
| 102 for id, data in process_map.iteritems(): | 157 for id, data in process_map.iteritems(): |
| 103 if id in running_processes: | 158 if id in running_processes: |
| 104 continue | 159 continue |
| 105 if len(running_processes) == self.max_process_count: | 160 if len(running_processes) == self.max_process_count: |
| 106 break | 161 break |
| 107 out = sys.PIPE if self.right_path else dev_null | 162 out = subprocess.PIPE if self.right_path else dev_null |
| 108 args = [data['path'], data['file']] + self.args | 163 args = [data['path'], data['file']] + self.args |
| 109 logging.info("running [%s]" % ' '.join(args)) | 164 logging.info("running [%s]" % ' '.join(args)) |
| 110 data['process'] = subprocess.Popen(args, | 165 data['process'] = subprocess.Popen(args, |
| 111 stdout=out, | 166 stdout=out, |
| 112 stderr=dev_null, | 167 stderr=dev_null, |
| 113 bufsize=16*1024) | 168 bufsize=self.buffer_size) |
| 114 running_processes.add(id) | 169 running_processes.add(id) |
| 115 if not running_processes: | 170 if not running_processes: |
| 116 break | 171 break |
| 117 self.wait_processes(running_processes) | 172 self.wait_processes(running_processes) |
| 118 self.process_complete_processes() | 173 self.process_complete_processes() |
| 119 assert not running_processes | 174 assert not running_processes |
| 120 assert not self.process_map | 175 assert not self.process_map |
| 121 assert not self.complete_processes | 176 assert not self.complete_processes |
| 122 | 177 |
| 123 if __name__ == '__main__': | 178 if __name__ == '__main__': |
| (...skipping 16 matching lines...) |
| 140 files = [] | 195 files = [] |
| 141 if args.input_files_path: | 196 if args.input_files_path: |
| 142 with open(args.input_files_path, 'r') as f: | 197 with open(args.input_files_path, 'r') as f: |
| 143 files = [filename for filename in f.read().split('\n') if filename] | 198 files = [filename for filename in f.read().split('\n') if filename] |
| 144 if args.single_file: | 199 if args.single_file: |
| 145 files.append(args.single_file) | 200 files.append(args.single_file) |
| 146 assert files | 201 assert files |
| 147 | 202 |
| 148 process_runner = ProcessRunner(files, args) | 203 process_runner = ProcessRunner(files, args) |
| 149 process_runner.run() | 204 process_runner.run() |
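
The heart of the new compare mode is `read_running_processes`: once both parsers write their token streams to `stdout=subprocess.PIPE`, the parent has to keep draining those pipes while it polls, because a child that fills the OS pipe buffer blocks in `write()` and never exits. Below is a minimal, self-contained sketch of that select-and-drain pattern for a single left/right pair (Python 3; `run_and_compare` and its parameters are illustrative names, not part of the patch):

```python
# Sketch of the select()-based drain used while waiting on child processes.
# Illustrative only; run_and_compare and its arguments are not from the patch.
import os
import select
import subprocess

def run_and_compare(left_cmd, right_cmd, chunk=16 * 1024):
    """Run two commands in parallel and report whether their stdout matches."""
    procs = [subprocess.Popen(cmd, stdout=subprocess.PIPE)
             for cmd in (left_cmd, right_cmd)]
    buffers = {p.stdout.fileno(): [] for p in procs}
    open_fds = set(buffers)
    while any(p.poll() is None for p in procs):
        # Drain whatever the children have produced so far.  Without this, a
        # child that fills the OS pipe buffer blocks in write() and never
        # exits, so poll() would never report it as finished.
        ready, _, _ = select.select(list(open_fds), [], [], 0.01)
        for fd in ready:
            data = os.read(fd, chunk)      # at most one chunk; b'' means EOF
            if data:
                buffers[fd].append(data)
            else:
                open_fds.discard(fd)       # stop selecting on a drained pipe
    # Pick up anything written between the last drain and process exit.
    for p in procs:
        buffers[p.stdout.fileno()].append(p.stdout.read())
    left_out, right_out = (b''.join(buffers[p.stdout.fileno()]) for p in procs)
    return left_out == right_out

if __name__ == '__main__':
    print(run_and_compare(['echo', 'tokens'], ['echo', 'tokens']))  # True
```

The patch applies the same idea across many pairs at once: `wait_processes` drains with a zero `select` timeout and sleeps for 1 ms between polls, and `compare_results` performs the final `stdout.read()` via `buffer_contents` before comparing the two token streams.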