Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(409)

Side by Side Diff: LayoutTests/http/tests/w3c/webperf/approved/UserTiming/test_user_timing_measure.htm

Issue 1191043004: Import hr-time and user-timing tests, remove redundant webperf copies (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Add idlharness result Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 <!DOCTYPE html>
2 <html>
3 <head>
4 <meta charset="UTF-8" />
    <title>window.performance User Timing measure() method is working properly</title>
6 <link rel="author" title="Microsoft" href="http://www.microsoft.com/" />
7 <link rel="help" href="http://127.0.0.1:8000/webperf/specs/UserTiming/#d om-performance-measure"/>
8 <script src="/w3c/resources/testharness.js"></script>
9 <script src="/w3c/resources/testharnessreport.js"></script>
10 <script src="/w3c/webperf/resources/webperftestharness.js"></script>
11
12 <script type="text/javascript">
// Test fixture data shared by all of the handlers below.
var startMarkName = "mark_start";   // name of the mark used as each measure's start point
var startMarkValue;                 // startTime of "mark_start"; captured in onload_test()
var endMarkName = "mark_end";       // name of the mark used as each measure's end point
var endMarkValue;                   // startTime of "mark_end"; captured in measure_test_cb()
var measures;
var testThreshold = 20;             // allowed difference (ms) when comparing timing values

// Test measures.
// Delay (ms) between creating the start and end marks, so measures built from
// them have a clearly non-zero duration.
var measureTestDelay = 200;
// One scenario per way of calling measure(). startTime/duration/entryMatch are
// filled in by measure_test_cb() once the marks exist; found is scratch state
// used by test_measure_list(). The last scenario deliberately reuses the first
// scenario's name to confirm measure names can be duplicated.
var TEST_MEASURES =
[
    {
        name: "measure_no_start_no_end",    // measure(name) with neither mark
        startMark: undefined,
        endMark: undefined,
        startTime: undefined,
        duration: undefined,
        entryType: "measure",
        entryMatch: undefined,
        order: undefined,
        found: false
    },
    {
        name: "measure_start_no_end",       // measure(name, startMark)
        startMark: "mark_start",
        endMark: undefined,
        startTime: undefined,
        duration: undefined,
        entryType: "measure",
        entryMatch: undefined,
        order: undefined,
        found: false
    },
    {
        name: "measure_start_end",          // measure(name, startMark, endMark)
        startMark: "mark_start",
        endMark: "mark_end",
        startTime: undefined,
        duration: undefined,
        entryType: "measure",
        entryMatch: undefined,
        order: undefined,
        found: false
    },
    {
        name: "measure_no_start_no_end",    // duplicate name of the first scenario
        startMark: undefined,
        endMark: undefined,
        startTime: undefined,
        duration: undefined,
        entryType: "measure",
        entryMatch: undefined,
        order: undefined,
        found: false
    }
];

// explicit_done: the test finishes asynchronously, so done() is called
// manually from onload_test() / measure_test_cb().
setup({timeout:1000, explicit_done: true});

test_namespace();
// onload handler: verifies the User Timing and Performance Timeline entry
// points exist, then kicks off the measure scenarios by creating the start
// mark and scheduling creation of the end mark.
function onload_test()
{
    // Test for existence of the User Timing and Performance Timeline
    // interfaces required by this test.
    var required = [window.performance.mark,
                    window.performance.clearMarks,
                    window.performance.measure,
                    window.performance.clearMeasures,
                    window.performance.getEntriesByName,
                    window.performance.getEntriesByType,
                    window.performance.getEntries];
    var missing = false;
    for (var i = 0; i < required.length; i++)
    {
        if (required[i] == undefined)
        {
            missing = true;
        }
    }

    if (missing)
    {
        test_true(false,
                  "The User Timing and Performance Timeline interfaces, which are required for this test, " +
                  "are defined.");

        done();
        return;
    }

    // Create the start mark for the test measures and record its value.
    window.performance.mark(startMarkName);
    startMarkValue = window.performance.getEntriesByName(startMarkName)[0].startTime;

    // Create the test end mark after a delay; this allows for a significant
    // difference between the mark values that should be represented in the
    // duration of measures using these marks.
    setTimeout(measure_test_cb, measureTestDelay);
}
105
// Timer callback: creates the end mark, creates every measure scenario in
// TEST_MEASURES, fills in each scenario's expected startTime/duration, and
// then validates the measures via getEntriesByName() (with and without the
// entryType argument), getEntries(), and getEntriesByType().
function measure_test_cb()
{
    // create the end mark for the test measures
    window.performance.mark(endMarkName);

    // get the end mark's value
    endMarkValue = window.performance.getEntriesByName(endMarkName)[0].startTime;

    // loop through all measure scenarios and create the corresponding measures
    for (var i in TEST_MEASURES)
    {
        var scenario = TEST_MEASURES[i];

        if (scenario.startMark == undefined && scenario.endMark == undefined)
        {
            // both startMark and endMark are undefined, don't provide either parameter
            window.performance.measure(scenario.name);

            // when startMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding
            // to the navigationStart attribute with a timebase of the same attribute is used; this is
            // equivalent to 0
            scenario.startTime = 0;

            // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding to
            // the current time with a timebase of the navigationStart attribute is used
            // NOTE(review): Date-based "now" only approximates the entry's actual duration;
            // the testThreshold comparisons in test_measure()/match_entries() absorb the difference.
            scenario.duration = (new Date()) - window.performance.timing.navigationStart;
        }
        else if (scenario.startMark != undefined && scenario.endMark == undefined)
        {
            // only startMark is defined, provide startMark and don't provide endMark
            window.performance.measure(scenario.name, scenario.startMark);

            // when startMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the startMark
            scenario.startTime = startMarkValue;

            // when endMark isn't provided to the measure() call, a DOMHighResTimeStamp corresponding to
            // the current time with a timebase of the navigationStart attribute is used
            scenario.duration = ((new Date()) - window.performance.timing.navigationStart) -
                startMarkValue;
        }
        else if (scenario.startMark != undefined && scenario.endMark != undefined)
        {
            // both startMark and endMark are defined, provide both parameters
            window.performance.measure(scenario.name, scenario.startMark, scenario.endMark);

            // when startMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the startMark
            scenario.startTime = startMarkValue;

            // when endMark is provided to the measure() call, the value of the mark whose name is
            // provided is used for the endMark
            scenario.duration = endMarkValue - startMarkValue;
        }
    }

    // test that expected measures are returned by getEntriesByName
    // NOTE(review): "entries" is assigned without var throughout and so
    // becomes an implicit global.
    for (var i in TEST_MEASURES)
    {
        entries = window.performance.getEntriesByName(TEST_MEASURES[i].name);
        // for all test measures, the test will be validate the test measure against the first entry returned
        // by getEntriesByName(), except for the last measure, where since it is a duplicate measure, the test
        // will validate it against the second entry returned by getEntriesByName()
        // (for...in yields string indices, so the i == 3 comparison relies on loose equality)
        test_measure(entries[(i == 3 ? 1 : 0)],
                     "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\")[" +
                     (i == 3 ? 1 : 0) + "]",
                     TEST_MEASURES[i].name,
                     TEST_MEASURES[i].startTime,
                     TEST_MEASURES[i].duration);
        // remember the entry so later queries can be cross-checked against it
        TEST_MEASURES[i].entryMatch = entries[(i == 3 ? 1 : 0)];
    }

    // test that expected measures are returned by getEntriesByName with the entryType parameter provided
    for (var i in TEST_MEASURES)
    {
        entries = window.performance.getEntriesByName(TEST_MEASURES[i].name, "measure");

        test_true(match_entries(entries[(i == 3 ? 1 : 0)], TEST_MEASURES[i].entryMatch),
                  "window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name + "\", \"measure\")[" +
                  (i == 3 ? 1 : 0) + "] returns an object containing the \"" + TEST_MEASURES[i].name +
                  "\" measure in the correct order, and its value matches the \"" + TEST_MEASURES[i].name +
                  "\" measure returned by window.performance.getEntriesByName(\"" + TEST_MEASURES[i].name +
                  "\")");
    }

    // test that expected measures are returned by getEntries
    entries = get_test_entries(window.performance.getEntries(), "measure");

    test_measure_list(entries, "window.performance.getEntries()", TEST_MEASURES);

    // test that expected measures are returned by getEntriesByType
    entries = window.performance.getEntriesByType("measure");

    test_measure_list(entries, "window.performance.getEntriesByType(\"measure\")", TEST_MEASURES);

    done();
}
203
// Compares two performance entries field by field.
//   entry1, entry2 - entries to compare (name, startTime, entryType, duration)
//   threshold      - optional maximum allowed difference (ms) for startTime
//                    and duration; defaults to the global testThreshold.
// Returns true when the entries match within the threshold.
//
// FIX: the threshold parameter was previously defaulted (to 0) but never
// used -- every comparison read the global testThreshold instead. It now
// defaults to testThreshold (preserving the behavior of all existing
// two-argument callers) and is actually honored when supplied.
function match_entries(entry1, entry2, threshold)
{
    if (threshold == undefined)
    {
        threshold = testThreshold;
    }

    var pass = true;

    // match name
    pass = pass && (entry1.name == entry2.name);

    // match startTime, within the allowed threshold
    pass = pass && (Math.abs(entry1.startTime - entry2.startTime) <= threshold);

    // match entryType
    pass = pass && (entry1.entryType == entry2.entryType);

    // match duration, within the allowed threshold
    pass = pass && (Math.abs(entry1.duration - entry2.duration) <= threshold);

    return pass;
}
227
// Validates a single measure entry against its expected field values.
//   measureEntry        - the measure entry to check
//   measureEntryCommand - string describing how the entry was obtained
//                         (used as the prefix of each assertion message)
//   expectedName / expectedStartTime / expectedDuration - expected values
//
// FIX: the startTime and duration assertion messages were truncated -- they
// ended with "== " / "~== " without ever including the expected value. The
// expected values are now interpolated into the messages.
function test_measure(measureEntry, measureEntryCommand, expectedName, expectedStartTime, expectedDuration)
{
    // test name
    test_true(measureEntry.name == expectedName, measureEntryCommand + ".name == \"" + expectedName + "\"");

    // test startTime; since for a mark, the startTime is always equal to a mark's value or the value of a
    // navigation timing attribute, the actual startTime should match the expected value exactly
    test_true(Math.abs(measureEntry.startTime - expectedStartTime) == 0,
              measureEntryCommand + ".startTime == " + expectedStartTime);

    // test entryType
    test_true(measureEntry.entryType == "measure", measureEntryCommand + ".entryType == \"measure\"");

    // test duration, allow for an acceptable threshold in the difference between the actual duration and the
    // expected value for the duration
    test_true(Math.abs(measureEntry.duration - expectedDuration) <= testThreshold, measureEntryCommand +
              ".duration ~== " + expectedDuration + " (up to " + testThreshold + "ms difference allowed)");
}
246
// Validates that measureEntryList contains exactly the measures described by
// measureScenarios (matching each scenario to at most one entry), and that
// the list is ordered by non-decreasing startTime.
//   measureEntryList        - entries returned by a getEntries*() call
//   measureEntryListCommand - string describing the call (used in messages)
//   measureScenarios        - the TEST_MEASURES scenario array
function test_measure_list(measureEntryList, measureEntryListCommand, measureScenarios)
{
    // give all entries a "found" property that can be set to ensure it isn't tested twice
    // (needed because two scenarios share the name "measure_no_start_no_end")
    for (var i in measureEntryList)
    {
        measureEntryList[i].found = false;
    }

    for (var i in measureScenarios)
    {
        measureScenarios[i].found = false;

        // find the first not-yet-claimed entry matching this scenario
        for (var j in measureEntryList)
        {
            if (match_entries(measureEntryList[j], measureScenarios[i]) && !measureEntryList[j].found)
            {
                // cross-check the matched entry against the entry previously
                // recorded from getEntriesByName() in measure_test_cb()
                test_true(match_entries(measureEntryList[j], measureScenarios[i].entryMatch),
                          measureEntryListCommand + " returns an object containing the \"" +
                          measureScenarios[i].name + "\" measure, and it's value matches the measure " +
                          "returned by window.performance.getEntriesByName(\"" + measureScenarios[i].name +
                          "\")[" + (i == 3 ? 1 : 0) + "].");

                // claim the entry so the duplicate-name scenario matches a different one
                measureEntryList[j].found = true;
                measureScenarios[i].found = true;
                break;
            }
        }

        // no entry matched this scenario: report an explicit failure
        if (!measureScenarios[i].found)
        {
            test_true(false,
                      measureEntryListCommand + " returns an object containing the \"" +
                      measureScenarios[i].name + "\" measure.");
        }
    }

    // verify order of output of getEntriesByType: startTime must never decrease
    var startTimeCurr = 0;
    var pass = true;
    for (var i in measureEntryList)
    {
        if (measureEntryList[i].startTime < startTimeCurr)
        {
            pass = false;
        }
        startTimeCurr = measureEntryList[i].startTime;
    }
    test_true(pass,
              measureEntryListCommand + " returns an object containing all test " +
              "measures in order.");
}
298
// Returns a new array holding only the entries from entryList whose
// entryType equals the requested type, preserving their original order.
//   entryList - list of performance entries to filter
//   entryType - entry type name to keep (e.g. "measure")
function get_test_entries(entryList, entryType)
{
    var testEntries = [];

    // filter entryList
    for (var index = 0; index < entryList.length; index++)
    {
        var candidate = entryList[index];
        if (candidate.entryType == entryType)
        {
            testEntries.push(candidate);
        }
    }

    return testEntries;
}
314 </script>
315 </head>
316 <body onload="onload_test();">
317 <h1>Description</h1>
318 <p>This test validates that the performance.measure() method is working properly. This test creates the
319 following measures to test this method:
320 <ul>
321 <li>"measure_no_start_no_end": created using a measure() call wi thout a startMark or endMark
322 provided</li>
323 <li>"measure_start_no_end": created using a measure() call with only the startMark provided</li>
            <li>"measure_start_end": created using a measure() call with both a startMark and endMark provided</li>
            <li>"measure_no_start_no_end": duplicate of the first measure, used to confirm names can be re-used</li>
326 </ul>
327 After creating each measure, the existence of these measures is valid ated by calling
328 performance.getEntriesByName() (both with and without the entryType p arameter provided),
329 performance.getEntriesByType(), and performance.getEntries()
330 </p>
331
332 <div id="log"></div>
333 </body>
334 </html>
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698