'use strict';

// When run inside a dedicated worker, the harness and helper scripts are not
// loaded by a host HTML page, so pull them in explicitly.
if (self.importScripts) {
  self.importScripts('/resources/testharness.js');
  self.importScripts('../resources/test-utils.js');
  self.importScripts('../resources/rs-utils.js');
  self.importScripts('../resources/recording-streams.js');
}

// A distinguishable error instance used to error the writable side; tests
// assert that pipeTo() rejects with this exact object.
const error1 = new Error('error1!');
error1.name = 'error1';
promise_test(t => {
  // A source that is already full ('a', 'b') and closed before piping starts.
  const source = recordingReadableStream({
    start(controller) {
      controller.enqueue('a');
      controller.enqueue('b');
      controller.close();
    }
  });

  // A sink with a high water mark of 0: it never desires chunks, so the pipe
  // must not read anything from the source.
  const sink = recordingWritableStream(undefined, new CountQueuingStrategy({ highWaterMark: 0 }));

  const pipe = source.pipeTo(sink, { preventCancel: true });

  // Let microtasks settle, verify no reads happened, then error the sink.
  return flushAsyncEvents()
    .then(() => {
      sink.controller.error(error1);
    })
    .then(() => promise_rejects(t, error1, pipe, 'pipeTo must reject with the same error'))
    .then(() => {
      assert_array_equals(source.eventsWithoutPulls, []);
      assert_array_equals(sink.events, []);
    })
    // preventCancel means the source keeps its chunks; drain and check them.
    .then(() => readableStreamToArray(source))
    .then(unreadChunks => {
      assert_array_equals(unreadChunks, ['a', 'b']);
    });
}, 'Piping from a non-empty ReadableStream into a WritableStream that does not desire chunks');
| 42 |
promise_test(() => {
  // A source holding a single chunk 'b', closed immediately.
  const source = recordingReadableStream({
    start(controller) {
      controller.enqueue('b');
      controller.close();
    }
  });

  // The sink blocks on its first write until we release it; later writes
  // complete synchronously.
  let releaseFirstWrite;
  const sink = recordingWritableStream({
    write() {
      if (!releaseFirstWrite) {
        // first write
        return new Promise(resolve => {
          releaseFirstWrite = resolve;
        });
      }
      return undefined;
    }
  });

  // Pre-fill the sink via a temporary writer so it exerts backpressure.
  const writer = sink.getWriter();
  const firstWritePromise = writer.write('a');
  assert_equals(writer.desiredSize, 0, 'after writing the writer\'s desiredSize must be 0');
  writer.releaseLock();

  // firstWritePromise stays pending until releaseFirstWrite() is called.

  const pipe = source.pipeTo(sink);

  return flushAsyncEvents()
    .then(() => releaseFirstWrite())
    .then(() => Promise.all([firstWritePromise, pipe]))
    .then(() => {
      assert_array_equals(source.eventsWithoutPulls, []);
      assert_array_equals(sink.events, ['write', 'a', 'write', 'b', 'close']);
    });
}, 'Piping from a non-empty ReadableStream into a WritableStream that does not desire chunks, but then does');
| 82 |
promise_test(() => {
  // An initially empty source; chunks are enqueued manually later on.
  const source = recordingReadableStream();

  const sinkStarted = Promise.resolve();
  // The sink blocks on its first write until released; later writes complete
  // synchronously.
  let releaseFirstWrite;
  const sink = recordingWritableStream({
    start() {
      return sinkStarted;
    },
    write() {
      if (!releaseFirstWrite) {
        // first write
        return new Promise(resolve => {
          releaseFirstWrite = resolve;
        });
      }
      return undefined;
    }
  });

  // Pre-fill the sink so it does not desire chunks when the pipe begins.
  const writer = sink.getWriter();
  writer.write('a');

  return sinkStarted.then(() => {
    assert_array_equals(sink.events, ['write', 'a']);
    assert_equals(writer.desiredSize, 0, 'after writing the writer\'s desiredSize must be 0');
    writer.releaseLock();

    const pipe = source.pipeTo(sink);

    // Make the source non-empty, unblock the sink, and finish the source.
    source.controller.enqueue('b');
    releaseFirstWrite();
    source.controller.close();

    return pipe.then(() => {
      assert_array_equals(source.eventsWithoutPulls, []);
      assert_array_equals(sink.events, ['write', 'a', 'write', 'b', 'close']);
    });
  });
}, 'Piping from an empty ReadableStream into a WritableStream that does not desire chunks, but then the readable ' +
   'stream becomes non-empty and the writable stream starts desiring chunks');
| 126 |
promise_test(() => {
  // Records controller.desiredSize as observed immediately after each enqueue.
  const recordedDesiredSizes = [];
  const rs = recordingReadableStream({
    start(controller) {
      // Enqueue one chunk every 100 ms, then close at 500 ms.
      delay(100).then(() => enqueue('a'));
      delay(200).then(() => enqueue('b'));
      delay(300).then(() => enqueue('c'));
      delay(400).then(() => enqueue('d'));
      delay(500).then(() => controller.close());

      function enqueue(chunk) {
        controller.enqueue(chunk);
        recordedDesiredSizes.push(controller.desiredSize);
      }
    }
  });

  // Each write takes 350 ms, slower than the 100 ms production rate, so the
  // pipe must exert backpressure on the readable side.
  const finishedChunks = [];
  const writableStartPromise = Promise.resolve();
  const ws = recordingWritableStream({
    start() {
      return writableStartPromise;
    },
    write(chunk) {
      return delay(350).then(() => {
        finishedChunks.push(chunk);
      });
    }
  });

  return writableStartPromise.then(() => {
    return Promise.all([
      // Final state once the pipe completes.
      rs.pipeTo(ws).then(() => {
        assert_array_equals(recordedDesiredSizes, [1, 0, -1, -2], 'backpressure must have been exerted at the source');
        assert_array_equals(finishedChunks, ['a', 'b', 'c', 'd'], 'all chunks started writing');

        assert_array_equals(rs.eventsWithoutPulls, [], 'nothing unexpected should happen to the ReadableStream');
        assert_array_equals(ws.events, ['write', 'a', 'write', 'b', 'write', 'c', 'write', 'd', 'close'],
                            'all chunks were written (and the WritableStream closed)');
      }),

      // Checkpoints at 25 ms past each enqueue, plus one past the first write.
      delay(125).then(() => {
        assert_array_equals(finishedChunks, [], 'at t = 125 ms, zero chunks must have finished writing');
        assert_array_equals(ws.events, ['write', 'a'], 'at t = 125 ms, one chunk must have been written');

        // When 'a' (the very first chunk) was enqueued, it was immediately used to fulfill the outstanding read request
        // promise, leaving the queue empty.
        assert_array_equals(recordedDesiredSizes, [1],
                            'at t = 125 ms, the desiredSize at the last enqueue (100 ms) must have been 1');
        assert_equals(rs.controller.desiredSize, 1, 'at t = 125 ms, the current desiredSize must be 1');
      }),

      delay(225).then(() => {
        assert_array_equals(finishedChunks, [], 'at t = 225 ms, zero chunks must have finished writing');
        assert_array_equals(ws.events, ['write', 'a'], 'at t = 225 ms, one chunk must have been written');

        // When 'b' was enqueued at 200 ms, the queue was also empty, since immediately after enqueuing 'a' at
        // t = 100 ms, it was dequeued in order to fulfill the read() call that was made at time t = 0. Thus the queue
        // had size 1 (thus desiredSize of 0).
        assert_array_equals(recordedDesiredSizes, [1, 0],
                            'at t = 225 ms, the desiredSize at the last enqueue (200 ms) must have been 0');
        assert_equals(rs.controller.desiredSize, 0, 'at t = 225 ms, the current desiredSize must be 0');
      }),

      delay(325).then(() => {
        assert_array_equals(finishedChunks, [], 'at t = 325 ms, zero chunks must have finished writing');
        assert_array_equals(ws.events, ['write', 'a'], 'at t = 325 ms, one chunk must have been written');

        // When 'c' was enqueued at 300 ms, the queue was not empty; it had 'b' in it, since 'b' will not be read until
        // the first write completes at 450 ms. Thus, the queue size is 2 after enqueuing 'c', giving a desiredSize of
        // -1.
        assert_array_equals(recordedDesiredSizes, [1, 0, -1],
                            'at t = 325 ms, the desiredSize at the last enqueue (300 ms) must have been -1');
        assert_equals(rs.controller.desiredSize, -1, 'at t = 325 ms, the current desiredSize must be -1');
      }),

      delay(425).then(() => {
        assert_array_equals(finishedChunks, [], 'at t = 425 ms, zero chunks must have finished writing');
        assert_array_equals(ws.events, ['write', 'a'], 'at t = 425 ms, one chunk must have been written');

        // When 'd' was enqueued at 400 ms, the situation is the same as before, leading to a queue containing 'b', 'c',
        // and 'd'. (Remember the first write will only finish at 100 ms + 350 ms = 450 ms.)
        assert_array_equals(recordedDesiredSizes, [1, 0, -1, -2],
                            'at t = 425 ms, the desiredSize at the last enqueue (400 ms) must have been -2');
        assert_equals(rs.controller.desiredSize, -2, 'at t = 425 ms, the current desiredSize must be -2');
      }),

      delay(475).then(() => {
        assert_array_equals(finishedChunks, ['a'], 'at t = 475 ms, one chunk must have finished writing');
        assert_array_equals(ws.events, ['write', 'a', 'write', 'b'],
                            'at t = 475 ms, two chunks must have been written');

        assert_equals(rs.controller.desiredSize, -1, 'at t = 475 ms, the current desiredSize must be -1');
      })
    ]);
  });
}, 'Piping to a WritableStream that does not consume the writes fast enough exerts backpressure on the ReadableStream');
// Signal to the harness that all tests in this file have been declared
// (required when running as a worker script).
done();