OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google, Inc. All Rights Reserved. | 2 * Copyright (C) 2013 Google, Inc. All Rights Reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions | 5 * modification, are permitted provided that the following conditions |
6 * are met: | 6 * are met: |
7 * 1. Redistributions of source code must retain the above copyright | 7 * 1. Redistributions of source code must retain the above copyright |
8 * notice, this list of conditions and the following disclaimer. | 8 * notice, this list of conditions and the following disclaimer. |
9 * 2. Redistributions in binary form must reproduce the above copyright | 9 * 2. Redistributions in binary form must reproduce the above copyright |
10 * notice, this list of conditions and the following disclaimer in the | 10 * notice, this list of conditions and the following disclaimer in the |
(...skipping 26 matching lines...)
37 // On a network with high latency and high bandwidth, using a device | 37 // On a network with high latency and high bandwidth, using a device |
38 // with a fast CPU, we could end up speculatively tokenizing | 38 // with a fast CPU, we could end up speculatively tokenizing |
39 // the whole document, well ahead of when the main-thread actually needs it. | 39 // the whole document, well ahead of when the main-thread actually needs it. |
40 // This is a waste of memory (and potentially time if the speculation fails). | 40 // This is a waste of memory (and potentially time if the speculation fails). |
41 // So we limit our outstanding tokens arbitrarily to 10,000. | 41 // So we limit our outstanding tokens arbitrarily to 10,000. |
42 // Our maximal memory spent speculating will be approximately: | 42 // Our maximal memory spent speculating will be approximately: |
43 // (outstandingTokenLimit + pendingTokenLimit) * sizeof(CompactToken) | 43 // (outstandingTokenLimit + pendingTokenLimit) * sizeof(CompactToken) |
44 // We use a separate low and high water mark to avoid constantly topping | 44 // We use a separate low and high water mark to avoid constantly topping |
45 // off the main thread's token buffer. | 45 // off the main thread's token buffer. |
46 // At time of writing, this is (10000 + 1000) * 28 bytes = ~308kb of memory. | 46 // At time of writing, this is (10000 + 1000) * 28 bytes = ~308kb of memory. |
47 // These numbers have not been tuned. | 47 // These numbers have not been tuned. |
abarth-chromium 2014/04/29 18:29:00
Can you update the math in this comment?
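For reference, the updated arithmetic with the limits this patch introduces would be (1000 + 100) * 28 bytes = 30,800 bytes, roughly 31kb instead of ~308kb, assuming sizeof(CompactHTMLToken) is still the 28 bytes quoted in the comment. A minimal standalone check of that arithmetic (not part of the CL):

    // Standalone sketch: recomputes the speculation memory bound from the
    // comment using the new limits. The 28-byte token size is an assumption
    // carried over from the existing comment, not measured here.
    #include <cstddef>

    static const size_t newOutstandingTokenLimit = 1000;
    static const size_t newPendingTokenLimit = 100;
    static const size_t approxTokenSize = 28; // bytes, per the existing comment
    static_assert((newOutstandingTokenLimit + newPendingTokenLimit) * approxTokenSize == 30800,
                  "max speculation memory is ~31kb with the new limits");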
48 static const size_t outstandingTokenLimit = 10000; | 48 static const size_t outstandingTokenLimit = 1000; |
eseidel 2014/04/29 00:23:13
I didn't mean to leave these in.

eseidel 2014/04/29 00:24:12
These aren't bad. I think they can just be deferr
49 | 49 |
50 // We limit our chunks to 1000 tokens, to make sure the main | 50 // We limit our chunks to 1000 tokens, to make sure the main |
abarth-chromium 2014/04/29 18:29:00
This comment is now out of date.
51 // thread is never waiting on the parser thread for tokens. | 51 // thread is never waiting on the parser thread for tokens. |
52 // This was tuned in https://bugs.webkit.org/show_bug.cgi?id=110408. | 52 // This was tuned in https://bugs.webkit.org/show_bug.cgi?id=110408. |
53 static const size_t pendingTokenLimit = 1000; | 53 static const size_t pendingTokenLimit = 100; |
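Taken together, the two constants implement the policy the comments describe: flush a chunk to the main thread every pendingTokenLimit tokens, and stop speculating once outstandingTokenLimit tokens are in flight. A standalone sketch of that policy (plain C++, not Blink code; the token and chunk types are stand-ins):

    // Standalone illustration of the two-limit policy described above.
    #include <cstddef>
    #include <cstdio>
    #include <vector>

    static const size_t outstandingTokenLimit = 1000;
    static const size_t pendingTokenLimit = 100;

    int main()
    {
        std::vector<int> pendingTokens; // stands in for CompactHTMLTokenStream
        size_t outstandingTokens = 0;   // handed off to the "main thread", not yet consumed
        size_t chunksSent = 0;

        for (int token = 0; ; ++token) {
            if (outstandingTokens >= outstandingTokenLimit)
                break; // stop speculating until the main thread catches up

            pendingTokens.push_back(token);
            if (pendingTokens.size() >= pendingTokenLimit) {
                outstandingTokens += pendingTokens.size(); // flush a chunk
                pendingTokens.clear();
                ++chunksSent;
            }
        }
        std::printf("sent %zu chunks, %zu tokens outstanding\n", chunksSent, outstandingTokens);
        return 0;
    }

Under this reading, the new values mean the background parser tops off the main thread in 100-token chunks and pauses after roughly ten unconsumed chunks.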
54 | 54 |
55 using namespace HTMLNames; | 55 using namespace HTMLNames; |
56 | 56 |
57 #ifndef NDEBUG | 57 #ifndef NDEBUG |
58 | 58 |
59 static void checkThatTokensAreSafeToSendToAnotherThread(const CompactHTMLTokenStream* tokens) | 59 static void checkThatTokensAreSafeToSendToAnotherThread(const CompactHTMLTokenStream* tokens) |
60 { | 60 { |
61 for (size_t i = 0; i < tokens->size(); ++i) | 61 for (size_t i = 0; i < tokens->size(); ++i) |
62 ASSERT(tokens->at(i).isSafeToSendToAnotherThread()); | 62 ASSERT(tokens->at(i).isSafeToSendToAnotherThread()); |
63 } | 63 } |
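The call site for this helper falls in the elided lines; presumably it runs near the top of sendTokensToMainThread, before the pending tokens are handed off, and only in debug builds. A sketch, with the surrounding function body assumed rather than taken from the diff:

    // Sketch only: assumed call site for the debug check above.
    void BackgroundHTMLParser::sendTokensToMainThread()
    {
        if (m_pendingTokens->isEmpty())
            return;

    #ifndef NDEBUG
        checkThatTokensAreSafeToSendToAnotherThread(m_pendingTokens.get());
    #endif

        // ... assemble and post the chunk, as in the lines that follow ...
    }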
(...skipping 188 matching lines...)
252 chunk->treeBuilderState = m_treeBuilderSimulator.state(); | 252 chunk->treeBuilderState = m_treeBuilderSimulator.state(); |
253 chunk->inputCheckpoint = m_input.createCheckpoint(m_pendingTokens->size()); | 253 chunk->inputCheckpoint = m_input.createCheckpoint(m_pendingTokens->size()); |
254 chunk->preloadScannerCheckpoint = m_preloadScanner->createCheckpoint(); | 254 chunk->preloadScannerCheckpoint = m_preloadScanner->createCheckpoint(); |
255 chunk->tokens = m_pendingTokens.release(); | 255 chunk->tokens = m_pendingTokens.release(); |
256 callOnMainThread(bind(&HTMLDocumentParser::didReceiveParsedChunkFromBackgroundParser, m_parser, chunk.release())); | 256 callOnMainThread(bind(&HTMLDocumentParser::didReceiveParsedChunkFromBackgroundParser, m_parser, chunk.release())); |
257 | 257 |
258 m_pendingTokens = adoptPtr(new CompactHTMLTokenStream); | 258 m_pendingTokens = adoptPtr(new CompactHTMLTokenStream); |
259 } | 259 } |
260 | 260 |
261 } | 261 } |