OLD | NEW |
---|---|
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "nacl_io/memfs/mem_fs_node.h" | 5 #include "nacl_io/memfs/mem_fs_node.h" |
6 | 6 |
7 #include <errno.h> | 7 #include <errno.h> |
8 #include <string.h> | 8 #include <string.h> |
9 | 9 |
10 #include <algorithm> | 10 #include <algorithm> |
11 | 11 |
12 #include "nacl_io/kernel_handle.h" | 12 #include "nacl_io/kernel_handle.h" |
13 #include "nacl_io/osstat.h" | 13 #include "nacl_io/osstat.h" |
14 #include "sdk_util/auto_lock.h" | 14 #include "sdk_util/auto_lock.h" |
15 | 15 |
16 namespace nacl_io { | 16 namespace nacl_io { |
17 | 17 |
18 namespace { | 18 namespace { |
19 | 19 |
20 // The maximum size to reserve in addition to the requested size. Resize() will | 20 // The maximum size to reserve in addition to the requested size. Resize() will |
21 // allocate twice as much as requested, up to this value. | 21 // allocate twice as much as requested, up to this value. |
22 const size_t kMaxResizeIncrement = 16 * 1024 * 1024; | 22 const size_t kMaxResizeIncrement = 16 * 1024 * 1024; |
23 | 23 |
24 } // namespace | 24 } // namespace |
25 | 25 |
26 MemFsNode::MemFsNode(Filesystem* filesystem) : Node(filesystem) { | 26 MemFsNode::MemFsNode(Filesystem* filesystem) |
| 27 : Node(filesystem), |
| 28 data_(NULL), |
| 29 data_len_(0) { |
27 SetType(S_IFREG); | 30 SetType(S_IFREG); |
28 } | 31 } |
29 | 32 |
30 MemFsNode::~MemFsNode() { | 33 MemFsNode::~MemFsNode() { |
31 } | 34 } |
32 | 35 |
33 Error MemFsNode::Read(const HandleAttr& attr, | 36 Error MemFsNode::Read(const HandleAttr& attr, |
34 void* buf, | 37 void* buf, |
35 size_t count, | 38 size_t count, |
36 int* out_bytes) { | 39 int* out_bytes) { |
37 *out_bytes = 0; | 40 *out_bytes = 0; |
38 | 41 |
39 AUTO_LOCK(node_lock_); | 42 AUTO_LOCK(node_lock_); |
40 if (count == 0) | 43 if (count == 0) |
41 return 0; | 44 return 0; |
42 | 45 |
43 size_t size = stat_.st_size; | 46 size_t size = stat_.st_size; |
44 | 47 |
45 if (attr.offs + count > size) { | 48 if (attr.offs + count > size) { |
46 count = size - attr.offs; | 49 count = size - attr.offs; |
47 } | 50 } |
48 | 51 |
49 memcpy(buf, &data_[attr.offs], count); | 52 memcpy(buf, data_ + attr.offs, count); |
50 *out_bytes = static_cast<int>(count); | 53 *out_bytes = static_cast<int>(count); |
51 return 0; | 54 return 0; |
52 } | 55 } |
53 | 56 |
54 Error MemFsNode::Write(const HandleAttr& attr, | 57 Error MemFsNode::Write(const HandleAttr& attr, |
55 const void* buf, | 58 const void* buf, |
56 size_t count, | 59 size_t count, |
57 int* out_bytes) { | 60 int* out_bytes) { |
58 *out_bytes = 0; | 61 *out_bytes = 0; |
59 AUTO_LOCK(node_lock_); | 62 AUTO_LOCK(node_lock_); |
60 | 63 |
61 if (count == 0) | 64 if (count == 0) |
62 return 0; | 65 return 0; |
63 | 66 |
64 if (count + attr.offs > static_cast<size_t>(stat_.st_size)) { | 67 if (count + attr.offs > static_cast<size_t>(stat_.st_size)) { |
65 Resize(count + attr.offs); | 68 Resize(count + attr.offs); |
66 count = stat_.st_size - attr.offs; | 69 count = stat_.st_size - attr.offs; |
67 } | 70 } |
68 | 71 |
69 memcpy(&data_[attr.offs], buf, count); | 72 memcpy(data_ + attr.offs, buf, count); |
70 *out_bytes = static_cast<int>(count); | 73 *out_bytes = static_cast<int>(count); |
71 return 0; | 74 return 0; |
72 } | 75 } |
73 | 76 |
74 Error MemFsNode::FTruncate(off_t new_size) { | 77 Error MemFsNode::FTruncate(off_t new_size) { |
75 AUTO_LOCK(node_lock_); | 78 AUTO_LOCK(node_lock_); |
76 Resize(new_size); | 79 Resize(new_size); |
77 return 0; | 80 return 0; |
78 } | 81 } |
79 | 82 |
80 void MemFsNode::Resize(off_t new_size) { | 83 void MemFsNode::Resize(off_t new_size) { |
81 if (new_size > static_cast<off_t>(data_.capacity())) { | 84 // While the node size is small, grow exponentially. When it starts to get |
82 // While the node size is small, grow exponentially. When it starts to get | 85 // larger, grow linearly. |
83 // larger, grow linearly. | 86 size_t extra = std::min<size_t>(new_size, kMaxResizeIncrement); |
84 size_t extra = std::min<size_t>(new_size, kMaxResizeIncrement); | 87 data_len_ = new_size + extra; |
| binji 2014/08/05 19:30:29: We don't want to always grow. What if new_size < d[…] |
| Sam Clegg 2014/08/06 09:10:32: Done. |
85 data_.reserve(new_size + extra); | 88 |
86 } else if (new_size < stat_.st_size) { | 89 data_ = (char*)realloc(data_, data_len_); |
87 // Shrink to fit. std::vector usually doesn't reduce allocation size, so | 90 if (data_len_ > new_size) |
88 // use the swap trick. | 91 memset(data_ + new_size, 0, data_len_ - new_size); |
| binji 2014/08/05 19:30:29: Why? I think it makes more sense to zero from the […] |
| Sam Clegg 2014/08/06 09:10:32: Done. |
89 std::vector<char>(data_).swap(data_); | |
90 } | |
91 data_.resize(new_size); | |
92 stat_.st_size = new_size; | 92 stat_.st_size = new_size; |
93 } | 93 } |
94 | 94 |
95 } // namespace nacl_io | 95 } // namespace nacl_io |
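
To make the resize policy concrete: the comment on `kMaxResizeIncrement` says Resize() reserves `min(new_size, kMaxResizeIncrement)` beyond the request, so small buffers roughly double while large ones grow by at most 16 MiB of slack, and the review comments ask that the buffer not be reallocated when the request already fits and (on one reading of the truncated second comment) that zeroing start at the old end of the data. The standalone sketch below illustrates that combination under those assumptions; it is not the nacl_io implementation, and `Buffer`, `GrowthTarget`, and `ResizeBuffer` are invented names for illustration only.

```cpp
// Standalone sketch (not nacl_io code): a realloc-backed buffer with the
// growth policy described by the kMaxResizeIncrement comment.
#include <stdlib.h>
#include <string.h>

#include <algorithm>

const size_t kMaxResizeIncrement = 16 * 1024 * 1024;

struct Buffer {
  char* data = NULL;    // backing allocation
  size_t capacity = 0;  // bytes currently allocated
  size_t size = 0;      // bytes logically in use (the st_size analogue)
};

// Reserve min(new_size, kMaxResizeIncrement) beyond the request: small
// buffers roughly double, large ones add at most 16 MiB of slack.
size_t GrowthTarget(size_t new_size) {
  return new_size + std::min(new_size, kMaxResizeIncrement);
}

bool ResizeBuffer(Buffer* buf, size_t new_size) {
  // Only reallocate when the request no longer fits ("don't always grow").
  if (new_size > buf->capacity) {
    size_t new_capacity = GrowthTarget(new_size);
    char* new_data = static_cast<char*>(realloc(buf->data, new_capacity));
    if (new_data == NULL)
      return false;  // realloc failed; the old block is still valid.
    buf->data = new_data;
    buf->capacity = new_capacity;
  }
  // Zero only the bytes newly exposed past the old logical size, so growing
  // the file never makes stale data readable.
  if (new_size > buf->size)
    memset(buf->data + buf->size, 0, new_size - buf->size);
  buf->size = new_size;
  return true;
}
```

The "zero from the old size" behaviour here is one interpretation of binji's second comment, whose text is cut off in the review above, so treat the sketch as an illustration of the discussion rather than a quote of the final patch.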