| OLD | NEW |
| 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "chrome/common/descriptor_set_posix.h" | 5 #include "chrome/common/descriptor_set_posix.h" |
| 6 | 6 |
| 7 #include "base/logging.h" | 7 #include "base/logging.h" |
| 8 | 8 |
| 9 DescriptorSet::DescriptorSet() | 9 DescriptorSet::DescriptorSet() |
| 10 : next_descriptor_(0) { | 10 : consumed_descriptor_highwater_(0) { |
| 11 } | 11 } |
| 12 | 12 |
| 13 DescriptorSet::~DescriptorSet() { | 13 DescriptorSet::~DescriptorSet() { |
| 14 if (next_descriptor_ == descriptors_.size()) | 14 if (consumed_descriptor_highwater_ == descriptors_.size()) |
| 15 return; | 15 return; |
| 16 | 16 |
| 17 LOG(WARNING) << "DescriptorSet destroyed with unconsumed descriptors"; | 17 LOG(WARNING) << "DescriptorSet destroyed with unconsumed descriptors"; |
| 18 // We close all the descriptors where the close flag is set. If this | 18 // We close all the descriptors where the close flag is set. If this |
| 19 // message should have been transmitted, then closing those with close | 19 // message should have been transmitted, then closing those with close |
| 20 // flags set mirrors the expected behaviour. | 20 // flags set mirrors the expected behaviour. |
| 21 // | 21 // |
| 22 // If this message was received with more descriptors than expected | 22 // If this message was received with more descriptors than expected |
| 23 // (which could a DOS against the browser by a rouge renderer) then all | 23 // (which could be a DOS against the browser by a rogue renderer) then all |
| 24 // the descriptors have their close flag set and we free all the extra | 24 // the descriptors have their close flag set and we free all the extra |
| 25 // kernel resources. | 25 // kernel resources. |
| 26 for (unsigned i = next_descriptor_; i < descriptors_.size(); ++i) { | 26 for (unsigned i = consumed_descriptor_highwater_; |
| | 27 i < descriptors_.size(); ++i) { |
| 27 if (descriptors_[i].auto_close) | 28 if (descriptors_[i].auto_close) |
| 28 close(descriptors_[i].fd); | 29 close(descriptors_[i].fd); |
| 29 } | 30 } |
| 30 } | 31 } |
| 31 | 32 |
| 32 bool DescriptorSet::Add(int fd) { | 33 bool DescriptorSet::Add(int fd) { |
| 33 if (descriptors_.size() == MAX_DESCRIPTORS_PER_MESSAGE) | 34 if (descriptors_.size() == MAX_DESCRIPTORS_PER_MESSAGE) |
| 34 return false; | 35 return false; |
| 35 | 36 |
| 36 struct base::FileDescriptor sd; | 37 struct base::FileDescriptor sd; |
| 37 sd.fd = fd; | 38 sd.fd = fd; |
| 38 sd.auto_close = false; | 39 sd.auto_close = false; |
| 39 descriptors_.push_back(sd); | 40 descriptors_.push_back(sd); |
| 40 return true; | 41 return true; |
| 41 } | 42 } |
| 42 | 43 |
| 43 bool DescriptorSet::AddAndAutoClose(int fd) { | 44 bool DescriptorSet::AddAndAutoClose(int fd) { |
| 44 if (descriptors_.size() == MAX_DESCRIPTORS_PER_MESSAGE) | 45 if (descriptors_.size() == MAX_DESCRIPTORS_PER_MESSAGE) |
| 45 return false; | 46 return false; |
| 46 | 47 |
| 47 struct base::FileDescriptor sd; | 48 struct base::FileDescriptor sd; |
| 48 sd.fd = fd; | 49 sd.fd = fd; |
| 49 sd.auto_close = true; | 50 sd.auto_close = true; |
| 50 descriptors_.push_back(sd); | 51 descriptors_.push_back(sd); |
| 51 DCHECK(descriptors_.size() <= MAX_DESCRIPTORS_PER_MESSAGE); | 52 DCHECK(descriptors_.size() <= MAX_DESCRIPTORS_PER_MESSAGE); |
| 52 return true; | 53 return true; |
| 53 } | 54 } |
| 54 | 55 |
| 55 int DescriptorSet::NextDescriptor() { | 56 int DescriptorSet::GetDescriptorAt(unsigned index) const { |
| 56 if (next_descriptor_ == descriptors_.size()) | 57 if (index >= descriptors_.size()) |
| 57 return -1; | 58 return -1; |
| 58 | 59 |
| 59 return descriptors_[next_descriptor_++].fd; | 60 // We should always walk the descriptors in order, so it's reasonable to |
| | 61 // enforce this. Consider the case where a compromised renderer sends us |
| | 62 // the following message: |
| | 63 // |
| | 64 // ExampleMsg: |
| | 65 // num_fds:2 msg:FD(index = 1) control:SCM_RIGHTS {n, m} |
| | 66 // |
| | 67 // Here the renderer sent us a message which should have a descriptor, but |
| | 68 // actually sent two in an attempt to fill our fd table and kill us. By |
| | 69 // setting the index of the descriptor in the message to 1 (it should be |
| | 70 // 0), we would record a highwater of 2 and then consider all the |
| | 71 // descriptors to have been used. |
| | 72 // |
| | 73 // So we can either track the use of each descriptor in a bitset, or we |
| | 74 // can enforce that we walk the indexes strictly in order. |
| | 75 // |
| | 76 // There's one more wrinkle: When logging messages, we may reparse them. So |
| | 77 // we have an exception: When the consumed_descriptor_highwater_ is at the |
| | 78 // end of the array and index 0 is requested, we reset the highwater value. |
| | 79 if (index == 0 && consumed_descriptor_highwater_ == descriptors_.size()) |
| | 80 consumed_descriptor_highwater_ = 0; |
| | 81 |
| | 82 if (index != consumed_descriptor_highwater_) |
| | 83 return -1; |
| | 84 |
| | 85 consumed_descriptor_highwater_ = index + 1; |
| | 86 return descriptors_[index].fd; |
| 60 } | 87 } |
| 61 | 88 |
| 62 void DescriptorSet::GetDescriptors(int* buffer) const { | 89 void DescriptorSet::GetDescriptors(int* buffer) const { |
| 63 DCHECK_EQ(next_descriptor_, 0u); | |
| 64 | |
| 65 for (std::vector<base::FileDescriptor>::const_iterator | 90 for (std::vector<base::FileDescriptor>::const_iterator |
| 66 i = descriptors_.begin(); i != descriptors_.end(); ++i) { | 91 i = descriptors_.begin(); i != descriptors_.end(); ++i) { |
| 67 *(buffer++) = i->fd; | 92 *(buffer++) = i->fd; |
| 68 } | 93 } |
| 69 } | 94 } |
| 70 | 95 |
| 71 void DescriptorSet::CommitAll() { | 96 void DescriptorSet::CommitAll() { |
| 72 for (std::vector<base::FileDescriptor>::iterator | 97 for (std::vector<base::FileDescriptor>::iterator |
| 73 i = descriptors_.begin(); i != descriptors_.end(); ++i) { | 98 i = descriptors_.begin(); i != descriptors_.end(); ++i) { |
| 74 if (i->auto_close) | 99 if (i->auto_close) |
| 75 close(i->fd); | 100 close(i->fd); |
| 76 } | 101 } |
| 77 descriptors_.clear(); | 102 descriptors_.clear(); |
| 78 next_descriptor_ = 0; | 103 consumed_descriptor_highwater_ = 0; |
| 79 } | 104 } |
| 80 | 105 |
| 81 void DescriptorSet::SetDescriptors(const int* buffer, unsigned count) { | 106 void DescriptorSet::SetDescriptors(const int* buffer, unsigned count) { |
| 82 DCHECK(count <= MAX_DESCRIPTORS_PER_MESSAGE); | 107 DCHECK_LE(count, MAX_DESCRIPTORS_PER_MESSAGE); |
| 83 DCHECK(descriptors_.size() == 0); | 108 DCHECK_EQ(descriptors_.size(), 0u); |
| | 109 DCHECK_EQ(consumed_descriptor_highwater_, 0u); |
| 84 | 110 |
| 85 descriptors_.reserve(count); | 111 descriptors_.reserve(count); |
| 86 for (unsigned i = 0; i < count; ++i) { | 112 for (unsigned i = 0; i < count; ++i) { |
| 87 struct base::FileDescriptor sd; | 113 struct base::FileDescriptor sd; |
| 88 sd.fd = buffer[i]; | 114 sd.fd = buffer[i]; |
| 89 sd.auto_close = true; | 115 sd.auto_close = true; |
| 90 descriptors_.push_back(sd); | 116 descriptors_.push_back(sd); |
| 91 } | 117 } |
| 92 } | 118 } |
| 93 | |
| 94 void DescriptorSet::TakeFrom(DescriptorSet* other) { | |
| 95 DCHECK(descriptors_.size() == 0); | |
| 96 | |
| 97 descriptors_.swap(other->descriptors_); | |
| 98 next_descriptor_ = other->next_descriptor_; | |
| 99 other->next_descriptor_ = 0; | |
| 100 } | |
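A minimal usage sketch (not part of the patch) of the behaviour the new `GetDescriptorAt()` enforces: descriptors must be consumed strictly in index order, with the single reparse exception, and anything left with the close flag set is released by `CommitAll()` or the destructor. The function name `DemonstrateStrictConsumption` and the use of `pipe()` to fabricate two real file descriptors are illustrative assumptions; the class and its methods are taken as declared in "chrome/common/descriptor_set_posix.h" and used in this file.

```cpp
#include <unistd.h>

#include "base/logging.h"
#include "chrome/common/descriptor_set_posix.h"

// Hypothetical demonstration helper, not part of the Chromium patch.
void DemonstrateStrictConsumption() {
  int fds[2];
  if (pipe(fds) != 0)
    return;

  // Model the receiving side: the set owns both descriptors and will close
  // them, just as it would after SetDescriptors() on an incoming message.
  DescriptorSet set;
  set.AddAndAutoClose(fds[0]);
  set.AddAndAutoClose(fds[1]);

  // Jumping ahead to index 1 is rejected and does not advance the
  // highwater mark.
  int fd = set.GetDescriptorAt(1);
  DCHECK_EQ(-1, fd);

  // Walking 0 then 1, in order, succeeds.
  fd = set.GetDescriptorAt(0);
  DCHECK_NE(-1, fd);
  fd = set.GetDescriptorAt(1);
  DCHECK_NE(-1, fd);

  // The logging/reparse exception: once every descriptor has been consumed,
  // asking for index 0 again resets the highwater mark and succeeds.
  fd = set.GetDescriptorAt(0);
  DCHECK_NE(-1, fd);

  // The set still owns the descriptors (auto_close), so CommitAll() closes
  // them and resets the set.
  set.CommitAll();
}
```

The strict in-order walk is what closes the hole described in the comment above: a rogue renderer that attaches more descriptors than the message declares can no longer skip the highwater mark past unconsumed descriptors, so the extras are still closed rather than leaking into the browser's fd table.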