| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "sandbox/linux/seccomp-bpf/codegen.h" | 5 #include "sandbox/linux/seccomp-bpf/codegen.h" |
| 6 | 6 |
| 7 #include <linux/filter.h> | 7 #include <linux/filter.h> |
| 8 | 8 |
| 9 #include <limits> | 9 #include <limits> |
| 10 #include <utility> | 10 #include <utility> |
| (...skipping 31 matching lines...) |
| 42 // sequences. | 42 // sequences. |
| 43 | 43 |
| 44 namespace sandbox { | 44 namespace sandbox { |
| 45 | 45 |
| 46 // kBranchRange is the maximum value that can be stored in | 46 // kBranchRange is the maximum value that can be stored in |
| 47 // sock_filter's 8-bit jt and jf fields. | 47 // sock_filter's 8-bit jt and jf fields. |
| 48 const size_t kBranchRange = std::numeric_limits<uint8_t>::max(); | 48 const size_t kBranchRange = std::numeric_limits<uint8_t>::max(); |
| 49 | 49 |
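
Aside: the 255-instruction limit encoded in `kBranchRange` comes straight from classic BPF's instruction format. A minimal sketch, mirroring (not including) the `sock_filter` layout from `<linux/filter.h>`; the struct name here is an illustrative stand-in, not the kernel type:

```cpp
// Illustrative mirror of classic BPF's sock_filter; the real type
// lives in <linux/filter.h>. Both branch offsets are 8-bit fields,
// so a conditional jump can skip at most 255 instructions.
#include <cstdint>
#include <limits>

struct SockFilterMirror {  // stand-in name, not the kernel struct
  uint16_t code;           // opcode
  uint8_t jt;              // jump offset when the condition is true
  uint8_t jf;              // jump offset when the condition is false
  uint32_t k;              // generic operand
};

static_assert(std::numeric_limits<uint8_t>::max() == 255,
              "matches kBranchRange above");
```
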
| 50 const CodeGen::Node CodeGen::kNullNode; | 50 const CodeGen::Node CodeGen::kNullNode; |
| 51 | 51 |
| 52 CodeGen::CodeGen() : program_(), memos_() { | 52 CodeGen::CodeGen() : program_(), equivalent_(), memos_() { |
| 53 } | 53 } |
| 54 | 54 |
| 55 CodeGen::~CodeGen() { | 55 CodeGen::~CodeGen() { |
| 56 } | 56 } |
| 57 | 57 |
| 58 void CodeGen::Compile(CodeGen::Node head, Program* out) { | 58 void CodeGen::Compile(CodeGen::Node head, Program* out) { |
| 59 DCHECK(out); | 59 DCHECK(out); |
| 60 out->assign(program_.rbegin() + Offset(head), program_.rend()); | 60 out->assign(program_.rbegin() + Offset(head), program_.rend()); |
| 61 } | 61 } |
| 62 | 62 |
| (...skipping 11 matching lines...) |
| 74 return *node; | 74 return *node; |
| 75 } | 75 } |
| 76 | 76 |
| 77 CodeGen::Node CodeGen::AppendInstruction(uint16_t code, | 77 CodeGen::Node CodeGen::AppendInstruction(uint16_t code, |
| 78 uint32_t k, | 78 uint32_t k, |
| 79 Node jt, | 79 Node jt, |
| 80 Node jf) { | 80 Node jf) { |
| 81 if (BPF_CLASS(code) == BPF_JMP) { | 81 if (BPF_CLASS(code) == BPF_JMP) { |
| 82 CHECK_NE(BPF_JA, BPF_OP(code)) << "CodeGen inserts JAs as needed"; | 82 CHECK_NE(BPF_JA, BPF_OP(code)) << "CodeGen inserts JAs as needed"; |
| 83 | 83 |
| 84 // We need to check |jt| twice because it might get pushed | 84 // Optimally adding jumps is rather tricky, so we use a quick |
| 85 // out-of-range by appending a jump for |jf|. | 85 // approximation: by artificially reducing |jt|'s range, |jt| will |
| 86 jt = WithinRange(jt, kBranchRange); | 86 // stay within its true range even if we add a jump for |jf|. |
| | 87 jt = WithinRange(jt, kBranchRange - 1); |
| 87 jf = WithinRange(jf, kBranchRange); | 88 jf = WithinRange(jf, kBranchRange); |
| 88 jt = WithinRange(jt, kBranchRange); | |
| 89 return Append(code, k, Offset(jt), Offset(jf)); | 89 return Append(code, k, Offset(jt), Offset(jf)); |
| 90 } | 90 } |
| 91 | 91 |
| 92 CHECK_EQ(kNullNode, jf) << "Non-branch instructions shouldn't provide jf"; | 92 CHECK_EQ(kNullNode, jf) << "Non-branch instructions shouldn't provide jf"; |
| 93 if (BPF_CLASS(code) == BPF_RET) { | 93 if (BPF_CLASS(code) == BPF_RET) { |
| 94 CHECK_EQ(kNullNode, jt) << "Return instructions shouldn't provide jt"; | 94 CHECK_EQ(kNullNode, jt) << "Return instructions shouldn't provide jt"; |
| 95 } else { | 95 } else { |
| 96 // For non-branch/non-return instructions, execution always | 96 // For non-branch/non-return instructions, execution always |
| 97 // proceeds to the next instruction; so we need to arrange for | 97 // proceeds to the next instruction; so we need to arrange for |
| 98 // that to be |jt|. | 98 // that to be |jt|. |
| 99 jt = WithinRange(jt, 0); | 99 jt = WithinRange(jt, 0); |
| | 100 CHECK_EQ(0U, Offset(jt)) << "ICE: Failed to setup next instruction"; |
| 100 } | 101 } |
| 101 return Append(code, k, 0, 0); | 102 return Append(code, k, 0, 0); |
| 102 } | 103 } |
| 103 | 104 |
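
Why `kBranchRange - 1` is enough on new line 87: `WithinRange(jf, ...)` may append one `BPF_JA`, which grows the program by a single instruction and therefore bumps every existing offset, including `|jt|`'s, up by one. A minimal sketch of that invariant, using hypothetical offsets in place of a real program:

```cpp
// Hypothetical offsets; models only the arithmetic of the range trick.
#include <cassert>
#include <cstddef>

int main() {
  const size_t kBranchRange = 255;
  size_t offset_jt = 254;  // passes the tightened check (<= 254)
  size_t offset_jf = 300;  // out of range, so a BPF_JA gets appended

  if (offset_jf > kBranchRange) {
    ++offset_jt;    // the new JA pushes every prior offset up by one
    offset_jf = 0;  // the branch now targets the freshly added JA
  }

  assert(offset_jt <= kBranchRange);  // still fits in 8 bits
  return 0;
}
```
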
| 104 CodeGen::Node CodeGen::WithinRange(Node target, size_t range) { | 105 CodeGen::Node CodeGen::WithinRange(Node target, size_t range) { |
| 105 if (Offset(target) > range) { | 106 // Just use |target| if it's already within range. |
| 106 // TODO(mdempsky): If |range > 0|, we might be able to reuse an | 107 if (Offset(target) <= range) { |
| 107 // existing instruction within that range. | 108 return target; |
| 108 | |
| 109 // TODO(mdempsky): If |target| is a branch or return, it might be | |
| 110 // possible to duplicate that instruction rather than jump to it. | |
| 111 | |
| 112 // Fall back to emitting a jump instruction. | |
| 113 target = Append(BPF_JMP | BPF_JA, Offset(target), 0, 0); | |
| 114 } | 109 } |
| 115 | 110 |
| 116 CHECK_LE(Offset(target), range) << "ICE: Failed to bring target within range"; | 111 // Alternatively, look for an equivalent instruction within range. |
| 117 return target; | 112 if (Offset(equivalent_.at(target)) <= range) { |
| | 113 return equivalent_.at(target); |
| | 114 } |
| | 115 |
| | 116 // Otherwise, fall back to emitting a jump instruction. |
| | 117 Node jump = Append(BPF_JMP | BPF_JA, Offset(target), 0, 0); |
| | 118 equivalent_.at(target) = jump; |
| | 119 return jump; |
| 118 } | 120 } |
| 119 | 121 |
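
The `|equivalent_|` bookkeeping above deserves a gloss: every node starts as its own stand-in (see `Append` below), and once a `BPF_JA` targeting it is emitted, that jump becomes the preferred alias for later out-of-range references. A simplified model, with toy types in place of the real CodeGen members:

```cpp
// Toy model of the |equivalent_| cache; Node and Append here are
// simplified stand-ins for the CodeGen members in this file.
#include <cassert>
#include <cstddef>
#include <vector>

using Node = size_t;
std::vector<Node> equivalent_;

Node Append() {
  Node res = equivalent_.size();
  equivalent_.push_back(res);  // each node starts equivalent to itself
  return res;
}

int main() {
  Node target = Append();         // node 0
  Node jump = Append();           // node 1: imagine a JA back to 0
  equivalent_.at(target) = jump;  // later lookups reuse this jump
  assert(equivalent_.at(target) == jump);
  return 0;
}
```
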
| 120 CodeGen::Node CodeGen::Append(uint16_t code, uint32_t k, size_t jt, size_t jf) { | 122 CodeGen::Node CodeGen::Append(uint16_t code, uint32_t k, size_t jt, size_t jf) { |
| 121 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_JA) { | 123 if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_JA) { |
| 122 CHECK_LE(jt, kBranchRange); | 124 CHECK_LE(jt, kBranchRange); |
| 123 CHECK_LE(jf, kBranchRange); | 125 CHECK_LE(jf, kBranchRange); |
| 124 } else { | 126 } else { |
| 125 CHECK_EQ(0U, jt); | 127 CHECK_EQ(0U, jt); |
| 126 CHECK_EQ(0U, jf); | 128 CHECK_EQ(0U, jf); |
| 127 } | 129 } |
| 128 | 130 |
| 129 CHECK_LT(program_.size(), static_cast<size_t>(BPF_MAXINSNS)); | 131 CHECK_LT(program_.size(), static_cast<size_t>(BPF_MAXINSNS)); |
| | 132 CHECK_EQ(program_.size(), equivalent_.size()); |
| | 133 |
| | 134 Node res = program_.size(); |
| 130 program_.push_back(sock_filter{code, jt, jf, k}); | 135 program_.push_back(sock_filter{code, jt, jf, k}); |
| 131 return program_.size() - 1; | 136 equivalent_.push_back(res); |
| 137 return res; |
| 132 } | 138 } |
| 133 | 139 |
| 134 size_t CodeGen::Offset(Node target) const { | 140 size_t CodeGen::Offset(Node target) const { |
| 135 CHECK_LT(target, program_.size()) << "Bogus offset target node"; | 141 CHECK_LT(target, program_.size()) << "Bogus offset target node"; |
| 136 return (program_.size() - 1) - target; | 142 return (program_.size() - 1) - target; |
| 137 } | 143 } |
| 138 | 144 |
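
`Offset()` only makes sense once you remember that `program_` is built back-to-front and `Compile()` reverses it via `rbegin()`/`rend()`. A small self-contained sketch of that reversal, using `int` payloads instead of `sock_filter`:

```cpp
// Sketch of Compile()/Offset() semantics with toy payloads.
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  // Appended in reverse execution order, exactly like program_.
  std::vector<int> program_ = {30, 20, 10};

  size_t head = 2;                               // node 2, value 10
  size_t offset = (program_.size() - 1) - head;  // == 0: newest node

  // Compile(): copy from rbegin() + Offset(head) to rend().
  std::vector<int> out(program_.rbegin() + offset, program_.rend());
  assert((out == std::vector<int>{10, 20, 30}));  // forward order
  return 0;
}
```
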
| 139 // TODO(mdempsky): Move into a general base::Tuple helper library. | 145 // TODO(mdempsky): Move into a general base::Tuple helper library. |
| 140 bool CodeGen::MemoKeyLess::operator()(const MemoKey& lhs, | 146 bool CodeGen::MemoKeyLess::operator()(const MemoKey& lhs, |
| 141 const MemoKey& rhs) const { | 147 const MemoKey& rhs) const { |
| 142 if (lhs.a != rhs.a) | 148 if (lhs.a != rhs.a) |
| 143 return lhs.a < rhs.a; | 149 return lhs.a < rhs.a; |
| 144 if (lhs.b != rhs.b) | 150 if (lhs.b != rhs.b) |
| 145 return lhs.b < rhs.b; | 151 return lhs.b < rhs.b; |
| 146 if (lhs.c != rhs.c) | 152 if (lhs.c != rhs.c) |
| 147 return lhs.c < rhs.c; | 153 return lhs.c < rhs.c; |
| 148 if (lhs.d != rhs.d) | 154 if (lhs.d != rhs.d) |
| 149 return lhs.d < rhs.d; | 155 return lhs.d < rhs.d; |
| 150 return false; | 156 return false; |
| 151 } | 157 } |
| 152 | 158 |
| 153 } // namespace sandbox | 159 } // namespace sandbox |