OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * |
| 3 * Copyright 2015, Google Inc. |
| 4 * All rights reserved. |
| 5 * |
| 6 * Redistribution and use in source and binary forms, with or without |
| 7 * modification, are permitted provided that the following conditions are |
| 8 * met: |
| 9 * |
| 10 * * Redistributions of source code must retain the above copyright |
| 11 * notice, this list of conditions and the following disclaimer. |
| 12 * * Redistributions in binary form must reproduce the above |
| 13 * copyright notice, this list of conditions and the following disclaimer |
| 14 * in the documentation and/or other materials provided with the |
| 15 * distribution. |
| 16 * * Neither the name of Google Inc. nor the names of its |
| 17 * contributors may be used to endorse or promote products derived from |
| 18 * this software without specific prior written permission. |
| 19 * |
| 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 * |
| 32 */ |
| 33 |
| 34 #include "src/core/channel/channel_stack.h" |
| 35 #include <grpc/support/log.h> |
| 36 |
| 37 #include <stdlib.h> |
| 38 #include <string.h> |
| 39 |
/* Nonzero enables channel-stack tracing; defaults off.  Presumably toggled
   by the tracer registration machinery — not visible in this file. */
int grpc_trace_channel = 0;
| 41 |
| 42 /* Memory layouts. |
| 43 |
| 44 Channel stack is laid out as: { |
| 45 grpc_channel_stack stk; |
| 46 padding to GPR_MAX_ALIGNMENT |
| 47 grpc_channel_element[stk.count]; |
| 48 per-filter memory, aligned to GPR_MAX_ALIGNMENT |
| 49 } |
| 50 |
| 51 Call stack is laid out as: { |
| 52 grpc_call_stack stk; |
| 53 padding to GPR_MAX_ALIGNMENT |
| 54 grpc_call_element[stk.count]; |
| 55 per-filter memory, aligned to GPR_MAX_ALIGNMENT |
| 56 } */ |
| 57 |
/* Given a size, round up to the next multiple of GPR_MAX_ALIGNMENT.
   (The previous comment said "sizeof(void*)", which does not match the
   macro.)  Requires GPR_MAX_ALIGNMENT to be a power of two; this is
   asserted in grpc_channel_stack_size. */
#define ROUND_UP_TO_ALIGNMENT_SIZE(x) \
  (((x) + GPR_MAX_ALIGNMENT - 1u) & ~(GPR_MAX_ALIGNMENT - 1u))
| 61 |
| 62 size_t grpc_channel_stack_size(const grpc_channel_filter **filters, |
| 63 size_t filter_count) { |
| 64 /* always need the header, and size for the channel elements */ |
| 65 size_t size = |
| 66 ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_channel_stack)) + |
| 67 ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element)); |
| 68 size_t i; |
| 69 |
| 70 GPR_ASSERT((GPR_MAX_ALIGNMENT & (GPR_MAX_ALIGNMENT - 1)) == 0 && |
| 71 "GPR_MAX_ALIGNMENT must be a power of two"); |
| 72 |
| 73 /* add the size for each filter */ |
| 74 for (i = 0; i < filter_count; i++) { |
| 75 size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data); |
| 76 } |
| 77 |
| 78 return size; |
| 79 } |
| 80 |
/* Address of the first grpc_channel_element, which lives immediately after
   the (alignment-padded) grpc_channel_stack header. */
#define CHANNEL_ELEMS_FROM_STACK(stk) \
  ((grpc_channel_element *)((char *)(stk) + ROUND_UP_TO_ALIGNMENT_SIZE( \
                                sizeof(grpc_channel_stack))))

/* Address of the first grpc_call_element, which lives immediately after
   the (alignment-padded) grpc_call_stack header. */
#define CALL_ELEMS_FROM_STACK(stk)  \
  ((grpc_call_element *)((char *)(stk) + \
                         ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack))))
| 88 |
| 89 grpc_channel_element *grpc_channel_stack_element( |
| 90 grpc_channel_stack *channel_stack, size_t index) { |
| 91 return CHANNEL_ELEMS_FROM_STACK(channel_stack) + index; |
| 92 } |
| 93 |
| 94 grpc_channel_element *grpc_channel_stack_last_element( |
| 95 grpc_channel_stack *channel_stack) { |
| 96 return grpc_channel_stack_element(channel_stack, channel_stack->count - 1); |
| 97 } |
| 98 |
| 99 grpc_call_element *grpc_call_stack_element(grpc_call_stack *call_stack, |
| 100 size_t index) { |
| 101 return CALL_ELEMS_FROM_STACK(call_stack) + index; |
| 102 } |
| 103 |
/* Initialize a channel stack in place, in memory previously sized by
   grpc_channel_stack_size for the same filter list.  Wires up each
   element's filter vtable and channel_data pointer, calls each filter's
   init_channel_elem, and records the size a call stack built on this
   channel stack will need. */
void grpc_channel_stack_init(grpc_exec_ctx *exec_ctx, int initial_refs,
                             grpc_iomgr_cb_func destroy, void *destroy_arg,
                             const grpc_channel_filter **filters,
                             size_t filter_count,
                             const grpc_channel_args *channel_args,
                             const char *name, grpc_channel_stack *stack) {
  /* Running total for the eventual call stack: header + element array now,
     plus each filter's per-call data accumulated in the loop below. */
  size_t call_size =
      ROUND_UP_TO_ALIGNMENT_SIZE(sizeof(grpc_call_stack)) +
      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_call_element));
  grpc_channel_element *elems;
  grpc_channel_element_args args;
  char *user_data; /* cursor into the per-filter channel-data region */
  size_t i;

  stack->count = filter_count;
  GRPC_STREAM_REF_INIT(&stack->refcount, initial_refs, destroy, destroy_arg,
                       name);
  elems = CHANNEL_ELEMS_FROM_STACK(stack);
  /* Per-filter channel data starts right after the element array. */
  user_data =
      ((char *)elems) +
      ROUND_UP_TO_ALIGNMENT_SIZE(filter_count * sizeof(grpc_channel_element));

  /* init per-filter data */
  for (i = 0; i < filter_count; i++) {
    args.channel_stack = stack;
    args.channel_args = channel_args;
    args.is_first = i == 0;
    args.is_last = i == (filter_count - 1);
    elems[i].filter = filters[i];
    elems[i].channel_data = user_data;
    elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
    user_data += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_channel_data);
    call_size += ROUND_UP_TO_ALIGNMENT_SIZE(filters[i]->sizeof_call_data);
  }

  /* Sanity check: we consumed exactly the number of bytes that
     grpc_channel_stack_size promised for this filter list. */
  GPR_ASSERT(user_data > (char *)stack);
  GPR_ASSERT((uintptr_t)(user_data - (char *)stack) ==
             grpc_channel_stack_size(filters, filter_count));

  stack->call_stack_size = call_size;
}
| 145 |
| 146 void grpc_channel_stack_destroy(grpc_exec_ctx *exec_ctx, |
| 147 grpc_channel_stack *stack) { |
| 148 grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(stack); |
| 149 size_t count = stack->count; |
| 150 size_t i; |
| 151 |
| 152 /* destroy per-filter data */ |
| 153 for (i = 0; i < count; i++) { |
| 154 channel_elems[i].filter->destroy_channel_elem(exec_ctx, &channel_elems[i]); |
| 155 } |
| 156 } |
| 157 |
/* Initialize a call stack in place, mirroring the filter list of the given
   channel stack.  Each call element copies its filter vtable and
   channel_data from the corresponding channel element, gets a slice of the
   trailing per-call data region, and has init_call_elem invoked. */
void grpc_call_stack_init(grpc_exec_ctx *exec_ctx,
                          grpc_channel_stack *channel_stack, int initial_refs,
                          grpc_iomgr_cb_func destroy, void *destroy_arg,
                          grpc_call_context_element *context,
                          const void *transport_server_data,
                          grpc_call_stack *call_stack) {
  grpc_channel_element *channel_elems = CHANNEL_ELEMS_FROM_STACK(channel_stack);
  grpc_call_element_args args;
  size_t count = channel_stack->count;
  grpc_call_element *call_elems;
  char *user_data; /* cursor into the per-filter call-data region */
  size_t i;

  call_stack->count = count;
  GRPC_STREAM_REF_INIT(&call_stack->refcount, initial_refs, destroy,
                       destroy_arg, "CALL_STACK");
  call_elems = CALL_ELEMS_FROM_STACK(call_stack);
  /* Per-filter call data starts right after the element array. */
  user_data = ((char *)call_elems) +
              ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element));

  /* init per-filter data */
  for (i = 0; i < count; i++) {
    args.call_stack = call_stack;
    args.server_transport_data = transport_server_data;
    args.context = context;
    call_elems[i].filter = channel_elems[i].filter;
    call_elems[i].channel_data = channel_elems[i].channel_data;
    call_elems[i].call_data = user_data;
    call_elems[i].filter->init_call_elem(exec_ctx, &call_elems[i], &args);
    user_data +=
        ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
  }
}
| 191 |
| 192 void grpc_call_stack_set_pollset(grpc_exec_ctx *exec_ctx, |
| 193 grpc_call_stack *call_stack, |
| 194 grpc_pollset *pollset) { |
| 195 size_t count = call_stack->count; |
| 196 grpc_call_element *call_elems; |
| 197 char *user_data; |
| 198 size_t i; |
| 199 |
| 200 call_elems = CALL_ELEMS_FROM_STACK(call_stack); |
| 201 user_data = ((char *)call_elems) + |
| 202 ROUND_UP_TO_ALIGNMENT_SIZE(count * sizeof(grpc_call_element)); |
| 203 |
| 204 /* init per-filter data */ |
| 205 for (i = 0; i < count; i++) { |
| 206 call_elems[i].filter->set_pollset(exec_ctx, &call_elems[i], pollset); |
| 207 user_data += |
| 208 ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data); |
| 209 } |
| 210 } |
| 211 |
/* Deliberate no-op: filters that do not care about the pollset can install
   this as their set_pollset vtable entry.  All parameters are ignored. */
void grpc_call_stack_ignore_set_pollset(grpc_exec_ctx *exec_ctx,
                                        grpc_call_element *elem,
                                        grpc_pollset *pollset) {}
| 215 |
| 216 void grpc_call_stack_destroy(grpc_exec_ctx *exec_ctx, grpc_call_stack *stack) { |
| 217 grpc_call_element *elems = CALL_ELEMS_FROM_STACK(stack); |
| 218 size_t count = stack->count; |
| 219 size_t i; |
| 220 |
| 221 /* destroy per-filter data */ |
| 222 for (i = 0; i < count; i++) { |
| 223 elems[i].filter->destroy_call_elem(exec_ctx, &elems[i]); |
| 224 } |
| 225 } |
| 226 |
| 227 void grpc_call_next_op(grpc_exec_ctx *exec_ctx, grpc_call_element *elem, |
| 228 grpc_transport_stream_op *op) { |
| 229 grpc_call_element *next_elem = elem + 1; |
| 230 next_elem->filter->start_transport_stream_op(exec_ctx, next_elem, op); |
| 231 } |
| 232 |
| 233 char *grpc_call_next_get_peer(grpc_exec_ctx *exec_ctx, |
| 234 grpc_call_element *elem) { |
| 235 grpc_call_element *next_elem = elem + 1; |
| 236 return next_elem->filter->get_peer(exec_ctx, next_elem); |
| 237 } |
| 238 |
| 239 void grpc_channel_next_op(grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, |
| 240 grpc_transport_op *op) { |
| 241 grpc_channel_element *next_elem = elem + 1; |
| 242 next_elem->filter->start_transport_op(exec_ctx, next_elem, op); |
| 243 } |
| 244 |
| 245 grpc_channel_stack *grpc_channel_stack_from_top_element( |
| 246 grpc_channel_element *elem) { |
| 247 return (grpc_channel_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE( |
| 248 sizeof(grpc_channel_stack))); |
| 249 } |
| 250 |
| 251 grpc_call_stack *grpc_call_stack_from_top_element(grpc_call_element *elem) { |
| 252 return (grpc_call_stack *)((char *)(elem)-ROUND_UP_TO_ALIGNMENT_SIZE( |
| 253 sizeof(grpc_call_stack))); |
| 254 } |
| 255 |
| 256 void grpc_call_element_send_cancel(grpc_exec_ctx *exec_ctx, |
| 257 grpc_call_element *cur_elem) { |
| 258 grpc_transport_stream_op op; |
| 259 memset(&op, 0, sizeof(op)); |
| 260 op.cancel_with_status = GRPC_STATUS_CANCELLED; |
| 261 grpc_call_next_op(exec_ctx, cur_elem, &op); |
| 262 } |
OLD | NEW |