Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(677)

Side by Side Diff: third_party/grpc/src/core/channel/client_channel.c

Issue 1932353002: Initial checkin of gRPC to third_party/ Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 /*
2 *
3 * Copyright 2015-2016, Google Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34 #include "src/core/channel/client_channel.h"
35
36 #include <stdio.h>
37 #include <string.h>
38
39 #include <grpc/support/alloc.h>
40 #include <grpc/support/log.h>
41 #include <grpc/support/sync.h>
42 #include <grpc/support/useful.h>
43
44 #include "src/core/channel/channel_args.h"
45 #include "src/core/channel/connected_channel.h"
46 #include "src/core/channel/subchannel_call_holder.h"
47 #include "src/core/iomgr/iomgr.h"
48 #include "src/core/profiling/timers.h"
49 #include "src/core/support/string.h"
50 #include "src/core/surface/channel.h"
51 #include "src/core/transport/connectivity_state.h"
52
/* Client channel implementation */

/** Per-call state: all per-call bookkeeping (subchannel selection, op
    buffering) is delegated to grpc_subchannel_call_holder. */
typedef grpc_subchannel_call_holder call_data;
56
/** Channel-wide state for the client channel filter. Per the comment on
    mu_config, every field declared after the mutex is guarded by it. */
typedef struct client_channel_channel_data {
  /** resolver for this channel */
  grpc_resolver *resolver;
  /** have we started resolving this channel (boolean flag) */
  int started_resolving;

  /** mutex protecting client configuration, including all
      variables below in this data structure */
  gpr_mu mu_config;
  /** currently active load balancer - guarded by mu_config */
  grpc_lb_policy *lb_policy;
  /** incoming configuration - set by resolver.next
      guarded by mu_config */
  grpc_client_config *incoming_configuration;
  /** a list of closures that are all waiting for config to come in */
  grpc_closure_list waiting_for_config_closures;
  /** resolver callback (points at cc_on_config_changed) */
  grpc_closure on_config_changed;
  /** connectivity state being tracked */
  grpc_connectivity_state_tracker state_tracker;
  /** when an lb_policy arrives, should we try to exit idle (boolean flag) */
  int exit_idle_when_lb_policy_arrives;
  /** owning stack */
  grpc_channel_stack *owning_stack;
  /** interested parties (owned) */
  grpc_pollset_set *interested_parties;
} channel_data;
84
/** We create one watcher for each new lb_policy that is returned from a
    resolver, to watch for state changes from the lb_policy. When a state
    change is seen, we update the channel, and create a new watcher. */
typedef struct {
  channel_data *chand;
  /** runs on_lb_policy_state_changed when the policy's state changes */
  grpc_closure on_changed;
  /** state the policy was in when the watch was armed; its address is passed
      to grpc_lb_policy_notify_on_state_change, which presumably updates it
      in place before on_changed fires - see watch_lb_policy */
  grpc_connectivity_state state;
  /** the policy being watched (used to detect stale notifications) */
  grpc_lb_policy *lb_policy;
} lb_policy_connectivity_watcher;
96
/* A call waiting for configuration.
   NOTE(review): this type appears unused within this file; since it is
   file-local it looks like dead code - confirm and consider removing. */
typedef struct {
  grpc_closure closure;
  grpc_call_element *elem;
} waiting_call;
101
102 static char *cc_get_peer(grpc_exec_ctx *exec_ctx, grpc_call_element *elem) {
103 return grpc_subchannel_call_holder_get_peer(exec_ctx, elem->call_data);
104 }
105
106 static void cc_start_transport_stream_op(grpc_exec_ctx *exec_ctx,
107 grpc_call_element *elem,
108 grpc_transport_stream_op *op) {
109 GRPC_CALL_LOG_OP(GPR_INFO, elem, op);
110 grpc_subchannel_call_holder_perform_op(exec_ctx, elem->call_data, op);
111 }
112
/* Forward declaration: defined below. Needed here because the state-change
   handler re-arms the watch by calling it again. */
static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
                            grpc_lb_policy *lb_policy,
                            grpc_connectivity_state current_state);
116
/* Handles a connectivity-state change reported by an LB policy.
   Must be called with w->chand->mu_config held. */
static void on_lb_policy_state_changed_locked(
    grpc_exec_ctx *exec_ctx, lb_policy_connectivity_watcher *w) {
  grpc_connectivity_state publish_state = w->state;
  /* check if the notification is for a stale policy */
  if (w->lb_policy != w->chand->lb_policy) return;

  if (publish_state == GRPC_CHANNEL_FATAL_FAILURE &&
      w->chand->resolver != NULL) {
    /* the policy has terminally failed but we can still resolve: publish
       transient failure instead, poke the resolver, and drop the policy */
    publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
    grpc_resolver_channel_saw_error(exec_ctx, w->chand->resolver);
    GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
    w->chand->lb_policy = NULL;
  }
  grpc_connectivity_state_set(exec_ctx, &w->chand->state_tracker, publish_state,
                              "lb_changed");
  if (w->state != GRPC_CHANNEL_FATAL_FAILURE) {
    /* keep watching unless the policy has terminally failed */
    watch_lb_policy(exec_ctx, w->chand, w->lb_policy, w->state);
  }
}
136
137 static void on_lb_policy_state_changed(grpc_exec_ctx *exec_ctx, void *arg,
138 bool iomgr_success) {
139 lb_policy_connectivity_watcher *w = arg;
140
141 gpr_mu_lock(&w->chand->mu_config);
142 on_lb_policy_state_changed_locked(exec_ctx, w);
143 gpr_mu_unlock(&w->chand->mu_config);
144
145 GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
146 gpr_free(w);
147 }
148
149 static void watch_lb_policy(grpc_exec_ctx *exec_ctx, channel_data *chand,
150 grpc_lb_policy *lb_policy,
151 grpc_connectivity_state current_state) {
152 lb_policy_connectivity_watcher *w = gpr_malloc(sizeof(*w));
153 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
154
155 w->chand = chand;
156 grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
157 w->state = current_state;
158 w->lb_policy = lb_policy;
159 grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
160 &w->on_changed);
161 }
162
/* Resolver callback: runs when a new client configuration arrives (or the
   resolver terminates). Installs the new LB policy, wakes closures waiting
   for configuration, publishes connectivity state, and either requests the
   next resolution or tears the resolver down. */
static void cc_on_config_changed(grpc_exec_ctx *exec_ctx, void *arg,
                                 bool iomgr_success) {
  channel_data *chand = arg;
  grpc_lb_policy *lb_policy = NULL;
  grpc_lb_policy *old_lb_policy;
  grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  int exit_idle = 0;

  if (chand->incoming_configuration != NULL) {
    lb_policy = grpc_client_config_get_lb_policy(chand->incoming_configuration);
    if (lb_policy != NULL) {
      /* two refs: "channel" is handed to chand->lb_policy below;
         "config_change" is held locally until the end of this function */
      GRPC_LB_POLICY_REF(lb_policy, "channel");
      GRPC_LB_POLICY_REF(lb_policy, "config_change");
      state = grpc_lb_policy_check_connectivity(exec_ctx, lb_policy);
    }

    grpc_client_config_unref(exec_ctx, chand->incoming_configuration);
  }

  chand->incoming_configuration = NULL;

  if (lb_policy != NULL) {
    grpc_pollset_set_add_pollset_set(exec_ctx, lb_policy->interested_parties,
                                     chand->interested_parties);
  }

  gpr_mu_lock(&chand->mu_config);
  old_lb_policy = chand->lb_policy;
  chand->lb_policy = lb_policy;
  /* flush waiters when a policy is now available, or when the channel has
     been disconnected (waiters must not stay queued forever) */
  if (lb_policy != NULL || chand->resolver == NULL /* disconnected */) {
    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
                               NULL);
  }
  if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
    /* a deferred exit-idle request is pending: take a ref so the policy is
       safe to use after the lock is dropped */
    GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
    exit_idle = 1;
    chand->exit_idle_when_lb_policy_arrives = 0;
  }

  if (iomgr_success && chand->resolver) {
    /* normal path: publish state, watch the new policy, and request the next
       configuration (stack ref is dropped at the bottom of this function) */
    grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state,
                                "new_lb+resolver");
    if (lb_policy != NULL) {
      watch_lb_policy(exec_ctx, chand, lb_policy, state);
    }
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver,
                       &chand->incoming_configuration,
                       &chand->on_config_changed);
    gpr_mu_unlock(&chand->mu_config);
  } else {
    /* resolver is gone (or this callback was cancelled): shut it down and
       mark the channel as irrecoverably failed */
    if (chand->resolver != NULL) {
      grpc_resolver_shutdown(exec_ctx, chand->resolver);
      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
      chand->resolver = NULL;
    }
    grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
                                GRPC_CHANNEL_FATAL_FAILURE, "resolver_gone");
    gpr_mu_unlock(&chand->mu_config);
  }

  if (exit_idle) {
    grpc_lb_policy_exit_idle(exec_ctx, lb_policy);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "exit_idle");
  }

  if (old_lb_policy != NULL) {
    /* the replaced policy loses its pollset interest and its channel ref */
    grpc_pollset_set_del_pollset_set(
        exec_ctx, old_lb_policy->interested_parties, chand->interested_parties);
    GRPC_LB_POLICY_UNREF(exec_ctx, old_lb_policy, "channel");
  }

  if (lb_policy != NULL) {
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "config_change");
  }

  /* drop the ref taken when this resolution round was requested */
  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
}
241
/* Handles channel-level transport ops: pollset binding, connectivity-state
   subscriptions, pings, and disconnect. The op is fully processed
   synchronously here, so on_consumed is scheduled up front. */
static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
                                  grpc_channel_element *elem,
                                  grpc_transport_op *op) {
  channel_data *chand = elem->channel_data;

  grpc_exec_ctx_enqueue(exec_ctx, op->on_consumed, true, NULL);

  /* the client channel never accepts incoming streams */
  GPR_ASSERT(op->set_accept_stream == false);
  if (op->bind_pollset != NULL) {
    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
                                 op->bind_pollset);
  }

  gpr_mu_lock(&chand->mu_config);
  if (op->on_connectivity_state_change != NULL) {
    grpc_connectivity_state_notify_on_state_change(
        exec_ctx, &chand->state_tracker, op->connectivity_state,
        op->on_connectivity_state_change);
    /* fields are cleared to mark the sub-op as consumed */
    op->on_connectivity_state_change = NULL;
    op->connectivity_state = NULL;
  }

  if (op->send_ping != NULL) {
    if (chand->lb_policy == NULL) {
      /* no policy to route the ping through: fail it */
      grpc_exec_ctx_enqueue(exec_ctx, op->send_ping, false, NULL);
    } else {
      grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
      op->bind_pollset = NULL;
    }
    op->send_ping = NULL;
  }

  if (op->disconnect && chand->resolver != NULL) {
    /* disconnect: mark the channel terminally failed, then drop the
       resolver and (if present) the LB policy */
    grpc_connectivity_state_set(exec_ctx, &chand->state_tracker,
                                GRPC_CHANNEL_FATAL_FAILURE, "disconnect");
    grpc_resolver_shutdown(exec_ctx, chand->resolver);
    GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
    chand->resolver = NULL;
    if (chand->lb_policy != NULL) {
      grpc_pollset_set_del_pollset_set(exec_ctx,
                                       chand->lb_policy->interested_parties,
                                       chand->interested_parties);
      GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
      chand->lb_policy = NULL;
    }
  }
  gpr_mu_unlock(&chand->mu_config);
}
290
/* State for a subchannel pick that is queued until configuration arrives;
   its closure sits on chand->waiting_for_config_closures. */
typedef struct {
  grpc_metadata_batch *initial_metadata;
  /* out-param of the pick; set to NULL while queued to mark cancellation */
  grpc_connected_subchannel **connected_subchannel;
  grpc_closure *on_ready;
  grpc_call_element *elem;
  /* runs continue_picking with this struct as its argument */
  grpc_closure closure;
} continue_picking_args;
298
/* Forward declaration: continue_picking (below) re-enters the pick path. */
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *arg,
                              grpc_metadata_batch *initial_metadata,
                              grpc_connected_subchannel **connected_subchannel,
                              grpc_closure *on_ready);
303
304 static void continue_picking(grpc_exec_ctx *exec_ctx, void *arg, bool success) {
305 continue_picking_args *cpa = arg;
306 if (!success) {
307 grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, false, NULL);
308 } else if (cpa->connected_subchannel == NULL) {
309 /* cancelled, do nothing */
310 } else if (cc_pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
311 cpa->connected_subchannel, cpa->on_ready)) {
312 grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, true, NULL);
313 }
314 gpr_free(cpa);
315 }
316
/* Picks (or schedules the pick of) a connected subchannel for a call.
   Returns nonzero when the pick completed synchronously, zero when on_ready
   will be scheduled later (see continue_picking for how the result is used).
   A NULL initial_metadata means "cancel the pick for connected_subchannel". */
static int cc_pick_subchannel(grpc_exec_ctx *exec_ctx, void *elemp,
                              grpc_metadata_batch *initial_metadata,
                              grpc_connected_subchannel **connected_subchannel,
                              grpc_closure *on_ready) {
  grpc_call_element *elem = elemp;
  channel_data *chand = elem->channel_data;
  call_data *calld = elem->call_data;
  continue_picking_args *cpa;
  grpc_closure *closure;

  GPR_ASSERT(connected_subchannel);

  gpr_mu_lock(&chand->mu_config);
  if (initial_metadata == NULL) {
    /* cancellation: tell the LB policy, and fail any queued pick waiting on
       this same connected_subchannel slot */
    if (chand->lb_policy != NULL) {
      grpc_lb_policy_cancel_pick(exec_ctx, chand->lb_policy,
                                 connected_subchannel);
    }
    for (closure = chand->waiting_for_config_closures.head; closure != NULL;
         closure = grpc_closure_next(closure)) {
      cpa = closure->cb_arg;
      if (cpa->connected_subchannel == connected_subchannel) {
        /* NULL marks the queued pick as cancelled for continue_picking */
        cpa->connected_subchannel = NULL;
        grpc_exec_ctx_enqueue(exec_ctx, cpa->on_ready, false, NULL);
      }
    }
    gpr_mu_unlock(&chand->mu_config);
    return 1;
  }
  if (chand->lb_policy != NULL) {
    grpc_lb_policy *lb_policy = chand->lb_policy;
    int r;
    /* take a local ref so the policy stays valid across the unlocked call */
    GRPC_LB_POLICY_REF(lb_policy, "cc_pick_subchannel");
    gpr_mu_unlock(&chand->mu_config);
    r = grpc_lb_policy_pick(exec_ctx, lb_policy, calld->pollset,
                            initial_metadata, connected_subchannel, on_ready);
    GRPC_LB_POLICY_UNREF(exec_ctx, lb_policy, "cc_pick_subchannel");
    return r;
  }
  /* no LB policy yet: kick off resolution if it hasn't started ... */
  if (chand->resolver != NULL && !chand->started_resolving) {
    chand->started_resolving = 1;
    GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
    grpc_resolver_next(exec_ctx, chand->resolver,
                       &chand->incoming_configuration,
                       &chand->on_config_changed);
  }
  /* ... and queue this pick until configuration arrives */
  cpa = gpr_malloc(sizeof(*cpa));
  cpa->initial_metadata = initial_metadata;
  cpa->connected_subchannel = connected_subchannel;
  cpa->on_ready = on_ready;
  cpa->elem = elem;
  grpc_closure_init(&cpa->closure, continue_picking, cpa);
  grpc_closure_list_add(&chand->waiting_for_config_closures, &cpa->closure, 1);
  gpr_mu_unlock(&chand->mu_config);
  return 0;
}
373
374 /* Constructor for call_data */
375 static void init_call_elem(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
376 grpc_call_element_args *args) {
377 grpc_subchannel_call_holder_init(elem->call_data, cc_pick_subchannel, elem,
378 args->call_stack);
379 }
380
381 /* Destructor for call_data */
382 static void destroy_call_elem(grpc_exec_ctx *exec_ctx,
383 grpc_call_element *elem) {
384 grpc_subchannel_call_holder_destroy(exec_ctx, elem->call_data);
385 }
386
387 /* Constructor for channel_data */
388 static void init_channel_elem(grpc_exec_ctx *exec_ctx,
389 grpc_channel_element *elem,
390 grpc_channel_element_args *args) {
391 channel_data *chand = elem->channel_data;
392
393 memset(chand, 0, sizeof(*chand));
394
395 GPR_ASSERT(args->is_last);
396 GPR_ASSERT(elem->filter == &grpc_client_channel_filter);
397
398 gpr_mu_init(&chand->mu_config);
399 grpc_closure_init(&chand->on_config_changed, cc_on_config_changed, chand);
400 chand->owning_stack = args->channel_stack;
401
402 grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
403 "client_channel");
404 chand->interested_parties = grpc_pollset_set_create();
405 }
406
407 /* Destructor for channel_data */
408 static void destroy_channel_elem(grpc_exec_ctx *exec_ctx,
409 grpc_channel_element *elem) {
410 channel_data *chand = elem->channel_data;
411
412 if (chand->resolver != NULL) {
413 grpc_resolver_shutdown(exec_ctx, chand->resolver);
414 GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
415 }
416 if (chand->lb_policy != NULL) {
417 grpc_pollset_set_del_pollset_set(exec_ctx,
418 chand->lb_policy->interested_parties,
419 chand->interested_parties);
420 GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
421 }
422 grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
423 grpc_pollset_set_destroy(chand->interested_parties);
424 gpr_mu_destroy(&chand->mu_config);
425 }
426
427 static void cc_set_pollset(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
428 grpc_pollset *pollset) {
429 call_data *calld = elem->call_data;
430 calld->pollset = pollset;
431 }
432
/* Filter vtable for the client channel. Positional initializer - the field
   meanings are defined by grpc_channel_filter (see the channel stack
   header); entries here pair each hook with its cc_* implementation. */
const grpc_channel_filter grpc_client_channel_filter = {
    cc_start_transport_stream_op, cc_start_transport_op, sizeof(call_data),
    init_call_elem, cc_set_pollset, destroy_call_elem, sizeof(channel_data),
    init_channel_elem, destroy_channel_elem, cc_get_peer, "client-channel",
};
438
439 void grpc_client_channel_set_resolver(grpc_exec_ctx *exec_ctx,
440 grpc_channel_stack *channel_stack,
441 grpc_resolver *resolver) {
442 /* post construction initialization: set the transport setup pointer */
443 grpc_channel_element *elem = grpc_channel_stack_last_element(channel_stack);
444 channel_data *chand = elem->channel_data;
445 gpr_mu_lock(&chand->mu_config);
446 GPR_ASSERT(!chand->resolver);
447 chand->resolver = resolver;
448 GRPC_RESOLVER_REF(resolver, "channel");
449 if (!grpc_closure_list_empty(chand->waiting_for_config_closures) ||
450 chand->exit_idle_when_lb_policy_arrives) {
451 chand->started_resolving = 1;
452 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
453 grpc_resolver_next(exec_ctx, resolver, &chand->incoming_configuration,
454 &chand->on_config_changed);
455 }
456 gpr_mu_unlock(&chand->mu_config);
457 }
458
459 grpc_connectivity_state grpc_client_channel_check_connectivity_state(
460 grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
461 channel_data *chand = elem->channel_data;
462 grpc_connectivity_state out;
463 gpr_mu_lock(&chand->mu_config);
464 out = grpc_connectivity_state_check(&chand->state_tracker);
465 if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
466 if (chand->lb_policy != NULL) {
467 grpc_lb_policy_exit_idle(exec_ctx, chand->lb_policy);
468 } else {
469 chand->exit_idle_when_lb_policy_arrives = 1;
470 if (!chand->started_resolving && chand->resolver != NULL) {
471 GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
472 chand->started_resolving = 1;
473 grpc_resolver_next(exec_ctx, chand->resolver,
474 &chand->incoming_configuration,
475 &chand->on_config_changed);
476 }
477 }
478 }
479 gpr_mu_unlock(&chand->mu_config);
480 return out;
481 }
482
/* State for one external (API-level) connectivity-state watch: keeps the
   caller's pollset registered with the channel until the watch completes. */
typedef struct {
  channel_data *chand;
  grpc_pollset *pollset;
  /* user-supplied closure, invoked after cleanup */
  grpc_closure *on_complete;
  /* internal closure (on_external_watch_complete) given to the tracker */
  grpc_closure my_closure;
} external_connectivity_watcher;
489
490 static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
491 bool iomgr_success) {
492 external_connectivity_watcher *w = arg;
493 grpc_closure *follow_up = w->on_complete;
494 grpc_pollset_set_del_pollset(exec_ctx, w->chand->interested_parties,
495 w->pollset);
496 GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
497 "external_connectivity_watcher");
498 gpr_free(w);
499 follow_up->cb(exec_ctx, follow_up->cb_arg, iomgr_success);
500 }
501
502 void grpc_client_channel_watch_connectivity_state(
503 grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, grpc_pollset *pollset,
504 grpc_connectivity_state *state, grpc_closure *on_complete) {
505 channel_data *chand = elem->channel_data;
506 external_connectivity_watcher *w = gpr_malloc(sizeof(*w));
507 w->chand = chand;
508 w->pollset = pollset;
509 w->on_complete = on_complete;
510 grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
511 grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
512 GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
513 "external_connectivity_watcher");
514 gpr_mu_lock(&chand->mu_config);
515 grpc_connectivity_state_notify_on_state_change(
516 exec_ctx, &chand->state_tracker, state, &w->my_closure);
517 gpr_mu_unlock(&chand->mu_config);
518 }
OLDNEW
« no previous file with comments | « third_party/grpc/src/core/channel/client_channel.h ('k') | third_party/grpc/src/core/channel/client_uchannel.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698