Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(295)

Side by Side Diff: generated/googleapis/lib/vision/v1.dart

Issue 2734843002: Api-roll 46: 2017-03-06 (Closed)
Patch Set: Created 3 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // This is a generated file (see the discoveryapis_generator project). 1 // This is a generated file (see the discoveryapis_generator project).
2 2
3 library googleapis.vision.v1; 3 library googleapis.vision.v1;
4 4
5 import 'dart:core' as core; 5 import 'dart:core' as core;
6 import 'dart:async' as async; 6 import 'dart:async' as async;
7 import 'dart:convert' as convert; 7 import 'dart:convert' as convert;
8 8
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons;
10 import 'package:http/http.dart' as http; 10 import 'package:http/http.dart' as http;
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
118 } 118 }
119 if (imageContext != null) { 119 if (imageContext != null) {
120 _json["imageContext"] = (imageContext).toJson(); 120 _json["imageContext"] = (imageContext).toJson();
121 } 121 }
122 return _json; 122 return _json;
123 } 123 }
124 } 124 }
125 125
126 /** Response to an image annotation request. */ 126 /** Response to an image annotation request. */
127 class AnnotateImageResponse { 127 class AnnotateImageResponse {
128 /** If present, crop hints have completed successfully. */
129 CropHintsAnnotation cropHintsAnnotation;
128 /** 130 /**
129 * If set, represents the error message for the operation. 131 * If set, represents the error message for the operation.
130 * Note that filled-in image annotations are guaranteed to be 132 * Note that filled-in image annotations are guaranteed to be
131 * correct, even when `error` is set. 133 * correct, even when `error` is set.
132 */ 134 */
133 Status error; 135 Status error;
134 /** If present, face detection has completed successfully. */ 136 /** If present, face detection has completed successfully. */
135 core.List<FaceAnnotation> faceAnnotations; 137 core.List<FaceAnnotation> faceAnnotations;
138 /**
139 * If present, text (OCR) detection or document (OCR) text detection has
140 * completed successfully.
141 * This annotation provides the structural hierarchy for the OCR detected
142 * text.
143 */
144 TextAnnotation fullTextAnnotation;
136 /** If present, image properties were extracted successfully. */ 145 /** If present, image properties were extracted successfully. */
137 ImageProperties imagePropertiesAnnotation; 146 ImageProperties imagePropertiesAnnotation;
138 /** If present, label detection has completed successfully. */ 147 /** If present, label detection has completed successfully. */
139 core.List<EntityAnnotation> labelAnnotations; 148 core.List<EntityAnnotation> labelAnnotations;
140 /** If present, landmark detection has completed successfully. */ 149 /** If present, landmark detection has completed successfully. */
141 core.List<EntityAnnotation> landmarkAnnotations; 150 core.List<EntityAnnotation> landmarkAnnotations;
142 /** If present, logo detection has completed successfully. */ 151 /** If present, logo detection has completed successfully. */
143 core.List<EntityAnnotation> logoAnnotations; 152 core.List<EntityAnnotation> logoAnnotations;
144 /** If present, safe-search annotation has completed successfully. */ 153 /** If present, safe-search annotation has completed successfully. */
145 SafeSearchAnnotation safeSearchAnnotation; 154 SafeSearchAnnotation safeSearchAnnotation;
146 /** 155 /** If present, text (OCR) detection has completed successfully. */
147 * If present, text (OCR) detection or document (OCR) text detection has
148 * completed successfully.
149 */
150 core.List<EntityAnnotation> textAnnotations; 156 core.List<EntityAnnotation> textAnnotations;
157 /** If present, web detection has completed successfully. */
158 WebDetection webDetection;
151 159
152 AnnotateImageResponse(); 160 AnnotateImageResponse();
153 161
154 AnnotateImageResponse.fromJson(core.Map _json) { 162 AnnotateImageResponse.fromJson(core.Map _json) {
163 if (_json.containsKey("cropHintsAnnotation")) {
164 cropHintsAnnotation = new CropHintsAnnotation.fromJson(_json["cropHintsAnn otation"]);
165 }
155 if (_json.containsKey("error")) { 166 if (_json.containsKey("error")) {
156 error = new Status.fromJson(_json["error"]); 167 error = new Status.fromJson(_json["error"]);
157 } 168 }
158 if (_json.containsKey("faceAnnotations")) { 169 if (_json.containsKey("faceAnnotations")) {
159 faceAnnotations = _json["faceAnnotations"].map((value) => new FaceAnnotati on.fromJson(value)).toList(); 170 faceAnnotations = _json["faceAnnotations"].map((value) => new FaceAnnotati on.fromJson(value)).toList();
160 } 171 }
172 if (_json.containsKey("fullTextAnnotation")) {
173 fullTextAnnotation = new TextAnnotation.fromJson(_json["fullTextAnnotation "]);
174 }
161 if (_json.containsKey("imagePropertiesAnnotation")) { 175 if (_json.containsKey("imagePropertiesAnnotation")) {
162 imagePropertiesAnnotation = new ImageProperties.fromJson(_json["imagePrope rtiesAnnotation"]); 176 imagePropertiesAnnotation = new ImageProperties.fromJson(_json["imagePrope rtiesAnnotation"]);
163 } 177 }
164 if (_json.containsKey("labelAnnotations")) { 178 if (_json.containsKey("labelAnnotations")) {
165 labelAnnotations = _json["labelAnnotations"].map((value) => new EntityAnno tation.fromJson(value)).toList(); 179 labelAnnotations = _json["labelAnnotations"].map((value) => new EntityAnno tation.fromJson(value)).toList();
166 } 180 }
167 if (_json.containsKey("landmarkAnnotations")) { 181 if (_json.containsKey("landmarkAnnotations")) {
168 landmarkAnnotations = _json["landmarkAnnotations"].map((value) => new Enti tyAnnotation.fromJson(value)).toList(); 182 landmarkAnnotations = _json["landmarkAnnotations"].map((value) => new Enti tyAnnotation.fromJson(value)).toList();
169 } 183 }
170 if (_json.containsKey("logoAnnotations")) { 184 if (_json.containsKey("logoAnnotations")) {
171 logoAnnotations = _json["logoAnnotations"].map((value) => new EntityAnnota tion.fromJson(value)).toList(); 185 logoAnnotations = _json["logoAnnotations"].map((value) => new EntityAnnota tion.fromJson(value)).toList();
172 } 186 }
173 if (_json.containsKey("safeSearchAnnotation")) { 187 if (_json.containsKey("safeSearchAnnotation")) {
174 safeSearchAnnotation = new SafeSearchAnnotation.fromJson(_json["safeSearch Annotation"]); 188 safeSearchAnnotation = new SafeSearchAnnotation.fromJson(_json["safeSearch Annotation"]);
175 } 189 }
176 if (_json.containsKey("textAnnotations")) { 190 if (_json.containsKey("textAnnotations")) {
177 textAnnotations = _json["textAnnotations"].map((value) => new EntityAnnota tion.fromJson(value)).toList(); 191 textAnnotations = _json["textAnnotations"].map((value) => new EntityAnnota tion.fromJson(value)).toList();
178 } 192 }
193 if (_json.containsKey("webDetection")) {
194 webDetection = new WebDetection.fromJson(_json["webDetection"]);
195 }
179 } 196 }
180 197
181 core.Map toJson() { 198 core.Map toJson() {
182 var _json = new core.Map(); 199 var _json = new core.Map();
200 if (cropHintsAnnotation != null) {
201 _json["cropHintsAnnotation"] = (cropHintsAnnotation).toJson();
202 }
183 if (error != null) { 203 if (error != null) {
184 _json["error"] = (error).toJson(); 204 _json["error"] = (error).toJson();
185 } 205 }
186 if (faceAnnotations != null) { 206 if (faceAnnotations != null) {
187 _json["faceAnnotations"] = faceAnnotations.map((value) => (value).toJson() ).toList(); 207 _json["faceAnnotations"] = faceAnnotations.map((value) => (value).toJson() ).toList();
188 } 208 }
209 if (fullTextAnnotation != null) {
210 _json["fullTextAnnotation"] = (fullTextAnnotation).toJson();
211 }
189 if (imagePropertiesAnnotation != null) { 212 if (imagePropertiesAnnotation != null) {
190 _json["imagePropertiesAnnotation"] = (imagePropertiesAnnotation).toJson(); 213 _json["imagePropertiesAnnotation"] = (imagePropertiesAnnotation).toJson();
191 } 214 }
192 if (labelAnnotations != null) { 215 if (labelAnnotations != null) {
193 _json["labelAnnotations"] = labelAnnotations.map((value) => (value).toJson ()).toList(); 216 _json["labelAnnotations"] = labelAnnotations.map((value) => (value).toJson ()).toList();
194 } 217 }
195 if (landmarkAnnotations != null) { 218 if (landmarkAnnotations != null) {
196 _json["landmarkAnnotations"] = landmarkAnnotations.map((value) => (value). toJson()).toList(); 219 _json["landmarkAnnotations"] = landmarkAnnotations.map((value) => (value). toJson()).toList();
197 } 220 }
198 if (logoAnnotations != null) { 221 if (logoAnnotations != null) {
199 _json["logoAnnotations"] = logoAnnotations.map((value) => (value).toJson() ).toList(); 222 _json["logoAnnotations"] = logoAnnotations.map((value) => (value).toJson() ).toList();
200 } 223 }
201 if (safeSearchAnnotation != null) { 224 if (safeSearchAnnotation != null) {
202 _json["safeSearchAnnotation"] = (safeSearchAnnotation).toJson(); 225 _json["safeSearchAnnotation"] = (safeSearchAnnotation).toJson();
203 } 226 }
204 if (textAnnotations != null) { 227 if (textAnnotations != null) {
205 _json["textAnnotations"] = textAnnotations.map((value) => (value).toJson() ).toList(); 228 _json["textAnnotations"] = textAnnotations.map((value) => (value).toJson() ).toList();
206 } 229 }
230 if (webDetection != null) {
231 _json["webDetection"] = (webDetection).toJson();
232 }
207 return _json; 233 return _json;
208 } 234 }
209 } 235 }
210 236
211 /** 237 /**
212 * Multiple image annotation requests are batched into a single service call. 238 * Multiple image annotation requests are batched into a single service call.
213 */ 239 */
214 class BatchAnnotateImagesRequest { 240 class BatchAnnotateImagesRequest {
215 /** Individual image annotation requests for this batch. */ 241 /** Individual image annotation requests for this batch. */
216 core.List<AnnotateImageRequest> requests; 242 core.List<AnnotateImageRequest> requests;
(...skipping 30 matching lines...) Expand all
247 273
248 core.Map toJson() { 274 core.Map toJson() {
249 var _json = new core.Map(); 275 var _json = new core.Map();
250 if (responses != null) { 276 if (responses != null) {
251 _json["responses"] = responses.map((value) => (value).toJson()).toList(); 277 _json["responses"] = responses.map((value) => (value).toJson()).toList();
252 } 278 }
253 return _json; 279 return _json;
254 } 280 }
255 } 281 }
256 282
283 /** Logical element on the page. */
284 class Block {
285 /**
286 * Detected block type (text, image etc) for this block.
287 * Possible string values are:
288 * - "UNKNOWN" : Unknown block type.
289 * - "TEXT" : Regular text block.
290 * - "TABLE" : Table block.
291 * - "PICTURE" : Image block.
292 * - "RULER" : Horizontal/vertical line box.
293 * - "BARCODE" : Barcode block.
294 */
295 core.String blockType;
296 /**
297 * The bounding box for the block.
298 * The vertices are in the order of top-left, top-right, bottom-right,
299 * bottom-left. When a rotation of the bounding box is detected the rotation
300 * is represented as around the top-left corner as defined when the text is
301 * read in the 'natural' orientation.
302 * For example:
303 * * when the text is horizontal it might look like:
304 * 0----1
305 * | |
306 * 3----2
307 * * when it's rotated 180 degrees around the top-left corner it becomes:
308 * 2----3
309 * | |
310 * 1----0
311 * and the vertice order will still be (0, 1, 2, 3).
312 */
313 BoundingPoly boundingBox;
314 /** List of paragraphs in this block (if this blocks is of type text). */
315 core.List<Paragraph> paragraphs;
316 /** Additional information detected for the block. */
317 TextProperty property;
318
319 Block();
320
321 Block.fromJson(core.Map _json) {
322 if (_json.containsKey("blockType")) {
323 blockType = _json["blockType"];
324 }
325 if (_json.containsKey("boundingBox")) {
326 boundingBox = new BoundingPoly.fromJson(_json["boundingBox"]);
327 }
328 if (_json.containsKey("paragraphs")) {
329 paragraphs = _json["paragraphs"].map((value) => new Paragraph.fromJson(val ue)).toList();
330 }
331 if (_json.containsKey("property")) {
332 property = new TextProperty.fromJson(_json["property"]);
333 }
334 }
335
336 core.Map toJson() {
337 var _json = new core.Map();
338 if (blockType != null) {
339 _json["blockType"] = blockType;
340 }
341 if (boundingBox != null) {
342 _json["boundingBox"] = (boundingBox).toJson();
343 }
344 if (paragraphs != null) {
345 _json["paragraphs"] = paragraphs.map((value) => (value).toJson()).toList() ;
346 }
347 if (property != null) {
348 _json["property"] = (property).toJson();
349 }
350 return _json;
351 }
352 }
353
257 /** A bounding polygon for the detected image annotation. */ 354 /** A bounding polygon for the detected image annotation. */
258 class BoundingPoly { 355 class BoundingPoly {
259 /** The bounding polygon vertices. */ 356 /** The bounding polygon vertices. */
260 core.List<Vertex> vertices; 357 core.List<Vertex> vertices;
261 358
262 BoundingPoly(); 359 BoundingPoly();
263 360
264 BoundingPoly.fromJson(core.Map _json) { 361 BoundingPoly.fromJson(core.Map _json) {
265 if (_json.containsKey("vertices")) { 362 if (_json.containsKey("vertices")) {
266 vertices = _json["vertices"].map((value) => new Vertex.fromJson(value)).to List(); 363 vertices = _json["vertices"].map((value) => new Vertex.fromJson(value)).to List();
(...skipping 219 matching lines...) Expand 10 before | Expand all | Expand 10 after
486 if (pixelFraction != null) { 583 if (pixelFraction != null) {
487 _json["pixelFraction"] = pixelFraction; 584 _json["pixelFraction"] = pixelFraction;
488 } 585 }
489 if (score != null) { 586 if (score != null) {
490 _json["score"] = score; 587 _json["score"] = score;
491 } 588 }
492 return _json; 589 return _json;
493 } 590 }
494 } 591 }
495 592
593 /**
594 * Single crop hint that is used to generate a new crop when serving an image.
595 */
596 class CropHint {
597 /**
598 * The bounding polygon for the crop region. The coordinates of the bounding
599 * box are in the original image's scale, as returned in `ImageParams`.
600 */
601 BoundingPoly boundingPoly;
602 /** Confidence of this being a salient region. Range [0, 1]. */
603 core.double confidence;
604 /**
605 * Fraction of importance of this salient region with respect to the original
606 * image.
607 */
608 core.double importanceFraction;
609
610 CropHint();
611
612 CropHint.fromJson(core.Map _json) {
613 if (_json.containsKey("boundingPoly")) {
614 boundingPoly = new BoundingPoly.fromJson(_json["boundingPoly"]);
615 }
616 if (_json.containsKey("confidence")) {
617 confidence = _json["confidence"];
618 }
619 if (_json.containsKey("importanceFraction")) {
620 importanceFraction = _json["importanceFraction"];
621 }
622 }
623
624 core.Map toJson() {
625 var _json = new core.Map();
626 if (boundingPoly != null) {
627 _json["boundingPoly"] = (boundingPoly).toJson();
628 }
629 if (confidence != null) {
630 _json["confidence"] = confidence;
631 }
632 if (importanceFraction != null) {
633 _json["importanceFraction"] = importanceFraction;
634 }
635 return _json;
636 }
637 }
638
639 /**
640 * Set of crop hints that are used to generate new crops when serving images.
641 */
642 class CropHintsAnnotation {
643 /** Crop hint results. */
644 core.List<CropHint> cropHints;
645
646 CropHintsAnnotation();
647
648 CropHintsAnnotation.fromJson(core.Map _json) {
649 if (_json.containsKey("cropHints")) {
650 cropHints = _json["cropHints"].map((value) => new CropHint.fromJson(value) ).toList();
651 }
652 }
653
654 core.Map toJson() {
655 var _json = new core.Map();
656 if (cropHints != null) {
657 _json["cropHints"] = cropHints.map((value) => (value).toJson()).toList();
658 }
659 return _json;
660 }
661 }
662
663 /** Parameters for crop hints annotation request. */
664 class CropHintsParams {
665 /**
666 * Aspect ratios in floats, representing the ratio of the width to the height
667 * of the image. For example, if the desired aspect ratio is 4/3, the
668 * corresponding float value should be 1.33333. If not specified, the
669 * best possible crop is returned. The number of provided aspect ratios is
670 * limited to a maximum of 16; any aspect ratios provided after the 16th are
671 * ignored.
672 */
673 core.List<core.double> aspectRatios;
674
675 CropHintsParams();
676
677 CropHintsParams.fromJson(core.Map _json) {
678 if (_json.containsKey("aspectRatios")) {
679 aspectRatios = _json["aspectRatios"];
680 }
681 }
682
683 core.Map toJson() {
684 var _json = new core.Map();
685 if (aspectRatios != null) {
686 _json["aspectRatios"] = aspectRatios;
687 }
688 return _json;
689 }
690 }
691
692 /** Detected start or end of a structural component. */
693 class DetectedBreak {
694 /** True if break prepends the element. */
695 core.bool isPrefix;
696 /**
697 * Detected break type.
698 * Possible string values are:
699 * - "UNKNOWN" : Unknown break label type.
700 * - "SPACE" : Regular space.
701 * - "SURE_SPACE" : Sure space (very wide).
702 * - "EOL_SURE_SPACE" : Line-wrapping break.
703 * - "HYPHEN" : End-line hyphen that is not present in text; does
704 * - "LINE_BREAK" : not co-occur with SPACE, LEADER_SPACE, or
705 * LINE_BREAK.
706 * Line break that ends a paragraph.
707 */
708 core.String type;
709
710 DetectedBreak();
711
712 DetectedBreak.fromJson(core.Map _json) {
713 if (_json.containsKey("isPrefix")) {
714 isPrefix = _json["isPrefix"];
715 }
716 if (_json.containsKey("type")) {
717 type = _json["type"];
718 }
719 }
720
721 core.Map toJson() {
722 var _json = new core.Map();
723 if (isPrefix != null) {
724 _json["isPrefix"] = isPrefix;
725 }
726 if (type != null) {
727 _json["type"] = type;
728 }
729 return _json;
730 }
731 }
732
733 /** Detected language for a structural component. */
734 class DetectedLanguage {
735 /** Confidence of detected language. Range [0, 1]. */
736 core.double confidence;
737 /**
738 * The BCP-47 language code, such as "en-US" or "sr-Latn". For more
739 * information, see
740 * http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
741 */
742 core.String languageCode;
743
744 DetectedLanguage();
745
746 DetectedLanguage.fromJson(core.Map _json) {
747 if (_json.containsKey("confidence")) {
748 confidence = _json["confidence"];
749 }
750 if (_json.containsKey("languageCode")) {
751 languageCode = _json["languageCode"];
752 }
753 }
754
755 core.Map toJson() {
756 var _json = new core.Map();
757 if (confidence != null) {
758 _json["confidence"] = confidence;
759 }
760 if (languageCode != null) {
761 _json["languageCode"] = languageCode;
762 }
763 return _json;
764 }
765 }
766
496 /** Set of dominant colors and their corresponding scores. */ 767 /** Set of dominant colors and their corresponding scores. */
497 class DominantColorsAnnotation { 768 class DominantColorsAnnotation {
498 /** RGB color values with their score and pixel fraction. */ 769 /** RGB color values with their score and pixel fraction. */
499 core.List<ColorInfo> colors; 770 core.List<ColorInfo> colors;
500 771
501 DominantColorsAnnotation(); 772 DominantColorsAnnotation();
502 773
503 DominantColorsAnnotation.fromJson(core.Map _json) { 774 DominantColorsAnnotation.fromJson(core.Map _json) {
504 if (_json.containsKey("colors")) { 775 if (_json.containsKey("colors")) {
505 colors = _json["colors"].map((value) => new ColorInfo.fromJson(value)).toL ist(); 776 colors = _json["colors"].map((value) => new ColorInfo.fromJson(value)).toL ist();
(...skipping 389 matching lines...) Expand 10 before | Expand all | Expand 10 after
895 core.int maxResults; 1166 core.int maxResults;
896 /** 1167 /**
897 * The feature type. 1168 * The feature type.
898 * Possible string values are: 1169 * Possible string values are:
899 * - "TYPE_UNSPECIFIED" : Unspecified feature type. 1170 * - "TYPE_UNSPECIFIED" : Unspecified feature type.
900 * - "FACE_DETECTION" : Run face detection. 1171 * - "FACE_DETECTION" : Run face detection.
901 * - "LANDMARK_DETECTION" : Run landmark detection. 1172 * - "LANDMARK_DETECTION" : Run landmark detection.
902 * - "LOGO_DETECTION" : Run logo detection. 1173 * - "LOGO_DETECTION" : Run logo detection.
903 * - "LABEL_DETECTION" : Run label detection. 1174 * - "LABEL_DETECTION" : Run label detection.
904 * - "TEXT_DETECTION" : Run OCR. 1175 * - "TEXT_DETECTION" : Run OCR.
1176 * - "DOCUMENT_TEXT_DETECTION" : Run dense text document OCR. Takes precedence
1177 * when both
1178 * DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present.
905 * - "SAFE_SEARCH_DETECTION" : Run computer vision models to compute image 1179 * - "SAFE_SEARCH_DETECTION" : Run computer vision models to compute image
906 * safe-search properties. 1180 * safe-search properties.
907 * - "IMAGE_PROPERTIES" : Compute a set of image properties, such as the 1181 * - "IMAGE_PROPERTIES" : Compute a set of image properties, such as the
908 * image's dominant colors. 1182 * image's dominant colors.
1183 * - "CROP_HINTS" : Run crop hints.
1184 * - "WEB_DETECTION" : Run web detection.
909 */ 1185 */
910 core.String type; 1186 core.String type;
911 1187
912 Feature(); 1188 Feature();
913 1189
914 Feature.fromJson(core.Map _json) { 1190 Feature.fromJson(core.Map _json) {
915 if (_json.containsKey("maxResults")) { 1191 if (_json.containsKey("maxResults")) {
916 maxResults = _json["maxResults"]; 1192 maxResults = _json["maxResults"];
917 } 1193 }
918 if (_json.containsKey("type")) { 1194 if (_json.containsKey("type")) {
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after
972 } 1248 }
973 if (source != null) { 1249 if (source != null) {
974 _json["source"] = (source).toJson(); 1250 _json["source"] = (source).toJson();
975 } 1251 }
976 return _json; 1252 return _json;
977 } 1253 }
978 } 1254 }
979 1255
980 /** Image context and/or feature-specific parameters. */ 1256 /** Image context and/or feature-specific parameters. */
981 class ImageContext { 1257 class ImageContext {
1258 /** Parameters for crop hints annotation request. */
1259 CropHintsParams cropHintsParams;
982 /** 1260 /**
983 * List of languages to use for TEXT_DETECTION. In most cases, an empty value 1261 * List of languages to use for TEXT_DETECTION. In most cases, an empty value
984 * yields the best results since it enables automatic language detection. For 1262 * yields the best results since it enables automatic language detection. For
985 * languages based on the Latin alphabet, setting `language_hints` is not 1263 * languages based on the Latin alphabet, setting `language_hints` is not
986 * needed. In rare cases, when the language of the text in the image is known, 1264 * needed. In rare cases, when the language of the text in the image is known,
987 * setting a hint will help get better results (although it will be a 1265 * setting a hint will help get better results (although it will be a
988 * significant hindrance if the hint is wrong). Text detection returns an 1266 * significant hindrance if the hint is wrong). Text detection returns an
989 * error if one or more of the specified languages is not one of the 1267 * error if one or more of the specified languages is not one of the
990 * [supported languages](/vision/docs/languages). 1268 * [supported languages](/vision/docs/languages).
991 */ 1269 */
992 core.List<core.String> languageHints; 1270 core.List<core.String> languageHints;
993 /** lat/long rectangle that specifies the location of the image. */ 1271 /** lat/long rectangle that specifies the location of the image. */
994 LatLongRect latLongRect; 1272 LatLongRect latLongRect;
995 1273
996 ImageContext(); 1274 ImageContext();
997 1275
998 ImageContext.fromJson(core.Map _json) { 1276 ImageContext.fromJson(core.Map _json) {
1277 if (_json.containsKey("cropHintsParams")) {
1278 cropHintsParams = new CropHintsParams.fromJson(_json["cropHintsParams"]);
1279 }
999 if (_json.containsKey("languageHints")) { 1280 if (_json.containsKey("languageHints")) {
1000 languageHints = _json["languageHints"]; 1281 languageHints = _json["languageHints"];
1001 } 1282 }
1002 if (_json.containsKey("latLongRect")) { 1283 if (_json.containsKey("latLongRect")) {
1003 latLongRect = new LatLongRect.fromJson(_json["latLongRect"]); 1284 latLongRect = new LatLongRect.fromJson(_json["latLongRect"]);
1004 } 1285 }
1005 } 1286 }
1006 1287
1007 core.Map toJson() { 1288 core.Map toJson() {
1008 var _json = new core.Map(); 1289 var _json = new core.Map();
1290 if (cropHintsParams != null) {
1291 _json["cropHintsParams"] = (cropHintsParams).toJson();
1292 }
1009 if (languageHints != null) { 1293 if (languageHints != null) {
1010 _json["languageHints"] = languageHints; 1294 _json["languageHints"] = languageHints;
1011 } 1295 }
1012 if (latLongRect != null) { 1296 if (latLongRect != null) {
1013 _json["latLongRect"] = (latLongRect).toJson(); 1297 _json["latLongRect"] = (latLongRect).toJson();
1014 } 1298 }
1015 return _json; 1299 return _json;
1016 } 1300 }
1017 } 1301 }
1018 1302
(...skipping 23 matching lines...) Expand all
1042 class ImageSource { 1326 class ImageSource {
1043 /** 1327 /**
1044 * NOTE: For new code `image_uri` below is preferred. 1328 * NOTE: For new code `image_uri` below is preferred.
1045 * Google Cloud Storage image URI, which must be in the following form: 1329 * Google Cloud Storage image URI, which must be in the following form:
1046 * `gs://bucket_name/object_name` (for details, see 1330 * `gs://bucket_name/object_name` (for details, see
1047 * [Google Cloud Storage Request 1331 * [Google Cloud Storage Request
1048 * URIs](https://cloud.google.com/storage/docs/reference-uris)). 1332 * URIs](https://cloud.google.com/storage/docs/reference-uris)).
1049 * NOTE: Cloud Storage object versioning is not supported. 1333 * NOTE: Cloud Storage object versioning is not supported.
1050 */ 1334 */
1051 core.String gcsImageUri; 1335 core.String gcsImageUri;
1336 /**
1337 * Image URI which supports:
1338 * 1) Google Cloud Storage image URI, which must be in the following form:
1339 * `gs://bucket_name/object_name` (for details, see
1340 * [Google Cloud Storage Request
1341 * URIs](https://cloud.google.com/storage/docs/reference-uris)).
1342 * NOTE: Cloud Storage object versioning is not supported.
1343 * 2) Publicly accessible image HTTP/HTTPS URL.
1344 * This is preferred over the legacy `gcs_image_uri` above. When both
1345 * `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
1346 * precedence.
1347 */
1348 core.String imageUri;
1052 1349
1053 ImageSource(); 1350 ImageSource();
1054 1351
1055 ImageSource.fromJson(core.Map _json) { 1352 ImageSource.fromJson(core.Map _json) {
1056 if (_json.containsKey("gcsImageUri")) { 1353 if (_json.containsKey("gcsImageUri")) {
1057 gcsImageUri = _json["gcsImageUri"]; 1354 gcsImageUri = _json["gcsImageUri"];
1058 } 1355 }
1356 if (_json.containsKey("imageUri")) {
1357 imageUri = _json["imageUri"];
1358 }
1059 } 1359 }
1060 1360
1061 core.Map toJson() { 1361 core.Map toJson() {
1062 var _json = new core.Map(); 1362 var _json = new core.Map();
1063 if (gcsImageUri != null) { 1363 if (gcsImageUri != null) {
1064 _json["gcsImageUri"] = gcsImageUri; 1364 _json["gcsImageUri"] = gcsImageUri;
1065 } 1365 }
1366 if (imageUri != null) {
1367 _json["imageUri"] = imageUri;
1368 }
1066 return _json; 1369 return _json;
1067 } 1370 }
1068 } 1371 }
1069 1372
1070 /** 1373 /**
1071 * A face-specific landmark (for example, a face feature). 1374 * A face-specific landmark (for example, a face feature).
1072 * Landmark positions may fall outside the bounds of the image 1375 * Landmark positions may fall outside the bounds of the image
1073 * if the face is near one or more edges of the image. 1376 * if the face is near one or more edges of the image.
1074 * Therefore it is NOT guaranteed that `0 <= x < width` or 1377 * Therefore it is NOT guaranteed that `0 <= x < width` or
1075 * `0 <= y < height`. 1378 * `0 <= y < height`.
(...skipping 184 matching lines...) Expand 10 before | Expand all | Expand 10 after
1260 1563
1261 core.Map toJson() { 1564 core.Map toJson() {
1262 var _json = new core.Map(); 1565 var _json = new core.Map();
1263 if (latLng != null) { 1566 if (latLng != null) {
1264 _json["latLng"] = (latLng).toJson(); 1567 _json["latLng"] = (latLng).toJson();
1265 } 1568 }
1266 return _json; 1569 return _json;
1267 } 1570 }
1268 } 1571 }
1269 1572
1573 /** Detected page from OCR. */
1574 class Page {
1575 /** List of blocks of text, images etc on this page. */
1576 core.List<Block> blocks;
1577 /** Page height in pixels. */
1578 core.int height;
1579 /** Additional information detected on the page. */
1580 TextProperty property;
1581 /** Page width in pixels. */
1582 core.int width;
1583
1584 Page();
1585
1586 Page.fromJson(core.Map _json) {
1587 if (_json.containsKey("blocks")) {
1588 blocks = _json["blocks"].map((value) => new Block.fromJson(value)).toList( );
1589 }
1590 if (_json.containsKey("height")) {
1591 height = _json["height"];
1592 }
1593 if (_json.containsKey("property")) {
1594 property = new TextProperty.fromJson(_json["property"]);
1595 }
1596 if (_json.containsKey("width")) {
1597 width = _json["width"];
1598 }
1599 }
1600
1601 core.Map toJson() {
1602 var _json = new core.Map();
1603 if (blocks != null) {
1604 _json["blocks"] = blocks.map((value) => (value).toJson()).toList();
1605 }
1606 if (height != null) {
1607 _json["height"] = height;
1608 }
1609 if (property != null) {
1610 _json["property"] = (property).toJson();
1611 }
1612 if (width != null) {
1613 _json["width"] = width;
1614 }
1615 return _json;
1616 }
1617 }
1618
1619 /** Structural unit of text representing a number of words in certain order. */
1620 class Paragraph {
1621 /**
1622 * The bounding box for the paragraph.
1623 * The vertices are in the order of top-left, top-right, bottom-right,
1624 * bottom-left. When a rotation of the bounding box is detected the rotation
1625 * is represented as around the top-left corner as defined when the text is
1626 * read in the 'natural' orientation.
1627 * For example:
1628 * * when the text is horizontal it might look like:
1629 * 0----1
1630 * | |
1631 * 3----2
1632 * * when it's rotated 180 degrees around the top-left corner it becomes:
1633 * 2----3
1634 * | |
1635 * 1----0
1636 * and the vertice order will still be (0, 1, 2, 3).
1637 */
1638 BoundingPoly boundingBox;
1639 /** Additional information detected for the paragraph. */
1640 TextProperty property;
1641 /** List of words in this paragraph. */
1642 core.List<Word> words;
1643
1644 Paragraph();
1645
1646 Paragraph.fromJson(core.Map _json) {
1647 if (_json.containsKey("boundingBox")) {
1648 boundingBox = new BoundingPoly.fromJson(_json["boundingBox"]);
1649 }
1650 if (_json.containsKey("property")) {
1651 property = new TextProperty.fromJson(_json["property"]);
1652 }
1653 if (_json.containsKey("words")) {
1654 words = _json["words"].map((value) => new Word.fromJson(value)).toList();
1655 }
1656 }
1657
1658 core.Map toJson() {
1659 var _json = new core.Map();
1660 if (boundingBox != null) {
1661 _json["boundingBox"] = (boundingBox).toJson();
1662 }
1663 if (property != null) {
1664 _json["property"] = (property).toJson();
1665 }
1666 if (words != null) {
1667 _json["words"] = words.map((value) => (value).toJson()).toList();
1668 }
1669 return _json;
1670 }
1671 }
1672
1270 /** 1673 /**
1271 * A 3D position in the image, used primarily for Face detection landmarks. 1674 * A 3D position in the image, used primarily for Face detection landmarks.
1272 * A valid Position must have both x and y coordinates. 1675 * A valid Position must have both x and y coordinates.
1273 * The position coordinates are in the same scale as the original image. 1676 * The position coordinates are in the same scale as the original image.
1274 */ 1677 */
1275 class Position { 1678 class Position {
1276 /** X coordinate. */ 1679 /** X coordinate. */
1277 core.double x; 1680 core.double x;
1278 /** Y coordinate. */ 1681 /** Y coordinate. */
1279 core.double y; 1682 core.double y;
(...skipping 26 matching lines...) Expand all
1306 _json["z"] = z; 1709 _json["z"] = z;
1307 } 1710 }
1308 return _json; 1711 return _json;
1309 } 1712 }
1310 } 1713 }
1311 1714
/** A `Property` consists of a user-supplied name/value pair. */
class Property {
  /** Name of the property. */
  core.String name;
  /** Value of numeric properties. */
  core.String uint64Value;
  /** Value of the property. */
  core.String value;

  Property();

  Property.fromJson(core.Map json) {
    if (json.containsKey("name")) name = json["name"];
    if (json.containsKey("uint64Value")) uint64Value = json["uint64Value"];
    if (json.containsKey("value")) value = json["value"];
  }

  core.Map toJson() {
    final json = new core.Map();
    if (name != null) json["name"] = name;
    if (uint64Value != null) json["uint64Value"] = uint64Value;
    if (value != null) json["value"] = value;
    return json;
  }
}
1341 1752
1342 /** 1753 /**
1343 * Set of features pertaining to the image, computed by computer vision 1754 * Set of features pertaining to the image, computed by computer vision
1344 * methods over safe-search verticals (for example, adult, spoof, medical, 1755 * methods over safe-search verticals (for example, adult, spoof, medical,
(...skipping 194 matching lines...) Expand 10 before | Expand all | Expand 10 after
1539 if (details != null) { 1950 if (details != null) {
1540 _json["details"] = details; 1951 _json["details"] = details;
1541 } 1952 }
1542 if (message != null) { 1953 if (message != null) {
1543 _json["message"] = message; 1954 _json["message"] = message;
1544 } 1955 }
1545 return _json; 1956 return _json;
1546 } 1957 }
1547 } 1958 }
1548 1959
/** A single symbol representation. */
class Symbol {
  /**
   * The bounding box for the symbol.
   * The vertices are in the order of top-left, top-right, bottom-right,
   * bottom-left. When a rotation of the bounding box is detected the rotation
   * is represented as around the top-left corner as defined when the text is
   * read in the 'natural' orientation.
   * For example:
   * * when the text is horizontal it might look like:
   *    0----1
   *    |    |
   *    3----2
   * * when it's rotated 180 degrees around the top-left corner it becomes:
   *    2----3
   *    |    |
   *    1----0
   * and the vertice order will still be (0, 1, 2, 3).
   */
  BoundingPoly boundingBox;
  /** Additional information detected for the symbol. */
  TextProperty property;
  /** The actual UTF-8 representation of the symbol. */
  core.String text;

  Symbol();

  Symbol.fromJson(core.Map json) {
    if (json.containsKey("boundingBox")) {
      boundingBox = new BoundingPoly.fromJson(json["boundingBox"]);
    }
    if (json.containsKey("property")) {
      property = new TextProperty.fromJson(json["property"]);
    }
    if (json.containsKey("text")) text = json["text"];
  }

  core.Map toJson() {
    final json = new core.Map();
    if (boundingBox != null) json["boundingBox"] = boundingBox.toJson();
    if (property != null) json["property"] = property.toJson();
    if (text != null) json["text"] = text;
    return json;
  }
}
2013
/**
 * TextAnnotation contains a structured representation of OCR extracted text.
 * The hierarchy of an OCR extracted text structure is like this:
 *     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
 * Each structural component, starting from Page, may further have their own
 * properties. Properties describe detected languages, breaks etc.. Please
 * refer to the google.cloud.vision.v1.TextAnnotation.TextProperty message
 * definition below for more detail.
 */
class TextAnnotation {
  /** List of pages detected by OCR. */
  core.List<Page> pages;
  /** UTF-8 text detected on the pages. */
  core.String text;

  TextAnnotation();

  TextAnnotation.fromJson(core.Map json) {
    if (json.containsKey("pages")) {
      pages = json["pages"].map((value) => new Page.fromJson(value)).toList();
    }
    if (json.containsKey("text")) text = json["text"];
  }

  core.Map toJson() {
    final json = new core.Map();
    if (pages != null) {
      json["pages"] = pages.map((page) => page.toJson()).toList();
    }
    if (text != null) json["text"] = text;
    return json;
  }
}
2051
/** Additional information detected on the structural component. */
class TextProperty {
  /** Detected start or end of a text segment. */
  DetectedBreak detectedBreak;
  /** A list of detected languages together with confidence. */
  core.List<DetectedLanguage> detectedLanguages;

  TextProperty();

  TextProperty.fromJson(core.Map json) {
    if (json.containsKey("detectedBreak")) {
      detectedBreak = new DetectedBreak.fromJson(json["detectedBreak"]);
    }
    if (json.containsKey("detectedLanguages")) {
      detectedLanguages = json["detectedLanguages"]
          .map((value) => new DetectedLanguage.fromJson(value))
          .toList();
    }
  }

  core.Map toJson() {
    final json = new core.Map();
    if (detectedBreak != null) json["detectedBreak"] = detectedBreak.toJson();
    if (detectedLanguages != null) {
      json["detectedLanguages"] =
          detectedLanguages.map((language) => language.toJson()).toList();
    }
    return json;
  }
}
2081
1549 /** 2082 /**
1550 * A vertex represents a 2D point in the image. 2083 * A vertex represents a 2D point in the image.
1551 * NOTE: the vertex coordinates are in the same scale as the original image. 2084 * NOTE: the vertex coordinates are in the same scale as the original image.
1552 */ 2085 */
1553 class Vertex { 2086 class Vertex {
1554 /** X coordinate. */ 2087 /** X coordinate. */
1555 core.int x; 2088 core.int x;
1556 /** Y coordinate. */ 2089 /** Y coordinate. */
1557 core.int y; 2090 core.int y;
1558 2091
(...skipping 12 matching lines...) Expand all
1571 var _json = new core.Map(); 2104 var _json = new core.Map();
1572 if (x != null) { 2105 if (x != null) {
1573 _json["x"] = x; 2106 _json["x"] = x;
1574 } 2107 }
1575 if (y != null) { 2108 if (y != null) {
1576 _json["y"] = y; 2109 _json["y"] = y;
1577 } 2110 }
1578 return _json; 2111 return _json;
1579 } 2112 }
1580 } 2113 }
2114
/** Relevant information for the image from the Internet. */
class WebDetection {
  /**
   * Fully matching images from the Internet.
   * They're definite neardups and most often a copy of the query image with
   * merely a size change.
   */
  core.List<WebImage> fullMatchingImages;
  /** Web pages containing the matching images from the Internet. */
  core.List<WebPage> pagesWithMatchingImages;
  /**
   * Partial matching images from the Internet.
   * Those images are similar enough to share some key-point features. For
   * example an original image will likely have partial matching for its crops.
   */
  core.List<WebImage> partialMatchingImages;
  /** Deduced entities from similar images on the Internet. */
  core.List<WebEntity> webEntities;

  WebDetection();

  WebDetection.fromJson(core.Map json) {
    if (json.containsKey("fullMatchingImages")) {
      fullMatchingImages = json["fullMatchingImages"]
          .map((value) => new WebImage.fromJson(value))
          .toList();
    }
    if (json.containsKey("pagesWithMatchingImages")) {
      pagesWithMatchingImages = json["pagesWithMatchingImages"]
          .map((value) => new WebPage.fromJson(value))
          .toList();
    }
    if (json.containsKey("partialMatchingImages")) {
      partialMatchingImages = json["partialMatchingImages"]
          .map((value) => new WebImage.fromJson(value))
          .toList();
    }
    if (json.containsKey("webEntities")) {
      webEntities = json["webEntities"]
          .map((value) => new WebEntity.fromJson(value))
          .toList();
    }
  }

  core.Map toJson() {
    final json = new core.Map();
    if (fullMatchingImages != null) {
      json["fullMatchingImages"] =
          fullMatchingImages.map((image) => image.toJson()).toList();
    }
    if (pagesWithMatchingImages != null) {
      json["pagesWithMatchingImages"] =
          pagesWithMatchingImages.map((page) => page.toJson()).toList();
    }
    if (partialMatchingImages != null) {
      json["partialMatchingImages"] =
          partialMatchingImages.map((image) => image.toJson()).toList();
    }
    if (webEntities != null) {
      json["webEntities"] =
          webEntities.map((entity) => entity.toJson()).toList();
    }
    return json;
  }
}
2168
/** Entity deduced from similar images on the Internet. */
class WebEntity {
  /** Canonical description of the entity, in English. */
  core.String description;
  /** Opaque entity ID. */
  core.String entityId;
  /**
   * Overall relevancy score for the entity.
   * Not normalized and not comparable across different image queries.
   */
  core.double score;

  WebEntity();

  WebEntity.fromJson(core.Map json) {
    if (json.containsKey("description")) description = json["description"];
    if (json.containsKey("entityId")) entityId = json["entityId"];
    if (json.containsKey("score")) score = json["score"];
  }

  core.Map toJson() {
    final json = new core.Map();
    if (description != null) json["description"] = description;
    if (entityId != null) json["entityId"] = entityId;
    if (score != null) json["score"] = score;
    return json;
  }
}
2209
/** Metadata for online images. */
class WebImage {
  /**
   * Overall relevancy score for the image.
   * Not normalized and not comparable across different image queries.
   */
  core.double score;
  /** The result image URL. */
  core.String url;

  WebImage();

  WebImage.fromJson(core.Map json) {
    if (json.containsKey("score")) score = json["score"];
    if (json.containsKey("url")) url = json["url"];
  }

  core.Map toJson() {
    final json = new core.Map();
    if (score != null) json["score"] = score;
    if (url != null) json["url"] = url;
    return json;
  }
}
2242
/** Metadata for web pages. */
class WebPage {
  /**
   * Overall relevancy score for the web page.
   * Not normalized and not comparable across different image queries.
   */
  core.double score;
  /** The result web page URL. */
  core.String url;

  WebPage();

  WebPage.fromJson(core.Map json) {
    if (json.containsKey("score")) score = json["score"];
    if (json.containsKey("url")) url = json["url"];
  }

  core.Map toJson() {
    final json = new core.Map();
    if (score != null) json["score"] = score;
    if (url != null) json["url"] = url;
    return json;
  }
}
2275
/** A word representation. */
class Word {
  /**
   * The bounding box for the word.
   * The vertices are in the order of top-left, top-right, bottom-right,
   * bottom-left. When a rotation of the bounding box is detected the rotation
   * is represented as around the top-left corner as defined when the text is
   * read in the 'natural' orientation.
   * For example:
   * * when the text is horizontal it might look like:
   *    0----1
   *    |    |
   *    3----2
   * * when it's rotated 180 degrees around the top-left corner it becomes:
   *    2----3
   *    |    |
   *    1----0
   * and the vertice order will still be (0, 1, 2, 3).
   */
  BoundingPoly boundingBox;
  /** Additional information detected for the word. */
  TextProperty property;
  /**
   * List of symbols in the word.
   * The order of the symbols follows the natural reading order.
   */
  core.List<Symbol> symbols;

  Word();

  Word.fromJson(core.Map json) {
    if (json.containsKey("boundingBox")) {
      boundingBox = new BoundingPoly.fromJson(json["boundingBox"]);
    }
    if (json.containsKey("property")) {
      property = new TextProperty.fromJson(json["property"]);
    }
    if (json.containsKey("symbols")) {
      symbols =
          json["symbols"].map((value) => new Symbol.fromJson(value)).toList();
    }
  }

  core.Map toJson() {
    final json = new core.Map();
    if (boundingBox != null) json["boundingBox"] = boundingBox.toJson();
    if (property != null) json["property"] = property.toJson();
    if (symbols != null) {
      json["symbols"] = symbols.map((symbol) => symbol.toJson()).toList();
    }
    return json;
  }
}
OLDNEW
« no previous file with comments | « generated/googleapis/lib/tracing/v1.dart ('k') | generated/googleapis/lib/youtubereporting/v1.dart » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698