OLD | NEW |
1 // This is a generated file (see the discoveryapis_generator project). | 1 // This is a generated file (see the discoveryapis_generator project). |
2 | 2 |
3 library googleapis.vision.v1; | 3 library googleapis.vision.v1; |
4 | 4 |
5 import 'dart:core' as core; | 5 import 'dart:core' as core; |
6 import 'dart:async' as async; | 6 import 'dart:async' as async; |
7 import 'dart:convert' as convert; | 7 import 'dart:convert' as convert; |
8 | 8 |
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; | 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
10 import 'package:http/http.dart' as http; | 10 import 'package:http/http.dart' as http; |
11 | 11 |
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show | 12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' |
13 ApiRequestError, DetailedApiRequestError; | 13 show ApiRequestError, DetailedApiRequestError; |
14 | 14 |
15 const core.String USER_AGENT = 'dart-api-client vision/v1'; | 15 const core.String USER_AGENT = 'dart-api-client vision/v1'; |
16 | 16 |
/// Integrates Google Vision features, including image labeling, face, logo,
/// and landmark detection, optical character recognition (OCR), and detection
/// of explicit content, into applications.
class VisionApi {
  /// View and manage your data across Google Cloud Platform services
  static const CloudPlatformScope =
      "https://www.googleapis.com/auth/cloud-platform";

  /// Apply machine learning models to understand and label images
  static const CloudVisionScope =
      "https://www.googleapis.com/auth/cloud-vision";

  final commons.ApiRequester _requester;

  ImagesResourceApi get images => new ImagesResourceApi(_requester);

  VisionApi(http.Client client,
      {core.String rootUrl: "https://vision.googleapis.com/",
      core.String servicePath: ""})
      : _requester =
            new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT);
}
37 | 39 |
38 | |
class ImagesResourceApi {
  final commons.ApiRequester _requester;

  ImagesResourceApi(commons.ApiRequester client) : _requester = client;

  /// Run image detection and annotation for a batch of images.
  ///
  /// [request] - The metadata request object.
  ///
  /// Request parameters:
  ///
  /// Completes with a [BatchAnnotateImagesResponse].
  ///
  /// Completes with a [commons.ApiRequestError] if the API endpoint returned
  /// an error.
  ///
  /// If the used [http.Client] completes with an error when making a REST
  /// call, this method will complete with the same error.
  async.Future<BatchAnnotateImagesResponse> annotate(
      BatchAnnotateImagesRequest request) {
    var _url = null;
    var _queryParams = new core.Map();
    var _uploadMedia = null;
    var _uploadOptions = null;
    var _downloadOptions = commons.DownloadOptions.Metadata;
    var _body = null;

    if (request != null) {
      _body = convert.JSON.encode((request).toJson());
    }

    _url = 'v1/images:annotate';

    var _response = _requester.request(_url, "POST",
        body: _body,
        queryParams: _queryParams,
        uploadOptions: _uploadOptions,
        uploadMedia: _uploadMedia,
        downloadOptions: _downloadOptions);
    return _response
        .then((data) => new BatchAnnotateImagesResponse.fromJson(data));
  }
}
85 | 83 |
/// Request for performing Google Cloud Vision API tasks over a user-provided
/// image, with user-requested features.
class AnnotateImageRequest {
  /// Requested features.
  core.List<Feature> features;

  /// The image to be processed.
  Image image;

  /// Additional context that may accompany the image.
  ImageContext imageContext;

  AnnotateImageRequest();

  AnnotateImageRequest.fromJson(core.Map _json) {
    if (_json.containsKey("features")) {
      features = _json["features"]
          .map((value) => new Feature.fromJson(value))
          .toList();
    }
    if (_json.containsKey("image")) {
      image = new Image.fromJson(_json["image"]);
    }
    if (_json.containsKey("imageContext")) {
      imageContext = new ImageContext.fromJson(_json["imageContext"]);
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (features != null) {
      _json["features"] = features.map((value) => (value).toJson()).toList();
    }
    if (image != null) {
      _json["image"] = (image).toJson();
    }
    if (imageContext != null) {
      _json["imageContext"] = (imageContext).toJson();
    }
    return _json;
  }
}
128 | 127 |
/// Response to an image annotation request.
class AnnotateImageResponse {
  /// If present, crop hints have completed successfully.
  CropHintsAnnotation cropHintsAnnotation;

  /// If set, represents the error message for the operation.
  /// Note that filled-in image annotations are guaranteed to be
  /// correct, even when `error` is set.
  Status error;

  /// If present, face detection has completed successfully.
  core.List<FaceAnnotation> faceAnnotations;

  /// If present, text (OCR) detection or document (OCR) text detection has
  /// completed successfully.
  /// This annotation provides the structural hierarchy for the OCR detected
  /// text.
  TextAnnotation fullTextAnnotation;

  /// If present, image properties were extracted successfully.
  ImageProperties imagePropertiesAnnotation;

  /// If present, label detection has completed successfully.
  core.List<EntityAnnotation> labelAnnotations;

  /// If present, landmark detection has completed successfully.
  core.List<EntityAnnotation> landmarkAnnotations;

  /// If present, logo detection has completed successfully.
  core.List<EntityAnnotation> logoAnnotations;

  /// If present, safe-search annotation has completed successfully.
  SafeSearchAnnotation safeSearchAnnotation;

  /// If present, text (OCR) detection has completed successfully.
  core.List<EntityAnnotation> textAnnotations;

  /// If present, web detection has completed successfully.
  WebDetection webDetection;

  AnnotateImageResponse();

  AnnotateImageResponse.fromJson(core.Map _json) {
    if (_json.containsKey("cropHintsAnnotation")) {
      cropHintsAnnotation =
          new CropHintsAnnotation.fromJson(_json["cropHintsAnnotation"]);
    }
    if (_json.containsKey("error")) {
      error = new Status.fromJson(_json["error"]);
    }
    if (_json.containsKey("faceAnnotations")) {
      faceAnnotations = _json["faceAnnotations"]
          .map((value) => new FaceAnnotation.fromJson(value))
          .toList();
    }
    if (_json.containsKey("fullTextAnnotation")) {
      fullTextAnnotation =
          new TextAnnotation.fromJson(_json["fullTextAnnotation"]);
    }
    if (_json.containsKey("imagePropertiesAnnotation")) {
      imagePropertiesAnnotation =
          new ImageProperties.fromJson(_json["imagePropertiesAnnotation"]);
    }
    if (_json.containsKey("labelAnnotations")) {
      labelAnnotations = _json["labelAnnotations"]
          .map((value) => new EntityAnnotation.fromJson(value))
          .toList();
    }
    if (_json.containsKey("landmarkAnnotations")) {
      landmarkAnnotations = _json["landmarkAnnotations"]
          .map((value) => new EntityAnnotation.fromJson(value))
          .toList();
    }
    if (_json.containsKey("logoAnnotations")) {
      logoAnnotations = _json["logoAnnotations"]
          .map((value) => new EntityAnnotation.fromJson(value))
          .toList();
    }
    if (_json.containsKey("safeSearchAnnotation")) {
      safeSearchAnnotation =
          new SafeSearchAnnotation.fromJson(_json["safeSearchAnnotation"]);
    }
    if (_json.containsKey("textAnnotations")) {
      textAnnotations = _json["textAnnotations"]
          .map((value) => new EntityAnnotation.fromJson(value))
          .toList();
    }
    if (_json.containsKey("webDetection")) {
      webDetection = new WebDetection.fromJson(_json["webDetection"]);
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (cropHintsAnnotation != null) {
      _json["cropHintsAnnotation"] = (cropHintsAnnotation).toJson();
    }
    if (error != null) {
      _json["error"] = (error).toJson();
    }
    if (faceAnnotations != null) {
      _json["faceAnnotations"] =
          faceAnnotations.map((value) => (value).toJson()).toList();
    }
    if (fullTextAnnotation != null) {
      _json["fullTextAnnotation"] = (fullTextAnnotation).toJson();
    }
    if (imagePropertiesAnnotation != null) {
      _json["imagePropertiesAnnotation"] = (imagePropertiesAnnotation).toJson();
    }
    if (labelAnnotations != null) {
      _json["labelAnnotations"] =
          labelAnnotations.map((value) => (value).toJson()).toList();
    }
    if (landmarkAnnotations != null) {
      _json["landmarkAnnotations"] =
          landmarkAnnotations.map((value) => (value).toJson()).toList();
    }
    if (logoAnnotations != null) {
      _json["logoAnnotations"] =
          logoAnnotations.map((value) => (value).toJson()).toList();
    }
    if (safeSearchAnnotation != null) {
      _json["safeSearchAnnotation"] = (safeSearchAnnotation).toJson();
    }
    if (textAnnotations != null) {
      _json["textAnnotations"] =
          textAnnotations.map((value) => (value).toJson()).toList();
    }
    if (webDetection != null) {
      _json["webDetection"] = (webDetection).toJson();
    }
    return _json;
  }
}
239 | 264 |
/// Multiple image annotation requests are batched into a single service call.
class BatchAnnotateImagesRequest {
  /// Individual image annotation requests for this batch.
  core.List<AnnotateImageRequest> requests;

  BatchAnnotateImagesRequest();

  BatchAnnotateImagesRequest.fromJson(core.Map _json) {
    if (_json.containsKey("requests")) {
      requests = _json["requests"]
          .map((value) => new AnnotateImageRequest.fromJson(value))
          .toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (requests != null) {
      _json["requests"] = requests.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}
263 | 289 |
/// Response to a batch image annotation request.
class BatchAnnotateImagesResponse {
  /// Individual responses to image annotation requests within the batch.
  core.List<AnnotateImageResponse> responses;

  BatchAnnotateImagesResponse();

  BatchAnnotateImagesResponse.fromJson(core.Map _json) {
    if (_json.containsKey("responses")) {
      responses = _json["responses"]
          .map((value) => new AnnotateImageResponse.fromJson(value))
          .toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (responses != null) {
      _json["responses"] = responses.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}
285 | 314 |
/// Logical element on the page.
class Block {
  /// Detected block type (text, image etc) for this block.
  /// Possible string values are:
  /// - "UNKNOWN" : Unknown block type.
  /// - "TEXT" : Regular text block.
  /// - "TABLE" : Table block.
  /// - "PICTURE" : Image block.
  /// - "RULER" : Horizontal/vertical line box.
  /// - "BARCODE" : Barcode block.
  core.String blockType;

  /// The bounding box for the block.
  /// The vertices are in the order of top-left, top-right, bottom-right,
  /// bottom-left. When a rotation of the bounding box is detected the rotation
  /// is represented as around the top-left corner as defined when the text is
  /// read in the 'natural' orientation.
  /// For example:
  /// * when the text is horizontal it might look like:
  ///     0----1
  ///     |    |
  ///     3----2
  /// * when it's rotated 180 degrees around the top-left corner it becomes:
  ///     2----3
  ///     |    |
  ///     1----0
  ///   and the vertice order will still be (0, 1, 2, 3).
  BoundingPoly boundingBox;

  /// List of paragraphs in this block (if this blocks is of type text).
  core.List<Paragraph> paragraphs;

  /// Additional information detected for the block.
  TextProperty property;

  Block();

  Block.fromJson(core.Map _json) {
    if (_json.containsKey("blockType")) {
      blockType = _json["blockType"];
    }
    if (_json.containsKey("boundingBox")) {
      boundingBox = new BoundingPoly.fromJson(_json["boundingBox"]);
    }
    if (_json.containsKey("paragraphs")) {
      paragraphs = _json["paragraphs"]
          .map((value) => new Paragraph.fromJson(value))
          .toList();
    }
    if (_json.containsKey("property")) {
      property = new TextProperty.fromJson(_json["property"]);
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (blockType != null) {
      _json["blockType"] = blockType;
    }
    if (boundingBox != null) {
      _json["boundingBox"] = (boundingBox).toJson();
    }
    if (paragraphs != null) {
      _json["paragraphs"] =
          paragraphs.map((value) => (value).toJson()).toList();
    }
    if (property != null) {
      _json["property"] = (property).toJson();
    }
    return _json;
  }
}
356 | 388 |
/// A bounding polygon for the detected image annotation.
class BoundingPoly {
  /// The bounding polygon vertices.
  core.List<Vertex> vertices;

  BoundingPoly();

  BoundingPoly.fromJson(core.Map _json) {
    if (_json.containsKey("vertices")) {
      vertices =
          _json["vertices"].map((value) => new Vertex.fromJson(value)).toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (vertices != null) {
      _json["vertices"] = vertices.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}
378 | 412 |
379 /** | 413 /// Represents a color in the RGBA color space. This representation is designed |
380 * Represents a color in the RGBA color space. This representation is designed | 414 /// for simplicity of conversion to/from color representations in various |
381 * for simplicity of conversion to/from color representations in various | 415 /// languages over compactness; for example, the fields of this representation |
382 * languages over compactness; for example, the fields of this representation | 416 /// can be trivially provided to the constructor of "java.awt.Color" in Java; |
383 * can be trivially provided to the constructor of "java.awt.Color" in Java; it | 417 /// it |
384 * can also be trivially provided to UIColor's "+colorWithRed:green:blue:alpha" | 418 /// can also be trivially provided to UIColor's |
385 * method in iOS; and, with just a little work, it can be easily formatted into | 419 /// "+colorWithRed:green:blue:alpha" |
386 * a CSS "rgba()" string in JavaScript, as well. Here are some examples: | 420 /// method in iOS; and, with just a little work, it can be easily formatted |
387 * | 421 /// into |
388 * Example (Java): | 422 /// a CSS "rgba()" string in JavaScript, as well. Here are some examples: |
389 * | 423 /// |
390 * import com.google.type.Color; | 424 /// Example (Java): |
391 * | 425 /// |
392 * // ... | 426 /// import com.google.type.Color; |
393 * public static java.awt.Color fromProto(Color protocolor) { | 427 /// |
394 * float alpha = protocolor.hasAlpha() | 428 /// // ... |
395 * ? protocolor.getAlpha().getValue() | 429 /// public static java.awt.Color fromProto(Color protocolor) { |
396 * : 1.0; | 430 /// float alpha = protocolor.hasAlpha() |
397 * | 431 /// ? protocolor.getAlpha().getValue() |
398 * return new java.awt.Color( | 432 /// : 1.0; |
399 * protocolor.getRed(), | 433 /// |
400 * protocolor.getGreen(), | 434 /// return new java.awt.Color( |
401 * protocolor.getBlue(), | 435 /// protocolor.getRed(), |
402 * alpha); | 436 /// protocolor.getGreen(), |
403 * } | 437 /// protocolor.getBlue(), |
404 * | 438 /// alpha); |
405 * public static Color toProto(java.awt.Color color) { | 439 /// } |
406 * float red = (float) color.getRed(); | 440 /// |
407 * float green = (float) color.getGreen(); | 441 /// public static Color toProto(java.awt.Color color) { |
408 * float blue = (float) color.getBlue(); | 442 /// float red = (float) color.getRed(); |
409 * float denominator = 255.0; | 443 /// float green = (float) color.getGreen(); |
410 * Color.Builder resultBuilder = | 444 /// float blue = (float) color.getBlue(); |
411 * Color | 445 /// float denominator = 255.0; |
412 * .newBuilder() | 446 /// Color.Builder resultBuilder = |
413 * .setRed(red / denominator) | 447 /// Color |
414 * .setGreen(green / denominator) | 448 /// .newBuilder() |
415 * .setBlue(blue / denominator); | 449 /// .setRed(red / denominator) |
416 * int alpha = color.getAlpha(); | 450 /// .setGreen(green / denominator) |
417 * if (alpha != 255) { | 451 /// .setBlue(blue / denominator); |
418 * result.setAlpha( | 452 /// int alpha = color.getAlpha(); |
419 * FloatValue | 453 /// if (alpha != 255) { |
420 * .newBuilder() | 454 /// result.setAlpha( |
421 * .setValue(((float) alpha) / denominator) | 455 /// FloatValue |
422 * .build()); | 456 /// .newBuilder() |
423 * } | 457 /// .setValue(((float) alpha) / denominator) |
424 * return resultBuilder.build(); | 458 /// .build()); |
425 * } | 459 /// } |
426 * // ... | 460 /// return resultBuilder.build(); |
427 * | 461 /// } |
428 * Example (iOS / Obj-C): | 462 /// // ... |
429 * | 463 /// |
430 * // ... | 464 /// Example (iOS / Obj-C): |
431 * static UIColor* fromProto(Color* protocolor) { | 465 /// |
432 * float red = [protocolor red]; | 466 /// // ... |
433 * float green = [protocolor green]; | 467 /// static UIColor* fromProto(Color* protocolor) { |
434 * float blue = [protocolor blue]; | 468 /// float red = [protocolor red]; |
435 * FloatValue* alpha_wrapper = [protocolor alpha]; | 469 /// float green = [protocolor green]; |
436 * float alpha = 1.0; | 470 /// float blue = [protocolor blue]; |
437 * if (alpha_wrapper != nil) { | 471 /// FloatValue* alpha_wrapper = [protocolor alpha]; |
438 * alpha = [alpha_wrapper value]; | 472 /// float alpha = 1.0; |
439 * } | 473 /// if (alpha_wrapper != nil) { |
440 * return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; | 474 /// alpha = [alpha_wrapper value]; |
441 * } | 475 /// } |
442 * | 476 /// return [UIColor colorWithRed:red green:green blue:blue alpha:alpha]; |
443 * static Color* toProto(UIColor* color) { | 477 /// } |
444 * CGFloat red, green, blue, alpha; | 478 /// |
445 * if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { | 479 /// static Color* toProto(UIColor* color) { |
446 * return nil; | 480 /// CGFloat red, green, blue, alpha; |
447 * } | 481 /// if (![color getRed:&red green:&green blue:&blue alpha:&alpha]) { |
448      *        Color* result = [[Color alloc] init];            |  482     ///         return nil;                                       |
449      *        [result setRed:red];                             |  483     ///       }                                                   |
450      *        [result setGreen:green];                         |  484     ///       Color* result = [[Color alloc] init];              |
451 * [result setBlue:blue]; | 485 /// [result setRed:red]; |
452 * if (alpha <= 0.9999) { | 486 /// [result setGreen:green]; |
453 * [result setAlpha:floatWrapperWithValue(alpha)]; | 487 /// [result setBlue:blue]; |
454 * } | 488 /// if (alpha <= 0.9999) { |
455 * [result autorelease]; | 489 /// [result setAlpha:floatWrapperWithValue(alpha)]; |
456 * return result; | 490 /// } |
457 * } | 491 /// [result autorelease]; |
458 * // ... | 492 /// return result; |
459 * | 493 /// } |
460 * Example (JavaScript): | 494 /// // ... |
461 * | 495 /// |
462 * // ... | 496 /// Example (JavaScript): |
463 * | 497 /// |
464 * var protoToCssColor = function(rgb_color) { | 498 /// // ... |
465 * var redFrac = rgb_color.red || 0.0; | 499 /// |
466 * var greenFrac = rgb_color.green || 0.0; | 500 /// var protoToCssColor = function(rgb_color) { |
467 * var blueFrac = rgb_color.blue || 0.0; | 501 /// var redFrac = rgb_color.red || 0.0; |
468 * var red = Math.floor(redFrac * 255); | 502 /// var greenFrac = rgb_color.green || 0.0; |
469 * var green = Math.floor(greenFrac * 255); | 503 /// var blueFrac = rgb_color.blue || 0.0; |
470 * var blue = Math.floor(blueFrac * 255); | 504 /// var red = Math.floor(redFrac * 255); |
471 * | 505 /// var green = Math.floor(greenFrac * 255); |
472 * if (!('alpha' in rgb_color)) { | 506 /// var blue = Math.floor(blueFrac * 255); |
473 * return rgbToCssColor_(red, green, blue); | 507 /// |
474 * } | 508 /// if (!('alpha' in rgb_color)) { |
475 * | 509 /// return rgbToCssColor_(red, green, blue); |
476 * var alphaFrac = rgb_color.alpha.value || 0.0; | 510 /// } |
477 * var rgbParams = [red, green, blue].join(','); | 511 /// |
478 * return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); | 512 /// var alphaFrac = rgb_color.alpha.value || 0.0; |
479 * }; | 513 /// var rgbParams = [red, green, blue].join(','); |
480 * | 514 /// return ['rgba(', rgbParams, ',', alphaFrac, ')'].join(''); |
481 * var rgbToCssColor_ = function(red, green, blue) { | 515 /// }; |
482 * var rgbNumber = new Number((red << 16) | (green << 8) | blue); | 516 /// |
483 * var hexString = rgbNumber.toString(16); | 517 /// var rgbToCssColor_ = function(red, green, blue) { |
484 * var missingZeros = 6 - hexString.length; | 518 /// var rgbNumber = new Number((red << 16) | (green << 8) | blue); |
485 * var resultBuilder = ['#']; | 519 /// var hexString = rgbNumber.toString(16); |
486 * for (var i = 0; i < missingZeros; i++) { | 520 /// var missingZeros = 6 - hexString.length; |
487 * resultBuilder.push('0'); | 521 /// var resultBuilder = ['#']; |
488 * } | 522 /// for (var i = 0; i < missingZeros; i++) { |
489 * resultBuilder.push(hexString); | 523 /// resultBuilder.push('0'); |
490 * return resultBuilder.join(''); | 524 /// } |
491 * }; | 525 /// resultBuilder.push(hexString); |
492 * | 526 /// return resultBuilder.join(''); |
493 * // ... | 527 /// }; |
494 */ | 528 /// |
| 529 /// // ... |
class Color {
  /// The fraction of this color that should be applied to the pixel. That is,
  /// the final pixel color is defined by the equation:
  ///
  ///   pixel color = alpha * (this color) + (1.0 - alpha) * (background color)
  ///
  /// This means that a value of 1.0 corresponds to a solid color, whereas
  /// a value of 0.0 corresponds to a completely transparent color. This
  /// uses a wrapper message rather than a simple float scalar so that it is
  /// possible to distinguish between a default value and the value being
  /// unset.
  /// If omitted, this color object is to be rendered as a solid color
  /// (as if the alpha value had been explicitly given with a value of 1.0).
  core.double alpha;

  /// The amount of blue in the color as a value in the interval [0, 1].
  core.double blue;

  /// The amount of green in the color as a value in the interval [0, 1].
  core.double green;

  /// The amount of red in the color as a value in the interval [0, 1].
  core.double red;

  Color();

  /// Populates the channels from [_json]; absent keys leave the
  /// corresponding field null.
  Color.fromJson(core.Map _json) {
    if (_json.containsKey("alpha")) alpha = _json["alpha"];
    if (_json.containsKey("blue")) blue = _json["blue"];
    if (_json.containsKey("green")) green = _json["green"];
    if (_json.containsKey("red")) red = _json["red"];
  }

  /// Serializes only the channels that have been set (non-null).
  core.Map<core.String, core.Object> toJson() {
    final _json = new core.Map<core.String, core.Object>();
    if (alpha != null) _json["alpha"] = alpha;
    if (blue != null) _json["blue"] = blue;
    if (green != null) _json["green"] = green;
    if (red != null) _json["red"] = red;
    return _json;
  }
}
551 | 589 |
/// Color information consisting of RGB channels, score, and the fraction of
/// the image that the color occupies in the image.
class ColorInfo {
  /// RGB components of the color.
  Color color;

  /// The fraction of pixels the color occupies in the image.
  /// Value in range [0, 1].
  core.double pixelFraction;

  /// Image-specific score for this color. Value in range [0, 1].
  core.double score;

  ColorInfo();

  /// Populates the fields from [_json]; absent keys leave the
  /// corresponding field null.
  ColorInfo.fromJson(core.Map _json) {
    if (_json.containsKey("color")) {
      color = new Color.fromJson(_json["color"]);
    }
    if (_json.containsKey("pixelFraction")) {
      pixelFraction = _json["pixelFraction"];
    }
    if (_json.containsKey("score")) score = _json["score"];
  }

  /// Serializes only the fields that have been set (non-null).
  core.Map<core.String, core.Object> toJson() {
    final _json = new core.Map<core.String, core.Object>();
    if (color != null) _json["color"] = color.toJson();
    if (pixelFraction != null) _json["pixelFraction"] = pixelFraction;
    if (score != null) _json["score"] = score;
    return _json;
  }
}
595 | 632 |
/// Single crop hint that is used to generate a new crop when serving an image.
class CropHint {
  /// The bounding polygon for the crop region. The coordinates of the bounding
  /// box are in the original image's scale, as returned in `ImageParams`.
  BoundingPoly boundingPoly;

  /// Confidence of this being a salient region. Range [0, 1].
  core.double confidence;

  /// Fraction of importance of this salient region with respect to the
  /// original image.
  core.double importanceFraction;

  CropHint();

  /// Populates the fields from [_json]; absent keys leave the
  /// corresponding field null.
  CropHint.fromJson(core.Map _json) {
    if (_json.containsKey("boundingPoly")) {
      boundingPoly = new BoundingPoly.fromJson(_json["boundingPoly"]);
    }
    if (_json.containsKey("confidence")) confidence = _json["confidence"];
    if (_json.containsKey("importanceFraction")) {
      importanceFraction = _json["importanceFraction"];
    }
  }

  /// Serializes only the fields that have been set (non-null).
  core.Map<core.String, core.Object> toJson() {
    final _json = new core.Map<core.String, core.Object>();
    if (boundingPoly != null) _json["boundingPoly"] = boundingPoly.toJson();
    if (confidence != null) _json["confidence"] = confidence;
    if (importanceFraction != null) {
      _json["importanceFraction"] = importanceFraction;
    }
    return _json;
  }
}
641 | 676 |
/// Set of crop hints that are used to generate new crops when serving images.
class CropHintsAnnotation {
  /// Crop hint results.
  core.List<CropHint> cropHints;

  CropHintsAnnotation();

  /// Populates [cropHints] from [_json]; an absent key leaves it null.
  CropHintsAnnotation.fromJson(core.Map _json) {
    if (_json.containsKey("cropHints")) {
      // Use an explicit type argument on `map` so the result is a
      // List<CropHint>: a dynamic `.map(...)` call produces List<dynamic>,
      // whose implicit downcast to List<CropHint> fails at runtime under
      // Dart 2's sound type system.
      cropHints = (_json["cropHints"] as core.List)
          .map<CropHint>((value) => new CropHint.fromJson(value))
          .toList();
    }
  }

  /// Serializes only the fields that have been set (non-null).
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (cropHints != null) {
      _json["cropHints"] = cropHints.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}
665 | 701 |
/// Parameters for crop hints annotation request.
class CropHintsParams {
  /// Aspect ratios in floats, representing the ratio of the width to the
  /// height of the image. For example, if the desired aspect ratio is 4/3,
  /// the corresponding float value should be 1.33333. If not specified, the
  /// best possible crop is returned. The number of provided aspect ratios is
  /// limited to a maximum of 16; any aspect ratios provided after the 16th
  /// are ignored.
  core.List<core.double> aspectRatios;

  CropHintsParams();

  /// Populates [aspectRatios] from [_json]; an absent key leaves it null.
  CropHintsParams.fromJson(core.Map _json) {
    if (_json.containsKey("aspectRatios")) {
      // A decoded JSON array is List<dynamic> and whole numbers may decode
      // as int, so assigning it directly to List<double> (as the previous
      // code did) throws a type error at runtime. Convert each element
      // explicitly instead.
      aspectRatios = (_json["aspectRatios"] as core.List)
          .map<core.double>((value) => (value as core.num).toDouble())
          .toList();
    }
  }

  /// Serializes only the fields that have been set (non-null).
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (aspectRatios != null) {
      _json["aspectRatios"] = aspectRatios;
    }
    return _json;
  }
}
694 | 730 |
/// Detected start or end of a structural component.
class DetectedBreak {
  /// True if break prepends the element.
  core.bool isPrefix;

  /// Detected break type.
  /// Possible string values are:
  /// - "UNKNOWN" : Unknown break label type.
  /// - "SPACE" : Regular space.
  /// - "SURE_SPACE" : Sure space (very wide).
  /// - "EOL_SURE_SPACE" : Line-wrapping break.
  /// - "HYPHEN" : End-line hyphen that is not present in text; does not
  /// co-occur with `SPACE`, `LEADER_SPACE`, or `LINE_BREAK`.
  /// - "LINE_BREAK" : Line break that ends a paragraph.
  core.String type;

  DetectedBreak();

  /// Populates the fields from [_json]; absent keys leave the
  /// corresponding field null.
  DetectedBreak.fromJson(core.Map _json) {
    if (_json.containsKey("isPrefix")) isPrefix = _json["isPrefix"];
    if (_json.containsKey("type")) type = _json["type"];
  }

  /// Serializes only the fields that have been set (non-null).
  core.Map<core.String, core.Object> toJson() {
    final _json = new core.Map<core.String, core.Object>();
    if (isPrefix != null) _json["isPrefix"] = isPrefix;
    if (type != null) _json["type"] = type;
    return _json;
  }
}
735 | 771 |
/// Detected language for a structural component.
class DetectedLanguage {
  /// Confidence of detected language. Range [0, 1].
  core.double confidence;

  /// The BCP-47 language code, such as "en-US" or "sr-Latn". For more
  /// information, see
  /// http://www.unicode.org/reports/tr35/#Unicode_locale_identifier.
  core.String languageCode;

  DetectedLanguage();

  /// Populates the fields from [_json]; absent keys leave the
  /// corresponding field null.
  DetectedLanguage.fromJson(core.Map _json) {
    if (_json.containsKey("confidence")) confidence = _json["confidence"];
    if (_json.containsKey("languageCode")) {
      languageCode = _json["languageCode"];
    }
  }

  /// Serializes only the fields that have been set (non-null).
  core.Map<core.String, core.Object> toJson() {
    final _json = new core.Map<core.String, core.Object>();
    if (confidence != null) _json["confidence"] = confidence;
    if (languageCode != null) _json["languageCode"] = languageCode;
    return _json;
  }
}
769 | 805 |
/// Set of dominant colors and their corresponding scores.
class DominantColorsAnnotation {
  /// RGB color values with their score and pixel fraction.
  core.List<ColorInfo> colors;

  DominantColorsAnnotation();

  /// Populates [colors] from [_json]; an absent key leaves it null.
  DominantColorsAnnotation.fromJson(core.Map _json) {
    if (_json.containsKey("colors")) {
      // Use an explicit type argument on `map` so the result is a
      // List<ColorInfo>: a dynamic `.map(...)` call produces List<dynamic>,
      // whose implicit downcast to List<ColorInfo> fails at runtime under
      // Dart 2's sound type system.
      colors = (_json["colors"] as core.List)
          .map<ColorInfo>((value) => new ColorInfo.fromJson(value))
          .toList();
    }
  }

  /// Serializes only the fields that have been set (non-null).
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (colors != null) {
      _json["colors"] = colors.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}
791 | 830 |
/// Set of detected entity features.
class EntityAnnotation {
  /// Image region to which this entity belongs. Not produced
  /// for `LABEL_DETECTION` features.
  BoundingPoly boundingPoly;

  /// The accuracy of the entity detection in an image.
  /// For example, for an image in which the "Eiffel Tower" entity is detected,
  /// this field represents the confidence that there is a tower in the query
  /// image. Range [0, 1].
  core.double confidence;

  /// Entity textual description, expressed in its `locale` language.
  core.String description;

  /// The language code for the locale in which the entity textual
  /// `description` is expressed.
  core.String locale;

  /// The location information for the detected entity. Multiple
  /// `LocationInfo` elements can be present because one location may
  /// indicate the location of the scene in the image, and another location
  /// may indicate the location of the place where the image was taken.
  /// Location information is usually present for landmarks.
  core.List<LocationInfo> locations;

  /// Opaque entity ID. Some IDs may be available in
  /// [Google Knowledge Graph Search
  /// API](https://developers.google.com/knowledge-graph/).
  core.String mid;

  /// Some entities may have optional user-supplied `Property` (name/value)
  /// fields, such a score or string that qualifies the entity.
  core.List<Property> properties;

  /// Overall score of the result. Range [0, 1].
  core.double score;

  /// The relevancy of the ICA (Image Content Annotation) label to the
  /// image. For example, the relevancy of "tower" is likely higher to an image
  /// containing the detected "Eiffel Tower" than to an image containing a
  /// detected distant towering building, even though the confidence that
  /// there is a tower in each image may be the same. Range [0, 1].
  core.double topicality;

  EntityAnnotation();

  /// Populates the fields from [_json]; absent keys leave the
  /// corresponding field null.
  EntityAnnotation.fromJson(core.Map _json) {
    if (_json.containsKey("boundingPoly")) {
      boundingPoly = new BoundingPoly.fromJson(_json["boundingPoly"]);
    }
    if (_json.containsKey("confidence")) {
      confidence = _json["confidence"];
    }
    if (_json.containsKey("description")) {
      description = _json["description"];
    }
    if (_json.containsKey("locale")) {
      locale = _json["locale"];
    }
    if (_json.containsKey("locations")) {
      // Use an explicit type argument on `map` so the result is a
      // List<LocationInfo>: a dynamic `.map(...)` call produces
      // List<dynamic>, whose implicit downcast fails at runtime under
      // Dart 2's sound type system.
      locations = (_json["locations"] as core.List)
          .map<LocationInfo>((value) => new LocationInfo.fromJson(value))
          .toList();
    }
    if (_json.containsKey("mid")) {
      mid = _json["mid"];
    }
    if (_json.containsKey("properties")) {
      // Same typed mapping rationale as `locations`.
      properties = (_json["properties"] as core.List)
          .map<Property>((value) => new Property.fromJson(value))
          .toList();
    }
    if (_json.containsKey("score")) {
      score = _json["score"];
    }
    if (_json.containsKey("topicality")) {
      topicality = _json["topicality"];
    }
  }

  /// Serializes only the fields that have been set (non-null).
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (boundingPoly != null) {
      _json["boundingPoly"] = (boundingPoly).toJson();
    }
    if (confidence != null) {
      _json["confidence"] = confidence;
    }
    if (description != null) {
      _json["description"] = description;
    }
    if (locale != null) {
      _json["locale"] = locale;
    }
    if (locations != null) {
      _json["locations"] = locations.map((value) => (value).toJson()).toList();
    }
    if (mid != null) {
      _json["mid"] = mid;
    }
    if (properties != null) {
      _json["properties"] =
          properties.map((value) => (value).toJson()).toList();
    }
    if (score != null) {
      _json["score"] = score;
    }
    if (topicality != null) {
      _json["topicality"] = topicality;
    }
    return _json;
  }
}
907 | 946 |
908 /** A face annotation object contains the results of face detection. */ | 947 /// A face annotation object contains the results of face detection. |
909 class FaceAnnotation { | 948 class FaceAnnotation { |
910 /** | 949 /// Anger likelihood. |
911 * Anger likelihood. | 950 /// Possible string values are: |
912 * Possible string values are: | 951 /// - "UNKNOWN" : Unknown likelihood. |
913 * - "UNKNOWN" : Unknown likelihood. | 952 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
914 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 953 /// specified vertical. |
915 * specified vertical. | 954 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
916 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 955 /// vertical. |
917 * vertical. | 956 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
918 * - "POSSIBLE" : It is possible that the image belongs to the specified | 957 /// vertical. |
919 * vertical. | 958 /// - "LIKELY" : It is likely that the image belongs to the specified |
920 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 959 /// vertical. |
921 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 960 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
922 * vertical. | 961 /// specified vertical. |
923 */ | |
924 core.String angerLikelihood; | 962 core.String angerLikelihood; |
925 /** | 963 |
926 * Blurred likelihood. | 964 /// Blurred likelihood. |
927 * Possible string values are: | 965 /// Possible string values are: |
928 * - "UNKNOWN" : Unknown likelihood. | 966 /// - "UNKNOWN" : Unknown likelihood. |
929 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 967 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
930 * specified vertical. | 968 /// specified vertical. |
931 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 969 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
932 * vertical. | 970 /// vertical. |
933 * - "POSSIBLE" : It is possible that the image belongs to the specified | 971 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
934 * vertical. | 972 /// vertical. |
935 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 973 /// - "LIKELY" : It is likely that the image belongs to the specified |
936 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 974 /// vertical. |
937 * vertical. | 975 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
938 */ | 976 /// specified vertical. |
939 core.String blurredLikelihood; | 977 core.String blurredLikelihood; |
940 /** | 978 |
941 * The bounding polygon around the face. The coordinates of the bounding box | 979 /// The bounding polygon around the face. The coordinates of the bounding box |
942 * are in the original image's scale, as returned in `ImageParams`. | 980 /// are in the original image's scale, as returned in `ImageParams`. |
943 * The bounding box is computed to "frame" the face in accordance with human | 981 /// The bounding box is computed to "frame" the face in accordance with human |
944 * expectations. It is based on the landmarker results. | 982 /// expectations. It is based on the landmarker results. |
945 * Note that one or more x and/or y coordinates may not be generated in the | 983 /// Note that one or more x and/or y coordinates may not be generated in the |
946 * `BoundingPoly` (the polygon will be unbounded) if only a partial face | 984 /// `BoundingPoly` (the polygon will be unbounded) if only a partial face |
947 * appears in the image to be annotated. | 985 /// appears in the image to be annotated. |
948 */ | |
949 BoundingPoly boundingPoly; | 986 BoundingPoly boundingPoly; |
950 /** Detection confidence. Range [0, 1]. */ | 987 |
| 988 /// Detection confidence. Range [0, 1]. |
951 core.double detectionConfidence; | 989 core.double detectionConfidence; |
952 /** | 990 |
953 * The `fd_bounding_poly` bounding polygon is tighter than the | 991 /// The `fd_bounding_poly` bounding polygon is tighter than the |
954 * `boundingPoly`, and encloses only the skin part of the face. Typically, it | 992 /// `boundingPoly`, and encloses only the skin part of the face. Typically, |
955 * is used to eliminate the face from any image analysis that detects the | 993 /// it |
956 * "amount of skin" visible in an image. It is not based on the | 994 /// is used to eliminate the face from any image analysis that detects the |
957 * landmarker results, only on the initial face detection, hence | 995 /// "amount of skin" visible in an image. It is not based on the |
958 * the <code>fd</code> (face detection) prefix. | 996 /// landmarker results, only on the initial face detection, hence |
959 */ | 997 /// the <code>fd</code> (face detection) prefix. |
960 BoundingPoly fdBoundingPoly; | 998 BoundingPoly fdBoundingPoly; |
961 /** | 999 |
962 * Headwear likelihood. | 1000 /// Headwear likelihood. |
963 * Possible string values are: | 1001 /// Possible string values are: |
964 * - "UNKNOWN" : Unknown likelihood. | 1002 /// - "UNKNOWN" : Unknown likelihood. |
965 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 1003 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
966 * specified vertical. | 1004 /// specified vertical. |
967 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 1005 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
968 * vertical. | 1006 /// vertical. |
969 * - "POSSIBLE" : It is possible that the image belongs to the specified | 1007 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
970 * vertical. | 1008 /// vertical. |
971 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 1009 /// - "LIKELY" : It is likely that the image belongs to the specified |
972 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 1010 /// vertical. |
973 * vertical. | 1011 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
974 */ | 1012 /// specified vertical. |
975 core.String headwearLikelihood; | 1013 core.String headwearLikelihood; |
976 /** | 1014 |
977 * Joy likelihood. | 1015 /// Joy likelihood. |
978 * Possible string values are: | 1016 /// Possible string values are: |
979 * - "UNKNOWN" : Unknown likelihood. | 1017 /// - "UNKNOWN" : Unknown likelihood. |
980 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 1018 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
981 * specified vertical. | 1019 /// specified vertical. |
982 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 1020 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
983 * vertical. | 1021 /// vertical. |
984 * - "POSSIBLE" : It is possible that the image belongs to the specified | 1022 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
985 * vertical. | 1023 /// vertical. |
986 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 1024 /// - "LIKELY" : It is likely that the image belongs to the specified |
987 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 1025 /// vertical. |
988 * vertical. | 1026 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
989 */ | 1027 /// specified vertical. |
990 core.String joyLikelihood; | 1028 core.String joyLikelihood; |
991 /** Face landmarking confidence. Range [0, 1]. */ | 1029 |
| 1030 /// Face landmarking confidence. Range [0, 1]. |
992 core.double landmarkingConfidence; | 1031 core.double landmarkingConfidence; |
993 /** Detected face landmarks. */ | 1032 |
| 1033 /// Detected face landmarks. |
994 core.List<Landmark> landmarks; | 1034 core.List<Landmark> landmarks; |
995 /** | 1035 |
996 * Yaw angle, which indicates the leftward/rightward angle that the face is | 1036 /// Yaw angle, which indicates the leftward/rightward angle that the face is |
997 * pointing relative to the vertical plane perpendicular to the image. Range | 1037 /// pointing relative to the vertical plane perpendicular to the image. Range |
998 * [-180,180]. | 1038 /// [-180,180]. |
999 */ | |
1000 core.double panAngle; | 1039 core.double panAngle; |
1001 /** | 1040 |
1002 * Roll angle, which indicates the amount of clockwise/anti-clockwise rotation | 1041 /// Roll angle, which indicates the amount of clockwise/anti-clockwise |
1003 * of the face relative to the image vertical about the axis perpendicular to | 1042 /// rotation |
1004 * the face. Range [-180,180]. | 1043 /// of the face relative to the image vertical about the axis perpendicular |
1005 */ | 1044 /// to |
| 1045 /// the face. Range [-180,180]. |
1006 core.double rollAngle; | 1046 core.double rollAngle; |
1007 /** | 1047 |
1008 * Sorrow likelihood. | 1048 /// Sorrow likelihood. |
1009 * Possible string values are: | 1049 /// Possible string values are: |
1010 * - "UNKNOWN" : Unknown likelihood. | 1050 /// - "UNKNOWN" : Unknown likelihood. |
1011 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 1051 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
1012 * specified vertical. | 1052 /// specified vertical. |
1013 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 1053 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
1014 * vertical. | 1054 /// vertical. |
1015 * - "POSSIBLE" : It is possible that the image belongs to the specified | 1055 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
1016 * vertical. | 1056 /// vertical. |
1017 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 1057 /// - "LIKELY" : It is likely that the image belongs to the specified |
1018 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 1058 /// vertical. |
1019 * vertical. | 1059 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
1020 */ | 1060 /// specified vertical. |
1021 core.String sorrowLikelihood; | 1061 core.String sorrowLikelihood; |
1022 /** | 1062 |
1023 * Surprise likelihood. | 1063 /// Surprise likelihood. |
1024 * Possible string values are: | 1064 /// Possible string values are: |
1025 * - "UNKNOWN" : Unknown likelihood. | 1065 /// - "UNKNOWN" : Unknown likelihood. |
1026 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 1066 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
1027 * specified vertical. | 1067 /// specified vertical. |
1028 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 1068 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
1029 * vertical. | 1069 /// vertical. |
1030 * - "POSSIBLE" : It is possible that the image belongs to the specified | 1070 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
1031 * vertical. | 1071 /// vertical. |
1032 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 1072 /// - "LIKELY" : It is likely that the image belongs to the specified |
1033 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 1073 /// vertical. |
1034 * vertical. | 1074 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
1035 */ | 1075 /// specified vertical. |
1036 core.String surpriseLikelihood; | 1076 core.String surpriseLikelihood; |
1037 /** | 1077 |
1038 * Pitch angle, which indicates the upwards/downwards angle that the face is | 1078 /// Pitch angle, which indicates the upwards/downwards angle that the face is |
1039 * pointing relative to the image's horizontal plane. Range [-180,180]. | 1079 /// pointing relative to the image's horizontal plane. Range [-180,180]. |
1040 */ | |
1041 core.double tiltAngle; | 1080 core.double tiltAngle; |
1042 /** | 1081 |
1043 * Under-exposed likelihood. | 1082 /// Under-exposed likelihood. |
1044 * Possible string values are: | 1083 /// Possible string values are: |
1045 * - "UNKNOWN" : Unknown likelihood. | 1084 /// - "UNKNOWN" : Unknown likelihood. |
1046 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 1085 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
1047 * specified vertical. | 1086 /// specified vertical. |
1048 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 1087 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
1049 * vertical. | 1088 /// vertical. |
1050 * - "POSSIBLE" : It is possible that the image belongs to the specified | 1089 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
1051 * vertical. | 1090 /// vertical. |
1052 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 1091 /// - "LIKELY" : It is likely that the image belongs to the specified |
1053 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 1092 /// vertical. |
1054 * vertical. | 1093 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
1055 */ | 1094 /// specified vertical. |
1056 core.String underExposedLikelihood; | 1095 core.String underExposedLikelihood; |
1057 | 1096 |
1058 FaceAnnotation(); | 1097 FaceAnnotation(); |
1059 | 1098 |
1060 FaceAnnotation.fromJson(core.Map _json) { | 1099 FaceAnnotation.fromJson(core.Map _json) { |
1061 if (_json.containsKey("angerLikelihood")) { | 1100 if (_json.containsKey("angerLikelihood")) { |
1062 angerLikelihood = _json["angerLikelihood"]; | 1101 angerLikelihood = _json["angerLikelihood"]; |
1063 } | 1102 } |
1064 if (_json.containsKey("blurredLikelihood")) { | 1103 if (_json.containsKey("blurredLikelihood")) { |
1065 blurredLikelihood = _json["blurredLikelihood"]; | 1104 blurredLikelihood = _json["blurredLikelihood"]; |
(...skipping 10 matching lines...) Expand all Loading... |
1076 if (_json.containsKey("headwearLikelihood")) { | 1115 if (_json.containsKey("headwearLikelihood")) { |
1077 headwearLikelihood = _json["headwearLikelihood"]; | 1116 headwearLikelihood = _json["headwearLikelihood"]; |
1078 } | 1117 } |
1079 if (_json.containsKey("joyLikelihood")) { | 1118 if (_json.containsKey("joyLikelihood")) { |
1080 joyLikelihood = _json["joyLikelihood"]; | 1119 joyLikelihood = _json["joyLikelihood"]; |
1081 } | 1120 } |
1082 if (_json.containsKey("landmarkingConfidence")) { | 1121 if (_json.containsKey("landmarkingConfidence")) { |
1083 landmarkingConfidence = _json["landmarkingConfidence"]; | 1122 landmarkingConfidence = _json["landmarkingConfidence"]; |
1084 } | 1123 } |
1085 if (_json.containsKey("landmarks")) { | 1124 if (_json.containsKey("landmarks")) { |
1086 landmarks = _json["landmarks"].map((value) => new Landmark.fromJson(value)
).toList(); | 1125 landmarks = _json["landmarks"] |
| 1126 .map((value) => new Landmark.fromJson(value)) |
| 1127 .toList(); |
1087 } | 1128 } |
1088 if (_json.containsKey("panAngle")) { | 1129 if (_json.containsKey("panAngle")) { |
1089 panAngle = _json["panAngle"]; | 1130 panAngle = _json["panAngle"]; |
1090 } | 1131 } |
1091 if (_json.containsKey("rollAngle")) { | 1132 if (_json.containsKey("rollAngle")) { |
1092 rollAngle = _json["rollAngle"]; | 1133 rollAngle = _json["rollAngle"]; |
1093 } | 1134 } |
1094 if (_json.containsKey("sorrowLikelihood")) { | 1135 if (_json.containsKey("sorrowLikelihood")) { |
1095 sorrowLikelihood = _json["sorrowLikelihood"]; | 1136 sorrowLikelihood = _json["sorrowLikelihood"]; |
1096 } | 1137 } |
1097 if (_json.containsKey("surpriseLikelihood")) { | 1138 if (_json.containsKey("surpriseLikelihood")) { |
1098 surpriseLikelihood = _json["surpriseLikelihood"]; | 1139 surpriseLikelihood = _json["surpriseLikelihood"]; |
1099 } | 1140 } |
1100 if (_json.containsKey("tiltAngle")) { | 1141 if (_json.containsKey("tiltAngle")) { |
1101 tiltAngle = _json["tiltAngle"]; | 1142 tiltAngle = _json["tiltAngle"]; |
1102 } | 1143 } |
1103 if (_json.containsKey("underExposedLikelihood")) { | 1144 if (_json.containsKey("underExposedLikelihood")) { |
1104 underExposedLikelihood = _json["underExposedLikelihood"]; | 1145 underExposedLikelihood = _json["underExposedLikelihood"]; |
1105 } | 1146 } |
1106 } | 1147 } |
1107 | 1148 |
1108 core.Map<core.String, core.Object> toJson() { | 1149 core.Map<core.String, core.Object> toJson() { |
1109 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c
ore.Object>(); | 1150 final core.Map<core.String, core.Object> _json = |
| 1151 new core.Map<core.String, core.Object>(); |
1110 if (angerLikelihood != null) { | 1152 if (angerLikelihood != null) { |
1111 _json["angerLikelihood"] = angerLikelihood; | 1153 _json["angerLikelihood"] = angerLikelihood; |
1112 } | 1154 } |
1113 if (blurredLikelihood != null) { | 1155 if (blurredLikelihood != null) { |
1114 _json["blurredLikelihood"] = blurredLikelihood; | 1156 _json["blurredLikelihood"] = blurredLikelihood; |
1115 } | 1157 } |
1116 if (boundingPoly != null) { | 1158 if (boundingPoly != null) { |
1117 _json["boundingPoly"] = (boundingPoly).toJson(); | 1159 _json["boundingPoly"] = (boundingPoly).toJson(); |
1118 } | 1160 } |
1119 if (detectionConfidence != null) { | 1161 if (detectionConfidence != null) { |
(...skipping 29 matching lines...) Expand all Loading... |
1149 if (tiltAngle != null) { | 1191 if (tiltAngle != null) { |
1150 _json["tiltAngle"] = tiltAngle; | 1192 _json["tiltAngle"] = tiltAngle; |
1151 } | 1193 } |
1152 if (underExposedLikelihood != null) { | 1194 if (underExposedLikelihood != null) { |
1153 _json["underExposedLikelihood"] = underExposedLikelihood; | 1195 _json["underExposedLikelihood"] = underExposedLikelihood; |
1154 } | 1196 } |
1155 return _json; | 1197 return _json; |
1156 } | 1198 } |
1157 } | 1199 } |
1158 | 1200 |
1159 /** | 1201 /// Users describe the type of Google Cloud Vision API tasks to perform over |
1160 * Users describe the type of Google Cloud Vision API tasks to perform over | 1202 /// images by using *Feature*s. Each Feature indicates a type of image |
1161 * images by using *Feature*s. Each Feature indicates a type of image | 1203 /// detection task to perform. Features encode the Cloud Vision API |
1162 * detection task to perform. Features encode the Cloud Vision API | 1204 /// vertical to operate on and the number of top-scoring results to return. |
1163 * vertical to operate on and the number of top-scoring results to return. | |
1164 */ | |
1165 class Feature { | 1205 class Feature { |
1166 /** Maximum number of results of this type. */ | 1206 /// Maximum number of results of this type. |
1167 core.int maxResults; | 1207 core.int maxResults; |
1168 /** | 1208 |
1169 * The feature type. | 1209 /// The feature type. |
1170 * Possible string values are: | 1210 /// Possible string values are: |
1171 * - "TYPE_UNSPECIFIED" : Unspecified feature type. | 1211 /// - "TYPE_UNSPECIFIED" : Unspecified feature type. |
1172 * - "FACE_DETECTION" : Run face detection. | 1212 /// - "FACE_DETECTION" : Run face detection. |
1173 * - "LANDMARK_DETECTION" : Run landmark detection. | 1213 /// - "LANDMARK_DETECTION" : Run landmark detection. |
1174 * - "LOGO_DETECTION" : Run logo detection. | 1214 /// - "LOGO_DETECTION" : Run logo detection. |
1175 * - "LABEL_DETECTION" : Run label detection. | 1215 /// - "LABEL_DETECTION" : Run label detection. |
1176 * - "TEXT_DETECTION" : Run OCR. | 1216 /// - "TEXT_DETECTION" : Run OCR. |
1177 * - "DOCUMENT_TEXT_DETECTION" : Run dense text document OCR. Takes precedence | 1217 /// - "DOCUMENT_TEXT_DETECTION" : Run dense text document OCR. Takes |
1178 * when both | 1218 /// precedence when both |
1179 * DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present. | 1219 /// DOCUMENT_TEXT_DETECTION and TEXT_DETECTION are present. |
1180 * - "SAFE_SEARCH_DETECTION" : Run computer vision models to compute image | 1220 /// - "SAFE_SEARCH_DETECTION" : Run computer vision models to compute image |
1181 * safe-search properties. | 1221 /// safe-search properties. |
1182 * - "IMAGE_PROPERTIES" : Compute a set of image properties, such as the | 1222 /// - "IMAGE_PROPERTIES" : Compute a set of image properties, such as the |
1183 * image's dominant colors. | 1223 /// image's dominant colors. |
1184 * - "CROP_HINTS" : Run crop hints. | 1224 /// - "CROP_HINTS" : Run crop hints. |
1185 * - "WEB_DETECTION" : Run web detection. | 1225 /// - "WEB_DETECTION" : Run web detection. |
1186 */ | |
1187 core.String type; | 1226 core.String type; |
1188 | 1227 |
1189 Feature(); | 1228 Feature(); |
1190 | 1229 |
1191 Feature.fromJson(core.Map _json) { | 1230 Feature.fromJson(core.Map _json) { |
1192 if (_json.containsKey("maxResults")) { | 1231 if (_json.containsKey("maxResults")) { |
1193 maxResults = _json["maxResults"]; | 1232 maxResults = _json["maxResults"]; |
1194 } | 1233 } |
1195 if (_json.containsKey("type")) { | 1234 if (_json.containsKey("type")) { |
1196 type = _json["type"]; | 1235 type = _json["type"]; |
1197 } | 1236 } |
1198 } | 1237 } |
1199 | 1238 |
1200 core.Map<core.String, core.Object> toJson() { | 1239 core.Map<core.String, core.Object> toJson() { |
1201 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c
ore.Object>(); | 1240 final core.Map<core.String, core.Object> _json = |
| 1241 new core.Map<core.String, core.Object>(); |
1202 if (maxResults != null) { | 1242 if (maxResults != null) { |
1203 _json["maxResults"] = maxResults; | 1243 _json["maxResults"] = maxResults; |
1204 } | 1244 } |
1205 if (type != null) { | 1245 if (type != null) { |
1206 _json["type"] = type; | 1246 _json["type"] = type; |
1207 } | 1247 } |
1208 return _json; | 1248 return _json; |
1209 } | 1249 } |
1210 } | 1250 } |
1211 | 1251 |
1212 /** Client image to perform Google Cloud Vision API tasks over. */ | 1252 /// Client image to perform Google Cloud Vision API tasks over. |
1213 class Image { | 1253 class Image { |
1214 /** | 1254 /// Image content, represented as a stream of bytes. |
1215 * Image content, represented as a stream of bytes. | 1255 /// Note: as with all `bytes` fields, protobuffers use a pure binary |
1216 * Note: as with all `bytes` fields, protobuffers use a pure binary | 1256 /// representation, whereas JSON representations use base64. |
1217 * representation, whereas JSON representations use base64. | |
1218 */ | |
1219 core.String content; | 1257 core.String content; |
1220 core.List<core.int> get contentAsBytes { | 1258 core.List<core.int> get contentAsBytes { |
1221 return convert.BASE64.decode(content); | 1259 return convert.BASE64.decode(content); |
1222 } | 1260 } |
1223 | 1261 |
1224 void set contentAsBytes(core.List<core.int> _bytes) { | 1262 void set contentAsBytes(core.List<core.int> _bytes) { |
1225 content = convert.BASE64.encode(_bytes).replaceAll("/", "_").replaceAll("+",
"-"); | 1263 content = |
| 1264 convert.BASE64.encode(_bytes).replaceAll("/", "_").replaceAll("+", "-"); |
1226 } | 1265 } |
1227 /** | 1266 |
1228 * Google Cloud Storage image location. If both `content` and `source` | 1267 /// Google Cloud Storage image location. If both `content` and `source` |
1229 * are provided for an image, `content` takes precedence and is | 1268 /// are provided for an image, `content` takes precedence and is |
1230 * used to perform the image annotation request. | 1269 /// used to perform the image annotation request. |
1231 */ | |
1232 ImageSource source; | 1270 ImageSource source; |
1233 | 1271 |
1234 Image(); | 1272 Image(); |
1235 | 1273 |
1236 Image.fromJson(core.Map _json) { | 1274 Image.fromJson(core.Map _json) { |
1237 if (_json.containsKey("content")) { | 1275 if (_json.containsKey("content")) { |
1238 content = _json["content"]; | 1276 content = _json["content"]; |
1239 } | 1277 } |
1240 if (_json.containsKey("source")) { | 1278 if (_json.containsKey("source")) { |
1241 source = new ImageSource.fromJson(_json["source"]); | 1279 source = new ImageSource.fromJson(_json["source"]); |
1242 } | 1280 } |
1243 } | 1281 } |
1244 | 1282 |
1245 core.Map<core.String, core.Object> toJson() { | 1283 core.Map<core.String, core.Object> toJson() { |
1246 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c
ore.Object>(); | 1284 final core.Map<core.String, core.Object> _json = |
| 1285 new core.Map<core.String, core.Object>(); |
1247 if (content != null) { | 1286 if (content != null) { |
1248 _json["content"] = content; | 1287 _json["content"] = content; |
1249 } | 1288 } |
1250 if (source != null) { | 1289 if (source != null) { |
1251 _json["source"] = (source).toJson(); | 1290 _json["source"] = (source).toJson(); |
1252 } | 1291 } |
1253 return _json; | 1292 return _json; |
1254 } | 1293 } |
1255 } | 1294 } |
1256 | 1295 |
1257 /** Image context and/or feature-specific parameters. */ | 1296 /// Image context and/or feature-specific parameters. |
1258 class ImageContext { | 1297 class ImageContext { |
1259 /** Parameters for crop hints annotation request. */ | 1298 /// Parameters for crop hints annotation request. |
1260 CropHintsParams cropHintsParams; | 1299 CropHintsParams cropHintsParams; |
1261 /** | 1300 |
1262 * List of languages to use for TEXT_DETECTION. In most cases, an empty value | 1301 /// List of languages to use for TEXT_DETECTION. In most cases, an empty |
1263 * yields the best results since it enables automatic language detection. For | 1302 /// value |
1264 * languages based on the Latin alphabet, setting `language_hints` is not | 1303 /// yields the best results since it enables automatic language detection. |
1265 * needed. In rare cases, when the language of the text in the image is known, | 1304 /// For |
1266 * setting a hint will help get better results (although it will be a | 1305 /// languages based on the Latin alphabet, setting `language_hints` is not |
1267 * significant hindrance if the hint is wrong). Text detection returns an | 1306 /// needed. In rare cases, when the language of the text in the image is |
1268 * error if one or more of the specified languages is not one of the | 1307 /// known, |
1269 * [supported languages](/vision/docs/languages). | 1308 /// setting a hint will help get better results (although it will be a |
1270 */ | 1309 /// significant hindrance if the hint is wrong). Text detection returns an |
| 1310 /// error if one or more of the specified languages is not one of the |
| 1311 /// [supported languages](/vision/docs/languages). |
1271 core.List<core.String> languageHints; | 1312 core.List<core.String> languageHints; |
1272 /** lat/long rectangle that specifies the location of the image. */ | 1313 |
| 1314 /// lat/long rectangle that specifies the location of the image. |
1273 LatLongRect latLongRect; | 1315 LatLongRect latLongRect; |
1274 | 1316 |
1275 ImageContext(); | 1317 ImageContext(); |
1276 | 1318 |
1277 ImageContext.fromJson(core.Map _json) { | 1319 ImageContext.fromJson(core.Map _json) { |
1278 if (_json.containsKey("cropHintsParams")) { | 1320 if (_json.containsKey("cropHintsParams")) { |
1279 cropHintsParams = new CropHintsParams.fromJson(_json["cropHintsParams"]); | 1321 cropHintsParams = new CropHintsParams.fromJson(_json["cropHintsParams"]); |
1280 } | 1322 } |
1281 if (_json.containsKey("languageHints")) { | 1323 if (_json.containsKey("languageHints")) { |
1282 languageHints = _json["languageHints"]; | 1324 languageHints = _json["languageHints"]; |
1283 } | 1325 } |
1284 if (_json.containsKey("latLongRect")) { | 1326 if (_json.containsKey("latLongRect")) { |
1285 latLongRect = new LatLongRect.fromJson(_json["latLongRect"]); | 1327 latLongRect = new LatLongRect.fromJson(_json["latLongRect"]); |
1286 } | 1328 } |
1287 } | 1329 } |
1288 | 1330 |
1289 core.Map<core.String, core.Object> toJson() { | 1331 core.Map<core.String, core.Object> toJson() { |
1290 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c
ore.Object>(); | 1332 final core.Map<core.String, core.Object> _json = |
| 1333 new core.Map<core.String, core.Object>(); |
1291 if (cropHintsParams != null) { | 1334 if (cropHintsParams != null) { |
1292 _json["cropHintsParams"] = (cropHintsParams).toJson(); | 1335 _json["cropHintsParams"] = (cropHintsParams).toJson(); |
1293 } | 1336 } |
1294 if (languageHints != null) { | 1337 if (languageHints != null) { |
1295 _json["languageHints"] = languageHints; | 1338 _json["languageHints"] = languageHints; |
1296 } | 1339 } |
1297 if (latLongRect != null) { | 1340 if (latLongRect != null) { |
1298 _json["latLongRect"] = (latLongRect).toJson(); | 1341 _json["latLongRect"] = (latLongRect).toJson(); |
1299 } | 1342 } |
1300 return _json; | 1343 return _json; |
1301 } | 1344 } |
1302 } | 1345 } |
1303 | 1346 |
1304 /** Stores image properties, such as dominant colors. */ | 1347 /// Stores image properties, such as dominant colors. |
1305 class ImageProperties { | 1348 class ImageProperties { |
1306 /** If present, dominant colors completed successfully. */ | 1349 /// If present, dominant colors completed successfully. |
1307 DominantColorsAnnotation dominantColors; | 1350 DominantColorsAnnotation dominantColors; |
1308 | 1351 |
1309 ImageProperties(); | 1352 ImageProperties(); |
1310 | 1353 |
1311 ImageProperties.fromJson(core.Map _json) { | 1354 ImageProperties.fromJson(core.Map _json) { |
1312 if (_json.containsKey("dominantColors")) { | 1355 if (_json.containsKey("dominantColors")) { |
1313 dominantColors = new DominantColorsAnnotation.fromJson(_json["dominantColo
rs"]); | 1356 dominantColors = |
| 1357 new DominantColorsAnnotation.fromJson(_json["dominantColors"]); |
1314 } | 1358 } |
1315 } | 1359 } |
1316 | 1360 |
1317 core.Map<core.String, core.Object> toJson() { | 1361 core.Map<core.String, core.Object> toJson() { |
1318 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c
ore.Object>(); | 1362 final core.Map<core.String, core.Object> _json = |
| 1363 new core.Map<core.String, core.Object>(); |
1319 if (dominantColors != null) { | 1364 if (dominantColors != null) { |
1320 _json["dominantColors"] = (dominantColors).toJson(); | 1365 _json["dominantColors"] = (dominantColors).toJson(); |
1321 } | 1366 } |
1322 return _json; | 1367 return _json; |
1323 } | 1368 } |
1324 } | 1369 } |
1325 | 1370 |
/// External image source (Google Cloud Storage image location).
class ImageSource {
  /// NOTE: For new code `image_uri` below is preferred.
  /// Google Cloud Storage image URI, which must be in the following form:
  /// `gs://bucket_name/object_name` (for details, see
  /// [Google Cloud Storage Request
  /// URIs](https://cloud.google.com/storage/docs/reference-uris)).
  /// NOTE: Cloud Storage object versioning is not supported.
  core.String gcsImageUri;

  /// Image URI which supports:
  /// 1) Google Cloud Storage image URI, which must be in the following form:
  /// `gs://bucket_name/object_name` (for details, see
  /// [Google Cloud Storage Request
  /// URIs](https://cloud.google.com/storage/docs/reference-uris)).
  /// NOTE: Cloud Storage object versioning is not supported.
  /// 2) Publicly accessible image HTTP/HTTPS URL.
  /// This is preferred over the legacy `gcs_image_uri` above. When both
  /// `gcs_image_uri` and `image_uri` are specified, `image_uri` takes
  /// precedence.
  core.String imageUri;

  ImageSource();

  /// Populates the fields that are present in [json]; absent keys leave the
  /// corresponding field untouched (null).
  ImageSource.fromJson(core.Map json) {
    if (json.containsKey("gcsImageUri")) {
      gcsImageUri = json["gcsImageUri"];
    }
    if (json.containsKey("imageUri")) {
      imageUri = json["imageUri"];
    }
  }

  /// Serializes this object, emitting only the non-null fields.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (gcsImageUri != null) {
      result["gcsImageUri"] = gcsImageUri;
    }
    if (imageUri != null) {
      result["imageUri"] = imageUri;
    }
    return result;
  }
}
1373 | 1416 |
/// A face-specific landmark (for example, a face feature).
/// Landmark positions may fall outside the bounds of the image
/// if the face is near one or more edges of the image.
/// Therefore it is NOT guaranteed that `0 <= x < width` or
/// `0 <= y < height`.
class Landmark {
  /// Face landmark position.
  Position position;

  /// Face landmark type.
  /// Possible string values are:
  /// - "UNKNOWN_LANDMARK" : Unknown face landmark detected. Should not be
  /// filled.
  /// - "LEFT_EYE" : Left eye.
  /// - "RIGHT_EYE" : Right eye.
  /// - "LEFT_OF_LEFT_EYEBROW" : Left of left eyebrow.
  /// - "RIGHT_OF_LEFT_EYEBROW" : Right of left eyebrow.
  /// - "LEFT_OF_RIGHT_EYEBROW" : Left of right eyebrow.
  /// - "RIGHT_OF_RIGHT_EYEBROW" : Right of right eyebrow.
  /// - "MIDPOINT_BETWEEN_EYES" : Midpoint between eyes.
  /// - "NOSE_TIP" : Nose tip.
  /// - "UPPER_LIP" : Upper lip.
  /// - "LOWER_LIP" : Lower lip.
  /// - "MOUTH_LEFT" : Mouth left.
  /// - "MOUTH_RIGHT" : Mouth right.
  /// - "MOUTH_CENTER" : Mouth center.
  /// - "NOSE_BOTTOM_RIGHT" : Nose, bottom right.
  /// - "NOSE_BOTTOM_LEFT" : Nose, bottom left.
  /// - "NOSE_BOTTOM_CENTER" : Nose, bottom center.
  /// - "LEFT_EYE_TOP_BOUNDARY" : Left eye, top boundary.
  /// - "LEFT_EYE_RIGHT_CORNER" : Left eye, right corner.
  /// - "LEFT_EYE_BOTTOM_BOUNDARY" : Left eye, bottom boundary.
  /// - "LEFT_EYE_LEFT_CORNER" : Left eye, left corner.
  /// - "RIGHT_EYE_TOP_BOUNDARY" : Right eye, top boundary.
  /// - "RIGHT_EYE_RIGHT_CORNER" : Right eye, right corner.
  /// - "RIGHT_EYE_BOTTOM_BOUNDARY" : Right eye, bottom boundary.
  /// - "RIGHT_EYE_LEFT_CORNER" : Right eye, left corner.
  /// - "LEFT_EYEBROW_UPPER_MIDPOINT" : Left eyebrow, upper midpoint.
  /// - "RIGHT_EYEBROW_UPPER_MIDPOINT" : Right eyebrow, upper midpoint.
  /// - "LEFT_EAR_TRAGION" : Left ear tragion.
  /// - "RIGHT_EAR_TRAGION" : Right ear tragion.
  /// - "LEFT_EYE_PUPIL" : Left eye pupil.
  /// - "RIGHT_EYE_PUPIL" : Right eye pupil.
  /// - "FOREHEAD_GLABELLA" : Forehead glabella.
  /// - "CHIN_GNATHION" : Chin gnathion.
  /// - "CHIN_LEFT_GONION" : Chin left gonion.
  /// - "CHIN_RIGHT_GONION" : Chin right gonion.
  core.String type;

  Landmark();

  /// Populates only the fields whose keys are present in [json].
  Landmark.fromJson(core.Map json) {
    if (json.containsKey("position")) {
      position = new Position.fromJson(json["position"]);
    }
    if (json.containsKey("type")) {
      type = json["type"];
    }
  }

  /// Serializes this object, emitting only the non-null fields.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (position != null) {
      result["position"] = position.toJson();
    }
    if (type != null) {
      result["type"] = type;
    }
    return result;
  }
}
1448 | 1489 |
/// An object representing a latitude/longitude pair. This is expressed as a
/// pair of doubles representing degrees latitude and degrees longitude. Unless
/// specified otherwise, this must conform to the
/// <a href="http://www.unoosa.org/pdf/icg/2012/template/WGS_84.pdf">WGS84
/// standard</a>. Values must be within normalized ranges.
///
/// Example of normalization code in Python:
///
///     def NormalizeLongitude(longitude):
///       """Wraps decimal degrees longitude to [-180.0, 180.0]."""
///       q, r = divmod(longitude, 360.0)
///       if r > 180.0 or (r == 180.0 and q <= -1.0):
///         return r - 360.0
///       return r
///
///     def NormalizeLatLng(latitude, longitude):
///       """Wraps decimal degrees latitude and longitude to
///       [-90.0, 90.0] and [-180.0, 180.0], respectively."""
///       r = latitude % 360.0
///       if r <= 90.0:
///         return r, NormalizeLongitude(longitude)
///       elif r >= 270.0:
///         return r - 360, NormalizeLongitude(longitude)
///       else:
///         return 180 - r, NormalizeLongitude(longitude + 180.0)
///
///     assert 180.0 == NormalizeLongitude(180.0)
///     assert -180.0 == NormalizeLongitude(-180.0)
///     assert -179.0 == NormalizeLongitude(181.0)
///     assert (0.0, 0.0) == NormalizeLatLng(360.0, 0.0)
///     assert (0.0, 0.0) == NormalizeLatLng(-360.0, 0.0)
///     assert (85.0, 180.0) == NormalizeLatLng(95.0, 0.0)
///     assert (-85.0, -170.0) == NormalizeLatLng(-95.0, 10.0)
///     assert (90.0, 10.0) == NormalizeLatLng(90.0, 10.0)
///     assert (-90.0, -10.0) == NormalizeLatLng(-90.0, -10.0)
///     assert (0.0, -170.0) == NormalizeLatLng(-180.0, 10.0)
///     assert (0.0, -170.0) == NormalizeLatLng(180.0, 10.0)
///     assert (-90.0, 10.0) == NormalizeLatLng(270.0, 10.0)
///     assert (90.0, 10.0) == NormalizeLatLng(-270.0, 10.0)
class LatLng {
  /// The latitude in degrees. It must be in the range [-90.0, +90.0].
  core.double latitude;

  /// The longitude in degrees. It must be in the range [-180.0, +180.0].
  core.double longitude;

  LatLng();

  /// Populates only the fields whose keys are present in [json].
  LatLng.fromJson(core.Map json) {
    if (json.containsKey("latitude")) {
      latitude = json["latitude"];
    }
    if (json.containsKey("longitude")) {
      longitude = json["longitude"];
    }
  }

  /// Serializes this object, emitting only the non-null fields.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (latitude != null) {
      result["latitude"] = latitude;
    }
    if (longitude != null) {
      result["longitude"] = longitude;
    }
    return result;
  }
}
1518 | 1560 |
/// Rectangle determined by min and max `LatLng` pairs.
class LatLongRect {
  /// Max lat/long pair.
  LatLng maxLatLng;

  /// Min lat/long pair.
  LatLng minLatLng;

  LatLongRect();

  /// Populates only the fields whose keys are present in [json].
  LatLongRect.fromJson(core.Map json) {
    if (json.containsKey("maxLatLng")) {
      maxLatLng = new LatLng.fromJson(json["maxLatLng"]);
    }
    if (json.containsKey("minLatLng")) {
      minLatLng = new LatLng.fromJson(json["minLatLng"]);
    }
  }

  /// Serializes this object, emitting only the non-null fields.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (maxLatLng != null) {
      result["maxLatLng"] = maxLatLng.toJson();
    }
    if (minLatLng != null) {
      result["minLatLng"] = minLatLng.toJson();
    }
    return result;
  }
}
1548 | 1592 |
/// Detected entity location information.
class LocationInfo {
  /// lat/long location coordinates.
  LatLng latLng;

  LocationInfo();

  /// Populates only the fields whose keys are present in [json].
  LocationInfo.fromJson(core.Map json) {
    if (json.containsKey("latLng")) {
      latLng = new LatLng.fromJson(json["latLng"]);
    }
  }

  /// Serializes this object, emitting only the non-null fields.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (latLng != null) {
      result["latLng"] = latLng.toJson();
    }
    return result;
  }
}
1570 | 1615 |
/// Detected page from OCR.
class Page {
  /// List of blocks of text, images etc on this page.
  core.List<Block> blocks;

  /// Page height in pixels.
  core.int height;

  /// Additional information detected on the page.
  TextProperty property;

  /// Page width in pixels.
  core.int width;

  Page();

  /// Populates only the fields whose keys are present in [json].
  Page.fromJson(core.Map json) {
    if (json.containsKey("blocks")) {
      blocks =
          json["blocks"].map((value) => new Block.fromJson(value)).toList();
    }
    if (json.containsKey("height")) {
      height = json["height"];
    }
    if (json.containsKey("property")) {
      property = new TextProperty.fromJson(json["property"]);
    }
    if (json.containsKey("width")) {
      width = json["width"];
    }
  }

  /// Serializes this object, emitting only the non-null fields.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (blocks != null) {
      result["blocks"] = blocks.map((value) => value.toJson()).toList();
    }
    if (height != null) {
      result["height"] = height;
    }
    if (property != null) {
      result["property"] = property.toJson();
    }
    if (width != null) {
      result["width"] = width;
    }
    return result;
  }
}
1616 | 1666 |
/// Structural unit of text representing a number of words in certain order.
class Paragraph {
  /// The bounding box for the paragraph.
  /// The vertices are in the order of top-left, top-right, bottom-right,
  /// bottom-left. When a rotation of the bounding box is detected the rotation
  /// is represented as around the top-left corner as defined when the text is
  /// read in the 'natural' orientation.
  /// For example:
  /// * when the text is horizontal it might look like:
  ///     0----1
  ///     |    |
  ///     3----2
  /// * when it's rotated 180 degrees around the top-left corner it becomes:
  ///     2----3
  ///     |    |
  ///     1----0
  /// and the vertice order will still be (0, 1, 2, 3).
  BoundingPoly boundingBox;

  /// Additional information detected for the paragraph.
  TextProperty property;

  /// List of words in this paragraph.
  core.List<Word> words;

  Paragraph();

  /// Populates only the fields whose keys are present in [json].
  Paragraph.fromJson(core.Map json) {
    if (json.containsKey("boundingBox")) {
      boundingBox = new BoundingPoly.fromJson(json["boundingBox"]);
    }
    if (json.containsKey("property")) {
      property = new TextProperty.fromJson(json["property"]);
    }
    if (json.containsKey("words")) {
      words = json["words"].map((value) => new Word.fromJson(value)).toList();
    }
  }

  /// Serializes this object, emitting only the non-null fields.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (boundingBox != null) {
      result["boundingBox"] = boundingBox.toJson();
    }
    if (property != null) {
      result["property"] = property.toJson();
    }
    if (words != null) {
      result["words"] = words.map((value) => value.toJson()).toList();
    }
    return result;
  }
}
1670 | 1721 |
/// A 3D position in the image, used primarily for Face detection landmarks.
/// A valid Position must have both x and y coordinates.
/// The position coordinates are in the same scale as the original image.
class Position {
  /// X coordinate.
  core.double x;

  /// Y coordinate.
  core.double y;

  /// Z coordinate (or depth).
  core.double z;

  Position();

  /// Populates only the fields whose keys are present in [json].
  Position.fromJson(core.Map json) {
    if (json.containsKey("x")) {
      x = json["x"];
    }
    if (json.containsKey("y")) {
      y = json["y"];
    }
    if (json.containsKey("z")) {
      z = json["z"];
    }
  }

  /// Serializes this object, emitting only the non-null fields.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (x != null) {
      result["x"] = x;
    }
    if (y != null) {
      result["y"] = y;
    }
    if (z != null) {
      result["z"] = z;
    }
    return result;
  }
}
1712 | 1764 |
/// A `Property` consists of a user-supplied name/value pair.
class Property {
  /// Name of the property.
  core.String name;

  /// Value of numeric properties.
  core.String uint64Value;

  /// Value of the property.
  core.String value;

  Property();

  /// Populates only the fields whose keys are present in [json].
  Property.fromJson(core.Map json) {
    if (json.containsKey("name")) {
      name = json["name"];
    }
    if (json.containsKey("uint64Value")) {
      uint64Value = json["uint64Value"];
    }
    if (json.containsKey("value")) {
      value = json["value"];
    }
  }

  /// Serializes this object, emitting only the non-null fields.
  core.Map<core.String, core.Object> toJson() {
    final result = <core.String, core.Object>{};
    if (name != null) {
      result["name"] = name;
    }
    if (uint64Value != null) {
      result["uint64Value"] = uint64Value;
    }
    if (value != null) {
      result["value"] = value;
    }
    return result;
  }
}
1750 | 1805 |
1751 /** | 1806 /// Set of features pertaining to the image, computed by computer vision |
1752 * Set of features pertaining to the image, computed by computer vision | 1807 /// methods over safe-search verticals (for example, adult, spoof, medical, |
1753 * methods over safe-search verticals (for example, adult, spoof, medical, | 1808 /// violence). |
1754 * violence). | |
1755 */ | |
1756 class SafeSearchAnnotation { | 1809 class SafeSearchAnnotation { |
1757 /** | 1810 /// Represents the adult content likelihood for the image. |
1758 * Represents the adult content likelihood for the image. | 1811 /// Possible string values are: |
1759 * Possible string values are: | 1812 /// - "UNKNOWN" : Unknown likelihood. |
1760 * - "UNKNOWN" : Unknown likelihood. | 1813 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
1761 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 1814 /// specified vertical. |
1762 * specified vertical. | 1815 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
1763 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 1816 /// vertical. |
1764 * vertical. | 1817 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
1765 * - "POSSIBLE" : It is possible that the image belongs to the specified | 1818 /// vertical. |
1766 * vertical. | 1819 /// - "LIKELY" : It is likely that the image belongs to the specified |
1767 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 1820 /// vertical. |
1768 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 1821 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
1769 * vertical. | 1822 /// specified vertical. |
1770 */ | |
1771 core.String adult; | 1823 core.String adult; |
1772 /** | 1824 |
1773 * Likelihood that this is a medical image. | 1825 /// Likelihood that this is a medical image. |
1774 * Possible string values are: | 1826 /// Possible string values are: |
1775 * - "UNKNOWN" : Unknown likelihood. | 1827 /// - "UNKNOWN" : Unknown likelihood. |
1776 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 1828 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
1777 * specified vertical. | 1829 /// specified vertical. |
1778 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 1830 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
1779 * vertical. | 1831 /// vertical. |
1780 * - "POSSIBLE" : It is possible that the image belongs to the specified | 1832 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
1781 * vertical. | 1833 /// vertical. |
1782 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 1834 /// - "LIKELY" : It is likely that the image belongs to the specified |
1783 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 1835 /// vertical. |
1784 * vertical. | 1836 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
1785 */ | 1837 /// specified vertical. |
1786 core.String medical; | 1838 core.String medical; |
1787 /** | 1839 |
1788 * Spoof likelihood. The likelihood that an modification | 1840 /// Spoof likelihood. The likelihood that an modification |
1789 * was made to the image's canonical version to make it appear | 1841 /// was made to the image's canonical version to make it appear |
1790 * funny or offensive. | 1842 /// funny or offensive. |
1791 * Possible string values are: | 1843 /// Possible string values are: |
1792 * - "UNKNOWN" : Unknown likelihood. | 1844 /// - "UNKNOWN" : Unknown likelihood. |
1793 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 1845 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
1794 * specified vertical. | 1846 /// specified vertical. |
1795 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 1847 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
1796 * vertical. | 1848 /// vertical. |
1797 * - "POSSIBLE" : It is possible that the image belongs to the specified | 1849 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
1798 * vertical. | 1850 /// vertical. |
1799 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 1851 /// - "LIKELY" : It is likely that the image belongs to the specified |
1800 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 1852 /// vertical. |
1801 * vertical. | 1853 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
1802 */ | 1854 /// specified vertical. |
1803 core.String spoof; | 1855 core.String spoof; |
1804 /** | 1856 |
1805 * Violence likelihood. | 1857 /// Violence likelihood. |
1806 * Possible string values are: | 1858 /// Possible string values are: |
1807 * - "UNKNOWN" : Unknown likelihood. | 1859 /// - "UNKNOWN" : Unknown likelihood. |
1808 * - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the | 1860 /// - "VERY_UNLIKELY" : It is very unlikely that the image belongs to the |
1809 * specified vertical. | 1861 /// specified vertical. |
1810 * - "UNLIKELY" : It is unlikely that the image belongs to the specified | 1862 /// - "UNLIKELY" : It is unlikely that the image belongs to the specified |
1811 * vertical. | 1863 /// vertical. |
1812 * - "POSSIBLE" : It is possible that the image belongs to the specified | 1864 /// - "POSSIBLE" : It is possible that the image belongs to the specified |
1813 * vertical. | 1865 /// vertical. |
1814 * - "LIKELY" : It is likely that the image belongs to the specified vertical. | 1866 /// - "LIKELY" : It is likely that the image belongs to the specified |
1815 * - "VERY_LIKELY" : It is very likely that the image belongs to the specified | 1867 /// vertical. |
1816 * vertical. | 1868 /// - "VERY_LIKELY" : It is very likely that the image belongs to the |
1817 */ | 1869 /// specified vertical. |
1818 core.String violence; | 1870 core.String violence; |
1819 | 1871 |
1820 SafeSearchAnnotation(); | 1872 SafeSearchAnnotation(); |
1821 | 1873 |
1822 SafeSearchAnnotation.fromJson(core.Map _json) { | 1874 SafeSearchAnnotation.fromJson(core.Map _json) { |
1823 if (_json.containsKey("adult")) { | 1875 if (_json.containsKey("adult")) { |
1824 adult = _json["adult"]; | 1876 adult = _json["adult"]; |
1825 } | 1877 } |
1826 if (_json.containsKey("medical")) { | 1878 if (_json.containsKey("medical")) { |
1827 medical = _json["medical"]; | 1879 medical = _json["medical"]; |
1828 } | 1880 } |
1829 if (_json.containsKey("spoof")) { | 1881 if (_json.containsKey("spoof")) { |
1830 spoof = _json["spoof"]; | 1882 spoof = _json["spoof"]; |
1831 } | 1883 } |
1832 if (_json.containsKey("violence")) { | 1884 if (_json.containsKey("violence")) { |
1833 violence = _json["violence"]; | 1885 violence = _json["violence"]; |
1834 } | 1886 } |
1835 } | 1887 } |
1836 | 1888 |
1837 core.Map<core.String, core.Object> toJson() { | 1889 core.Map<core.String, core.Object> toJson() { |
1838 final core.Map<core.String, core.Object> _json = new core.Map<core.String, c
ore.Object>(); | 1890 final core.Map<core.String, core.Object> _json = |
| 1891 new core.Map<core.String, core.Object>(); |
1839 if (adult != null) { | 1892 if (adult != null) { |
1840 _json["adult"] = adult; | 1893 _json["adult"] = adult; |
1841 } | 1894 } |
1842 if (medical != null) { | 1895 if (medical != null) { |
1843 _json["medical"] = medical; | 1896 _json["medical"] = medical; |
1844 } | 1897 } |
1845 if (spoof != null) { | 1898 if (spoof != null) { |
1846 _json["spoof"] = spoof; | 1899 _json["spoof"] = spoof; |
1847 } | 1900 } |
1848 if (violence != null) { | 1901 if (violence != null) { |
1849 _json["violence"] = violence; | 1902 _json["violence"] = violence; |
1850 } | 1903 } |
1851 return _json; | 1904 return _json; |
1852 } | 1905 } |
1853 } | 1906 } |
1854 | 1907 |
/// The `Status` type defines a logical error model that is suitable for
/// different programming environments, including REST APIs and RPC APIs. It
/// is used by [gRPC](https://github.com/grpc). The error model is designed to
/// be:
///
/// - Simple to use and understand for most users
/// - Flexible enough to meet unexpected needs
///
/// # Overview
///
/// The `Status` message contains three pieces of data: error code, error
/// message, and error details. The error code should be an enum value of
/// google.rpc.Code, but it may accept additional error codes if needed. The
/// error message should be a developer-facing English message that helps
/// developers *understand* and *resolve* the error. If a localized
/// user-facing error message is needed, put the localized message in the
/// error details or localize it in the client. The optional error details may
/// contain arbitrary information about the error. There is a predefined set
/// of error detail types in the package `google.rpc` that can be used for
/// common error conditions.
///
/// # Language mapping
///
/// The `Status` message is the logical representation of the error model, but
/// it is not necessarily the actual wire format. When the `Status` message is
/// exposed in different client libraries and different wire protocols, it can
/// be mapped differently. For example, it will likely be mapped to some
/// exceptions in Java, but more likely mapped to some error codes in C.
///
/// # Other uses
///
/// The error model and the `Status` message can be used in a variety of
/// environments, either with or without APIs, to provide a
/// consistent developer experience across different environments.
///
/// Example uses of this error model include:
///
/// - Partial errors. If a service needs to return partial errors to the
/// client, it may embed the `Status` in the normal response to indicate the
/// partial errors.
///
/// - Workflow errors. A typical workflow has multiple steps. Each step may
/// have a `Status` message for error reporting.
///
/// - Batch operations. If a client uses batch request and batch response, the
/// `Status` message should be used directly inside batch response, one for
/// each error sub-response.
///
/// - Asynchronous operations. If an API call embeds asynchronous operation
/// results in its response, the status of those operations should be
/// represented directly using the `Status` message.
///
/// - Logging. If some API errors are stored in logs, the message `Status`
/// could be used directly after any stripping needed for security/privacy
/// reasons.
class Status {
  /// The status code, which should be an enum value of google.rpc.Code.
  core.int code;

  /// A list of messages that carry the error details. There is a common set
  /// of message types for APIs to use.
  ///
  /// The values for Object must be JSON objects. It can consist of `num`,
  /// `String`, `bool` and `null` as well as `Map` and `List` values.
  core.List<core.Map<core.String, core.Object>> details;

  /// A developer-facing error message, which should be in English. Any
  /// user-facing error message should be localized and sent in the
  /// google.rpc.Status.details field, or localized by the client.
  core.String message;

  Status();

  /// Builds the status from its decoded-JSON map; absent keys leave the
  /// corresponding field null.
  Status.fromJson(core.Map _json) {
    if (_json.containsKey("code")) {
      code = _json["code"];
    }
    if (_json.containsKey("details")) {
      details = _json["details"];
    }
    if (_json.containsKey("message")) {
      message = _json["message"];
    }
  }

  /// Serializes the status to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (code != null) {
      _json["code"] = code;
    }
    if (details != null) {
      _json["details"] = details;
    }
    if (message != null) {
      _json["message"] = message;
    }
    return _json;
  }
}
1957 | 2014 |
/// A single symbol representation.
class Symbol {
  /// The bounding box for the symbol.
  /// The vertices are in the order of top-left, top-right, bottom-right,
  /// bottom-left. When a rotation of the bounding box is detected the
  /// rotation is represented as around the top-left corner as defined when
  /// the text is read in the 'natural' orientation.
  /// For example:
  /// * when the text is horizontal it might look like:
  ///    0----1
  ///    |    |
  ///    3----2
  /// * when it's rotated 180 degrees around the top-left corner it becomes:
  ///    2----3
  ///    |    |
  ///    1----0
  /// and the vertice order will still be (0, 1, 2, 3).
  BoundingPoly boundingBox;

  /// Additional information detected for the symbol.
  TextProperty property;

  /// The actual UTF-8 representation of the symbol.
  core.String text;

  Symbol();

  /// Builds the symbol from its decoded-JSON map; absent keys leave the
  /// corresponding field null.
  Symbol.fromJson(core.Map _json) {
    if (_json.containsKey("boundingBox")) {
      boundingBox = new BoundingPoly.fromJson(_json["boundingBox"]);
    }
    if (_json.containsKey("property")) {
      property = new TextProperty.fromJson(_json["property"]);
    }
    if (_json.containsKey("text")) {
      text = _json["text"];
    }
  }

  /// Serializes the symbol to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (boundingBox != null) {
      _json["boundingBox"] = (boundingBox).toJson();
    }
    if (property != null) {
      _json["property"] = (property).toJson();
    }
    if (text != null) {
      _json["text"] = text;
    }
    return _json;
  }
}
2011 | 2069 |
/// TextAnnotation contains a structured representation of OCR extracted text.
/// The hierarchy of an OCR extracted text structure is like this:
///     TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol
/// Each structural component, starting from Page, may further have their own
/// properties. Properties describe detected languages, breaks etc.. Please
/// refer to the google.cloud.vision.v1.TextAnnotation.TextProperty message
/// definition below for more detail.
class TextAnnotation {
  /// List of pages detected by OCR.
  core.List<Page> pages;

  /// UTF-8 text detected on the pages.
  core.String text;

  TextAnnotation();

  /// Builds the annotation from its decoded-JSON map; absent keys leave the
  /// corresponding field null.
  TextAnnotation.fromJson(core.Map _json) {
    if (_json.containsKey("pages")) {
      pages = _json["pages"].map((value) => new Page.fromJson(value)).toList();
    }
    if (_json.containsKey("text")) {
      text = _json["text"];
    }
  }

  /// Serializes the annotation to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (pages != null) {
      _json["pages"] = pages.map((value) => (value).toJson()).toList();
    }
    if (text != null) {
      _json["text"] = text;
    }
    return _json;
  }
}
2049 | 2107 |
/// Additional information detected on the structural component.
class TextProperty {
  /// Detected start or end of a text segment.
  DetectedBreak detectedBreak;

  /// A list of detected languages together with confidence.
  core.List<DetectedLanguage> detectedLanguages;

  TextProperty();

  /// Builds the property from its decoded-JSON map; absent keys leave the
  /// corresponding field null.
  TextProperty.fromJson(core.Map _json) {
    if (_json.containsKey("detectedBreak")) {
      detectedBreak = new DetectedBreak.fromJson(_json["detectedBreak"]);
    }
    if (_json.containsKey("detectedLanguages")) {
      detectedLanguages = _json["detectedLanguages"]
          .map((value) => new DetectedLanguage.fromJson(value))
          .toList();
    }
  }

  /// Serializes the property to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (detectedBreak != null) {
      _json["detectedBreak"] = (detectedBreak).toJson();
    }
    if (detectedLanguages != null) {
      _json["detectedLanguages"] =
          detectedLanguages.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}
2079 | 2142 |
/// A vertex represents a 2D point in the image.
/// NOTE: the vertex coordinates are in the same scale as the original image.
class Vertex {
  /// X coordinate.
  core.int x;

  /// Y coordinate.
  core.int y;

  Vertex();

  /// Builds the vertex from its decoded-JSON map; absent keys leave the
  /// corresponding field null.
  Vertex.fromJson(core.Map _json) {
    if (_json.containsKey("x")) {
      x = _json["x"];
    }
    if (_json.containsKey("y")) {
      y = _json["y"];
    }
  }

  /// Serializes the vertex to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (x != null) {
      _json["x"] = x;
    }
    if (y != null) {
      _json["y"] = y;
    }
    return _json;
  }
}
2112 | 2175 |
/// Relevant information for the image from the Internet.
class WebDetection {
  /// Fully matching images from the Internet.
  /// Can include resized copies of the query image.
  core.List<WebImage> fullMatchingImages;

  /// Web pages containing the matching images from the Internet.
  core.List<WebPage> pagesWithMatchingImages;

  /// Partial matching images from the Internet.
  /// Those images are similar enough to share some key-point features. For
  /// example an original image will likely have partial matching for its
  /// crops.
  core.List<WebImage> partialMatchingImages;

  /// The visually similar image results.
  core.List<WebImage> visuallySimilarImages;

  /// Deduced entities from similar images on the Internet.
  core.List<WebEntity> webEntities;

  WebDetection();

  /// Builds the detection from its decoded-JSON map; absent keys leave the
  /// corresponding field null.
  WebDetection.fromJson(core.Map _json) {
    if (_json.containsKey("fullMatchingImages")) {
      fullMatchingImages = _json["fullMatchingImages"]
          .map((value) => new WebImage.fromJson(value))
          .toList();
    }
    if (_json.containsKey("pagesWithMatchingImages")) {
      pagesWithMatchingImages = _json["pagesWithMatchingImages"]
          .map((value) => new WebPage.fromJson(value))
          .toList();
    }
    if (_json.containsKey("partialMatchingImages")) {
      partialMatchingImages = _json["partialMatchingImages"]
          .map((value) => new WebImage.fromJson(value))
          .toList();
    }
    if (_json.containsKey("visuallySimilarImages")) {
      visuallySimilarImages = _json["visuallySimilarImages"]
          .map((value) => new WebImage.fromJson(value))
          .toList();
    }
    if (_json.containsKey("webEntities")) {
      webEntities = _json["webEntities"]
          .map((value) => new WebEntity.fromJson(value))
          .toList();
    }
  }

  /// Serializes the detection to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (fullMatchingImages != null) {
      _json["fullMatchingImages"] =
          fullMatchingImages.map((value) => (value).toJson()).toList();
    }
    if (pagesWithMatchingImages != null) {
      _json["pagesWithMatchingImages"] =
          pagesWithMatchingImages.map((value) => (value).toJson()).toList();
    }
    if (partialMatchingImages != null) {
      _json["partialMatchingImages"] =
          partialMatchingImages.map((value) => (value).toJson()).toList();
    }
    if (visuallySimilarImages != null) {
      _json["visuallySimilarImages"] =
          visuallySimilarImages.map((value) => (value).toJson()).toList();
    }
    if (webEntities != null) {
      _json["webEntities"] =
          webEntities.map((value) => (value).toJson()).toList();
    }
    return _json;
  }
}
2173 | 2253 |
/// Entity deduced from similar images on the Internet.
class WebEntity {
  /// Canonical description of the entity, in English.
  core.String description;

  /// Opaque entity ID.
  core.String entityId;

  /// Overall relevancy score for the entity.
  /// Not normalized and not comparable across different image queries.
  core.double score;

  WebEntity();

  /// Builds the entity from its decoded-JSON map; absent keys leave the
  /// corresponding field null.
  WebEntity.fromJson(core.Map _json) {
    if (_json.containsKey("description")) {
      description = _json["description"];
    }
    if (_json.containsKey("entityId")) {
      entityId = _json["entityId"];
    }
    if (_json.containsKey("score")) {
      score = _json["score"];
    }
  }

  /// Serializes the entity to a JSON-encodable map, omitting null fields.
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (description != null) {
      _json["description"] = description;
    }
    if (entityId != null) {
      _json["entityId"] = entityId;
    }
    if (score != null) {
      _json["score"] = score;
    }
    return _json;
  }
}
2214 | 2295 |
/// Metadata for online images.
class WebImage {
  /// (Deprecated) Overall relevancy score for the image.
  core.double score;

  /// The result image URL.
  core.String url;

  WebImage();

  /// Builds the image metadata from its decoded-JSON map; absent keys leave
  /// the corresponding field null.
  WebImage.fromJson(core.Map _json) {
    if (_json.containsKey("score")) {
      score = _json["score"];
    }
    if (_json.containsKey("url")) {
      url = _json["url"];
    }
  }

  /// Serializes the image metadata to a JSON-encodable map, omitting null
  /// fields.
  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json =
        new core.Map<core.String, core.Object>();
    if (score != null) {
      _json["score"] = score;
    }
    if (url != null) {
      _json["url"] = url;
    }
    return _json;
  }
}
2247 | 2327 |
/// Metadata for web pages.
class WebPage {
  /// (Deprecated) Overall relevancy score for the web page.
  ///
  /// Not normalized and not comparable across different image queries.
  core.double score;

  /// The result web page URL.
  core.String url;

  WebPage();

  /// Deserializes a [WebPage] from its JSON map representation.
  ///
  /// Keys that are absent leave the corresponding field null.
  WebPage.fromJson(core.Map json) {
    if (json.containsKey("score")) score = json["score"];
    if (json.containsKey("url")) url = json["url"];
  }

  /// Serializes this page metadata to a JSON-compatible map.
  ///
  /// Fields that are null are omitted from the output.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (score != null) json["score"] = score;
    if (url != null) json["url"] = url;
    return json;
  }
}
2280 | 2359 |
/// A word representation.
class Word {
  /// The bounding box for the word.
  ///
  /// The vertices are in the order of top-left, top-right, bottom-right,
  /// bottom-left. When a rotation of the bounding box is detected the
  /// rotation is represented as around the top-left corner as defined when
  /// the text is read in the 'natural' orientation.
  /// For example:
  /// * when the text is horizontal it might look like:
  ///     0----1
  ///     |    |
  ///     3----2
  /// * when it's rotated 180 degrees around the top-left corner it becomes:
  ///     2----3
  ///     |    |
  ///     1----0
  /// and the vertice order will still be (0, 1, 2, 3).
  BoundingPoly boundingBox;

  /// Additional information detected for the word.
  TextProperty property;

  /// List of symbols in the word.
  ///
  /// The order of the symbols follows the natural reading order.
  core.List<Symbol> symbols;

  Word();

  /// Deserializes a [Word] from its JSON map representation.
  ///
  /// Keys that are absent leave the corresponding field null.
  Word.fromJson(core.Map json) {
    if (json.containsKey("boundingBox")) {
      boundingBox = new BoundingPoly.fromJson(json["boundingBox"]);
    }
    if (json.containsKey("property")) {
      property = new TextProperty.fromJson(json["property"]);
    }
    if (json.containsKey("symbols")) {
      symbols = json["symbols"]
          .map((value) => new Symbol.fromJson(value))
          .toList();
    }
  }

  /// Serializes this word to a JSON-compatible map.
  ///
  /// Fields that are null are omitted from the output.
  core.Map<core.String, core.Object> toJson() {
    final json = <core.String, core.Object>{};
    if (boundingBox != null) json["boundingBox"] = boundingBox.toJson();
    if (property != null) json["property"] = property.toJson();
    if (symbols != null) {
      json["symbols"] = symbols.map((value) => value.toJson()).toList();
    }
    return json;
  }
}
OLD | NEW |