OLD | NEW |
1 // This is a generated file (see the discoveryapis_generator project). | 1 // This is a generated file (see the discoveryapis_generator project). |
2 | 2 |
3 library googleapis_beta.videointelligence.v1beta1; | 3 library googleapis_beta.videointelligence.v1beta1; |
4 | 4 |
5 import 'dart:core' as core; | 5 import 'dart:core' as core; |
6 import 'dart:async' as async; | 6 import 'dart:async' as async; |
7 import 'dart:convert' as convert; | 7 import 'dart:convert' as convert; |
8 | 8 |
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; | 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
10 import 'package:http/http.dart' as http; | 10 import 'package:http/http.dart' as http; |
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
77 return _response.then((data) => new GoogleLongrunningOperation.fromJson(data
)); | 77 return _response.then((data) => new GoogleLongrunningOperation.fromJson(data
)); |
78 } | 78 } |
79 | 79 |
80 } | 80 } |
81 | 81 |
82 | 82 |
83 | 83 |
/**
 * Progress of a video annotation operation. Delivered through the `metadata`
 * field of the `Operation` returned by the `GetOperation` call of the
 * `google::longrunning::Operations` service.
 */
class GoogleCloudVideointelligenceV1AnnotateVideoProgress {
  /** Per-video progress, one entry for each video in `AnnotateVideoRequest`. */
  core.List<GoogleCloudVideointelligenceV1VideoAnnotationProgress> annotationProgress;

  GoogleCloudVideointelligenceV1AnnotateVideoProgress();

  GoogleCloudVideointelligenceV1AnnotateVideoProgress.fromJson(core.Map _json) {
    if (_json.containsKey("annotationProgress")) {
      annotationProgress = _json["annotationProgress"]
          .map((item) =>
              new GoogleCloudVideointelligenceV1VideoAnnotationProgress.fromJson(item))
          .toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = <core.String, core.Object>{};
    if (annotationProgress != null) {
      _json["annotationProgress"] =
          annotationProgress.map((item) => item.toJson()).toList();
    }
    return _json;
  }
}
| 109 |
/**
 * Outcome of a video annotation operation. Delivered through the `response`
 * field of the `Operation` returned by the `GetOperation` call of the
 * `google::longrunning::Operations` service.
 */
class GoogleCloudVideointelligenceV1AnnotateVideoResponse {
  /** Per-video results, one entry for each video in `AnnotateVideoRequest`. */
  core.List<GoogleCloudVideointelligenceV1VideoAnnotationResults> annotationResults;

  GoogleCloudVideointelligenceV1AnnotateVideoResponse();

  GoogleCloudVideointelligenceV1AnnotateVideoResponse.fromJson(core.Map _json) {
    if (_json.containsKey("annotationResults")) {
      annotationResults = _json["annotationResults"]
          .map((item) =>
              new GoogleCloudVideointelligenceV1VideoAnnotationResults.fromJson(item))
          .toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = <core.String, core.Object>{};
    if (annotationResults != null) {
      _json["annotationResults"] =
          annotationResults.map((item) => item.toJson()).toList();
    }
    return _json;
  }
}
| 135 |
/** A single label detected in the video. */
class GoogleCloudVideointelligenceV1LabelAnnotation {
  /** Textual description, e.g. `Fixed-gear bicycle`. */
  core.String description;
  /** BCP-47 language code of `description`. */
  core.String languageCode;
  /** Occurrences of the label, each with its own confidence. */
  core.List<GoogleCloudVideointelligenceV1LabelLocation> locations;

  GoogleCloudVideointelligenceV1LabelAnnotation();

  GoogleCloudVideointelligenceV1LabelAnnotation.fromJson(core.Map _json) {
    if (_json.containsKey("description")) {
      description = _json["description"];
    }
    if (_json.containsKey("languageCode")) {
      languageCode = _json["languageCode"];
    }
    if (_json.containsKey("locations")) {
      locations = _json["locations"]
          .map((item) =>
              new GoogleCloudVideointelligenceV1LabelLocation.fromJson(item))
          .toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = <core.String, core.Object>{};
    if (description != null) {
      _json["description"] = description;
    }
    if (languageCode != null) {
      _json["languageCode"] = languageCode;
    }
    if (locations != null) {
      _json["locations"] = locations.map((item) => item.toJson()).toList();
    }
    return _json;
  }
}
| 173 |
/** Where (and how confidently) a label was detected. */
class GoogleCloudVideointelligenceV1LabelLocation {
  /** Confidence that the label is accurate. Range: [0, 1]. */
  core.double confidence;
  /**
   * Granularity at which the label applies.
   * Possible string values are:
   * - "LABEL_LEVEL_UNSPECIFIED" : Unspecified.
   * - "VIDEO_LEVEL" : Video-level. Corresponds to the whole video.
   * - "SEGMENT_LEVEL" : Segment-level. Corresponds to one of
   * `AnnotateSpec.segments`.
   * - "SHOT_LEVEL" : Shot-level. Corresponds to a single shot (i.e. a series of
   * frames
   * without a major camera position or background change).
   * - "FRAME_LEVEL" : Frame-level. Corresponds to a single video frame.
   */
  core.String level;
  /**
   * Video segment. Unset for video-level labels.
   * Set to a frame timestamp for frame-level labels.
   * Otherwise, corresponds to one of `AnnotateSpec.segments`
   * (if specified) or to shot boundaries (if requested).
   */
  GoogleCloudVideointelligenceV1VideoSegment segment;

  GoogleCloudVideointelligenceV1LabelLocation();

  GoogleCloudVideointelligenceV1LabelLocation.fromJson(core.Map _json) {
    if (_json.containsKey("confidence")) {
      confidence = _json["confidence"];
    }
    if (_json.containsKey("level")) {
      level = _json["level"];
    }
    if (_json.containsKey("segment")) {
      segment =
          new GoogleCloudVideointelligenceV1VideoSegment.fromJson(_json["segment"]);
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = <core.String, core.Object>{};
    if (confidence != null) {
      _json["confidence"] = confidence;
    }
    if (level != null) {
      _json["level"] = level;
    }
    if (segment != null) {
      _json["segment"] = segment.toJson();
    }
    return _json;
  }
}
| 227 |
/**
 * Safe search annotation (based on per-frame visual signals only).
 * Frames in which no unsafe content was detected carry no annotation.
 */
class GoogleCloudVideointelligenceV1SafeSearchAnnotation {
  /**
   * Likelihood of adult content.
   * Possible string values are:
   * - "UNKNOWN" : Unknown likelihood.
   * - "VERY_UNLIKELY" : Very unlikely.
   * - "UNLIKELY" : Unlikely.
   * - "POSSIBLE" : Possible.
   * - "LIKELY" : Likely.
   * - "VERY_LIKELY" : Very likely.
   */
  core.String adult;
  /**
   * Time-offset, relative to the beginning of the video,
   * corresponding to the video frame for this annotation.
   */
  core.String time;

  GoogleCloudVideointelligenceV1SafeSearchAnnotation();

  GoogleCloudVideointelligenceV1SafeSearchAnnotation.fromJson(core.Map _json) {
    if (_json.containsKey("adult")) {
      adult = _json["adult"];
    }
    if (_json.containsKey("time")) {
      time = _json["time"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = <core.String, core.Object>{};
    if (adult != null) {
      _json["adult"] = adult;
    }
    if (time != null) {
      _json["time"] = time;
    }
    return _json;
  }
}
| 273 |
/** Annotation progress for a single video. */
class GoogleCloudVideointelligenceV1VideoAnnotationProgress {
  /**
   * Video file location in
   * [Google Cloud Storage](https://cloud.google.com/storage/).
   */
  core.String inputUri;
  /**
   * Approximate percentage processed thus far.
   * Guaranteed to be 100 when fully processed.
   */
  core.int progressPercent;
  /** Time when the request was received. */
  core.String startTime;
  /** Time of the most recent update. */
  core.String updateTime;

  GoogleCloudVideointelligenceV1VideoAnnotationProgress();

  GoogleCloudVideointelligenceV1VideoAnnotationProgress.fromJson(core.Map _json) {
    if (_json.containsKey("inputUri")) {
      inputUri = _json["inputUri"];
    }
    if (_json.containsKey("progressPercent")) {
      progressPercent = _json["progressPercent"];
    }
    if (_json.containsKey("startTime")) {
      startTime = _json["startTime"];
    }
    if (_json.containsKey("updateTime")) {
      updateTime = _json["updateTime"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = <core.String, core.Object>{};
    if (inputUri != null) {
      _json["inputUri"] = inputUri;
    }
    if (progressPercent != null) {
      _json["progressPercent"] = progressPercent;
    }
    if (startTime != null) {
      _json["startTime"] = startTime;
    }
    if (updateTime != null) {
      _json["updateTime"] = updateTime;
    }
    return _json;
  }
}
| 325 |
/** Annotation results for a single video. */
class GoogleCloudVideointelligenceV1VideoAnnotationResults {
  /**
   * If set, indicates an error. Note that for a single `AnnotateVideoRequest`
   * some videos may succeed and some may fail.
   */
  GoogleRpcStatus error;
  /**
   * Video file location in
   * [Google Cloud Storage](https://cloud.google.com/storage/).
   */
  core.String inputUri;
  /** Label annotations. There is exactly one element for each unique label. */
  core.List<GoogleCloudVideointelligenceV1LabelAnnotation> labelAnnotations;
  /** Safe search annotations. */
  core.List<GoogleCloudVideointelligenceV1SafeSearchAnnotation> safeSearchAnnotations;
  /** Shot annotations. Each shot is represented as a video segment. */
  core.List<GoogleCloudVideointelligenceV1VideoSegment> shotAnnotations;

  GoogleCloudVideointelligenceV1VideoAnnotationResults();

  GoogleCloudVideointelligenceV1VideoAnnotationResults.fromJson(core.Map _json) {
    if (_json.containsKey("error")) {
      error = new GoogleRpcStatus.fromJson(_json["error"]);
    }
    if (_json.containsKey("inputUri")) {
      inputUri = _json["inputUri"];
    }
    if (_json.containsKey("labelAnnotations")) {
      labelAnnotations = _json["labelAnnotations"]
          .map((item) =>
              new GoogleCloudVideointelligenceV1LabelAnnotation.fromJson(item))
          .toList();
    }
    if (_json.containsKey("safeSearchAnnotations")) {
      safeSearchAnnotations = _json["safeSearchAnnotations"]
          .map((item) =>
              new GoogleCloudVideointelligenceV1SafeSearchAnnotation.fromJson(item))
          .toList();
    }
    if (_json.containsKey("shotAnnotations")) {
      shotAnnotations = _json["shotAnnotations"]
          .map((item) =>
              new GoogleCloudVideointelligenceV1VideoSegment.fromJson(item))
          .toList();
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = <core.String, core.Object>{};
    if (error != null) {
      _json["error"] = error.toJson();
    }
    if (inputUri != null) {
      _json["inputUri"] = inputUri;
    }
    if (labelAnnotations != null) {
      _json["labelAnnotations"] =
          labelAnnotations.map((item) => item.toJson()).toList();
    }
    if (safeSearchAnnotations != null) {
      _json["safeSearchAnnotations"] =
          safeSearchAnnotations.map((item) => item.toJson()).toList();
    }
    if (shotAnnotations != null) {
      _json["shotAnnotations"] =
          shotAnnotations.map((item) => item.toJson()).toList();
    }
    return _json;
  }
}
| 385 |
/** A contiguous stretch of video, delimited by start and end time-offsets. */
class GoogleCloudVideointelligenceV1VideoSegment {
  /**
   * Time-offset, relative to the beginning of the video,
   * corresponding to the end of the segment (inclusive).
   */
  core.String endTime;
  /**
   * Time-offset, relative to the beginning of the video,
   * corresponding to the start of the segment (inclusive).
   */
  core.String startTime;

  GoogleCloudVideointelligenceV1VideoSegment();

  GoogleCloudVideointelligenceV1VideoSegment.fromJson(core.Map _json) {
    if (_json.containsKey("endTime")) {
      endTime = _json["endTime"];
    }
    if (_json.containsKey("startTime")) {
      startTime = _json["startTime"];
    }
  }

  core.Map<core.String, core.Object> toJson() {
    final core.Map<core.String, core.Object> _json = <core.String, core.Object>{};
    if (endTime != null) {
      _json["endTime"] = endTime;
    }
    if (startTime != null) {
      _json["startTime"] = startTime;
    }
    return _json;
  }
}
| 421 |
| 422 /** |
| 423 * Video annotation progress. Included in the `metadata` |
| 424 * field of the `Operation` returned by the `GetOperation` |
87 * call of the `google::longrunning::Operations` service. | 425 * call of the `google::longrunning::Operations` service. |
88 */ | 426 */ |
89 class GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress { | 427 class GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress { |
90 /** Progress metadata for all videos specified in `AnnotateVideoRequest`. */ | 428 /** Progress metadata for all videos specified in `AnnotateVideoRequest`. */ |
91 core.List<GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress> annotati
onProgress; | 429 core.List<GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress> annotati
onProgress; |
92 | 430 |
93 GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress(); | 431 GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress(); |
94 | 432 |
95 GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress.fromJson(core.Map _js
on) { | 433 GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress.fromJson(core.Map _js
on) { |
96 if (_json.containsKey("annotationProgress")) { | 434 if (_json.containsKey("annotationProgress")) { |
(...skipping 594 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
691 } | 1029 } |
692 if (details != null) { | 1030 if (details != null) { |
693 _json["details"] = details; | 1031 _json["details"] = details; |
694 } | 1032 } |
695 if (message != null) { | 1033 if (message != null) { |
696 _json["message"] = message; | 1034 _json["message"] = message; |
697 } | 1035 } |
698 return _json; | 1036 return _json; |
699 } | 1037 } |
700 } | 1038 } |
OLD | NEW |