OLD | NEW |
1 // This is a generated file (see the discoveryapis_generator project). | 1 // This is a generated file (see the discoveryapis_generator project). |
2 | 2 |
3 library googleapis_beta.videointelligence.v1beta1; | 3 library googleapis_beta.videointelligence.v1beta1; |
4 | 4 |
5 import 'dart:core' as core; | 5 import 'dart:core' as core; |
6 import 'dart:async' as async; | 6 import 'dart:async' as async; |
7 import 'dart:convert' as convert; | 7 import 'dart:convert' as convert; |
8 | 8 |
9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; | 9 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
10 import 'package:http/http.dart' as http; | 10 import 'package:http/http.dart' as http; |
11 | 11 |
12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' show | 12 export 'package:_discoveryapis_commons/_discoveryapis_commons.dart' |
13 ApiRequestError, DetailedApiRequestError; | 13 show ApiRequestError, DetailedApiRequestError; |
14 | 14 |
15 const core.String USER_AGENT = 'dart-api-client videointelligence/v1beta1'; | 15 const core.String USER_AGENT = 'dart-api-client videointelligence/v1beta1'; |
16 | 16 |
17 /** Google Cloud Video Intelligence API. */ | 17 /// Google Cloud Video Intelligence API. |
18 class VideointelligenceApi { | 18 class VideointelligenceApi { |
19 /** View and manage your data across Google Cloud Platform services */ | 19 /// View and manage your data across Google Cloud Platform services |
20 static const CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"; | 20 static const CloudPlatformScope = |
21 | 21 "https://www.googleapis.com/auth/cloud-platform"; |
22 | 22 |
23 final commons.ApiRequester _requester; | 23 final commons.ApiRequester _requester; |
24 | 24 |
25 VideosResourceApi get videos => new VideosResourceApi(_requester); | 25 VideosResourceApi get videos => new VideosResourceApi(_requester); |
26 | 26 |
27 VideointelligenceApi(http.Client client, {core.String rootUrl: "https://videointelligence.googleapis.com/", core.String servicePath: ""}) : | 27 VideointelligenceApi(http.Client client, |
28 _requester = new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT); | 28 {core.String rootUrl: "https://videointelligence.googleapis.com/", |
| 29 core.String servicePath: ""}) |
| 30 : _requester = |
| 31 new commons.ApiRequester(client, rootUrl, servicePath, USER_AGENT); |
29 } | 32 } |
30 | 33 |
31 | |
32 class VideosResourceApi { | 34 class VideosResourceApi { |
33 final commons.ApiRequester _requester; | 35 final commons.ApiRequester _requester; |
34 | 36 |
35 VideosResourceApi(commons.ApiRequester client) : | 37 VideosResourceApi(commons.ApiRequester client) : _requester = client; |
36 _requester = client; | |
37 | 38 |
38 /** | 39 /// Performs asynchronous video annotation. Progress and results can be |
39 * Performs asynchronous video annotation. Progress and results can be | 40 /// retrieved through the `google.longrunning.Operations` interface. |
40 * retrieved through the `google.longrunning.Operations` interface. | 41 /// `Operation.metadata` contains `AnnotateVideoProgress` (progress). |
41 * `Operation.metadata` contains `AnnotateVideoProgress` (progress). | 42 /// `Operation.response` contains `AnnotateVideoResponse` (results). |
42 * `Operation.response` contains `AnnotateVideoResponse` (results). | 43 /// |
43 * | 44 /// [request] - The metadata request object. |
44 * [request] - The metadata request object. | 45 /// |
45 * | 46 /// Request parameters: |
46 * Request parameters: | 47 /// |
47 * | 48 /// Completes with a [GoogleLongrunningOperation]. |
48 * Completes with a [GoogleLongrunningOperation]. | 49 /// |
49 * | 50 /// Completes with a [commons.ApiRequestError] if the API endpoint returned |
50 * Completes with a [commons.ApiRequestError] if the API endpoint returned an | 51 /// an error. |
51 * error. | 52 /// |
52 * | 53 /// If the used [http.Client] completes with an error when making a REST |
53 * If the used [http.Client] completes with an error when making a REST call, | 54 /// call, this method will complete with the same error. |
54 * this method will complete with the same error. | 55 async.Future<GoogleLongrunningOperation> annotate( |
55 */ | 56 GoogleCloudVideointelligenceV1beta1AnnotateVideoRequest request) { |
56 async.Future<GoogleLongrunningOperation> annotate(GoogleCloudVideointelligenceV1beta1AnnotateVideoRequest request) { | |
57 var _url = null; | 57 var _url = null; |
58 var _queryParams = new core.Map(); | 58 var _queryParams = new core.Map(); |
59 var _uploadMedia = null; | 59 var _uploadMedia = null; |
60 var _uploadOptions = null; | 60 var _uploadOptions = null; |
61 var _downloadOptions = commons.DownloadOptions.Metadata; | 61 var _downloadOptions = commons.DownloadOptions.Metadata; |
62 var _body = null; | 62 var _body = null; |
63 | 63 |
64 if (request != null) { | 64 if (request != null) { |
65 _body = convert.JSON.encode((request).toJson()); | 65 _body = convert.JSON.encode((request).toJson()); |
66 } | 66 } |
67 | 67 |
68 _url = 'v1beta1/videos:annotate'; | 68 _url = 'v1beta1/videos:annotate'; |
69 | 69 |
70 var _response = _requester.request(_url, | 70 var _response = _requester.request(_url, "POST", |
71 "POST", | 71 body: _body, |
72 body: _body, | 72 queryParams: _queryParams, |
73 queryParams: _queryParams, | 73 uploadOptions: _uploadOptions, |
74 uploadOptions: _uploadOptions, | 74 uploadMedia: _uploadMedia, |
75 uploadMedia: _uploadMedia, | 75 downloadOptions: _downloadOptions); |
76 downloadOptions: _downloadOptions); | 76 return _response |
77 return _response.then((data) => new GoogleLongrunningOperation.fromJson(data)); | 77 .then((data) => new GoogleLongrunningOperation.fromJson(data)); |
78 } | 78 } |
79 | |
80 } | 79 } |
81 | 80 |
82 | 81 /// Video annotation progress. Included in the `metadata` |
83 | 82 /// field of the `Operation` returned by the `GetOperation` |
84 /** | 83 /// call of the `google::longrunning::Operations` service. |
85 * Video annotation progress. Included in the `metadata` | |
86 * field of the `Operation` returned by the `GetOperation` | |
87 * call of the `google::longrunning::Operations` service. | |
88 */ | |
89 class GoogleCloudVideointelligenceV1AnnotateVideoProgress { | 84 class GoogleCloudVideointelligenceV1AnnotateVideoProgress { |
90 /** Progress metadata for all videos specified in `AnnotateVideoRequest`. */ | 85 /// Progress metadata for all videos specified in `AnnotateVideoRequest`. |
91 core.List<GoogleCloudVideointelligenceV1VideoAnnotationProgress> annotationProgress; | 86 core.List<GoogleCloudVideointelligenceV1VideoAnnotationProgress> |
| 87 annotationProgress; |
92 | 88 |
93 GoogleCloudVideointelligenceV1AnnotateVideoProgress(); | 89 GoogleCloudVideointelligenceV1AnnotateVideoProgress(); |
94 | 90 |
95 GoogleCloudVideointelligenceV1AnnotateVideoProgress.fromJson(core.Map _json) { | 91 GoogleCloudVideointelligenceV1AnnotateVideoProgress.fromJson(core.Map _json) { |
96 if (_json.containsKey("annotationProgress")) { | 92 if (_json.containsKey("annotationProgress")) { |
97 annotationProgress = _json["annotationProgress"].map((value) => new GoogleCloudVideointelligenceV1VideoAnnotationProgress.fromJson(value)).toList(); | 93 annotationProgress = _json["annotationProgress"] |
| 94 .map((value) => |
| 95 new GoogleCloudVideointelligenceV1VideoAnnotationProgress |
| 96 .fromJson(value)) |
| 97 .toList(); |
98 } | 98 } |
99 } | 99 } |
100 | 100 |
101 core.Map<core.String, core.Object> toJson() { | 101 core.Map<core.String, core.Object> toJson() { |
102 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 102 final core.Map<core.String, core.Object> _json = |
| 103 new core.Map<core.String, core.Object>(); |
103 if (annotationProgress != null) { | 104 if (annotationProgress != null) { |
104 _json["annotationProgress"] = annotationProgress.map((value) => (value).to
Json()).toList(); | 105 _json["annotationProgress"] = |
| 106 annotationProgress.map((value) => (value).toJson()).toList(); |
105 } | 107 } |
106 return _json; | 108 return _json; |
107 } | 109 } |
108 } | 110 } |
109 | 111 |
110 /** | 112 /// Video annotation response. Included in the `response` |
111 * Video annotation response. Included in the `response` | 113 /// field of the `Operation` returned by the `GetOperation` |
112 * field of the `Operation` returned by the `GetOperation` | 114 /// call of the `google::longrunning::Operations` service. |
113 * call of the `google::longrunning::Operations` service. | |
114 */ | |
115 class GoogleCloudVideointelligenceV1AnnotateVideoResponse { | 115 class GoogleCloudVideointelligenceV1AnnotateVideoResponse { |
116 /** Annotation results for all videos specified in `AnnotateVideoRequest`. */ | 116 /// Annotation results for all videos specified in `AnnotateVideoRequest`. |
117 core.List<GoogleCloudVideointelligenceV1VideoAnnotationResults> annotationResults; | 117 core.List<GoogleCloudVideointelligenceV1VideoAnnotationResults> |
| 118 annotationResults; |
118 | 119 |
119 GoogleCloudVideointelligenceV1AnnotateVideoResponse(); | 120 GoogleCloudVideointelligenceV1AnnotateVideoResponse(); |
120 | 121 |
121 GoogleCloudVideointelligenceV1AnnotateVideoResponse.fromJson(core.Map _json) { | 122 GoogleCloudVideointelligenceV1AnnotateVideoResponse.fromJson(core.Map _json) { |
122 if (_json.containsKey("annotationResults")) { | 123 if (_json.containsKey("annotationResults")) { |
123 annotationResults = _json["annotationResults"].map((value) => new GoogleCloudVideointelligenceV1VideoAnnotationResults.fromJson(value)).toList(); | 124 annotationResults = _json["annotationResults"] |
| 125 .map((value) => |
| 126 new GoogleCloudVideointelligenceV1VideoAnnotationResults.fromJson( |
| 127 value)) |
| 128 .toList(); |
124 } | 129 } |
125 } | 130 } |
126 | 131 |
127 core.Map<core.String, core.Object> toJson() { | 132 core.Map<core.String, core.Object> toJson() { |
128 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 133 final core.Map<core.String, core.Object> _json = |
| 134 new core.Map<core.String, core.Object>(); |
129 if (annotationResults != null) { | 135 if (annotationResults != null) { |
130 _json["annotationResults"] = annotationResults.map((value) => (value).toJs
on()).toList(); | 136 _json["annotationResults"] = |
| 137 annotationResults.map((value) => (value).toJson()).toList(); |
131 } | 138 } |
132 return _json; | 139 return _json; |
133 } | 140 } |
134 } | 141 } |
135 | 142 |
136 /** Label annotation. */ | 143 /// Label annotation. |
137 class GoogleCloudVideointelligenceV1LabelAnnotation { | 144 class GoogleCloudVideointelligenceV1LabelAnnotation { |
138 /** Textual description, e.g. `Fixed-gear bicycle`. */ | 145 /// Textual description, e.g. `Fixed-gear bicycle`. |
139 core.String description; | 146 core.String description; |
140 /** Language code for `description` in BCP-47 format. */ | 147 |
| 148 /// Language code for `description` in BCP-47 format. |
141 core.String languageCode; | 149 core.String languageCode; |
142 /** Where the label was detected and with what confidence. */ | 150 |
| 151 /// Where the label was detected and with what confidence. |
143 core.List<GoogleCloudVideointelligenceV1LabelLocation> locations; | 152 core.List<GoogleCloudVideointelligenceV1LabelLocation> locations; |
144 | 153 |
145 GoogleCloudVideointelligenceV1LabelAnnotation(); | 154 GoogleCloudVideointelligenceV1LabelAnnotation(); |
146 | 155 |
147 GoogleCloudVideointelligenceV1LabelAnnotation.fromJson(core.Map _json) { | 156 GoogleCloudVideointelligenceV1LabelAnnotation.fromJson(core.Map _json) { |
148 if (_json.containsKey("description")) { | 157 if (_json.containsKey("description")) { |
149 description = _json["description"]; | 158 description = _json["description"]; |
150 } | 159 } |
151 if (_json.containsKey("languageCode")) { | 160 if (_json.containsKey("languageCode")) { |
152 languageCode = _json["languageCode"]; | 161 languageCode = _json["languageCode"]; |
153 } | 162 } |
154 if (_json.containsKey("locations")) { | 163 if (_json.containsKey("locations")) { |
155 locations = _json["locations"].map((value) => new GoogleCloudVideointelligenceV1LabelLocation.fromJson(value)).toList(); | 164 locations = _json["locations"] |
| 165 .map((value) => |
| 166 new GoogleCloudVideointelligenceV1LabelLocation.fromJson(value)) |
| 167 .toList(); |
156 } | 168 } |
157 } | 169 } |
158 | 170 |
159 core.Map<core.String, core.Object> toJson() { | 171 core.Map<core.String, core.Object> toJson() { |
160 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 172 final core.Map<core.String, core.Object> _json = |
| 173 new core.Map<core.String, core.Object>(); |
161 if (description != null) { | 174 if (description != null) { |
162 _json["description"] = description; | 175 _json["description"] = description; |
163 } | 176 } |
164 if (languageCode != null) { | 177 if (languageCode != null) { |
165 _json["languageCode"] = languageCode; | 178 _json["languageCode"] = languageCode; |
166 } | 179 } |
167 if (locations != null) { | 180 if (locations != null) { |
168 _json["locations"] = locations.map((value) => (value).toJson()).toList(); | 181 _json["locations"] = locations.map((value) => (value).toJson()).toList(); |
169 } | 182 } |
170 return _json; | 183 return _json; |
171 } | 184 } |
172 } | 185 } |
173 | 186 |
174 /** Label location. */ | 187 /// Label location. |
175 class GoogleCloudVideointelligenceV1LabelLocation { | 188 class GoogleCloudVideointelligenceV1LabelLocation { |
176 /** Confidence that the label is accurate. Range: [0, 1]. */ | 189 /// Confidence that the label is accurate. Range: [0, 1]. |
177 core.double confidence; | 190 core.double confidence; |
178 /** | 191 |
179 * Label level. | 192 /// Label level. |
180 * Possible string values are: | 193 /// Possible string values are: |
181 * - "LABEL_LEVEL_UNSPECIFIED" : Unspecified. | 194 /// - "LABEL_LEVEL_UNSPECIFIED" : Unspecified. |
182 * - "VIDEO_LEVEL" : Video-level. Corresponds to the whole video. | 195 /// - "VIDEO_LEVEL" : Video-level. Corresponds to the whole video. |
183 * - "SEGMENT_LEVEL" : Segment-level. Corresponds to one of | 196 /// - "SEGMENT_LEVEL" : Segment-level. Corresponds to one of |
184 * `AnnotateSpec.segments`. | 197 /// `AnnotateSpec.segments`. |
185 * - "SHOT_LEVEL" : Shot-level. Corresponds to a single shot (i.e. a series of | 198 /// - "SHOT_LEVEL" : Shot-level. Corresponds to a single shot (i.e. a series |
186 * frames | 199 /// of frames |
187 * without a major camera position or background change). | 200 /// without a major camera position or background change). |
188 * - "FRAME_LEVEL" : Frame-level. Corresponds to a single video frame. | 201 /// - "FRAME_LEVEL" : Frame-level. Corresponds to a single video frame. |
189 */ | |
190 core.String level; | 202 core.String level; |
191 /** | 203 |
192 * Video segment. Unset for video-level labels. | 204 /// Video segment. Unset for video-level labels. |
193 * Set to a frame timestamp for frame-level labels. | 205 /// Set to a frame timestamp for frame-level labels. |
194 * Otherwise, corresponds to one of `AnnotateSpec.segments` | 206 /// Otherwise, corresponds to one of `AnnotateSpec.segments` |
195 * (if specified) or to shot boundaries (if requested). | 207 /// (if specified) or to shot boundaries (if requested). |
196 */ | |
197 GoogleCloudVideointelligenceV1VideoSegment segment; | 208 GoogleCloudVideointelligenceV1VideoSegment segment; |
198 | 209 |
199 GoogleCloudVideointelligenceV1LabelLocation(); | 210 GoogleCloudVideointelligenceV1LabelLocation(); |
200 | 211 |
201 GoogleCloudVideointelligenceV1LabelLocation.fromJson(core.Map _json) { | 212 GoogleCloudVideointelligenceV1LabelLocation.fromJson(core.Map _json) { |
202 if (_json.containsKey("confidence")) { | 213 if (_json.containsKey("confidence")) { |
203 confidence = _json["confidence"]; | 214 confidence = _json["confidence"]; |
204 } | 215 } |
205 if (_json.containsKey("level")) { | 216 if (_json.containsKey("level")) { |
206 level = _json["level"]; | 217 level = _json["level"]; |
207 } | 218 } |
208 if (_json.containsKey("segment")) { | 219 if (_json.containsKey("segment")) { |
209 segment = new GoogleCloudVideointelligenceV1VideoSegment.fromJson(_json["segment"]); | 220 segment = new GoogleCloudVideointelligenceV1VideoSegment.fromJson( |
| 221 _json["segment"]); |
210 } | 222 } |
211 } | 223 } |
212 | 224 |
213 core.Map<core.String, core.Object> toJson() { | 225 core.Map<core.String, core.Object> toJson() { |
214 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 226 final core.Map<core.String, core.Object> _json = |
| 227 new core.Map<core.String, core.Object>(); |
215 if (confidence != null) { | 228 if (confidence != null) { |
216 _json["confidence"] = confidence; | 229 _json["confidence"] = confidence; |
217 } | 230 } |
218 if (level != null) { | 231 if (level != null) { |
219 _json["level"] = level; | 232 _json["level"] = level; |
220 } | 233 } |
221 if (segment != null) { | 234 if (segment != null) { |
222 _json["segment"] = (segment).toJson(); | 235 _json["segment"] = (segment).toJson(); |
223 } | 236 } |
224 return _json; | 237 return _json; |
225 } | 238 } |
226 } | 239 } |
227 | 240 |
228 /** | 241 /// Safe search annotation (based on per-frame visual signals only). |
229 * Safe search annotation (based on per-frame visual signals only). | 242 /// If no unsafe content has been detected in a frame, no annotations |
230 * If no unsafe content has been detected in a frame, no annotations | 243 /// are present for that frame. |
231 * are present for that frame. | |
232 */ | |
233 class GoogleCloudVideointelligenceV1SafeSearchAnnotation { | 244 class GoogleCloudVideointelligenceV1SafeSearchAnnotation { |
234 /** | 245 /// Likelihood of adult content. |
235 * Likelihood of adult content. | 246 /// Possible string values are: |
236 * Possible string values are: | 247 /// - "UNKNOWN" : Unknown likelihood. |
237 * - "UNKNOWN" : Unknown likelihood. | 248 /// - "VERY_UNLIKELY" : Very unlikely. |
238 * - "VERY_UNLIKELY" : Very unlikely. | 249 /// - "UNLIKELY" : Unlikely. |
239 * - "UNLIKELY" : Unlikely. | 250 /// - "POSSIBLE" : Possible. |
240 * - "POSSIBLE" : Possible. | 251 /// - "LIKELY" : Likely. |
241 * - "LIKELY" : Likely. | 252 /// - "VERY_LIKELY" : Very likely. |
242 * - "VERY_LIKELY" : Very likely. | |
243 */ | |
244 core.String adult; | 253 core.String adult; |
245 /** | 254 |
246 * Time-offset, relative to the beginning of the video, | 255 /// Time-offset, relative to the beginning of the video, |
247 * corresponding to the video frame for this annotation. | 256 /// corresponding to the video frame for this annotation. |
248 */ | |
249 core.String time; | 257 core.String time; |
250 | 258 |
251 GoogleCloudVideointelligenceV1SafeSearchAnnotation(); | 259 GoogleCloudVideointelligenceV1SafeSearchAnnotation(); |
252 | 260 |
253 GoogleCloudVideointelligenceV1SafeSearchAnnotation.fromJson(core.Map _json) { | 261 GoogleCloudVideointelligenceV1SafeSearchAnnotation.fromJson(core.Map _json) { |
254 if (_json.containsKey("adult")) { | 262 if (_json.containsKey("adult")) { |
255 adult = _json["adult"]; | 263 adult = _json["adult"]; |
256 } | 264 } |
257 if (_json.containsKey("time")) { | 265 if (_json.containsKey("time")) { |
258 time = _json["time"]; | 266 time = _json["time"]; |
259 } | 267 } |
260 } | 268 } |
261 | 269 |
262 core.Map<core.String, core.Object> toJson() { | 270 core.Map<core.String, core.Object> toJson() { |
263 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 271 final core.Map<core.String, core.Object> _json = |
| 272 new core.Map<core.String, core.Object>(); |
264 if (adult != null) { | 273 if (adult != null) { |
265 _json["adult"] = adult; | 274 _json["adult"] = adult; |
266 } | 275 } |
267 if (time != null) { | 276 if (time != null) { |
268 _json["time"] = time; | 277 _json["time"] = time; |
269 } | 278 } |
270 return _json; | 279 return _json; |
271 } | 280 } |
272 } | 281 } |
273 | 282 |
274 /** Annotation progress for a single video. */ | 283 /// Annotation progress for a single video. |
275 class GoogleCloudVideointelligenceV1VideoAnnotationProgress { | 284 class GoogleCloudVideointelligenceV1VideoAnnotationProgress { |
276 /** | 285 /// Video file location in |
277 * Video file location in | 286 /// [Google Cloud Storage](https://cloud.google.com/storage/). |
278 * [Google Cloud Storage](https://cloud.google.com/storage/). | |
279 */ | |
280 core.String inputUri; | 287 core.String inputUri; |
281 /** | 288 |
282 * Approximate percentage processed thus far. | 289 /// Approximate percentage processed thus far. |
283 * Guaranteed to be 100 when fully processed. | 290 /// Guaranteed to be 100 when fully processed. |
284 */ | |
285 core.int progressPercent; | 291 core.int progressPercent; |
286 /** Time when the request was received. */ | 292 |
| 293 /// Time when the request was received. |
287 core.String startTime; | 294 core.String startTime; |
288 /** Time of the most recent update. */ | 295 |
| 296 /// Time of the most recent update. |
289 core.String updateTime; | 297 core.String updateTime; |
290 | 298 |
291 GoogleCloudVideointelligenceV1VideoAnnotationProgress(); | 299 GoogleCloudVideointelligenceV1VideoAnnotationProgress(); |
292 | 300 |
293 GoogleCloudVideointelligenceV1VideoAnnotationProgress.fromJson(core.Map _json) { | 301 GoogleCloudVideointelligenceV1VideoAnnotationProgress.fromJson( |
| 302 core.Map _json) { |
294 if (_json.containsKey("inputUri")) { | 303 if (_json.containsKey("inputUri")) { |
295 inputUri = _json["inputUri"]; | 304 inputUri = _json["inputUri"]; |
296 } | 305 } |
297 if (_json.containsKey("progressPercent")) { | 306 if (_json.containsKey("progressPercent")) { |
298 progressPercent = _json["progressPercent"]; | 307 progressPercent = _json["progressPercent"]; |
299 } | 308 } |
300 if (_json.containsKey("startTime")) { | 309 if (_json.containsKey("startTime")) { |
301 startTime = _json["startTime"]; | 310 startTime = _json["startTime"]; |
302 } | 311 } |
303 if (_json.containsKey("updateTime")) { | 312 if (_json.containsKey("updateTime")) { |
304 updateTime = _json["updateTime"]; | 313 updateTime = _json["updateTime"]; |
305 } | 314 } |
306 } | 315 } |
307 | 316 |
308 core.Map<core.String, core.Object> toJson() { | 317 core.Map<core.String, core.Object> toJson() { |
309 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 318 final core.Map<core.String, core.Object> _json = |
| 319 new core.Map<core.String, core.Object>(); |
310 if (inputUri != null) { | 320 if (inputUri != null) { |
311 _json["inputUri"] = inputUri; | 321 _json["inputUri"] = inputUri; |
312 } | 322 } |
313 if (progressPercent != null) { | 323 if (progressPercent != null) { |
314 _json["progressPercent"] = progressPercent; | 324 _json["progressPercent"] = progressPercent; |
315 } | 325 } |
316 if (startTime != null) { | 326 if (startTime != null) { |
317 _json["startTime"] = startTime; | 327 _json["startTime"] = startTime; |
318 } | 328 } |
319 if (updateTime != null) { | 329 if (updateTime != null) { |
320 _json["updateTime"] = updateTime; | 330 _json["updateTime"] = updateTime; |
321 } | 331 } |
322 return _json; | 332 return _json; |
323 } | 333 } |
324 } | 334 } |
325 | 335 |
326 /** Annotation results for a single video. */ | 336 /// Annotation results for a single video. |
327 class GoogleCloudVideointelligenceV1VideoAnnotationResults { | 337 class GoogleCloudVideointelligenceV1VideoAnnotationResults { |
328 /** | 338 /// If set, indicates an error. Note that for a single `AnnotateVideoRequest` |
329 * If set, indicates an error. Note that for a single `AnnotateVideoRequest` | 339 /// some videos may succeed and some may fail. |
330 * some videos may succeed and some may fail. | |
331 */ | |
332 GoogleRpcStatus error; | 340 GoogleRpcStatus error; |
333 /** | 341 |
334 * Video file location in | 342 /// Video file location in |
335 * [Google Cloud Storage](https://cloud.google.com/storage/). | 343 /// [Google Cloud Storage](https://cloud.google.com/storage/). |
336 */ | |
337 core.String inputUri; | 344 core.String inputUri; |
338 /** Label annotations. There is exactly one element for each unique label. */ | 345 |
| 346 /// Label annotations. There is exactly one element for each unique label. |
339 core.List<GoogleCloudVideointelligenceV1LabelAnnotation> labelAnnotations; | 347 core.List<GoogleCloudVideointelligenceV1LabelAnnotation> labelAnnotations; |
340 /** Safe search annotations. */ | 348 |
341 core.List<GoogleCloudVideointelligenceV1SafeSearchAnnotation> safeSearchAnnotations; | 349 /// Safe search annotations. |
342 /** Shot annotations. Each shot is represented as a video segment. */ | 350 core.List<GoogleCloudVideointelligenceV1SafeSearchAnnotation> |
| 351 safeSearchAnnotations; |
| 352 |
| 353 /// Shot annotations. Each shot is represented as a video segment. |
343 core.List<GoogleCloudVideointelligenceV1VideoSegment> shotAnnotations; | 354 core.List<GoogleCloudVideointelligenceV1VideoSegment> shotAnnotations; |
344 | 355 |
345 GoogleCloudVideointelligenceV1VideoAnnotationResults(); | 356 GoogleCloudVideointelligenceV1VideoAnnotationResults(); |
346 | 357 |
347 GoogleCloudVideointelligenceV1VideoAnnotationResults.fromJson(core.Map _json) { | 358 GoogleCloudVideointelligenceV1VideoAnnotationResults.fromJson( |
| 359 core.Map _json) { |
348 if (_json.containsKey("error")) { | 360 if (_json.containsKey("error")) { |
349 error = new GoogleRpcStatus.fromJson(_json["error"]); | 361 error = new GoogleRpcStatus.fromJson(_json["error"]); |
350 } | 362 } |
351 if (_json.containsKey("inputUri")) { | 363 if (_json.containsKey("inputUri")) { |
352 inputUri = _json["inputUri"]; | 364 inputUri = _json["inputUri"]; |
353 } | 365 } |
354 if (_json.containsKey("labelAnnotations")) { | 366 if (_json.containsKey("labelAnnotations")) { |
355 labelAnnotations = _json["labelAnnotations"].map((value) => new GoogleCloudVideointelligenceV1LabelAnnotation.fromJson(value)).toList(); | 367 labelAnnotations = _json["labelAnnotations"] |
| 368 .map((value) => |
| 369 new GoogleCloudVideointelligenceV1LabelAnnotation.fromJson(value)) |
| 370 .toList(); |
356 } | 371 } |
357 if (_json.containsKey("safeSearchAnnotations")) { | 372 if (_json.containsKey("safeSearchAnnotations")) { |
358 safeSearchAnnotations = _json["safeSearchAnnotations"].map((value) => new GoogleCloudVideointelligenceV1SafeSearchAnnotation.fromJson(value)).toList(); | 373 safeSearchAnnotations = _json["safeSearchAnnotations"] |
| 374 .map((value) => |
| 375 new GoogleCloudVideointelligenceV1SafeSearchAnnotation.fromJson( |
| 376 value)) |
| 377 .toList(); |
359 } | 378 } |
360 if (_json.containsKey("shotAnnotations")) { | 379 if (_json.containsKey("shotAnnotations")) { |
361 shotAnnotations = _json["shotAnnotations"].map((value) => new GoogleCloudVideointelligenceV1VideoSegment.fromJson(value)).toList(); | 380 shotAnnotations = _json["shotAnnotations"] |
| 381 .map((value) => |
| 382 new GoogleCloudVideointelligenceV1VideoSegment.fromJson(value)) |
| 383 .toList(); |
362 } | 384 } |
363 } | 385 } |
364 | 386 |
365 core.Map<core.String, core.Object> toJson() { | 387 core.Map<core.String, core.Object> toJson() { |
366 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 388 final core.Map<core.String, core.Object> _json = |
| 389 new core.Map<core.String, core.Object>(); |
367 if (error != null) { | 390 if (error != null) { |
368 _json["error"] = (error).toJson(); | 391 _json["error"] = (error).toJson(); |
369 } | 392 } |
370 if (inputUri != null) { | 393 if (inputUri != null) { |
371 _json["inputUri"] = inputUri; | 394 _json["inputUri"] = inputUri; |
372 } | 395 } |
373 if (labelAnnotations != null) { | 396 if (labelAnnotations != null) { |
374 _json["labelAnnotations"] = labelAnnotations.map((value) => (value).toJson
()).toList(); | 397 _json["labelAnnotations"] = |
| 398 labelAnnotations.map((value) => (value).toJson()).toList(); |
375 } | 399 } |
376 if (safeSearchAnnotations != null) { | 400 if (safeSearchAnnotations != null) { |
377 _json["safeSearchAnnotations"] = safeSearchAnnotations.map((value) => (val
ue).toJson()).toList(); | 401 _json["safeSearchAnnotations"] = |
| 402 safeSearchAnnotations.map((value) => (value).toJson()).toList(); |
378 } | 403 } |
379 if (shotAnnotations != null) { | 404 if (shotAnnotations != null) { |
380 _json["shotAnnotations"] = shotAnnotations.map((value) => (value).toJson()
).toList(); | 405 _json["shotAnnotations"] = |
| 406 shotAnnotations.map((value) => (value).toJson()).toList(); |
381 } | 407 } |
382 return _json; | 408 return _json; |
383 } | 409 } |
384 } | 410 } |
385 | 411 |
386 /** Video segment. */ | 412 /// Video segment. |
387 class GoogleCloudVideointelligenceV1VideoSegment { | 413 class GoogleCloudVideointelligenceV1VideoSegment { |
388 /** | 414 /// Time-offset, relative to the beginning of the video, |
389 * Time-offset, relative to the beginning of the video, | 415 /// corresponding to the end of the segment (inclusive). |
390 * corresponding to the end of the segment (inclusive). | |
391 */ | |
392 core.String endTime; | 416 core.String endTime; |
393 /** | 417 |
394 * Time-offset, relative to the beginning of the video, | 418 /// Time-offset, relative to the beginning of the video, |
395 * corresponding to the start of the segment (inclusive). | 419 /// corresponding to the start of the segment (inclusive). |
396 */ | |
397 core.String startTime; | 420 core.String startTime; |
398 | 421 |
399 GoogleCloudVideointelligenceV1VideoSegment(); | 422 GoogleCloudVideointelligenceV1VideoSegment(); |
400 | 423 |
401 GoogleCloudVideointelligenceV1VideoSegment.fromJson(core.Map _json) { | 424 GoogleCloudVideointelligenceV1VideoSegment.fromJson(core.Map _json) { |
402 if (_json.containsKey("endTime")) { | 425 if (_json.containsKey("endTime")) { |
403 endTime = _json["endTime"]; | 426 endTime = _json["endTime"]; |
404 } | 427 } |
405 if (_json.containsKey("startTime")) { | 428 if (_json.containsKey("startTime")) { |
406 startTime = _json["startTime"]; | 429 startTime = _json["startTime"]; |
407 } | 430 } |
408 } | 431 } |
409 | 432 |
410 core.Map<core.String, core.Object> toJson() { | 433 core.Map<core.String, core.Object> toJson() { |
411 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 434 final core.Map<core.String, core.Object> _json = |
| 435 new core.Map<core.String, core.Object>(); |
412 if (endTime != null) { | 436 if (endTime != null) { |
413 _json["endTime"] = endTime; | 437 _json["endTime"] = endTime; |
414 } | 438 } |
415 if (startTime != null) { | 439 if (startTime != null) { |
416 _json["startTime"] = startTime; | 440 _json["startTime"] = startTime; |
417 } | 441 } |
418 return _json; | 442 return _json; |
419 } | 443 } |
420 } | 444 } |
421 | 445 |
422 /** | 446 /// Video annotation progress. Included in the `metadata` |
423 * Video annotation progress. Included in the `metadata` | 447 /// field of the `Operation` returned by the `GetOperation` |
424 * field of the `Operation` returned by the `GetOperation` | 448 /// call of the `google::longrunning::Operations` service. |
425 * call of the `google::longrunning::Operations` service. | |
426 */ | |
427 class GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress { | 449 class GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress { |
428 /** Progress metadata for all videos specified in `AnnotateVideoRequest`. */ | 450 /// Progress metadata for all videos specified in `AnnotateVideoRequest`. |
429 core.List<GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress> annotationProgress; | 451 core.List<GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress> |
| 452 annotationProgress; |
430 | 453 |
431 GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress(); | 454 GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress(); |
432 | 455 |
433 GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress.fromJson(core.Map _json) { | 456 GoogleCloudVideointelligenceV1beta1AnnotateVideoProgress.fromJson( |
| 457 core.Map _json) { |
434 if (_json.containsKey("annotationProgress")) { | 458 if (_json.containsKey("annotationProgress")) { |
435 annotationProgress = _json["annotationProgress"].map((value) => new GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress.fromJson(value)).toList(); | 459 annotationProgress = _json["annotationProgress"] |
| 460 .map((value) => |
| 461 new GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress |
| 462 .fromJson(value)) |
| 463 .toList(); |
436 } | 464 } |
437 } | 465 } |
438 | 466 |
439 core.Map<core.String, core.Object> toJson() { | 467 core.Map<core.String, core.Object> toJson() { |
440 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 468 final core.Map<core.String, core.Object> _json = |
| 469 new core.Map<core.String, core.Object>(); |
441 if (annotationProgress != null) { | 470 if (annotationProgress != null) { |
442 _json["annotationProgress"] = annotationProgress.map((value) => (value).to
Json()).toList(); | 471 _json["annotationProgress"] = |
| 472 annotationProgress.map((value) => (value).toJson()).toList(); |
443 } | 473 } |
444 return _json; | 474 return _json; |
445 } | 475 } |
446 } | 476 } |
447 | 477 |
448 /** Video annotation request. */ | 478 /// Video annotation request. |
449 class GoogleCloudVideointelligenceV1beta1AnnotateVideoRequest { | 479 class GoogleCloudVideointelligenceV1beta1AnnotateVideoRequest { |
450 /** Requested video annotation features. */ | 480 /// Requested video annotation features. |
451 core.List<core.String> features; | 481 core.List<core.String> features; |
452 /** | 482 |
453 * The video data bytes. Encoding: base64. If unset, the input video(s) | 483 /// The video data bytes. Encoding: base64. If unset, the input video(s) |
454 * should be specified via `input_uri`. If set, `input_uri` should be unset. | 484 /// should be specified via `input_uri`. If set, `input_uri` should be unset. |
455 */ | |
456 core.String inputContent; | 485 core.String inputContent; |
457 /** | 486 |
458 * Input video location. Currently, only | 487 /// Input video location. Currently, only |
459 * [Google Cloud Storage](https://cloud.google.com/storage/) URIs are | 488 /// [Google Cloud Storage](https://cloud.google.com/storage/) URIs are |
460 * supported, which must be specified in the following format: | 489 /// supported, which must be specified in the following format: |
461 * `gs://bucket-id/object-id` (other URI formats return | 490 /// `gs://bucket-id/object-id` (other URI formats return |
462 * google.rpc.Code.INVALID_ARGUMENT). For more information, see | 491 /// google.rpc.Code.INVALID_ARGUMENT). For more information, see |
463 * [Request URIs](/storage/docs/reference-uris). | 492 /// [Request URIs](/storage/docs/reference-uris). |
464 * A video URI may include wildcards in `object-id`, and thus identify | 493 /// A video URI may include wildcards in `object-id`, and thus identify |
465 * multiple videos. Supported wildcards: '*' to match 0 or more characters; | 494 /// multiple videos. Supported wildcards: '*' to match 0 or more characters; |
466 * '?' to match 1 character. If unset, the input video should be embedded | 495 /// '?' to match 1 character. If unset, the input video should be embedded |
467 * in the request as `input_content`. If set, `input_content` should be unset. | 496 /// in the request as `input_content`. If set, `input_content` should be |
468 */ | 497 /// unset. |
469 core.String inputUri; | 498 core.String inputUri; |
470 /** | 499 |
471 * Optional cloud region where annotation should take place. Supported cloud | 500 /// Optional cloud region where annotation should take place. Supported cloud |
472 * regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region | 501 /// regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no |
473 * is specified, a region will be determined based on video file location. | 502 /// region |
474 */ | 503 /// is specified, a region will be determined based on video file location. |
475 core.String locationId; | 504 core.String locationId; |
476 /** | 505 |
477 * Optional location where the output (in JSON format) should be stored. | 506 /// Optional location where the output (in JSON format) should be stored. |
478 * Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) | 507 /// Currently, only [Google Cloud Storage](https://cloud.google.com/storage/) |
479 * URIs are supported, which must be specified in the following format: | 508 /// URIs are supported, which must be specified in the following format: |
480 * `gs://bucket-id/object-id` (other URI formats return | 509 /// `gs://bucket-id/object-id` (other URI formats return |
481 * google.rpc.Code.INVALID_ARGUMENT). For more information, see | 510 /// google.rpc.Code.INVALID_ARGUMENT). For more information, see |
482 * [Request URIs](/storage/docs/reference-uris). | 511 /// [Request URIs](/storage/docs/reference-uris). |
483 */ | |
484 core.String outputUri; | 512 core.String outputUri; |
485 /** Additional video context and/or feature-specific parameters. */ | 513 |
| 514 /// Additional video context and/or feature-specific parameters. |
486 GoogleCloudVideointelligenceV1beta1VideoContext videoContext; | 515 GoogleCloudVideointelligenceV1beta1VideoContext videoContext; |
487 | 516 |
488 GoogleCloudVideointelligenceV1beta1AnnotateVideoRequest(); | 517 GoogleCloudVideointelligenceV1beta1AnnotateVideoRequest(); |
489 | 518 |
490 GoogleCloudVideointelligenceV1beta1AnnotateVideoRequest.fromJson(core.Map _json) { | 519 GoogleCloudVideointelligenceV1beta1AnnotateVideoRequest.fromJson( |
| 520 core.Map _json) { |
491 if (_json.containsKey("features")) { | 521 if (_json.containsKey("features")) { |
492 features = _json["features"]; | 522 features = _json["features"]; |
493 } | 523 } |
494 if (_json.containsKey("inputContent")) { | 524 if (_json.containsKey("inputContent")) { |
495 inputContent = _json["inputContent"]; | 525 inputContent = _json["inputContent"]; |
496 } | 526 } |
497 if (_json.containsKey("inputUri")) { | 527 if (_json.containsKey("inputUri")) { |
498 inputUri = _json["inputUri"]; | 528 inputUri = _json["inputUri"]; |
499 } | 529 } |
500 if (_json.containsKey("locationId")) { | 530 if (_json.containsKey("locationId")) { |
501 locationId = _json["locationId"]; | 531 locationId = _json["locationId"]; |
502 } | 532 } |
503 if (_json.containsKey("outputUri")) { | 533 if (_json.containsKey("outputUri")) { |
504 outputUri = _json["outputUri"]; | 534 outputUri = _json["outputUri"]; |
505 } | 535 } |
506 if (_json.containsKey("videoContext")) { | 536 if (_json.containsKey("videoContext")) { |
507 videoContext = new GoogleCloudVideointelligenceV1beta1VideoContext.fromJson(_json["videoContext"]); | 537 videoContext = |
| 538 new GoogleCloudVideointelligenceV1beta1VideoContext.fromJson( |
| 539 _json["videoContext"]); |
508 } | 540 } |
509 } | 541 } |
510 | 542 |
511 core.Map<core.String, core.Object> toJson() { | 543 core.Map<core.String, core.Object> toJson() { |
512 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 544 final core.Map<core.String, core.Object> _json = |
| 545 new core.Map<core.String, core.Object>(); |
513 if (features != null) { | 546 if (features != null) { |
514 _json["features"] = features; | 547 _json["features"] = features; |
515 } | 548 } |
516 if (inputContent != null) { | 549 if (inputContent != null) { |
517 _json["inputContent"] = inputContent; | 550 _json["inputContent"] = inputContent; |
518 } | 551 } |
519 if (inputUri != null) { | 552 if (inputUri != null) { |
520 _json["inputUri"] = inputUri; | 553 _json["inputUri"] = inputUri; |
521 } | 554 } |
522 if (locationId != null) { | 555 if (locationId != null) { |
523 _json["locationId"] = locationId; | 556 _json["locationId"] = locationId; |
524 } | 557 } |
525 if (outputUri != null) { | 558 if (outputUri != null) { |
526 _json["outputUri"] = outputUri; | 559 _json["outputUri"] = outputUri; |
527 } | 560 } |
528 if (videoContext != null) { | 561 if (videoContext != null) { |
529 _json["videoContext"] = (videoContext).toJson(); | 562 _json["videoContext"] = (videoContext).toJson(); |
530 } | 563 } |
531 return _json; | 564 return _json; |
532 } | 565 } |
533 } | 566 } |
534 | 567 |
535 /** | 568 /// Video annotation response. Included in the `response` |
536 * Video annotation response. Included in the `response` | 569 /// field of the `Operation` returned by the `GetOperation` |
537 * field of the `Operation` returned by the `GetOperation` | 570 /// call of the `google::longrunning::Operations` service. |
538 * call of the `google::longrunning::Operations` service. | |
539 */ | |
540 class GoogleCloudVideointelligenceV1beta1AnnotateVideoResponse { | 571 class GoogleCloudVideointelligenceV1beta1AnnotateVideoResponse { |
541 /** Annotation results for all videos specified in `AnnotateVideoRequest`. */ | 572 /// Annotation results for all videos specified in `AnnotateVideoRequest`. |
542 core.List<GoogleCloudVideointelligenceV1beta1VideoAnnotationResults> annotationResults; | 573 core.List<GoogleCloudVideointelligenceV1beta1VideoAnnotationResults> |
| 574 annotationResults; |
543 | 575 |
544 GoogleCloudVideointelligenceV1beta1AnnotateVideoResponse(); | 576 GoogleCloudVideointelligenceV1beta1AnnotateVideoResponse(); |
545 | 577 |
546 GoogleCloudVideointelligenceV1beta1AnnotateVideoResponse.fromJson(core.Map _json) { | 578 GoogleCloudVideointelligenceV1beta1AnnotateVideoResponse.fromJson( |
| 579 core.Map _json) { |
547 if (_json.containsKey("annotationResults")) { | 580 if (_json.containsKey("annotationResults")) { |
548 annotationResults = _json["annotationResults"].map((value) => new GoogleCloudVideointelligenceV1beta1VideoAnnotationResults.fromJson(value)).toList(); | 581 annotationResults = _json["annotationResults"] |
| 582 .map((value) => |
| 583 new GoogleCloudVideointelligenceV1beta1VideoAnnotationResults |
| 584 .fromJson(value)) |
| 585 .toList(); |
549 } | 586 } |
550 } | 587 } |
551 | 588 |
552 core.Map<core.String, core.Object> toJson() { | 589 core.Map<core.String, core.Object> toJson() { |
553 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 590 final core.Map<core.String, core.Object> _json = |
| 591 new core.Map<core.String, core.Object>(); |
554 if (annotationResults != null) { | 592 if (annotationResults != null) { |
555 _json["annotationResults"] = annotationResults.map((value) => (value).toJs
on()).toList(); | 593 _json["annotationResults"] = |
| 594 annotationResults.map((value) => (value).toJson()).toList(); |
556 } | 595 } |
557 return _json; | 596 return _json; |
558 } | 597 } |
559 } | 598 } |
560 | 599 |
561 /** Label annotation. */ | 600 /// Label annotation. |
562 class GoogleCloudVideointelligenceV1beta1LabelAnnotation { | 601 class GoogleCloudVideointelligenceV1beta1LabelAnnotation { |
563 /** Textual description, e.g. `Fixed-gear bicycle`. */ | 602 /// Textual description, e.g. `Fixed-gear bicycle`. |
564 core.String description; | 603 core.String description; |
565 /** Language code for `description` in BCP-47 format. */ | 604 |
| 605 /// Language code for `description` in BCP-47 format. |
566 core.String languageCode; | 606 core.String languageCode; |
567 /** Where the label was detected and with what confidence. */ | 607 |
| 608 /// Where the label was detected and with what confidence. |
568 core.List<GoogleCloudVideointelligenceV1beta1LabelLocation> locations; | 609 core.List<GoogleCloudVideointelligenceV1beta1LabelLocation> locations; |
569 | 610 |
570 GoogleCloudVideointelligenceV1beta1LabelAnnotation(); | 611 GoogleCloudVideointelligenceV1beta1LabelAnnotation(); |
571 | 612 |
572 GoogleCloudVideointelligenceV1beta1LabelAnnotation.fromJson(core.Map _json) { | 613 GoogleCloudVideointelligenceV1beta1LabelAnnotation.fromJson(core.Map _json) { |
573 if (_json.containsKey("description")) { | 614 if (_json.containsKey("description")) { |
574 description = _json["description"]; | 615 description = _json["description"]; |
575 } | 616 } |
576 if (_json.containsKey("languageCode")) { | 617 if (_json.containsKey("languageCode")) { |
577 languageCode = _json["languageCode"]; | 618 languageCode = _json["languageCode"]; |
578 } | 619 } |
579 if (_json.containsKey("locations")) { | 620 if (_json.containsKey("locations")) { |
580 locations = _json["locations"].map((value) => new GoogleCloudVideointelligenceV1beta1LabelLocation.fromJson(value)).toList(); | 621 locations = _json["locations"] |
| 622 .map((value) => |
| 623 new GoogleCloudVideointelligenceV1beta1LabelLocation.fromJson( |
| 624 value)) |
| 625 .toList(); |
581 } | 626 } |
582 } | 627 } |
583 | 628 |
584 core.Map<core.String, core.Object> toJson() { | 629 core.Map<core.String, core.Object> toJson() { |
585 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 630 final core.Map<core.String, core.Object> _json = |
| 631 new core.Map<core.String, core.Object>(); |
586 if (description != null) { | 632 if (description != null) { |
587 _json["description"] = description; | 633 _json["description"] = description; |
588 } | 634 } |
589 if (languageCode != null) { | 635 if (languageCode != null) { |
590 _json["languageCode"] = languageCode; | 636 _json["languageCode"] = languageCode; |
591 } | 637 } |
592 if (locations != null) { | 638 if (locations != null) { |
593 _json["locations"] = locations.map((value) => (value).toJson()).toList(); | 639 _json["locations"] = locations.map((value) => (value).toJson()).toList(); |
594 } | 640 } |
595 return _json; | 641 return _json; |
596 } | 642 } |
597 } | 643 } |
598 | 644 |
599 /** Label location. */ | 645 /// Label location. |
600 class GoogleCloudVideointelligenceV1beta1LabelLocation { | 646 class GoogleCloudVideointelligenceV1beta1LabelLocation { |
601 /** Confidence that the label is accurate. Range: [0, 1]. */ | 647 /// Confidence that the label is accurate. Range: [0, 1]. |
602 core.double confidence; | 648 core.double confidence; |
603 /** | 649 |
604 * Label level. | 650 /// Label level. |
605 * Possible string values are: | 651 /// Possible string values are: |
606 * - "LABEL_LEVEL_UNSPECIFIED" : Unspecified. | 652 /// - "LABEL_LEVEL_UNSPECIFIED" : Unspecified. |
607 * - "VIDEO_LEVEL" : Video-level. Corresponds to the whole video. | 653 /// - "VIDEO_LEVEL" : Video-level. Corresponds to the whole video. |
608 * - "SEGMENT_LEVEL" : Segment-level. Corresponds to one of | 654 /// - "SEGMENT_LEVEL" : Segment-level. Corresponds to one of |
609 * `AnnotateSpec.segments`. | 655 /// `AnnotateSpec.segments`. |
610 * - "SHOT_LEVEL" : Shot-level. Corresponds to a single shot (i.e. a series of | 656 /// - "SHOT_LEVEL" : Shot-level. Corresponds to a single shot (i.e. a series |
611 * frames | 657 /// of frames |
612 * without a major camera position or background change). | 658 /// without a major camera position or background change). |
613 * - "FRAME_LEVEL" : Frame-level. Corresponds to a single video frame. | 659 /// - "FRAME_LEVEL" : Frame-level. Corresponds to a single video frame. |
614 */ | |
615 core.String level; | 660 core.String level; |
616 /** | 661 |
617 * Video segment. Set to [-1, -1] for video-level labels. | 662 /// Video segment. Set to [-1, -1] for video-level labels. |
618 * Set to [timestamp, timestamp] for frame-level labels. | 663 /// Set to [timestamp, timestamp] for frame-level labels. |
619 * Otherwise, corresponds to one of `AnnotateSpec.segments` | 664 /// Otherwise, corresponds to one of `AnnotateSpec.segments` |
620 * (if specified) or to shot boundaries (if requested). | 665 /// (if specified) or to shot boundaries (if requested). |
621 */ | |
622 GoogleCloudVideointelligenceV1beta1VideoSegment segment; | 666 GoogleCloudVideointelligenceV1beta1VideoSegment segment; |
623 | 667 |
624 GoogleCloudVideointelligenceV1beta1LabelLocation(); | 668 GoogleCloudVideointelligenceV1beta1LabelLocation(); |
625 | 669 |
626 GoogleCloudVideointelligenceV1beta1LabelLocation.fromJson(core.Map _json) { | 670 GoogleCloudVideointelligenceV1beta1LabelLocation.fromJson(core.Map _json) { |
627 if (_json.containsKey("confidence")) { | 671 if (_json.containsKey("confidence")) { |
628 confidence = _json["confidence"]; | 672 confidence = _json["confidence"]; |
629 } | 673 } |
630 if (_json.containsKey("level")) { | 674 if (_json.containsKey("level")) { |
631 level = _json["level"]; | 675 level = _json["level"]; |
632 } | 676 } |
633 if (_json.containsKey("segment")) { | 677 if (_json.containsKey("segment")) { |
634 segment = new GoogleCloudVideointelligenceV1beta1VideoSegment.fromJson(_json["segment"]); | 678 segment = new GoogleCloudVideointelligenceV1beta1VideoSegment.fromJson( |
| 679 _json["segment"]); |
635 } | 680 } |
636 } | 681 } |
637 | 682 |
638 core.Map<core.String, core.Object> toJson() { | 683 core.Map<core.String, core.Object> toJson() { |
639 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 684 final core.Map<core.String, core.Object> _json = |
| 685 new core.Map<core.String, core.Object>(); |
640 if (confidence != null) { | 686 if (confidence != null) { |
641 _json["confidence"] = confidence; | 687 _json["confidence"] = confidence; |
642 } | 688 } |
643 if (level != null) { | 689 if (level != null) { |
644 _json["level"] = level; | 690 _json["level"] = level; |
645 } | 691 } |
646 if (segment != null) { | 692 if (segment != null) { |
647 _json["segment"] = (segment).toJson(); | 693 _json["segment"] = (segment).toJson(); |
648 } | 694 } |
649 return _json; | 695 return _json; |
650 } | 696 } |
651 } | 697 } |
652 | 698 |
653 /** | 699 /// Safe search annotation (based on per-frame visual signals only). |
654 * Safe search annotation (based on per-frame visual signals only). | 700 /// If no unsafe content has been detected in a frame, no annotations |
655 * If no unsafe content has been detected in a frame, no annotations | 701 /// are present for that frame. If only some types of unsafe content |
656 * are present for that frame. If only some types of unsafe content | 702 /// have been detected in a frame, the likelihood is set to `UNKNOWN` |
657 * have been detected in a frame, the likelihood is set to `UNKNOWN` | 703 /// for all other types of unsafe content. |
658 * for all other types of unsafe content. | |
659 */ | |
660 class GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation { | 704 class GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation { |
661 /** | 705 /// Likelihood of adult content. |
662 * Likelihood of adult content. | 706 /// Possible string values are: |
663 * Possible string values are: | 707 /// - "UNKNOWN" : Unknown likelihood. |
664 * - "UNKNOWN" : Unknown likelihood. | 708 /// - "VERY_UNLIKELY" : Very unlikely. |
665 * - "VERY_UNLIKELY" : Very unlikely. | 709 /// - "UNLIKELY" : Unlikely. |
666 * - "UNLIKELY" : Unlikely. | 710 /// - "POSSIBLE" : Possible. |
667 * - "POSSIBLE" : Possible. | 711 /// - "LIKELY" : Likely. |
668 * - "LIKELY" : Likely. | 712 /// - "VERY_LIKELY" : Very likely. |
669 * - "VERY_LIKELY" : Very likely. | |
670 */ | |
671 core.String adult; | 713 core.String adult; |
672 /** | 714 |
673 * Likelihood of medical content. | 715 /// Likelihood of medical content. |
674 * Possible string values are: | 716 /// Possible string values are: |
675 * - "UNKNOWN" : Unknown likelihood. | 717 /// - "UNKNOWN" : Unknown likelihood. |
676 * - "VERY_UNLIKELY" : Very unlikely. | 718 /// - "VERY_UNLIKELY" : Very unlikely. |
677 * - "UNLIKELY" : Unlikely. | 719 /// - "UNLIKELY" : Unlikely. |
678 * - "POSSIBLE" : Possible. | 720 /// - "POSSIBLE" : Possible. |
679 * - "LIKELY" : Likely. | 721 /// - "LIKELY" : Likely. |
680 * - "VERY_LIKELY" : Very likely. | 722 /// - "VERY_LIKELY" : Very likely. |
681 */ | |
682 core.String medical; | 723 core.String medical; |
683 /** | 724 |
684 * Likelihood of racy content. | 725 /// Likelihood of racy content. |
685 * Possible string values are: | 726 /// Possible string values are: |
686 * - "UNKNOWN" : Unknown likelihood. | 727 /// - "UNKNOWN" : Unknown likelihood. |
687 * - "VERY_UNLIKELY" : Very unlikely. | 728 /// - "VERY_UNLIKELY" : Very unlikely. |
688 * - "UNLIKELY" : Unlikely. | 729 /// - "UNLIKELY" : Unlikely. |
689 * - "POSSIBLE" : Possible. | 730 /// - "POSSIBLE" : Possible. |
690 * - "LIKELY" : Likely. | 731 /// - "LIKELY" : Likely. |
691 * - "VERY_LIKELY" : Very likely. | 732 /// - "VERY_LIKELY" : Very likely. |
692 */ | |
693 core.String racy; | 733 core.String racy; |
694 /** | 734 |
695 * Likelihood that an obvious modification was made to the original | 735 /// Likelihood that an obvious modification was made to the original |
696 * version to make it appear funny or offensive. | 736 /// version to make it appear funny or offensive. |
697 * Possible string values are: | 737 /// Possible string values are: |
698 * - "UNKNOWN" : Unknown likelihood. | 738 /// - "UNKNOWN" : Unknown likelihood. |
699 * - "VERY_UNLIKELY" : Very unlikely. | 739 /// - "VERY_UNLIKELY" : Very unlikely. |
700 * - "UNLIKELY" : Unlikely. | 740 /// - "UNLIKELY" : Unlikely. |
701 * - "POSSIBLE" : Possible. | 741 /// - "POSSIBLE" : Possible. |
702 * - "LIKELY" : Likely. | 742 /// - "LIKELY" : Likely. |
703 * - "VERY_LIKELY" : Very likely. | 743 /// - "VERY_LIKELY" : Very likely. |
704 */ | |
705 core.String spoof; | 744 core.String spoof; |
706 /** Video time offset in microseconds. */ | 745 |
| 746 /// Video time offset in microseconds. |
707 core.String timeOffset; | 747 core.String timeOffset; |
708 /** | 748 |
709 * Likelihood of violent content. | 749 /// Likelihood of violent content. |
710 * Possible string values are: | 750 /// Possible string values are: |
711 * - "UNKNOWN" : Unknown likelihood. | 751 /// - "UNKNOWN" : Unknown likelihood. |
712 * - "VERY_UNLIKELY" : Very unlikely. | 752 /// - "VERY_UNLIKELY" : Very unlikely. |
713 * - "UNLIKELY" : Unlikely. | 753 /// - "UNLIKELY" : Unlikely. |
714 * - "POSSIBLE" : Possible. | 754 /// - "POSSIBLE" : Possible. |
715 * - "LIKELY" : Likely. | 755 /// - "LIKELY" : Likely. |
716 * - "VERY_LIKELY" : Very likely. | 756 /// - "VERY_LIKELY" : Very likely. |
717 */ | |
718 core.String violent; | 757 core.String violent; |
719 | 758 |
720 GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation(); | 759 GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation(); |
721 | 760 |
722 GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation.fromJson(core.Map _json) { | 761 GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation.fromJson( |
| 762 core.Map _json) { |
723 if (_json.containsKey("adult")) { | 763 if (_json.containsKey("adult")) { |
724 adult = _json["adult"]; | 764 adult = _json["adult"]; |
725 } | 765 } |
726 if (_json.containsKey("medical")) { | 766 if (_json.containsKey("medical")) { |
727 medical = _json["medical"]; | 767 medical = _json["medical"]; |
728 } | 768 } |
729 if (_json.containsKey("racy")) { | 769 if (_json.containsKey("racy")) { |
730 racy = _json["racy"]; | 770 racy = _json["racy"]; |
731 } | 771 } |
732 if (_json.containsKey("spoof")) { | 772 if (_json.containsKey("spoof")) { |
733 spoof = _json["spoof"]; | 773 spoof = _json["spoof"]; |
734 } | 774 } |
735 if (_json.containsKey("timeOffset")) { | 775 if (_json.containsKey("timeOffset")) { |
736 timeOffset = _json["timeOffset"]; | 776 timeOffset = _json["timeOffset"]; |
737 } | 777 } |
738 if (_json.containsKey("violent")) { | 778 if (_json.containsKey("violent")) { |
739 violent = _json["violent"]; | 779 violent = _json["violent"]; |
740 } | 780 } |
741 } | 781 } |
742 | 782 |
743 core.Map<core.String, core.Object> toJson() { | 783 core.Map<core.String, core.Object> toJson() { |
744 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 784 final core.Map<core.String, core.Object> _json = |
| 785 new core.Map<core.String, core.Object>(); |
745 if (adult != null) { | 786 if (adult != null) { |
746 _json["adult"] = adult; | 787 _json["adult"] = adult; |
747 } | 788 } |
748 if (medical != null) { | 789 if (medical != null) { |
749 _json["medical"] = medical; | 790 _json["medical"] = medical; |
750 } | 791 } |
751 if (racy != null) { | 792 if (racy != null) { |
752 _json["racy"] = racy; | 793 _json["racy"] = racy; |
753 } | 794 } |
754 if (spoof != null) { | 795 if (spoof != null) { |
755 _json["spoof"] = spoof; | 796 _json["spoof"] = spoof; |
756 } | 797 } |
757 if (timeOffset != null) { | 798 if (timeOffset != null) { |
758 _json["timeOffset"] = timeOffset; | 799 _json["timeOffset"] = timeOffset; |
759 } | 800 } |
760 if (violent != null) { | 801 if (violent != null) { |
761 _json["violent"] = violent; | 802 _json["violent"] = violent; |
762 } | 803 } |
763 return _json; | 804 return _json; |
764 } | 805 } |
765 } | 806 } |
766 | 807 |
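
Note: a minimal usage sketch for the class above, not part of the generated file. The likelihood fields are plain strings, so a caller that wants to threshold them needs its own ordering. The import path assumes the usual googleapis_beta package layout; likelihoodRank and needsReview are hypothetical helpers.

import 'package:googleapis_beta/videointelligence/v1beta1.dart';

// Hypothetical ordering for the likelihood strings documented above.
const likelihoodRank = const {
  'UNKNOWN': 0,
  'VERY_UNLIKELY': 1,
  'UNLIKELY': 2,
  'POSSIBLE': 3,
  'LIKELY': 4,
  'VERY_LIKELY': 5,
};

// Returns true when the frame's adult or violent likelihood is POSSIBLE or
// stronger, using the rank table above.
bool needsReview(GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation a) {
  final adult = likelihoodRank[a.adult] ?? 0;
  final violent = likelihoodRank[a.violent] ?? 0;
  return adult >= likelihoodRank['POSSIBLE'] ||
      violent >= likelihoodRank['POSSIBLE'];
}
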
767 /** Annotation progress for a single video. */ | 808 /// Annotation progress for a single video. |
768 class GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress { | 809 class GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress { |
769 /** | 810 /// Video file location in |
770 * Video file location in | 811 /// [Google Cloud Storage](https://cloud.google.com/storage/). |
771 * [Google Cloud Storage](https://cloud.google.com/storage/). | |
772 */ | |
773 core.String inputUri; | 812 core.String inputUri; |
774 /** | 813 |
775 * Approximate percentage processed thus far. | 814 /// Approximate percentage processed thus far. |
776 * Guaranteed to be 100 when fully processed. | 815 /// Guaranteed to be 100 when fully processed. |
777 */ | |
778 core.int progressPercent; | 816 core.int progressPercent; |
779 /** Time when the request was received. */ | 817 |
| 818 /// Time when the request was received. |
780 core.String startTime; | 819 core.String startTime; |
781 /** Time of the most recent update. */ | 820 |
| 821 /// Time of the most recent update. |
782 core.String updateTime; | 822 core.String updateTime; |
783 | 823 |
784 GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress(); | 824 GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress(); |
785 | 825 |
786 GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress.fromJson(core.Map _json) { | 826 GoogleCloudVideointelligenceV1beta1VideoAnnotationProgress.fromJson( |
| 827 core.Map _json) { |
787 if (_json.containsKey("inputUri")) { | 828 if (_json.containsKey("inputUri")) { |
788 inputUri = _json["inputUri"]; | 829 inputUri = _json["inputUri"]; |
789 } | 830 } |
790 if (_json.containsKey("progressPercent")) { | 831 if (_json.containsKey("progressPercent")) { |
791 progressPercent = _json["progressPercent"]; | 832 progressPercent = _json["progressPercent"]; |
792 } | 833 } |
793 if (_json.containsKey("startTime")) { | 834 if (_json.containsKey("startTime")) { |
794 startTime = _json["startTime"]; | 835 startTime = _json["startTime"]; |
795 } | 836 } |
796 if (_json.containsKey("updateTime")) { | 837 if (_json.containsKey("updateTime")) { |
797 updateTime = _json["updateTime"]; | 838 updateTime = _json["updateTime"]; |
798 } | 839 } |
799 } | 840 } |
800 | 841 |
801 core.Map<core.String, core.Object> toJson() { | 842 core.Map<core.String, core.Object> toJson() { |
802 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 843 final core.Map<core.String, core.Object> _json = |
| 844 new core.Map<core.String, core.Object>(); |
803 if (inputUri != null) { | 845 if (inputUri != null) { |
804 _json["inputUri"] = inputUri; | 846 _json["inputUri"] = inputUri; |
805 } | 847 } |
806 if (progressPercent != null) { | 848 if (progressPercent != null) { |
807 _json["progressPercent"] = progressPercent; | 849 _json["progressPercent"] = progressPercent; |
808 } | 850 } |
809 if (startTime != null) { | 851 if (startTime != null) { |
810 _json["startTime"] = startTime; | 852 _json["startTime"] = startTime; |
811 } | 853 } |
812 if (updateTime != null) { | 854 if (updateTime != null) { |
813 _json["updateTime"] = updateTime; | 855 _json["updateTime"] = updateTime; |
814 } | 856 } |
815 return _json; | 857 return _json; |
816 } | 858 } |
817 } | 859 } |
818 | 860 |
819 /** Annotation results for a single video. */ | 861 /// Annotation results for a single video. |
820 class GoogleCloudVideointelligenceV1beta1VideoAnnotationResults { | 862 class GoogleCloudVideointelligenceV1beta1VideoAnnotationResults { |
821 /** | 863 /// If set, indicates an error. Note that for a single `AnnotateVideoRequest` |
822 * If set, indicates an error. Note that for a single `AnnotateVideoRequest` | 864 /// some videos may succeed and some may fail. |
823 * some videos may succeed and some may fail. | |
824 */ | |
825 GoogleRpcStatus error; | 865 GoogleRpcStatus error; |
826 /** | 866 |
827 * Video file location in | 867 /// Video file location in |
828 * [Google Cloud Storage](https://cloud.google.com/storage/). | 868 /// [Google Cloud Storage](https://cloud.google.com/storage/). |
829 */ | |
830 core.String inputUri; | 869 core.String inputUri; |
831 /** Label annotations. There is exactly one element for each unique label. */ | 870 |
832 core.List<GoogleCloudVideointelligenceV1beta1LabelAnnotation> labelAnnotations
; | 871 /// Label annotations. There is exactly one element for each unique label. |
833 /** Safe search annotations. */ | 872 core.List<GoogleCloudVideointelligenceV1beta1LabelAnnotation> |
834 core.List<GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation> safeSearchA
nnotations; | 873 labelAnnotations; |
835 /** Shot annotations. Each shot is represented as a video segment. */ | 874 |
| 875 /// Safe search annotations. |
| 876 core.List<GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation> |
| 877 safeSearchAnnotations; |
| 878 |
| 879 /// Shot annotations. Each shot is represented as a video segment. |
836 core.List<GoogleCloudVideointelligenceV1beta1VideoSegment> shotAnnotations; | 880 core.List<GoogleCloudVideointelligenceV1beta1VideoSegment> shotAnnotations; |
837 | 881 |
838 GoogleCloudVideointelligenceV1beta1VideoAnnotationResults(); | 882 GoogleCloudVideointelligenceV1beta1VideoAnnotationResults(); |
839 | 883 |
840 GoogleCloudVideointelligenceV1beta1VideoAnnotationResults.fromJson(core.Map _json) { | 884 GoogleCloudVideointelligenceV1beta1VideoAnnotationResults.fromJson( |
| 885 core.Map _json) { |
841 if (_json.containsKey("error")) { | 886 if (_json.containsKey("error")) { |
842 error = new GoogleRpcStatus.fromJson(_json["error"]); | 887 error = new GoogleRpcStatus.fromJson(_json["error"]); |
843 } | 888 } |
844 if (_json.containsKey("inputUri")) { | 889 if (_json.containsKey("inputUri")) { |
845 inputUri = _json["inputUri"]; | 890 inputUri = _json["inputUri"]; |
846 } | 891 } |
847 if (_json.containsKey("labelAnnotations")) { | 892 if (_json.containsKey("labelAnnotations")) { |
848 labelAnnotations = _json["labelAnnotations"].map((value) => new GoogleCloudVideointelligenceV1beta1LabelAnnotation.fromJson(value)).toList(); | 893 labelAnnotations = _json["labelAnnotations"] |
| 894 .map((value) => |
| 895 new GoogleCloudVideointelligenceV1beta1LabelAnnotation.fromJson( |
| 896 value)) |
| 897 .toList(); |
849 } | 898 } |
850 if (_json.containsKey("safeSearchAnnotations")) { | 899 if (_json.containsKey("safeSearchAnnotations")) { |
851 safeSearchAnnotations = _json["safeSearchAnnotations"].map((value) => new GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation.fromJson(value)).toList(); | 900 safeSearchAnnotations = _json["safeSearchAnnotations"] |
| 901 .map((value) => |
| 902 new GoogleCloudVideointelligenceV1beta1SafeSearchAnnotation |
| 903 .fromJson(value)) |
| 904 .toList(); |
852 } | 905 } |
853 if (_json.containsKey("shotAnnotations")) { | 906 if (_json.containsKey("shotAnnotations")) { |
854 shotAnnotations = _json["shotAnnotations"].map((value) => new GoogleCloudVideointelligenceV1beta1VideoSegment.fromJson(value)).toList(); | 907 shotAnnotations = _json["shotAnnotations"] |
| 908 .map((value) => |
| 909 new GoogleCloudVideointelligenceV1beta1VideoSegment.fromJson( |
| 910 value)) |
| 911 .toList(); |
855 } | 912 } |
856 } | 913 } |
857 | 914 |
858 core.Map<core.String, core.Object> toJson() { | 915 core.Map<core.String, core.Object> toJson() { |
859 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 916 final core.Map<core.String, core.Object> _json = |
| 917 new core.Map<core.String, core.Object>(); |
860 if (error != null) { | 918 if (error != null) { |
861 _json["error"] = (error).toJson(); | 919 _json["error"] = (error).toJson(); |
862 } | 920 } |
863 if (inputUri != null) { | 921 if (inputUri != null) { |
864 _json["inputUri"] = inputUri; | 922 _json["inputUri"] = inputUri; |
865 } | 923 } |
866 if (labelAnnotations != null) { | 924 if (labelAnnotations != null) { |
867 _json["labelAnnotations"] = labelAnnotations.map((value) => (value).toJson()).toList(); | 925 _json["labelAnnotations"] = |
| 926 labelAnnotations.map((value) => (value).toJson()).toList(); |
868 } | 927 } |
869 if (safeSearchAnnotations != null) { | 928 if (safeSearchAnnotations != null) { |
870 _json["safeSearchAnnotations"] = safeSearchAnnotations.map((value) => (value).toJson()).toList(); | 929 _json["safeSearchAnnotations"] = |
| 930 safeSearchAnnotations.map((value) => (value).toJson()).toList(); |
871 } | 931 } |
872 if (shotAnnotations != null) { | 932 if (shotAnnotations != null) { |
873 _json["shotAnnotations"] = shotAnnotations.map((value) => (value).toJson()).toList(); | 933 _json["shotAnnotations"] = |
| 934 shotAnnotations.map((value) => (value).toJson()).toList(); |
874 } | 935 } |
875 return _json; | 936 return _json; |
876 } | 937 } |
877 } | 938 } |
878 | 939 |
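
Note: a minimal sketch of consuming the per-video results above, not part of the generated file. Per the comment on `error`, a single batch can mix successes and failures, so each result is checked individually. Import path assumed as in the earlier sketch; summarize is a hypothetical helper.

import 'package:googleapis_beta/videointelligence/v1beta1.dart';

// Prints a one-line summary per video, plus its shot boundaries.
void summarize(GoogleCloudVideointelligenceV1beta1VideoAnnotationResults r) {
  if (r.error != null) {
    // A single AnnotateVideoRequest can mix successes and failures.
    print('${r.inputUri} failed: ${r.error.code} ${r.error.message}');
    return;
  }
  print('${r.inputUri}: ${r.labelAnnotations?.length ?? 0} labels, '
      '${r.shotAnnotations?.length ?? 0} shots');
  final shots =
      r.shotAnnotations ?? <GoogleCloudVideointelligenceV1beta1VideoSegment>[];
  for (var shot in shots) {
    // Offsets are microseconds encoded as strings (see VideoSegment below).
    print('  shot: ${shot.startTimeOffset}..${shot.endTimeOffset} us');
  }
}
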
879 /** Video context and/or feature-specific parameters. */ | 940 /// Video context and/or feature-specific parameters. |
880 class GoogleCloudVideointelligenceV1beta1VideoContext { | 941 class GoogleCloudVideointelligenceV1beta1VideoContext { |
881 /** | 942 /// If label detection has been requested, what labels should be detected |
882 * If label detection has been requested, what labels should be detected | 943 /// in addition to video-level labels or segment-level labels. If |
883 * in addition to video-level labels or segment-level labels. If unspecified, | 944 /// unspecified, |
884 * defaults to `SHOT_MODE`. | 945 /// defaults to `SHOT_MODE`. |
885 * Possible string values are: | 946 /// Possible string values are: |
886 * - "LABEL_DETECTION_MODE_UNSPECIFIED" : Unspecified. | 947 /// - "LABEL_DETECTION_MODE_UNSPECIFIED" : Unspecified. |
887 * - "SHOT_MODE" : Detect shot-level labels. | 948 /// - "SHOT_MODE" : Detect shot-level labels. |
888 * - "FRAME_MODE" : Detect frame-level labels. | 949 /// - "FRAME_MODE" : Detect frame-level labels. |
889 * - "SHOT_AND_FRAME_MODE" : Detect both shot-level and frame-level labels. | 950 /// - "SHOT_AND_FRAME_MODE" : Detect both shot-level and frame-level labels. |
890 */ | |
891 core.String labelDetectionMode; | 951 core.String labelDetectionMode; |
892 /** | 952 |
893 * Model to use for label detection. | 953 /// Model to use for label detection. |
894 * Supported values: "latest" and "stable" (the default). | 954 /// Supported values: "latest" and "stable" (the default). |
895 */ | |
896 core.String labelDetectionModel; | 955 core.String labelDetectionModel; |
897 /** | 956 |
898 * Model to use for safe search detection. | 957 /// Model to use for safe search detection. |
899 * Supported values: "latest" and "stable" (the default). | 958 /// Supported values: "latest" and "stable" (the default). |
900 */ | |
901 core.String safeSearchDetectionModel; | 959 core.String safeSearchDetectionModel; |
902 /** | 960 |
903 * Video segments to annotate. The segments may overlap and are not required | 961 /// Video segments to annotate. The segments may overlap and are not required |
904 * to be contiguous or span the whole video. If unspecified, each video | 962 /// to be contiguous or span the whole video. If unspecified, each video |
905 * is treated as a single segment. | 963 /// is treated as a single segment. |
906 */ | |
907 core.List<GoogleCloudVideointelligenceV1beta1VideoSegment> segments; | 964 core.List<GoogleCloudVideointelligenceV1beta1VideoSegment> segments; |
908 /** | 965 |
909 * Model to use for shot change detection. | 966 /// Model to use for shot change detection. |
910 * Supported values: "latest" and "stable" (the default). | 967 /// Supported values: "latest" and "stable" (the default). |
911 */ | |
912 core.String shotChangeDetectionModel; | 968 core.String shotChangeDetectionModel; |
913 /** | 969 |
914 * Whether the video has been shot from a stationary (i.e. non-moving) camera. | 970 /// Whether the video has been shot from a stationary (i.e. non-moving) |
915 * When set to true, might improve detection accuracy for moving objects. | 971 /// camera. |
916 */ | 972 /// When set to true, might improve detection accuracy for moving objects. |
917 core.bool stationaryCamera; | 973 core.bool stationaryCamera; |
918 | 974 |
919 GoogleCloudVideointelligenceV1beta1VideoContext(); | 975 GoogleCloudVideointelligenceV1beta1VideoContext(); |
920 | 976 |
921 GoogleCloudVideointelligenceV1beta1VideoContext.fromJson(core.Map _json) { | 977 GoogleCloudVideointelligenceV1beta1VideoContext.fromJson(core.Map _json) { |
922 if (_json.containsKey("labelDetectionMode")) { | 978 if (_json.containsKey("labelDetectionMode")) { |
923 labelDetectionMode = _json["labelDetectionMode"]; | 979 labelDetectionMode = _json["labelDetectionMode"]; |
924 } | 980 } |
925 if (_json.containsKey("labelDetectionModel")) { | 981 if (_json.containsKey("labelDetectionModel")) { |
926 labelDetectionModel = _json["labelDetectionModel"]; | 982 labelDetectionModel = _json["labelDetectionModel"]; |
927 } | 983 } |
928 if (_json.containsKey("safeSearchDetectionModel")) { | 984 if (_json.containsKey("safeSearchDetectionModel")) { |
929 safeSearchDetectionModel = _json["safeSearchDetectionModel"]; | 985 safeSearchDetectionModel = _json["safeSearchDetectionModel"]; |
930 } | 986 } |
931 if (_json.containsKey("segments")) { | 987 if (_json.containsKey("segments")) { |
932 segments = _json["segments"].map((value) => new GoogleCloudVideointelligenceV1beta1VideoSegment.fromJson(value)).toList(); | 988 segments = _json["segments"] |
| 989 .map((value) => |
| 990 new GoogleCloudVideointelligenceV1beta1VideoSegment.fromJson( |
| 991 value)) |
| 992 .toList(); |
933 } | 993 } |
934 if (_json.containsKey("shotChangeDetectionModel")) { | 994 if (_json.containsKey("shotChangeDetectionModel")) { |
935 shotChangeDetectionModel = _json["shotChangeDetectionModel"]; | 995 shotChangeDetectionModel = _json["shotChangeDetectionModel"]; |
936 } | 996 } |
937 if (_json.containsKey("stationaryCamera")) { | 997 if (_json.containsKey("stationaryCamera")) { |
938 stationaryCamera = _json["stationaryCamera"]; | 998 stationaryCamera = _json["stationaryCamera"]; |
939 } | 999 } |
940 } | 1000 } |
941 | 1001 |
942 core.Map<core.String, core.Object> toJson() { | 1002 core.Map<core.String, core.Object> toJson() { |
943 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 1003 final core.Map<core.String, core.Object> _json = |
| 1004 new core.Map<core.String, core.Object>(); |
944 if (labelDetectionMode != null) { | 1005 if (labelDetectionMode != null) { |
945 _json["labelDetectionMode"] = labelDetectionMode; | 1006 _json["labelDetectionMode"] = labelDetectionMode; |
946 } | 1007 } |
947 if (labelDetectionModel != null) { | 1008 if (labelDetectionModel != null) { |
948 _json["labelDetectionModel"] = labelDetectionModel; | 1009 _json["labelDetectionModel"] = labelDetectionModel; |
949 } | 1010 } |
950 if (safeSearchDetectionModel != null) { | 1011 if (safeSearchDetectionModel != null) { |
951 _json["safeSearchDetectionModel"] = safeSearchDetectionModel; | 1012 _json["safeSearchDetectionModel"] = safeSearchDetectionModel; |
952 } | 1013 } |
953 if (segments != null) { | 1014 if (segments != null) { |
954 _json["segments"] = segments.map((value) => (value).toJson()).toList(); | 1015 _json["segments"] = segments.map((value) => (value).toJson()).toList(); |
955 } | 1016 } |
956 if (shotChangeDetectionModel != null) { | 1017 if (shotChangeDetectionModel != null) { |
957 _json["shotChangeDetectionModel"] = shotChangeDetectionModel; | 1018 _json["shotChangeDetectionModel"] = shotChangeDetectionModel; |
958 } | 1019 } |
959 if (stationaryCamera != null) { | 1020 if (stationaryCamera != null) { |
960 _json["stationaryCamera"] = stationaryCamera; | 1021 _json["stationaryCamera"] = stationaryCamera; |
961 } | 1022 } |
962 return _json; | 1023 return _json; |
963 } | 1024 } |
964 } | 1025 } |
965 | 1026 |
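
Note: a minimal sketch of filling in the context above, not part of the generated file. All fields are optional, and the segment offsets are microseconds encoded as strings, per the VideoSegment class that follows. Import path assumed as before; firstTenSeconds is a hypothetical helper.

import 'package:googleapis_beta/videointelligence/v1beta1.dart';

// Builds a context that detects shot-level labels with the default model and
// restricts annotation to the first ten seconds of the video.
GoogleCloudVideointelligenceV1beta1VideoContext firstTenSeconds() {
  return new GoogleCloudVideointelligenceV1beta1VideoContext()
    ..labelDetectionMode = 'SHOT_MODE'
    ..labelDetectionModel = 'stable'
    ..segments = [
      new GoogleCloudVideointelligenceV1beta1VideoSegment()
        ..startTimeOffset = '0'
        ..endTimeOffset = '10000000' // 10 s expressed in microseconds
    ];
}
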
966 /** Video segment. */ | 1027 /// Video segment. |
967 class GoogleCloudVideointelligenceV1beta1VideoSegment { | 1028 class GoogleCloudVideointelligenceV1beta1VideoSegment { |
968 /** End offset in microseconds (inclusive). Unset means 0. */ | 1029 /// End offset in microseconds (inclusive). Unset means 0. |
969 core.String endTimeOffset; | 1030 core.String endTimeOffset; |
970 /** Start offset in microseconds (inclusive). Unset means 0. */ | 1031 |
| 1032 /// Start offset in microseconds (inclusive). Unset means 0. |
971 core.String startTimeOffset; | 1033 core.String startTimeOffset; |
972 | 1034 |
973 GoogleCloudVideointelligenceV1beta1VideoSegment(); | 1035 GoogleCloudVideointelligenceV1beta1VideoSegment(); |
974 | 1036 |
975 GoogleCloudVideointelligenceV1beta1VideoSegment.fromJson(core.Map _json) { | 1037 GoogleCloudVideointelligenceV1beta1VideoSegment.fromJson(core.Map _json) { |
976 if (_json.containsKey("endTimeOffset")) { | 1038 if (_json.containsKey("endTimeOffset")) { |
977 endTimeOffset = _json["endTimeOffset"]; | 1039 endTimeOffset = _json["endTimeOffset"]; |
978 } | 1040 } |
979 if (_json.containsKey("startTimeOffset")) { | 1041 if (_json.containsKey("startTimeOffset")) { |
980 startTimeOffset = _json["startTimeOffset"]; | 1042 startTimeOffset = _json["startTimeOffset"]; |
981 } | 1043 } |
982 } | 1044 } |
983 | 1045 |
984 core.Map<core.String, core.Object> toJson() { | 1046 core.Map<core.String, core.Object> toJson() { |
985 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 1047 final core.Map<core.String, core.Object> _json = |
| 1048 new core.Map<core.String, core.Object>(); |
986 if (endTimeOffset != null) { | 1049 if (endTimeOffset != null) { |
987 _json["endTimeOffset"] = endTimeOffset; | 1050 _json["endTimeOffset"] = endTimeOffset; |
988 } | 1051 } |
| 1052 if (startTimeOffset != null) { |
| 1053 _json["startTimeOffset"] = startTimeOffset; |
| 1054 } |
| 1055 return _json; |
| 1056 } |
| 1057 } |
| 1058 |
| 1059 /// Video annotation progress. Included in the `metadata` |
| 1060 /// field of the `Operation` returned by the `GetOperation` |
| 1061 /// call of the `google::longrunning::Operations` service. |
| 1062 class GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress { |
| 1063 /// Progress metadata for all videos specified in `AnnotateVideoRequest`. |
| 1064 core.List<GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress> |
| 1065 annotationProgress; |
| 1066 |
| 1067 GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress(); |
| 1068 |
| 1069 GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress.fromJson( |
| 1070 core.Map _json) { |
| 1071 if (_json.containsKey("annotationProgress")) { |
| 1072 annotationProgress = _json["annotationProgress"] |
| 1073 .map((value) => |
| 1074 new GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress |
| 1075 .fromJson(value)) |
| 1076 .toList(); |
| 1077 } |
| 1078 } |
| 1079 |
| 1080 core.Map<core.String, core.Object> toJson() { |
| 1081 final core.Map<core.String, core.Object> _json = |
| 1082 new core.Map<core.String, core.Object>(); |
| 1083 if (annotationProgress != null) { |
| 1084 _json["annotationProgress"] = |
| 1085 annotationProgress.map((value) => (value).toJson()).toList(); |
| 1086 } |
| 1087 return _json; |
| 1088 } |
| 1089 } |
| 1090 |
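
Note: a minimal sketch of aggregating the progress list above, not part of the generated file. Since each video reports its own progressPercent, a conservative overall figure is the minimum across videos. Import path assumed as before; overallProgress is a hypothetical helper.

import 'package:googleapis_beta/videointelligence/v1beta1.dart';

// Returns the smallest per-video progressPercent (100 if the list is empty,
// 0 for a video that has not reported a value yet).
int overallProgress(
    GoogleCloudVideointelligenceV1beta2AnnotateVideoProgress progress) {
  int percent = 100;
  final videos = progress.annotationProgress ??
      <GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress>[];
  for (var video in videos) {
    final int p = video.progressPercent ?? 0;
    if (p < percent) percent = p;
  }
  return percent;
}
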
| 1091 /// Video annotation response. Included in the `response` |
| 1092 /// field of the `Operation` returned by the `GetOperation` |
| 1093 /// call of the `google::longrunning::Operations` service. |
| 1094 class GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse { |
| 1095 /// Annotation results for all videos specified in `AnnotateVideoRequest`. |
| 1096 core.List<GoogleCloudVideointelligenceV1beta2VideoAnnotationResults> |
| 1097 annotationResults; |
| 1098 |
| 1099 GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse(); |
| 1100 |
| 1101 GoogleCloudVideointelligenceV1beta2AnnotateVideoResponse.fromJson( |
| 1102 core.Map _json) { |
| 1103 if (_json.containsKey("annotationResults")) { |
| 1104 annotationResults = _json["annotationResults"] |
| 1105 .map((value) => |
| 1106 new GoogleCloudVideointelligenceV1beta2VideoAnnotationResults |
| 1107 .fromJson(value)) |
| 1108 .toList(); |
| 1109 } |
| 1110 } |
| 1111 |
| 1112 core.Map<core.String, core.Object> toJson() { |
| 1113 final core.Map<core.String, core.Object> _json = |
| 1114 new core.Map<core.String, core.Object>(); |
| 1115 if (annotationResults != null) { |
| 1116 _json["annotationResults"] = |
| 1117 annotationResults.map((value) => (value).toJson()).toList(); |
| 1118 } |
| 1119 return _json; |
| 1120 } |
| 1121 } |
| 1122 |
| 1123 /// Detected entity from video analysis. |
| 1124 class GoogleCloudVideointelligenceV1beta2Entity { |
| 1125 /// Textual description, e.g. `Fixed-gear bicycle`. |
| 1126 core.String description; |
| 1127 |
| 1128 /// Opaque entity ID. Some IDs may be available in |
| 1129 /// [Google Knowledge Graph Search |
| 1130 /// API](https://developers.google.com/knowledge-graph/). |
| 1131 core.String entityId; |
| 1132 |
| 1133 /// Language code for `description` in BCP-47 format. |
| 1134 core.String languageCode; |
| 1135 |
| 1136 GoogleCloudVideointelligenceV1beta2Entity(); |
| 1137 |
| 1138 GoogleCloudVideointelligenceV1beta2Entity.fromJson(core.Map _json) { |
| 1139 if (_json.containsKey("description")) { |
| 1140 description = _json["description"]; |
| 1141 } |
| 1142 if (_json.containsKey("entityId")) { |
| 1143 entityId = _json["entityId"]; |
| 1144 } |
| 1145 if (_json.containsKey("languageCode")) { |
| 1146 languageCode = _json["languageCode"]; |
| 1147 } |
| 1148 } |
| 1149 |
| 1150 core.Map<core.String, core.Object> toJson() { |
| 1151 final core.Map<core.String, core.Object> _json = |
| 1152 new core.Map<core.String, core.Object>(); |
| 1153 if (description != null) { |
| 1154 _json["description"] = description; |
| 1155 } |
| 1156 if (entityId != null) { |
| 1157 _json["entityId"] = entityId; |
| 1158 } |
| 1159 if (languageCode != null) { |
| 1160 _json["languageCode"] = languageCode; |
| 1161 } |
| 1162 return _json; |
| 1163 } |
| 1164 } |
| 1165 |
| 1166 /// Explicit content annotation (based on per-frame visual signals only). |
| 1167 /// If no explicit content has been detected in a frame, no annotations are |
| 1168 /// present for that frame. |
| 1169 class GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation { |
| 1170 /// All video frames where explicit content was detected. |
| 1171 core.List<GoogleCloudVideointelligenceV1beta2ExplicitContentFrame> frames; |
| 1172 |
| 1173 GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation(); |
| 1174 |
| 1175 GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation.fromJson( |
| 1176 core.Map _json) { |
| 1177 if (_json.containsKey("frames")) { |
| 1178 frames = _json["frames"] |
| 1179 .map((value) => |
| 1180 new GoogleCloudVideointelligenceV1beta2ExplicitContentFrame |
| 1181 .fromJson(value)) |
| 1182 .toList(); |
| 1183 } |
| 1184 } |
| 1185 |
| 1186 core.Map<core.String, core.Object> toJson() { |
| 1187 final core.Map<core.String, core.Object> _json = |
| 1188 new core.Map<core.String, core.Object>(); |
| 1189 if (frames != null) { |
| 1190 _json["frames"] = frames.map((value) => (value).toJson()).toList(); |
| 1191 } |
| 1192 return _json; |
| 1193 } |
| 1194 } |
| 1195 |
| 1196 /// Video frame level annotation results for explicit content. |
| 1197 class GoogleCloudVideointelligenceV1beta2ExplicitContentFrame { |
| 1198 /// Likelihood of the pornography content. |
| 1199 /// Possible string values are: |
| 1200 /// - "LIKELIHOOD_UNSPECIFIED" : Unspecified likelihood. |
| 1201 /// - "VERY_UNLIKELY" : Very unlikely. |
| 1202 /// - "UNLIKELY" : Unlikely. |
| 1203 /// - "POSSIBLE" : Possible. |
| 1204 /// - "LIKELY" : Likely. |
| 1205 /// - "VERY_LIKELY" : Very likely. |
| 1206 core.String pornographyLikelihood; |
| 1207 |
| 1208 /// Time-offset, relative to the beginning of the video, corresponding to the |
| 1209 /// video frame for this location. |
| 1210 core.String timeOffset; |
| 1211 |
| 1212 GoogleCloudVideointelligenceV1beta2ExplicitContentFrame(); |
| 1213 |
| 1214 GoogleCloudVideointelligenceV1beta2ExplicitContentFrame.fromJson( |
| 1215 core.Map _json) { |
| 1216 if (_json.containsKey("pornographyLikelihood")) { |
| 1217 pornographyLikelihood = _json["pornographyLikelihood"]; |
| 1218 } |
| 1219 if (_json.containsKey("timeOffset")) { |
| 1220 timeOffset = _json["timeOffset"]; |
| 1221 } |
| 1222 } |
| 1223 |
| 1224 core.Map<core.String, core.Object> toJson() { |
| 1225 final core.Map<core.String, core.Object> _json = |
| 1226 new core.Map<core.String, core.Object>(); |
| 1227 if (pornographyLikelihood != null) { |
| 1228 _json["pornographyLikelihood"] = pornographyLikelihood; |
| 1229 } |
| 1230 if (timeOffset != null) { |
| 1231 _json["timeOffset"] = timeOffset; |
| 1232 } |
| 1233 return _json; |
| 1234 } |
| 1235 } |
| 1236 |
| 1237 /// Label annotation. |
| 1238 class GoogleCloudVideointelligenceV1beta2LabelAnnotation { |
| 1239 /// Common categories for the detected entity. |
| 1240 /// E.g. when the label is `Terrier` the category is likely `dog`. And in |
| 1241 /// some |
| 1242 /// cases there might be more than one category, e.g. `Terrier` could also |
| 1243 /// be |
| 1244 /// a `pet`. |
| 1245 core.List<GoogleCloudVideointelligenceV1beta2Entity> categoryEntities; |
| 1246 |
| 1247 /// Detected entity. |
| 1248 GoogleCloudVideointelligenceV1beta2Entity entity; |
| 1249 |
| 1250 /// All video frames where a label was detected. |
| 1251 core.List<GoogleCloudVideointelligenceV1beta2LabelFrame> frames; |
| 1252 |
| 1253 /// All video segments where a label was detected. |
| 1254 core.List<GoogleCloudVideointelligenceV1beta2LabelSegment> segments; |
| 1255 |
| 1256 GoogleCloudVideointelligenceV1beta2LabelAnnotation(); |
| 1257 |
| 1258 GoogleCloudVideointelligenceV1beta2LabelAnnotation.fromJson(core.Map _json) { |
| 1259 if (_json.containsKey("categoryEntities")) { |
| 1260 categoryEntities = _json["categoryEntities"] |
| 1261 .map((value) => |
| 1262 new GoogleCloudVideointelligenceV1beta2Entity.fromJson(value)) |
| 1263 .toList(); |
| 1264 } |
| 1265 if (_json.containsKey("entity")) { |
| 1266 entity = new GoogleCloudVideointelligenceV1beta2Entity.fromJson( |
| 1267 _json["entity"]); |
| 1268 } |
| 1269 if (_json.containsKey("frames")) { |
| 1270 frames = _json["frames"] |
| 1271 .map((value) => |
| 1272 new GoogleCloudVideointelligenceV1beta2LabelFrame.fromJson(value)) |
| 1273 .toList(); |
| 1274 } |
| 1275 if (_json.containsKey("segments")) { |
| 1276 segments = _json["segments"] |
| 1277 .map((value) => |
| 1278 new GoogleCloudVideointelligenceV1beta2LabelSegment.fromJson( |
| 1279 value)) |
| 1280 .toList(); |
| 1281 } |
| 1282 } |
| 1283 |
| 1284 core.Map<core.String, core.Object> toJson() { |
| 1285 final core.Map<core.String, core.Object> _json = |
| 1286 new core.Map<core.String, core.Object>(); |
| 1287 if (categoryEntities != null) { |
| 1288 _json["categoryEntities"] = |
| 1289 categoryEntities.map((value) => (value).toJson()).toList(); |
| 1290 } |
| 1291 if (entity != null) { |
| 1292 _json["entity"] = (entity).toJson(); |
| 1293 } |
| 1294 if (frames != null) { |
| 1295 _json["frames"] = frames.map((value) => (value).toJson()).toList(); |
| 1296 } |
| 1297 if (segments != null) { |
| 1298 _json["segments"] = segments.map((value) => (value).toJson()).toList(); |
| 1299 } |
| 1300 return _json; |
| 1301 } |
| 1302 } |
| 1303 |
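
Note: a minimal sketch of walking the annotation above, not part of the generated file. Each label carries an entity plus per-segment and per-frame confidences. Import path assumed as before; printLabel is a hypothetical helper.

import 'package:googleapis_beta/videointelligence/v1beta1.dart';

// Prints one line per segment in which the label was detected.
void printLabel(GoogleCloudVideointelligenceV1beta2LabelAnnotation label) {
  final name = label.entity?.description ?? '(unknown entity)';
  final segments =
      label.segments ?? <GoogleCloudVideointelligenceV1beta2LabelSegment>[];
  for (var s in segments) {
    print('$name: confidence ${s.confidence?.toStringAsFixed(2)} in '
        '${s.segment?.startTimeOffset}..${s.segment?.endTimeOffset}');
  }
}
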
| 1304 /// Video frame level annotation results for label detection. |
| 1305 class GoogleCloudVideointelligenceV1beta2LabelFrame { |
| 1306 /// Confidence that the label is accurate. Range: [0, 1]. |
| 1307 core.double confidence; |
| 1308 |
| 1309 /// Time-offset, relative to the beginning of the video, corresponding to the |
| 1310 /// video frame for this location. |
| 1311 core.String timeOffset; |
| 1312 |
| 1313 GoogleCloudVideointelligenceV1beta2LabelFrame(); |
| 1314 |
| 1315 GoogleCloudVideointelligenceV1beta2LabelFrame.fromJson(core.Map _json) { |
| 1316 if (_json.containsKey("confidence")) { |
| 1317 confidence = _json["confidence"]; |
| 1318 } |
| 1319 if (_json.containsKey("timeOffset")) { |
| 1320 timeOffset = _json["timeOffset"]; |
| 1321 } |
| 1322 } |
| 1323 |
| 1324 core.Map<core.String, core.Object> toJson() { |
| 1325 final core.Map<core.String, core.Object> _json = |
| 1326 new core.Map<core.String, core.Object>(); |
| 1327 if (confidence != null) { |
| 1328 _json["confidence"] = confidence; |
| 1329 } |
| 1330 if (timeOffset != null) { |
| 1331 _json["timeOffset"] = timeOffset; |
| 1332 } |
| 1333 return _json; |
| 1334 } |
| 1335 } |
| 1336 |
| 1337 /// Video segment level annotation results for label detection. |
| 1338 class GoogleCloudVideointelligenceV1beta2LabelSegment { |
| 1339 /// Confidence that the label is accurate. Range: [0, 1]. |
| 1340 core.double confidence; |
| 1341 |
| 1342 /// Video segment where a label was detected. |
| 1343 GoogleCloudVideointelligenceV1beta2VideoSegment segment; |
| 1344 |
| 1345 GoogleCloudVideointelligenceV1beta2LabelSegment(); |
| 1346 |
| 1347 GoogleCloudVideointelligenceV1beta2LabelSegment.fromJson(core.Map _json) { |
| 1348 if (_json.containsKey("confidence")) { |
| 1349 confidence = _json["confidence"]; |
| 1350 } |
| 1351 if (_json.containsKey("segment")) { |
| 1352 segment = new GoogleCloudVideointelligenceV1beta2VideoSegment.fromJson( |
| 1353 _json["segment"]); |
| 1354 } |
| 1355 } |
| 1356 |
| 1357 core.Map<core.String, core.Object> toJson() { |
| 1358 final core.Map<core.String, core.Object> _json = |
| 1359 new core.Map<core.String, core.Object>(); |
| 1360 if (confidence != null) { |
| 1361 _json["confidence"] = confidence; |
| 1362 } |
| 1363 if (segment != null) { |
| 1364 _json["segment"] = (segment).toJson(); |
| 1365 } |
| 1366 return _json; |
| 1367 } |
| 1368 } |
| 1369 |
| 1370 /// Annotation progress for a single video. |
| 1371 class GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress { |
| 1372 /// Video file location in |
| 1373 /// [Google Cloud Storage](https://cloud.google.com/storage/). |
| 1374 core.String inputUri; |
| 1375 |
| 1376 /// Approximate percentage processed thus far. |
| 1377 /// Guaranteed to be 100 when fully processed. |
| 1378 core.int progressPercent; |
| 1379 |
| 1380 /// Time when the request was received. |
| 1381 core.String startTime; |
| 1382 |
| 1383 /// Time of the most recent update. |
| 1384 core.String updateTime; |
| 1385 |
| 1386 GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress(); |
| 1387 |
| 1388 GoogleCloudVideointelligenceV1beta2VideoAnnotationProgress.fromJson( |
| 1389 core.Map _json) { |
| 1390 if (_json.containsKey("inputUri")) { |
| 1391 inputUri = _json["inputUri"]; |
| 1392 } |
| 1393 if (_json.containsKey("progressPercent")) { |
| 1394 progressPercent = _json["progressPercent"]; |
| 1395 } |
| 1396 if (_json.containsKey("startTime")) { |
| 1397 startTime = _json["startTime"]; |
| 1398 } |
| 1399 if (_json.containsKey("updateTime")) { |
| 1400 updateTime = _json["updateTime"]; |
| 1401 } |
| 1402 } |
| 1403 |
| 1404 core.Map<core.String, core.Object> toJson() { |
| 1405 final core.Map<core.String, core.Object> _json = |
| 1406 new core.Map<core.String, core.Object>(); |
| 1407 if (inputUri != null) { |
| 1408 _json["inputUri"] = inputUri; |
| 1409 } |
| 1410 if (progressPercent != null) { |
| 1411 _json["progressPercent"] = progressPercent; |
| 1412 } |
| 1413 if (startTime != null) { |
| 1414 _json["startTime"] = startTime; |
| 1415 } |
| 1416 if (updateTime != null) { |
| 1417 _json["updateTime"] = updateTime; |
| 1418 } |
| 1419 return _json; |
| 1420 } |
| 1421 } |
| 1422 |
| 1423 /// Annotation results for a single video. |
| 1424 class GoogleCloudVideointelligenceV1beta2VideoAnnotationResults { |
| 1425 /// If set, indicates an error. Note that for a single `AnnotateVideoRequest` |
| 1426 /// some videos may succeed and some may fail. |
| 1427 GoogleRpcStatus error; |
| 1428 |
| 1429 /// Explicit content annotation. |
| 1430 GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation |
| 1431 explicitAnnotation; |
| 1432 |
| 1433 /// Label annotations on frame level. |
| 1434 /// There is exactly one element for each unique label. |
| 1435 core.List<GoogleCloudVideointelligenceV1beta2LabelAnnotation> |
| 1436 frameLabelAnnotations; |
| 1437 |
| 1438 /// Video file location in |
| 1439 /// [Google Cloud Storage](https://cloud.google.com/storage/). |
| 1440 core.String inputUri; |
| 1441 |
| 1442 /// Label annotations on video level or user specified segment level. |
| 1443 /// There is exactly one element for each unique label. |
| 1444 core.List<GoogleCloudVideointelligenceV1beta2LabelAnnotation> |
| 1445 segmentLabelAnnotations; |
| 1446 |
| 1447 /// Shot annotations. Each shot is represented as a video segment. |
| 1448 core.List<GoogleCloudVideointelligenceV1beta2VideoSegment> shotAnnotations; |
| 1449 |
| 1450 /// Label annotations on shot level. |
| 1451 /// There is exactly one element for each unique label. |
| 1452 core.List<GoogleCloudVideointelligenceV1beta2LabelAnnotation> |
| 1453 shotLabelAnnotations; |
| 1454 |
| 1455 GoogleCloudVideointelligenceV1beta2VideoAnnotationResults(); |
| 1456 |
| 1457 GoogleCloudVideointelligenceV1beta2VideoAnnotationResults.fromJson( |
| 1458 core.Map _json) { |
| 1459 if (_json.containsKey("error")) { |
| 1460 error = new GoogleRpcStatus.fromJson(_json["error"]); |
| 1461 } |
| 1462 if (_json.containsKey("explicitAnnotation")) { |
| 1463 explicitAnnotation = |
| 1464 new GoogleCloudVideointelligenceV1beta2ExplicitContentAnnotation |
| 1465 .fromJson(_json["explicitAnnotation"]); |
| 1466 } |
| 1467 if (_json.containsKey("frameLabelAnnotations")) { |
| 1468 frameLabelAnnotations = _json["frameLabelAnnotations"] |
| 1469 .map((value) => |
| 1470 new GoogleCloudVideointelligenceV1beta2LabelAnnotation.fromJson( |
| 1471 value)) |
| 1472 .toList(); |
| 1473 } |
| 1474 if (_json.containsKey("inputUri")) { |
| 1475 inputUri = _json["inputUri"]; |
| 1476 } |
| 1477 if (_json.containsKey("segmentLabelAnnotations")) { |
| 1478 segmentLabelAnnotations = _json["segmentLabelAnnotations"] |
| 1479 .map((value) => |
| 1480 new GoogleCloudVideointelligenceV1beta2LabelAnnotation.fromJson( |
| 1481 value)) |
| 1482 .toList(); |
| 1483 } |
| 1484 if (_json.containsKey("shotAnnotations")) { |
| 1485 shotAnnotations = _json["shotAnnotations"] |
| 1486 .map((value) => |
| 1487 new GoogleCloudVideointelligenceV1beta2VideoSegment.fromJson( |
| 1488 value)) |
| 1489 .toList(); |
| 1490 } |
| 1491 if (_json.containsKey("shotLabelAnnotations")) { |
| 1492 shotLabelAnnotations = _json["shotLabelAnnotations"] |
| 1493 .map((value) => |
| 1494 new GoogleCloudVideointelligenceV1beta2LabelAnnotation.fromJson( |
| 1495 value)) |
| 1496 .toList(); |
| 1497 } |
| 1498 } |
| 1499 |
| 1500 core.Map<core.String, core.Object> toJson() { |
| 1501 final core.Map<core.String, core.Object> _json = |
| 1502 new core.Map<core.String, core.Object>(); |
| 1503 if (error != null) { |
| 1504 _json["error"] = (error).toJson(); |
| 1505 } |
| 1506 if (explicitAnnotation != null) { |
| 1507 _json["explicitAnnotation"] = (explicitAnnotation).toJson(); |
| 1508 } |
| 1509 if (frameLabelAnnotations != null) { |
| 1510 _json["frameLabelAnnotations"] = |
| 1511 frameLabelAnnotations.map((value) => (value).toJson()).toList(); |
| 1512 } |
| 1513 if (inputUri != null) { |
| 1514 _json["inputUri"] = inputUri; |
| 1515 } |
| 1516 if (segmentLabelAnnotations != null) { |
| 1517 _json["segmentLabelAnnotations"] = |
| 1518 segmentLabelAnnotations.map((value) => (value).toJson()).toList(); |
| 1519 } |
| 1520 if (shotAnnotations != null) { |
| 1521 _json["shotAnnotations"] = |
| 1522 shotAnnotations.map((value) => (value).toJson()).toList(); |
| 1523 } |
| 1524 if (shotLabelAnnotations != null) { |
| 1525 _json["shotLabelAnnotations"] = |
| 1526 shotLabelAnnotations.map((value) => (value).toJson()).toList(); |
| 1527 } |
| 1528 return _json; |
| 1529 } |
| 1530 } |
| 1531 |
| 1532 /// Video segment. |
| 1533 class GoogleCloudVideointelligenceV1beta2VideoSegment { |
| 1534 /// Time-offset, relative to the beginning of the video, |
| 1535 /// corresponding to the end of the segment (inclusive). |
| 1536 core.String endTimeOffset; |
| 1537 |
| 1538 /// Time-offset, relative to the beginning of the video, |
| 1539 /// corresponding to the start of the segment (inclusive). |
| 1540 core.String startTimeOffset; |
| 1541 |
| 1542 GoogleCloudVideointelligenceV1beta2VideoSegment(); |
| 1543 |
| 1544 GoogleCloudVideointelligenceV1beta2VideoSegment.fromJson(core.Map _json) { |
| 1545 if (_json.containsKey("endTimeOffset")) { |
| 1546 endTimeOffset = _json["endTimeOffset"]; |
| 1547 } |
| 1548 if (_json.containsKey("startTimeOffset")) { |
| 1549 startTimeOffset = _json["startTimeOffset"]; |
| 1550 } |
| 1551 } |
| 1552 |
| 1553 core.Map<core.String, core.Object> toJson() { |
| 1554 final core.Map<core.String, core.Object> _json = |
| 1555 new core.Map<core.String, core.Object>(); |
| 1556 if (endTimeOffset != null) { |
| 1557 _json["endTimeOffset"] = endTimeOffset; |
| 1558 } |
989 if (startTimeOffset != null) { | 1559 if (startTimeOffset != null) { |
990 _json["startTimeOffset"] = startTimeOffset; | 1560 _json["startTimeOffset"] = startTimeOffset; |
991 } | 1561 } |
992 return _json; | 1562 return _json; |
993 } | 1563 } |
994 } | 1564 } |
995 | 1565 |
996 /** | 1566 /// This resource represents a long-running operation that is the result of a |
997 * This resource represents a long-running operation that is the result of a | 1567 /// network API call. |
998 * network API call. | |
999 */ | |
1000 class GoogleLongrunningOperation { | 1568 class GoogleLongrunningOperation { |
1001 /** | 1569 /// If the value is `false`, it means the operation is still in progress. |
1002 * If the value is `false`, it means the operation is still in progress. | 1570 /// If `true`, the operation is completed, and either `error` or `response` |
1003 * If true, the operation is completed, and either `error` or `response` is | 1571 /// is |
1004 * available. | 1572 /// available. |
1005 */ | |
1006 core.bool done; | 1573 core.bool done; |
1007 /** The error result of the operation in case of failure or cancellation. */ | 1574 |
| 1575 /// The error result of the operation in case of failure or cancellation. |
1008 GoogleRpcStatus error; | 1576 GoogleRpcStatus error; |
1009 /** | 1577 |
1010 * Service-specific metadata associated with the operation. It typically | 1578 /// Service-specific metadata associated with the operation. It typically |
1011 * contains progress information and common metadata such as create time. | 1579 /// contains progress information and common metadata such as create time. |
1012 * Some services might not provide such metadata. Any method that returns a | 1580 /// Some services might not provide such metadata. Any method that returns a |
1013 * long-running operation should document the metadata type, if any. | 1581 /// long-running operation should document the metadata type, if any. |
1014 * | 1582 /// |
1015 * The values for Object must be JSON objects. It can consist of `num`, | 1583 /// The values for Object must be JSON objects. It can consist of `num`, |
1016 * `String`, `bool` and `null` as well as `Map` and `List` values. | 1584 /// `String`, `bool` and `null` as well as `Map` and `List` values. |
1017 */ | |
1018 core.Map<core.String, core.Object> metadata; | 1585 core.Map<core.String, core.Object> metadata; |
1019 /** | 1586 |
1020 * The server-assigned name, which is only unique within the same service that | 1587 /// The server-assigned name, which is only unique within the same service |
1021 * originally returns it. If you use the default HTTP mapping, the | 1588 /// that |
1022 * `name` should have the format of `operations/some/unique/name`. | 1589 /// originally returns it. If you use the default HTTP mapping, the |
1023 */ | 1590 /// `name` should have the format of `operations/some/unique/name`. |
1024 core.String name; | 1591 core.String name; |
1025 /** | 1592 |
1026 * The normal response of the operation in case of success. If the original | 1593 /// The normal response of the operation in case of success. If the original |
1027 * method returns no data on success, such as `Delete`, the response is | 1594 /// method returns no data on success, such as `Delete`, the response is |
1028 * `google.protobuf.Empty`. If the original method is standard | 1595 /// `google.protobuf.Empty`. If the original method is standard |
1029 * `Get`/`Create`/`Update`, the response should be the resource. For other | 1596 /// `Get`/`Create`/`Update`, the response should be the resource. For other |
1030 * methods, the response should have the type `XxxResponse`, where `Xxx` | 1597 /// methods, the response should have the type `XxxResponse`, where `Xxx` |
1031 * is the original method name. For example, if the original method name | 1598 /// is the original method name. For example, if the original method name |
1032 * is `TakeSnapshot()`, the inferred response type is | 1599 /// is `TakeSnapshot()`, the inferred response type is |
1033 * `TakeSnapshotResponse`. | 1600 /// `TakeSnapshotResponse`. |
1034 * | 1601 /// |
1035 * The values for Object must be JSON objects. It can consist of `num`, | 1602 /// The values for Object must be JSON objects. It can consist of `num`, |
1036 * `String`, `bool` and `null` as well as `Map` and `List` values. | 1603 /// `String`, `bool` and `null` as well as `Map` and `List` values. |
1037 */ | |
1038 core.Map<core.String, core.Object> response; | 1604 core.Map<core.String, core.Object> response; |
1039 | 1605 |
1040 GoogleLongrunningOperation(); | 1606 GoogleLongrunningOperation(); |
1041 | 1607 |
1042 GoogleLongrunningOperation.fromJson(core.Map _json) { | 1608 GoogleLongrunningOperation.fromJson(core.Map _json) { |
1043 if (_json.containsKey("done")) { | 1609 if (_json.containsKey("done")) { |
1044 done = _json["done"]; | 1610 done = _json["done"]; |
1045 } | 1611 } |
1046 if (_json.containsKey("error")) { | 1612 if (_json.containsKey("error")) { |
1047 error = new GoogleRpcStatus.fromJson(_json["error"]); | 1613 error = new GoogleRpcStatus.fromJson(_json["error"]); |
1048 } | 1614 } |
1049 if (_json.containsKey("metadata")) { | 1615 if (_json.containsKey("metadata")) { |
1050 metadata = _json["metadata"]; | 1616 metadata = _json["metadata"]; |
1051 } | 1617 } |
1052 if (_json.containsKey("name")) { | 1618 if (_json.containsKey("name")) { |
1053 name = _json["name"]; | 1619 name = _json["name"]; |
1054 } | 1620 } |
1055 if (_json.containsKey("response")) { | 1621 if (_json.containsKey("response")) { |
1056 response = _json["response"]; | 1622 response = _json["response"]; |
1057 } | 1623 } |
1058 } | 1624 } |
1059 | 1625 |
1060 core.Map<core.String, core.Object> toJson() { | 1626 core.Map<core.String, core.Object> toJson() { |
1061 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 1627 final core.Map<core.String, core.Object> _json = |
| 1628 new core.Map<core.String, core.Object>(); |
1062 if (done != null) { | 1629 if (done != null) { |
1063 _json["done"] = done; | 1630 _json["done"] = done; |
1064 } | 1631 } |
1065 if (error != null) { | 1632 if (error != null) { |
1066 _json["error"] = (error).toJson(); | 1633 _json["error"] = (error).toJson(); |
1067 } | 1634 } |
1068 if (metadata != null) { | 1635 if (metadata != null) { |
1069 _json["metadata"] = metadata; | 1636 _json["metadata"] = metadata; |
1070 } | 1637 } |
1071 if (name != null) { | 1638 if (name != null) { |
1072 _json["name"] = name; | 1639 _json["name"] = name; |
1073 } | 1640 } |
1074 if (response != null) { | 1641 if (response != null) { |
1075 _json["response"] = response; | 1642 _json["response"] = response; |
1076 } | 1643 } |
1077 return _json; | 1644 return _json; |
1078 } | 1645 } |
1079 } | 1646 } |
1080 | 1647 |
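
Note: a minimal sketch of inspecting an operation as described above, not part of the generated file. Until `done` is true only `metadata` is meaningful; afterwards either `error` or `response` is available. Decoding `response` into a concrete type is omitted because it depends on the original request. Import path assumed as before; describe is a hypothetical helper.

import 'package:googleapis_beta/videointelligence/v1beta1.dart';

// Summarises the state of a long-running operation in one line.
String describe(GoogleLongrunningOperation op) {
  if (op.done != true) {
    // While running, progress information (if any) lives in `metadata`.
    return '${op.name}: in progress, metadata=${op.metadata}';
  }
  if (op.error != null) {
    return '${op.name}: failed, ${op.error.code}: ${op.error.message}';
  }
  return '${op.name}: done, response keys=${op.response?.keys?.toList()}';
}
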
1081 /** | 1648 /// The `Status` type defines a logical error model that is suitable for |
1082 * The `Status` type defines a logical error model that is suitable for | 1649 /// different |
1083 * different | 1650 /// programming environments, including REST APIs and RPC APIs. It is used by |
1084 * programming environments, including REST APIs and RPC APIs. It is used by | 1651 /// [gRPC](https://github.com/grpc). The error model is designed to be: |
1085 * [gRPC](https://github.com/grpc). The error model is designed to be: | 1652 /// |
1086 * | 1653 /// - Simple to use and understand for most users |
1087 * - Simple to use and understand for most users | 1654 /// - Flexible enough to meet unexpected needs |
1088 * - Flexible enough to meet unexpected needs | 1655 /// |
1089 * | 1656 /// # Overview |
1090 * # Overview | 1657 /// |
1091 * | 1658 /// The `Status` message contains three pieces of data: error code, error |
1092 * The `Status` message contains three pieces of data: error code, error | 1659 /// message, |
1093 * message, | 1660 /// and error details. The error code should be an enum value of |
1094 * and error details. The error code should be an enum value of | 1661 /// google.rpc.Code, but it may accept additional error codes if needed. The |
1095 * google.rpc.Code, but it may accept additional error codes if needed. The | 1662 /// error message should be a developer-facing English message that helps |
1096 * error message should be a developer-facing English message that helps | 1663 /// developers *understand* and *resolve* the error. If a localized user-facing |
1097 * developers *understand* and *resolve* the error. If a localized user-facing | 1664 /// error message is needed, put the localized message in the error details or |
1098 * error message is needed, put the localized message in the error details or | 1665 /// localize it in the client. The optional error details may contain arbitrary |
1099 * localize it in the client. The optional error details may contain arbitrary | 1666 /// information about the error. There is a predefined set of error detail |
1100 * information about the error. There is a predefined set of error detail types | 1667 /// types |
1101 * in the package `google.rpc` that can be used for common error conditions. | 1668 /// in the package `google.rpc` that can be used for common error conditions. |
1102 * | 1669 /// |
1103 * # Language mapping | 1670 /// # Language mapping |
1104 * | 1671 /// |
1105 * The `Status` message is the logical representation of the error model, but it | 1672 /// The `Status` message is the logical representation of the error model, but |
1106 * is not necessarily the actual wire format. When the `Status` message is | 1673 /// it |
1107 * exposed in different client libraries and different wire protocols, it can be | 1674 /// is not necessarily the actual wire format. When the `Status` message is |
1108 * mapped differently. For example, it will likely be mapped to some exceptions | 1675 /// exposed in different client libraries and different wire protocols, it can |
1109 * in Java, but more likely mapped to some error codes in C. | 1676 /// be |
1110 * | 1677 /// mapped differently. For example, it will likely be mapped to some |
1111 * # Other uses | 1678 /// exceptions |
1112 * | 1679 /// in Java, but more likely mapped to some error codes in C. |
1113 * The error model and the `Status` message can be used in a variety of | 1680 /// |
1114 * environments, either with or without APIs, to provide a | 1681 /// # Other uses |
1115 * consistent developer experience across different environments. | 1682 /// |
1116 * | 1683 /// The error model and the `Status` message can be used in a variety of |
1117 * Example uses of this error model include: | 1684 /// environments, either with or without APIs, to provide a |
1118 * | 1685 /// consistent developer experience across different environments. |
1119 * - Partial errors. If a service needs to return partial errors to the client, | 1686 /// |
1120 * it may embed the `Status` in the normal response to indicate the partial | 1687 /// Example uses of this error model include: |
1121 * errors. | 1688 /// |
1122 * | 1689 /// - Partial errors. If a service needs to return partial errors to the |
1123 * - Workflow errors. A typical workflow has multiple steps. Each step may | 1690 /// client, |
1124 * have a `Status` message for error reporting. | 1691 /// it may embed the `Status` in the normal response to indicate the partial |
1125 * | 1692 /// errors. |
1126 * - Batch operations. If a client uses batch request and batch response, the | 1693 /// |
1127 * `Status` message should be used directly inside batch response, one for | 1694 /// - Workflow errors. A typical workflow has multiple steps. Each step may |
1128 * each error sub-response. | 1695 /// have a `Status` message for error reporting. |
1129 * | 1696 /// |
1130 * - Asynchronous operations. If an API call embeds asynchronous operation | 1697 /// - Batch operations. If a client uses batch request and batch response, the |
1131 * results in its response, the status of those operations should be | 1698 /// `Status` message should be used directly inside batch response, one for |
1132 * represented directly using the `Status` message. | 1699 /// each error sub-response. |
1133 * | 1700 /// |
1134 * - Logging. If some API errors are stored in logs, the message `Status` could | 1701 /// - Asynchronous operations. If an API call embeds asynchronous operation |
1135 * be used directly after any stripping needed for security/privacy reasons. | 1702 /// results in its response, the status of those operations should be |
1136 */ | 1703 /// represented directly using the `Status` message. |
| 1704 /// |
| 1705 /// - Logging. If some API errors are stored in logs, the message `Status` |
| 1706 /// could |
| 1707 /// be used directly after any stripping needed for security/privacy reasons. |
1137 class GoogleRpcStatus { | 1708 class GoogleRpcStatus { |
1138 /** The status code, which should be an enum value of google.rpc.Code. */ | 1709 /// The status code, which should be an enum value of google.rpc.Code. |
1139 core.int code; | 1710 core.int code; |
1140 /** | 1711 |
1141 * A list of messages that carry the error details. There is a common set of | 1712 /// A list of messages that carry the error details. There is a common set |
1142 * message types for APIs to use. | 1713 /// of |
1143 * | 1714 /// message types for APIs to use. |
1144 * The values for Object must be JSON objects. It can consist of `num`, | 1715 /// |
1145 * `String`, `bool` and `null` as well as `Map` and `List` values. | 1716 /// The values for Object must be JSON objects. It can consist of `num`, |
1146 */ | 1717 /// `String`, `bool` and `null` as well as `Map` and `List` values. |
1147 core.List<core.Map<core.String, core.Object>> details; | 1718 core.List<core.Map<core.String, core.Object>> details; |
1148 /** | 1719 |
1149 * A developer-facing error message, which should be in English. Any | 1720 /// A developer-facing error message, which should be in English. Any |
1150 * user-facing error message should be localized and sent in the | 1721 /// user-facing error message should be localized and sent in the |
1151 * google.rpc.Status.details field, or localized by the client. | 1722 /// google.rpc.Status.details field, or localized by the client. |
1152 */ | |
1153 core.String message; | 1723 core.String message; |
1154 | 1724 |
1155 GoogleRpcStatus(); | 1725 GoogleRpcStatus(); |
1156 | 1726 |
1157 GoogleRpcStatus.fromJson(core.Map _json) { | 1727 GoogleRpcStatus.fromJson(core.Map _json) { |
1158 if (_json.containsKey("code")) { | 1728 if (_json.containsKey("code")) { |
1159 code = _json["code"]; | 1729 code = _json["code"]; |
1160 } | 1730 } |
1161 if (_json.containsKey("details")) { | 1731 if (_json.containsKey("details")) { |
1162 details = _json["details"]; | 1732 details = _json["details"]; |
1163 } | 1733 } |
1164 if (_json.containsKey("message")) { | 1734 if (_json.containsKey("message")) { |
1165 message = _json["message"]; | 1735 message = _json["message"]; |
1166 } | 1736 } |
1167 } | 1737 } |
1168 | 1738 |
1169 core.Map<core.String, core.Object> toJson() { | 1739 core.Map<core.String, core.Object> toJson() { |
1170 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 1740 final core.Map<core.String, core.Object> _json = |
| 1741 new core.Map<core.String, core.Object>(); |
1171 if (code != null) { | 1742 if (code != null) { |
1172 _json["code"] = code; | 1743 _json["code"] = code; |
1173 } | 1744 } |
1174 if (details != null) { | 1745 if (details != null) { |
1175 _json["details"] = details; | 1746 _json["details"] = details; |
1176 } | 1747 } |
1177 if (message != null) { | 1748 if (message != null) { |
1178 _json["message"] = message; | 1749 _json["message"] = message; |
1179 } | 1750 } |
1180 return _json; | 1751 return _json; |
1181 } | 1752 } |
1182 } | 1753 } |
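
Note: a minimal sketch of rendering the status above as a single developer-facing string, not part of the generated file and in the spirit of the "Logging" use listed in the comment. Import path assumed as before; formatStatus is a hypothetical helper.

import 'package:googleapis_beta/videointelligence/v1beta1.dart';

// Flattens code, message and any detail maps into one log line.
String formatStatus(GoogleRpcStatus status) {
  final buffer =
      new StringBuffer('rpc error ${status.code}: ${status.message}');
  for (var detail in status.details ?? const []) {
    buffer.write('; detail: $detail');
  }
  return buffer.toString();
}
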
OLD | NEW |