OLD | NEW |
1 { | 1 { |
2 "auth": { | 2 "auth": { |
3 "oauth2": { | 3 "oauth2": { |
4 "scopes": { | 4 "scopes": { |
5 "https://www.googleapis.com/auth/cloud-platform": { | 5 "https://www.googleapis.com/auth/cloud-platform": { |
6 "description": "View and manage your data across Google Clou
d Platform services" | 6 "description": "View and manage your data across Google Clou
d Platform services" |
7 } | 7 } |
8 } | 8 } |
9 } | 9 } |
10 }, | 10 }, |
11 "basePath": "", | 11 "basePath": "", |
12 "baseUrl": "https://speech.googleapis.com/", | 12 "baseUrl": "https://speech.googleapis.com/", |
13 "batchPath": "batch", | 13 "batchPath": "batch", |
14 "canonicalName": "Speech", | 14 "canonicalName": "Speech", |
15 "description": "Converts audio to text by applying powerful neural network m
odels.", | 15 "description": "Converts audio to text by applying powerful neural network m
odels.", |
16 "discoveryVersion": "v1", | 16 "discoveryVersion": "v1", |
17 "documentationLink": "https://cloud.google.com/speech/", | 17 "documentationLink": "https://cloud.google.com/speech/", |
18 "icons": { | 18 "icons": { |
19 "x16": "http://www.google.com/images/icons/product/search-16.gif", | 19 "x16": "http://www.google.com/images/icons/product/search-16.gif", |
20 "x32": "http://www.google.com/images/icons/product/search-32.gif" | 20 "x32": "http://www.google.com/images/icons/product/search-32.gif" |
21 }, | 21 }, |
22 "id": "speech:v1", | 22 "id": "speech:v1", |
23 "kind": "discovery#restDescription", | 23 "kind": "discovery#restDescription", |
24 "name": "speech", | 24 "name": "speech", |
25 "ownerDomain": "google.com", | 25 "ownerDomain": "google.com", |
26 "ownerName": "Google", | 26 "ownerName": "Google", |
27 "parameters": { | 27 "parameters": { |
28 "upload_protocol": { | |
29 "description": "Upload protocol for media (e.g. \"raw\", \"multipart
\").", | |
30 "location": "query", | |
31 "type": "string" | |
32 }, | |
33 "prettyPrint": { | |
34 "default": "true", | |
35 "description": "Returns response with indentations and line breaks."
, | |
36 "location": "query", | |
37 "type": "boolean" | |
38 }, | |
39 "uploadType": { | |
40 "description": "Legacy upload protocol for media (e.g. \"media\", \"
multipart\").", | |
41 "location": "query", | |
42 "type": "string" | |
43 }, | |
44 "fields": { | |
45 "description": "Selector specifying which fields to include in a par
tial response.", | |
46 "location": "query", | |
47 "type": "string" | |
48 }, | |
49 "callback": { | |
50 "description": "JSONP", | |
51 "location": "query", | |
52 "type": "string" | |
53 }, | |
54 "$.xgafv": { | |
55 "description": "V1 error format.", | |
56 "enum": [ | |
57 "1", | |
58 "2" | |
59 ], | |
60 "enumDescriptions": [ | |
61 "v1 error format", | |
62 "v2 error format" | |
63 ], | |
64 "location": "query", | |
65 "type": "string" | |
66 }, | |
67 "alt": { | 28 "alt": { |
68 "default": "json", | 29 "default": "json", |
69 "description": "Data format for response.", | 30 "description": "Data format for response.", |
70 "enum": [ | 31 "enum": [ |
71 "json", | 32 "json", |
72 "media", | 33 "media", |
73 "proto" | 34 "proto" |
74 ], | 35 ], |
75 "enumDescriptions": [ | 36 "enumDescriptions": [ |
76 "Responses with Content-Type of application/json", | 37 "Responses with Content-Type of application/json", |
(...skipping 17 matching lines...)
94 "description": "Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should not exc
eed 40 characters.", | 55 "description": "Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should not exc
eed 40 characters.", |
95 "location": "query", | 56 "location": "query", |
96 "type": "string" | 57 "type": "string" |
97 }, | 58 }, |
98 "pp": { | 59 "pp": { |
99 "default": "true", | 60 "default": "true", |
100 "description": "Pretty-print response.", | 61 "description": "Pretty-print response.", |
101 "location": "query", | 62 "location": "query", |
102 "type": "boolean" | 63 "type": "boolean" |
103 }, | 64 }, |
| 65 "bearer_token": { |
| 66 "description": "OAuth bearer token.", |
| 67 "location": "query", |
| 68 "type": "string" |
| 69 }, |
104 "oauth_token": { | 70 "oauth_token": { |
105 "description": "OAuth 2.0 token for the current user.", | 71 "description": "OAuth 2.0 token for the current user.", |
106 "location": "query", | 72 "location": "query", |
107 "type": "string" | 73 "type": "string" |
108 }, | 74 }, |
109 "bearer_token": { | 75 "upload_protocol": { |
110 "description": "OAuth bearer token.", | 76 "description": "Upload protocol for media (e.g. \"raw\", \"multipart
\").", |
| 77 "location": "query", |
| 78 "type": "string" |
| 79 }, |
| 80 "prettyPrint": { |
| 81 "default": "true", |
| 82 "description": "Returns response with indentations and line breaks."
, |
| 83 "location": "query", |
| 84 "type": "boolean" |
| 85 }, |
| 86 "uploadType": { |
| 87 "description": "Legacy upload protocol for media (e.g. \"media\", \"
multipart\").", |
| 88 "location": "query", |
| 89 "type": "string" |
| 90 }, |
| 91 "fields": { |
| 92 "description": "Selector specifying which fields to include in a par
tial response.", |
| 93 "location": "query", |
| 94 "type": "string" |
| 95 }, |
| 96 "$.xgafv": { |
| 97 "description": "V1 error format.", |
| 98 "enum": [ |
| 99 "1", |
| 100 "2" |
| 101 ], |
| 102 "enumDescriptions": [ |
| 103 "v1 error format", |
| 104 "v2 error format" |
| 105 ], |
| 106 "location": "query", |
| 107 "type": "string" |
| 108 }, |
| 109 "callback": { |
| 110 "description": "JSONP", |
111 "location": "query", | 111 "location": "query", |
112 "type": "string" | 112 "type": "string" |
113 } | 113 } |
114 }, | 114 }, |
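Note: the query parameters above are the standard ones shared by every method in this service. As a rough sketch of how they compose onto a call, a synchronous request might be issued as follows (the field mask is a hypothetical illustration built from the response schemas further down):

    POST https://speech.googleapis.com/v1/speech:recognize?alt=json&prettyPrint=false&fields=results(alternatives(transcript,confidence))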
115 "protocol": "rest", | 115 "protocol": "rest", |
116 "resources": { | 116 "resources": { |
117 "speech": { | |
118 "methods": { | |
119 "recognize": { | |
120 "description": "Performs synchronous speech recognition: rec
eive results after all audio\nhas been sent and processed.", | |
121 "httpMethod": "POST", | |
122 "id": "speech.speech.recognize", | |
123 "parameterOrder": [], | |
124 "parameters": {}, | |
125 "path": "v1/speech:recognize", | |
126 "request": { | |
127 "$ref": "RecognizeRequest" | |
128 }, | |
129 "response": { | |
130 "$ref": "RecognizeResponse" | |
131 }, | |
132 "scopes": [ | |
133 "https://www.googleapis.com/auth/cloud-platform" | |
134 ] | |
135 }, | |
136 "longrunningrecognize": { | |
137 "description": "Performs asynchronous speech recognition: re
ceive results via the\ngoogle.longrunning.Operations interface. Returns either a
n\n`Operation.error` or an `Operation.response` which contains\na `LongRunningRe
cognizeResponse` message.", | |
138 "httpMethod": "POST", | |
139 "id": "speech.speech.longrunningrecognize", | |
140 "parameterOrder": [], | |
141 "parameters": {}, | |
142 "path": "v1/speech:longrunningrecognize", | |
143 "request": { | |
144 "$ref": "LongRunningRecognizeRequest" | |
145 }, | |
146 "response": { | |
147 "$ref": "Operation" | |
148 }, | |
149 "scopes": [ | |
150 "https://www.googleapis.com/auth/cloud-platform" | |
151 ] | |
152 } | |
153 } | |
154 }, | |
155 "operations": { | 117 "operations": { |
156 "methods": { | 118 "methods": { |
157 "cancel": { | 119 "cancel": { |
158 "description": "Starts asynchronous cancellation on a long-r
unning operation. The server\nmakes a best effort to cancel the operation, but
success is not\nguaranteed. If the server doesn't support this method, it retur
ns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation o
r\nother methods to check whether the cancellation succeeded or whether the\nope
ration completed despite cancellation. On successful cancellation,\nthe operatio
n is not deleted; instead, it becomes an operation with\nan Operation.error valu
e with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", | 120 "description": "Starts asynchronous cancellation on a long-r
unning operation. The server\nmakes a best effort to cancel the operation, but
success is not\nguaranteed. If the server doesn't support this method, it retur
ns\n`google.rpc.Code.UNIMPLEMENTED`. Clients can use\nOperations.GetOperation o
r\nother methods to check whether the cancellation succeeded or whether the\nope
ration completed despite cancellation. On successful cancellation,\nthe operatio
n is not deleted; instead, it becomes an operation with\nan Operation.error valu
e with a google.rpc.Status.code of 1,\ncorresponding to `Code.CANCELLED`.", |
159 "httpMethod": "POST", | 121 "httpMethod": "POST", |
160 "id": "speech.operations.cancel", | 122 "id": "speech.operations.cancel", |
161 "parameterOrder": [ | 123 "parameterOrder": [ |
162 "name" | 124 "name" |
163 ], | 125 ], |
164 "parameters": { | 126 "parameters": { |
(...skipping 94 matching lines...)
259 }, | 221 }, |
260 "path": "v1/operations", | 222 "path": "v1/operations", |
261 "response": { | 223 "response": { |
262 "$ref": "ListOperationsResponse" | 224 "$ref": "ListOperationsResponse" |
263 }, | 225 }, |
264 "scopes": [ | 226 "scopes": [ |
265 "https://www.googleapis.com/auth/cloud-platform" | 227 "https://www.googleapis.com/auth/cloud-platform" |
266 ] | 228 ] |
267 } | 229 } |
268 } | 230 } |
| 231 }, |
| 232 "speech": { |
| 233 "methods": { |
| 234 "longrunningrecognize": { |
| 235 "description": "Performs asynchronous speech recognition: re
ceive results via the\ngoogle.longrunning.Operations interface. Returns either a
n\n`Operation.error` or an `Operation.response` which contains\na `LongRunningRe
cognizeResponse` message.", |
| 236 "httpMethod": "POST", |
| 237 "id": "speech.speech.longrunningrecognize", |
| 238 "parameterOrder": [], |
| 239 "parameters": {}, |
| 240 "path": "v1/speech:longrunningrecognize", |
| 241 "request": { |
| 242 "$ref": "LongRunningRecognizeRequest" |
| 243 }, |
| 244 "response": { |
| 245 "$ref": "Operation" |
| 246 }, |
| 247 "scopes": [ |
| 248 "https://www.googleapis.com/auth/cloud-platform" |
| 249 ] |
| 250 }, |
| 251 "recognize": { |
| 252 "description": "Performs synchronous speech recognition: rec
eive results after all audio\nhas been sent and processed.", |
| 253 "httpMethod": "POST", |
| 254 "id": "speech.speech.recognize", |
| 255 "parameterOrder": [], |
| 256 "parameters": {}, |
| 257 "path": "v1/speech:recognize", |
| 258 "request": { |
| 259 "$ref": "RecognizeRequest" |
| 260 }, |
| 261 "response": { |
| 262 "$ref": "RecognizeResponse" |
| 263 }, |
| 264 "scopes": [ |
| 265 "https://www.googleapis.com/auth/cloud-platform" |
| 266 ] |
| 267 } |
| 268 } |
269 } | 269 } |
270 }, | 270 }, |
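Note: for orientation, a minimal `RecognizeRequest` body for the `v1/speech:recognize` method above might look like the sketch below. The `config` fields come from the `RecognitionConfig` schema later in this file; the shape of `audio` (a `RecognitionAudio` with inline `content` or a storage `uri`) is not visible in this hunk and is assumed from the v1 API, with a placeholder bucket path:

    {
      "config": {
        "encoding": "LINEAR16",
        "sampleRateHertz": 16000,
        "languageCode": "en-US",
        "maxAlternatives": 1,
        "enableWordTimeOffsets": true
      },
      "audio": {
        "uri": "gs://example-bucket/audio.raw"
      }
    }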
271 "revision": "20170705", | 271 "revision": "20170728", |
272 "rootUrl": "https://speech.googleapis.com/", | 272 "rootUrl": "https://speech.googleapis.com/", |
273 "schemas": { | 273 "schemas": { |
| 274 "Operation": { |
| 275 "description": "This resource represents a long-running operation th
at is the result of a\nnetwork API call.", |
| 276 "id": "Operation", |
| 277 "properties": { |
| 278 "response": { |
| 279 "additionalProperties": { |
| 280 "description": "Properties of the object. Contains field
@type with type URL.", |
| 281 "type": "any" |
| 282 }, |
| 283 "description": "The normal response of the operation in case
of success. If the original\nmethod returns no data on success, such as `Delet
e`, the response is\n`google.protobuf.Empty`. If the original method is standar
d\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmet
hods, the response should have the type `XxxResponse`, where `Xxx`\nis the origi
nal method name. For example, if the original method name\nis `TakeSnapshot()`,
the inferred response type is\n`TakeSnapshotResponse`.", |
| 284 "type": "object" |
| 285 }, |
| 286 "name": { |
| 287 "description": "The server-assigned name, which is only uniq
ue within the same service that\noriginally returns it. If you use the default H
TTP mapping, the\n`name` should have the format of `operations/some/unique/name`
.", |
| 288 "type": "string" |
| 289 }, |
| 290 "error": { |
| 291 "$ref": "Status", |
| 292 "description": "The error result of the operation in case of
failure or cancellation." |
| 293 }, |
| 294 "metadata": { |
| 295 "additionalProperties": { |
| 296 "description": "Properties of the object. Contains field
@type with type URL.", |
| 297 "type": "any" |
| 298 }, |
| 299 "description": "Service-specific metadata associated with th
e operation. It typically\ncontains progress information and common metadata su
ch as create time.\nSome services might not provide such metadata. Any method t
hat returns a\nlong-running operation should document the metadata type, if any.
", |
| 300 "type": "object" |
| 301 }, |
| 302 "done": { |
| 303 "description": "If the value is `false`, it means the operat
ion is still in progress.\nIf true, the operation is completed, and either `erro
r` or `response` is\navailable.", |
| 304 "type": "boolean" |
| 305 } |
| 306 }, |
| 307 "type": "object" |
| 308 }, |
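Note: per the `Operation` schema above, polling a finished `longrunningrecognize` job might return something like this sketch. The `name` value is a placeholder, and the `@type` URL is an assumption based on the v1 message names rather than anything spelled out in this diff:

    {
      "name": "operations/1234567890",
      "done": true,
      "response": {
        "@type": "type.googleapis.com/google.cloud.speech.v1.LongRunningRecognizeResponse"
      }
    }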
| 309 "RecognitionConfig": { |
| 310 "description": "Provides information to the recognizer that specifie
s how to process the\nrequest.", |
| 311 "id": "RecognitionConfig", |
| 312 "properties": { |
| 313 "enableWordTimeOffsets": { |
| 314 "description": "*Optional* If `true`, a list of `words` are
returned in the top result,\ncontaining the start and end timestamps for those w
ords. The default value,\n'false' does not return any word-level timing informat
ion.", |
| 315 "type": "boolean" |
| 316 }, |
| 317 "maxAlternatives": { |
| 318 "description": "*Optional* Maximum number of recognition hyp
otheses to be returned.\nSpecifically, the maximum number of `SpeechRecognitionA
lternative` messages\nwithin each `SpeechRecognitionResult`.\nThe server may ret
urn fewer than `max_alternatives`.\nValid values are `0`-`30`. A value of `0` or
`1` will return a maximum of\none. If omitted, will return a maximum of one.", |
| 319 "format": "int32", |
| 320 "type": "integer" |
| 321 }, |
| 322 "languageCode": { |
| 323 "description": "*Required* The language of the supplied audi
o as a\n[BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.\nEx
ample: \"en-US\".\nSee [Language Support](https://cloud.google.com/speech/docs/l
anguages)\nfor a list of the currently supported language codes.", |
| 324 "type": "string" |
| 325 }, |
| 326 "speechContexts": { |
| 327 "description": "*Optional* A means to provide context to ass
ist the speech recognition.", |
| 328 "items": { |
| 329 "$ref": "SpeechContext" |
| 330 }, |
| 331 "type": "array" |
| 332 }, |
| 333 "profanityFilter": { |
| 334 "description": "*Optional* If set to `true`, the server will
attempt to filter out\nprofanities, replacing all but the initial character in
each filtered word\nwith asterisks, e.g. \"f***\". If set to `false` or omitted,
profanities\nwon't be filtered out.", |
| 335 "type": "boolean" |
| 336 }, |
| 337 "encoding": { |
| 338 "description": "*Required* Encoding of audio data sent in al
l `RecognitionAudio` messages.", |
| 339 "enum": [ |
| 340 "ENCODING_UNSPECIFIED", |
| 341 "LINEAR16", |
| 342 "FLAC", |
| 343 "MULAW", |
| 344 "AMR", |
| 345 "AMR_WB", |
| 346 "OGG_OPUS", |
| 347 "SPEEX_WITH_HEADER_BYTE" |
| 348 ], |
| 349 "enumDescriptions": [ |
| 350 "Not specified. Will return result google.rpc.Code.INVAL
ID_ARGUMENT.", |
| 351 "Uncompressed 16-bit signed little-endian samples (Linea
r PCM).", |
| 352 "[`FLAC`](https://xiph.org/flac/documentation.html) (Fre
e Lossless Audio\nCodec) is the recommended encoding because it is\nlossless--th
erefore recognition is not compromised--and\nrequires only about half the bandwi
dth of `LINEAR16`. `FLAC` stream\nencoding supports 16-bit and 24-bit samples, h
owever, not all fields in\n`STREAMINFO` are supported.", |
| 353 "8-bit samples that compand 14-bit audio samples using G
.711 PCMU/mu-law.", |
| 354 "Adaptive Multi-Rate Narrowband codec. `sample_rate_hert
z` must be 8000.", |
| 355 "Adaptive Multi-Rate Wideband codec. `sample_rate_hertz`
must be 16000.", |
| 356 "Opus encoded audio frames in Ogg container\n([OggOpus](
https://wiki.xiph.org/OggOpus)).\n`sample_rate_hertz` must be 16000.", |
| 357 "Although the use of lossy encodings is not recommended,
if a very low\nbitrate encoding is required, `OGG_OPUS` is highly preferred ove
r\nSpeex encoding. The [Speex](https://speex.org/) encoding supported by\nCloud
Speech API has a header byte in each block, as in MIME type\n`audio/x-speex-wit
h-header-byte`.\nIt is a variant of the RTP Speex encoding defined in\n[RFC 5574
](https://tools.ietf.org/html/rfc5574).\nThe stream is a sequence of blocks, one
block per RTP packet. Each block\nstarts with a byte containing the length of t
he block, in bytes, followed\nby one or more frames of Speex data, padded to an
integral number of\nbytes (octets) as specified in RFC 5574. In other words, eac
h RTP header\nis replaced with a single byte containing the block length. Only S
peex\nwideband is supported. `sample_rate_hertz` must be 16000." |
| 358 ], |
| 359 "type": "string" |
| 360 }, |
| 361 "sampleRateHertz": { |
| 362 "description": "*Required* Sample rate in Hertz of the audio
data sent in all\n`RecognitionAudio` messages. Valid values are: 8000-48000.\n1
6000 is optimal. For best results, set the sampling rate of the audio\nsource to
16000 Hz. If that's not possible, use the native sample rate of\nthe audio sour
ce (instead of re-sampling).", |
| 363 "format": "int32", |
| 364 "type": "integer" |
| 365 } |
| 366 }, |
| 367 "type": "object" |
| 368 }, |
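Note: the `enumDescriptions` above carry hard constraints, e.g. `AMR` requires `sample_rate_hertz` of 8000 while `AMR_WB`, `OGG_OPUS`, and `SPEEX_WITH_HEADER_BYTE` require 16000. So a config like the first sketch below should be rejected, while the second satisfies the stated constraint (both illustrative):

    { "encoding": "AMR", "sampleRateHertz": 16000, "languageCode": "en-US" }

    { "encoding": "AMR", "sampleRateHertz": 8000, "languageCode": "en-US" }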
| 369 "WordInfo": { |
| 370 "description": "Word-specific information detected along with speech
recognition when certain\nrequest parameters are set.", |
| 371 "id": "WordInfo", |
| 372 "properties": { |
| 373 "startTime": { |
| 374 "description": "*Output-only* Time offset relative to the be
ginning of the audio,\nand corresponding to the start of the spoken word.\nThis
field is only set if `enable_word_time_offsets=true` and only\nin the top hypoth
esis.\nThis is an experimental feature and the accuracy of the time offset can\n
vary.", |
| 375 "format": "google-duration", |
| 376 "type": "string" |
| 377 }, |
| 378 "word": { |
| 379 "description": "*Output-only* The word corresponding to this
set of information.", |
| 380 "type": "string" |
| 381 }, |
| 382 "endTime": { |
| 383 "description": "*Output-only* Time offset relative to the be
ginning of the audio,\nand corresponding to the end of the spoken word.\nThis fi
eld is only set if `enable_word_time_offsets=true` and only\nin the top hypothes
is.\nThis is an experimental feature and the accuracy of the time offset can\nva
ry.", |
| 384 "format": "google-duration", |
| 385 "type": "string" |
| 386 } |
| 387 }, |
| 388 "type": "object" |
| 389 }, |
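Note: `format: "google-duration"` means `startTime`/`endTime` are serialized as decimal seconds with an `s` suffix. When `enableWordTimeOffsets` is set, a single `WordInfo` in the top hypothesis might look like this sketch (values illustrative):

    { "word": "bridge", "startTime": "1.300s", "endTime": "1.700s" }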
274 "Status": { | 390 "Status": { |
275 "description": "The `Status` type defines a logical error model that
is suitable for different\nprogramming environments, including REST APIs and RP
C APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is desig
ned to be:\n\n- Simple to use and understand for most users\n- Flexible enough t
o meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pie
ces of data: error code, error message,\nand error details. The error code shoul
d be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes
if needed. The\nerror message should be a developer-facing English message tha
t helps\ndevelopers *understand* and *resolve* the error. If a localized user-fa
cing\nerror message is needed, put the localized message in the error details or
\nlocalize it in the client. The optional error details may contain arbitrary\ni
nformation about the error. There is a predefined set of error detail types\nin
the package `google.rpc` that can be used for common error conditions.\n\n# Lang
uage mapping\n\nThe `Status` message is the logical representation of the error
model, but it\nis not necessarily the actual wire format. When the `Status` mess
age is\nexposed in different client libraries and different wire protocols, it c
an be\nmapped differently. For example, it will likely be mapped to some excepti
ons\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n
\nThe error model and the `Status` message can be used in a variety of\nenvironm
ents, either with or without APIs, to provide a\nconsistent developer experience
across different environments.\n\nExample uses of this error model include:\n\n
- Partial errors. If a service needs to return partial errors to the client,\n
it may embed the `Status` in the normal response to indicate the partial\n
errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step m
ay\n have a `Status` message for error reporting.\n\n- Batch operations. If a
client uses batch request and batch response, the\n `Status` message should
be used directly inside batch response, one for\n each error sub-response.\n\
n- Asynchronous operations. If an API call embeds asynchronous operation\n re
sults in its response, the status of those operations should be\n represented
directly using the `Status` message.\n\n- Logging. If some API errors are store
d in logs, the message `Status` could\n be used directly after any stripping
needed for security/privacy reasons.", | 391 "description": "The `Status` type defines a logical error model that
is suitable for different\nprogramming environments, including REST APIs and RP
C APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is desig
ned to be:\n\n- Simple to use and understand for most users\n- Flexible enough t
o meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pie
ces of data: error code, error message,\nand error details. The error code shoul
d be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes
if needed. The\nerror message should be a developer-facing English message tha
t helps\ndevelopers *understand* and *resolve* the error. If a localized user-fa
cing\nerror message is needed, put the localized message in the error details or
\nlocalize it in the client. The optional error details may contain arbitrary\ni
nformation about the error. There is a predefined set of error detail types\nin
the package `google.rpc` that can be used for common error conditions.\n\n# Lang
uage mapping\n\nThe `Status` message is the logical representation of the error
model, but it\nis not necessarily the actual wire format. When the `Status` mess
age is\nexposed in different client libraries and different wire protocols, it c
an be\nmapped differently. For example, it will likely be mapped to some excepti
ons\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n
\nThe error model and the `Status` message can be used in a variety of\nenvironm
ents, either with or without APIs, to provide a\nconsistent developer experience
across different environments.\n\nExample uses of this error model include:\n\n
- Partial errors. If a service needs to return partial errors to the client,\n
it may embed the `Status` in the normal response to indicate the partial\n
errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step m
ay\n have a `Status` message for error reporting.\n\n- Batch operations. If a
client uses batch request and batch response, the\n `Status` message should
be used directly inside batch response, one for\n each error sub-response.\n\
n- Asynchronous operations. If an API call embeds asynchronous operation\n re
sults in its response, the status of those operations should be\n represented
directly using the `Status` message.\n\n- Logging. If some API errors are store
d in logs, the message `Status` could\n be used directly after any stripping
needed for security/privacy reasons.", |
276 "id": "Status", | 392 "id": "Status", |
277 "properties": { | 393 "properties": { |
278 "details": { | 394 "details": { |
279 "description": "A list of messages that carry the error deta
ils. There will be a\ncommon set of message types for APIs to use.", | 395 "description": "A list of messages that carry the error deta
ils. There is a common set of\nmessage types for APIs to use.", |
280 "items": { | 396 "items": { |
281 "additionalProperties": { | 397 "additionalProperties": { |
282 "description": "Properties of the object. Contains f
ield @type with type URL.", | 398 "description": "Properties of the object. Contains f
ield @type with type URL.", |
283 "type": "any" | 399 "type": "any" |
284 }, | 400 }, |
285 "type": "object" | 401 "type": "object" |
286 }, | 402 }, |
287 "type": "array" | 403 "type": "array" |
288 }, | 404 }, |
289 "code": { | 405 "code": { |
(...skipping 31 matching lines...)
321 }, | 437 }, |
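Note: tying this back to `operations.cancel` above, a successfully cancelled operation carries an `Operation.error` whose `Status.code` is 1 (`Code.CANCELLED`). As a sketch (the `message` text is illustrative and `details` may be empty):

    {
      "code": 1,
      "message": "The operation was cancelled.",
      "details": []
    }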
322 "SpeechRecognitionAlternative": { | 438 "SpeechRecognitionAlternative": { |
323 "description": "Alternative hypotheses (a.k.a. n-best list).", | 439 "description": "Alternative hypotheses (a.k.a. n-best list).", |
324 "id": "SpeechRecognitionAlternative", | 440 "id": "SpeechRecognitionAlternative", |
325 "properties": { | 441 "properties": { |
326 "confidence": { | 442 "confidence": { |
327 "description": "*Output-only* The confidence estimate betwee
n 0.0 and 1.0. A higher number\nindicates an estimated greater likelihood that t
he recognized words are\ncorrect. This field is typically provided only for the
top hypothesis, and\nonly for `is_final=true` results. Clients should not rely o
n the\n`confidence` field as it is not guaranteed to be accurate or consistent.\
nThe default of 0.0 is a sentinel value indicating `confidence` was not set.", | 443 "description": "*Output-only* The confidence estimate betwee
n 0.0 and 1.0. A higher number\nindicates an estimated greater likelihood that t
he recognized words are\ncorrect. This field is typically provided only for the
top hypothesis, and\nonly for `is_final=true` results. Clients should not rely o
n the\n`confidence` field as it is not guaranteed to be accurate or consistent.\
nThe default of 0.0 is a sentinel value indicating `confidence` was not set.", |
328 "format": "float", | 444 "format": "float", |
329 "type": "number" | 445 "type": "number" |
330 }, | 446 }, |
| 447 "words": { |
| 448 "description": "*Output-only* List of word-specific informat
ion for each recognized word.", |
| 449 "items": { |
| 450 "$ref": "WordInfo" |
| 451 }, |
| 452 "type": "array" |
| 453 }, |
331 "transcript": { | 454 "transcript": { |
332 "description": "*Output-only* Transcript text representing t
he words that the user spoke.", | 455 "description": "*Output-only* Transcript text representing t
he words that the user spoke.", |
333 "type": "string" | 456 "type": "string" |
334 } | 457 } |
335 }, | 458 }, |
336 "type": "object" | 459 "type": "object" |
337 }, | 460 }, |
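Note: combining this schema with `WordInfo`, one alternative in the top hypothesis could come back roughly as below (transcript, confidence, and timings are illustrative; `words` appears only when word time offsets were requested):

    {
      "transcript": "how old is the Brooklyn Bridge",
      "confidence": 0.98,
      "words": [
        { "word": "how", "startTime": "0s", "endTime": "0.300s" }
      ]
    }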
338 "ListOperationsResponse": { | 461 "ListOperationsResponse": { |
339 "description": "The response message for Operations.ListOperations."
, | 462 "description": "The response message for Operations.ListOperations."
, |
340 "id": "ListOperationsResponse", | 463 "id": "ListOperationsResponse", |
341 "properties": { | 464 "properties": { |
| 465 "nextPageToken": { |
| 466 "description": "The standard List next-page token.", |
| 467 "type": "string" |
| 468 }, |
342 "operations": { | 469 "operations": { |
343 "description": "A list of operations that matches the specif
ied filter in the request.", | 470 "description": "A list of operations that matches the specif
ied filter in the request.", |
344 "items": { | 471 "items": { |
345 "$ref": "Operation" | 472 "$ref": "Operation" |
346 }, | 473 }, |
347 "type": "array" | 474 "type": "array" |
348 }, | |
349 "nextPageToken": { | |
350 "description": "The standard List next-page token.", | |
351 "type": "string" | |
352 } | 475 } |
353 }, | 476 }, |
354 "type": "object" | 477 "type": "object" |
355 }, | 478 }, |
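Note: a page of results would then carry zero or more `Operation` messages plus the standard next-page token, roughly like this sketch (the token value is an opaque placeholder):

    {
      "operations": [
        { "name": "operations/1234567890", "done": false }
      ],
      "nextPageToken": "OPAQUE_PAGE_TOKEN"
    }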
356 "SpeechContext": { | 479 "SpeechContext": { |
357 "description": "Provides \"hints\" to the speech recognizer to favor
specific words and phrases\nin the results.", | 480 "description": "Provides \"hints\" to the speech recognizer to favor
specific words and phrases\nin the results.", |
358 "id": "SpeechContext", | 481 "id": "SpeechContext", |
359 "properties": { | 482 "properties": { |
360 "phrases": { | 483 "phrases": { |
361 "description": "*Optional* A list of strings containing word
s and phrases \"hints\" so that\nthe speech recognition is more likely to recogn
ize them. This can be used\nto improve the accuracy for specific words and phras
es, for example, if\nspecific commands are typically spoken by the user. This ca
n also be used\nto add additional words to the vocabulary of the recognizer. See
\n[usage limits](https://cloud.google.com/speech/limits#content).", | 484 "description": "*Optional* A list of strings containing word
s and phrases \"hints\" so that\nthe speech recognition is more likely to recogn
ize them. This can be used\nto improve the accuracy for specific words and phras
es, for example, if\nspecific commands are typically spoken by the user. This ca
n also be used\nto add additional words to the vocabulary of the recognizer. See
\n[usage limits](https://cloud.google.com/speech/limits#content).", |
(...skipping 62 matching lines...)
424 "type": "array" | 547 "type": "array" |
425 } | 548 } |
426 }, | 549 }, |
427 "type": "object" | 550 "type": "object" |
428 }, | 551 }, |
429 "CancelOperationRequest": { | 552 "CancelOperationRequest": { |
430 "description": "The request message for Operations.CancelOperation."
, | 553 "description": "The request message for Operations.CancelOperation."
, |
431 "id": "CancelOperationRequest", | 554 "id": "CancelOperationRequest", |
432 "properties": {}, | 555 "properties": {}, |
433 "type": "object" | 556 "type": "object" |
434 }, | |
435 "Operation": { | |
436 "description": "This resource represents a long-running operation th
at is the result of a\nnetwork API call.", | |
437 "id": "Operation", | |
438 "properties": { | |
439 "response": { | |
440 "additionalProperties": { | |
441 "description": "Properties of the object. Contains field
@type with type URL.", | |
442 "type": "any" | |
443 }, | |
444 "description": "The normal response of the operation in case
of success. If the original\nmethod returns no data on success, such as `Delet
e`, the response is\n`google.protobuf.Empty`. If the original method is standar
d\n`Get`/`Create`/`Update`, the response should be the resource. For other\nmet
hods, the response should have the type `XxxResponse`, where `Xxx`\nis the origi
nal method name. For example, if the original method name\nis `TakeSnapshot()`,
the inferred response type is\n`TakeSnapshotResponse`.", | |
445 "type": "object" | |
446 }, | |
447 "name": { | |
448 "description": "The server-assigned name, which is only uniq
ue within the same service that\noriginally returns it. If you use the default H
TTP mapping, the\n`name` should have the format of `operations/some/unique/name`
.", | |
449 "type": "string" | |
450 }, | |
451 "error": { | |
452 "$ref": "Status", | |
453 "description": "The error result of the operation in case of
failure or cancellation." | |
454 }, | |
455 "metadata": { | |
456 "additionalProperties": { | |
457 "description": "Properties of the object. Contains field
@type with type URL.", | |
458 "type": "any" | |
459 }, | |
460 "description": "Service-specific metadata associated with th
e operation. It typically\ncontains progress information and common metadata su
ch as create time.\nSome services might not provide such metadata. Any method t
hat returns a\nlong-running operation should document the metadata type, if any.
", | |
461 "type": "object" | |
462 }, | |
463 "done": { | |
464 "description": "If the value is `false`, it means the operat
ion is still in progress.\nIf true, the operation is completed, and either `erro
r` or `response` is\navailable.", | |
465 "type": "boolean" | |
466 } | |
467 }, | |
468 "type": "object" | |
469 }, | |
470 "RecognitionConfig": { | |
471 "description": "Provides information to the recognizer that specifie
s how to process the\nrequest.", | |
472 "id": "RecognitionConfig", | |
473 "properties": { | |
474 "sampleRateHertz": { | |
475 "description": "*Required* Sample rate in Hertz of the audio
data sent in all\n`RecognitionAudio` messages. Valid values are: 8000-48000.\n1
6000 is optimal. For best results, set the sampling rate of the audio\nsource to
16000 Hz. If that's not possible, use the native sample rate of\nthe audio sour
ce (instead of re-sampling).", | |
476 "format": "int32", | |
477 "type": "integer" | |
478 }, | |
479 "maxAlternatives": { | |
480 "description": "*Optional* Maximum number of recognition hyp
otheses to be returned.\nSpecifically, the maximum number of `SpeechRecognitionA
lternative` messages\nwithin each `SpeechRecognitionResult`.\nThe server may ret
urn fewer than `max_alternatives`.\nValid values are `0`-`30`. A value of `0` or
`1` will return a maximum of\none. If omitted, will return a maximum of one.", | |
481 "format": "int32", | |
482 "type": "integer" | |
483 }, | |
484 "languageCode": { | |
485 "description": "*Required* The language of the supplied audi
o as a\n[BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.\nEx
ample: \"en-US\".\nSee [Language Support](https://cloud.google.com/speech/docs/l
anguages)\nfor a list of the currently supported language codes.", | |
486 "type": "string" | |
487 }, | |
488 "speechContexts": { | |
489 "description": "*Optional* A means to provide context to ass
ist the speech recognition.", | |
490 "items": { | |
491 "$ref": "SpeechContext" | |
492 }, | |
493 "type": "array" | |
494 }, | |
495 "encoding": { | |
496 "description": "*Required* Encoding of audio data sent in al
l `RecognitionAudio` messages.", | |
497 "enum": [ | |
498 "ENCODING_UNSPECIFIED", | |
499 "LINEAR16", | |
500 "FLAC", | |
501 "MULAW", | |
502 "AMR", | |
503 "AMR_WB", | |
504 "OGG_OPUS", | |
505 "SPEEX_WITH_HEADER_BYTE" | |
506 ], | |
507 "enumDescriptions": [ | |
508 "Not specified. Will return result google.rpc.Code.INVAL
ID_ARGUMENT.", | |
509 "Uncompressed 16-bit signed little-endian samples (Linea
r PCM).", | |
510 "[`FLAC`](https://xiph.org/flac/documentation.html) (Fre
e Lossless Audio\nCodec) is the recommended encoding because it is\nlossless--th
erefore recognition is not compromised--and\nrequires only about half the bandwi
dth of `LINEAR16`. `FLAC` stream\nencoding supports 16-bit and 24-bit samples, h
owever, not all fields in\n`STREAMINFO` are supported.", | |
511 "8-bit samples that compand 14-bit audio samples using G
.711 PCMU/mu-law.", | |
512 "Adaptive Multi-Rate Narrowband codec. `sample_rate_hert
z` must be 8000.", | |
513 "Adaptive Multi-Rate Wideband codec. `sample_rate_hertz`
must be 16000.", | |
514 "Opus encoded audio frames in Ogg container\n([OggOpus](
https://wiki.xiph.org/OggOpus)).\n`sample_rate_hertz` must be 16000.", | |
515 "Although the use of lossy encodings is not recommended,
if a very low\nbitrate encoding is required, `OGG_OPUS` is highly preferred ove
r\nSpeex encoding. The [Speex](https://speex.org/) encoding supported by\nCloud
Speech API has a header byte in each block, as in MIME type\n`audio/x-speex-wit
h-header-byte`.\nIt is a variant of the RTP Speex encoding defined in\n[RFC 5574
](https://tools.ietf.org/html/rfc5574).\nThe stream is a sequence of blocks, one
block per RTP packet. Each block\nstarts with a byte containing the length of t
he block, in bytes, followed\nby one or more frames of Speex data, padded to an
integral number of\nbytes (octets) as specified in RFC 5574. In other words, eac
h RTP header\nis replaced with a single byte containing the block length. Only S
peex\nwideband is supported. `sample_rate_hertz` must be 16000." | |
516 ], | |
517 "type": "string" | |
518 }, | |
519 "profanityFilter": { | |
520 "description": "*Optional* If set to `true`, the server will
attempt to filter out\nprofanities, replacing all but the initial character in
each filtered word\nwith asterisks, e.g. \"f***\". If set to `false` or omitted,
profanities\nwon't be filtered out.", | |
521 "type": "boolean" | |
522 } | |
523 }, | |
524 "type": "object" | |
525 } | 557 } |
526 }, | 558 }, |
527 "servicePath": "", | 559 "servicePath": "", |
528 "title": "Google Cloud Speech API", | 560 "title": "Google Cloud Speech API", |
529 "version": "v1" | 561 "version": "v1" |
530 } | 562 } |