Chromium Code Reviews

Side by Side Diff: discovery/googleapis_beta/language__v1beta2.json

Issue 2987103002: Api-Roll 52: 2017-07-31 (Closed)
Patch Set: Created 3 years, 4 months ago
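For orientation while reading the diff below, here is a minimal sketch of calling the v1beta2 API that this discovery document describes. It is illustrative only and not part of the roll: the URL is assembled from the "baseUrl" and the documents:analyzeSentiment "path" in the file, the body follows the AnalyzeSentimentRequest schema, and the requests package plus the placeholder YOUR_API_KEY are assumptions.

import requests

# Endpoint from "baseUrl" plus the analyzeSentiment method "path" in the
# discovery document below (assumption: key-based access via the "key"
# query parameter; substitute a real API key for YOUR_API_KEY).
url = "https://language.googleapis.com/v1beta2/documents:analyzeSentiment"

# Body per the AnalyzeSentimentRequest schema: a Document plus an optional
# encodingType that controls how sentence offsets are calculated.
body = {
    "document": {
        "type": "PLAIN_TEXT",
        "content": "Cloud Natural Language makes text analysis straightforward.",
    },
    "encodingType": "UTF8",
}

resp = requests.post(url, params={"key": "YOUR_API_KEY"}, json=body)
resp.raise_for_status()

# AnalyzeSentimentResponse carries a document-level Sentiment (score and
# magnitude) plus per-sentence sentiment.
print(resp.json()["documentSentiment"])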
1 { 1 {
2 "auth": { 2 "auth": {
3 "oauth2": { 3 "oauth2": {
4 "scopes": { 4 "scopes": {
5 "https://www.googleapis.com/auth/cloud-platform": { 5 "https://www.googleapis.com/auth/cloud-platform": {
6 "description": "View and manage your data across Google Clou d Platform services" 6 "description": "View and manage your data across Google Clou d Platform services"
7 },
8 "https://www.googleapis.com/auth/cloud-language": {
9 "description": "Apply machine learning models to reveal the structure and meaning of text"
7 } 10 }
8 } 11 }
9 } 12 }
10 }, 13 },
11 "basePath": "", 14 "basePath": "",
12 "baseUrl": "https://language.googleapis.com/", 15 "baseUrl": "https://language.googleapis.com/",
13 "batchPath": "batch", 16 "batchPath": "batch",
14 "canonicalName": "Cloud Natural Language", 17 "canonicalName": "Cloud Natural Language",
15 "description": "Provides natural language understanding technologies to deve lopers. Examples include sentiment analysis, entity recognition, entity sentimen t analysis, and text annotations.", 18 "description": "Provides natural language understanding technologies to deve lopers. Examples include sentiment analysis, entity recognition, entity sentimen t analysis, and text annotations.",
16 "discoveryVersion": "v1", 19 "discoveryVersion": "v1",
17 "documentationLink": "https://cloud.google.com/natural-language/", 20 "documentationLink": "https://cloud.google.com/natural-language/",
18 "icons": { 21 "icons": {
19 "x16": "http://www.google.com/images/icons/product/search-16.gif", 22 "x16": "http://www.google.com/images/icons/product/search-16.gif",
20 "x32": "http://www.google.com/images/icons/product/search-32.gif" 23 "x32": "http://www.google.com/images/icons/product/search-32.gif"
21 }, 24 },
22 "id": "language:v1beta2", 25 "id": "language:v1beta2",
23 "kind": "discovery#restDescription", 26 "kind": "discovery#restDescription",
24 "name": "language", 27 "name": "language",
25 "ownerDomain": "google.com", 28 "ownerDomain": "google.com",
26 "ownerName": "Google", 29 "ownerName": "Google",
27 "parameters": { 30 "parameters": {
31 "quotaUser": {
32 "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exc eed 40 characters.",
33 "location": "query",
34 "type": "string"
35 },
36 "pp": {
37 "default": "true",
38 "description": "Pretty-print response.",
39 "location": "query",
40 "type": "boolean"
41 },
28 "oauth_token": { 42 "oauth_token": {
29 "description": "OAuth 2.0 token for the current user.", 43 "description": "OAuth 2.0 token for the current user.",
30 "location": "query", 44 "location": "query",
31 "type": "string" 45 "type": "string"
32 }, 46 },
33 "bearer_token": { 47 "bearer_token": {
34 "description": "OAuth bearer token.", 48 "description": "OAuth bearer token.",
35 "location": "query", 49 "location": "query",
36 "type": "string" 50 "type": "string"
37 }, 51 },
(...skipping 11 matching lines...)
49 "uploadType": { 63 "uploadType": {
50 "description": "Legacy upload protocol for media (e.g. \"media\", \" multipart\").", 64 "description": "Legacy upload protocol for media (e.g. \"media\", \" multipart\").",
51 "location": "query", 65 "location": "query",
52 "type": "string" 66 "type": "string"
53 }, 67 },
54 "fields": { 68 "fields": {
55 "description": "Selector specifying which fields to include in a par tial response.", 69 "description": "Selector specifying which fields to include in a par tial response.",
56 "location": "query", 70 "location": "query",
57 "type": "string" 71 "type": "string"
58 }, 72 },
73 "callback": {
74 "description": "JSONP",
75 "location": "query",
76 "type": "string"
77 },
59 "$.xgafv": { 78 "$.xgafv": {
60 "description": "V1 error format.", 79 "description": "V1 error format.",
61 "enum": [ 80 "enum": [
62 "1", 81 "1",
63 "2" 82 "2"
64 ], 83 ],
65 "enumDescriptions": [ 84 "enumDescriptions": [
66 "v1 error format", 85 "v1 error format",
67 "v2 error format" 86 "v2 error format"
68 ], 87 ],
69 "location": "query", 88 "location": "query",
70 "type": "string" 89 "type": "string"
71 }, 90 },
72 "callback": {
73 "description": "JSONP",
74 "location": "query",
75 "type": "string"
76 },
77 "alt": { 91 "alt": {
78 "default": "json", 92 "default": "json",
79 "description": "Data format for response.", 93 "description": "Data format for response.",
80 "enum": [ 94 "enum": [
81 "json", 95 "json",
82 "media", 96 "media",
83 "proto" 97 "proto"
84 ], 98 ],
85 "enumDescriptions": [ 99 "enumDescriptions": [
86 "Responses with Content-Type of application/json", 100 "Responses with Content-Type of application/json",
87 "Media download with context-dependent Content-Type", 101 "Media download with context-dependent Content-Type",
88 "Responses with Content-Type of application/x-protobuf" 102 "Responses with Content-Type of application/x-protobuf"
89 ], 103 ],
90 "location": "query", 104 "location": "query",
91 "type": "string" 105 "type": "string"
92 }, 106 },
93 "access_token": { 107 "access_token": {
94 "description": "OAuth access token.", 108 "description": "OAuth access token.",
95 "location": "query", 109 "location": "query",
96 "type": "string" 110 "type": "string"
97 }, 111 },
98 "key": { 112 "key": {
99 "description": "API key. Your API key identifies your project and pr ovides you with API access, quota, and reports. Required unless you provide an O Auth 2.0 token.", 113 "description": "API key. Your API key identifies your project and pr ovides you with API access, quota, and reports. Required unless you provide an O Auth 2.0 token.",
100 "location": "query", 114 "location": "query",
101 "type": "string" 115 "type": "string"
102 },
103 "quotaUser": {
104 "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exc eed 40 characters.",
105 "location": "query",
106 "type": "string"
107 },
108 "pp": {
109 "default": "true",
110 "description": "Pretty-print response.",
111 "location": "query",
112 "type": "boolean"
113 } 116 }
114 }, 117 },
115 "protocol": "rest", 118 "protocol": "rest",
116 "resources": { 119 "resources": {
117 "documents": { 120 "documents": {
118 "methods": { 121 "methods": {
122 "annotateText": {
123 "description": "A convenience method that provides all synta x, sentiment, entity, and\nclassification features in one call.",
124 "httpMethod": "POST",
125 "id": "language.documents.annotateText",
126 "parameterOrder": [],
127 "parameters": {},
128 "path": "v1beta2/documents:annotateText",
129 "request": {
130 "$ref": "AnnotateTextRequest"
131 },
132 "response": {
133 "$ref": "AnnotateTextResponse"
134 },
135 "scopes": [
136 "https://www.googleapis.com/auth/cloud-language",
137 "https://www.googleapis.com/auth/cloud-platform"
138 ]
139 },
119 "analyzeEntitySentiment": { 140 "analyzeEntitySentiment": {
120 "description": "Finds entities, similar to AnalyzeEntities i n the text and analyzes\nsentiment associated with each entity and its mentions. ", 141 "description": "Finds entities, similar to AnalyzeEntities i n the text and analyzes\nsentiment associated with each entity and its mentions. ",
121 "httpMethod": "POST", 142 "httpMethod": "POST",
122 "id": "language.documents.analyzeEntitySentiment", 143 "id": "language.documents.analyzeEntitySentiment",
123 "parameterOrder": [], 144 "parameterOrder": [],
124 "parameters": {}, 145 "parameters": {},
125 "path": "v1beta2/documents:analyzeEntitySentiment", 146 "path": "v1beta2/documents:analyzeEntitySentiment",
126 "request": { 147 "request": {
127 "$ref": "AnalyzeEntitySentimentRequest" 148 "$ref": "AnalyzeEntitySentimentRequest"
128 }, 149 },
129 "response": { 150 "response": {
130 "$ref": "AnalyzeEntitySentimentResponse" 151 "$ref": "AnalyzeEntitySentimentResponse"
131 }, 152 },
132 "scopes": [ 153 "scopes": [
154 "https://www.googleapis.com/auth/cloud-language",
133 "https://www.googleapis.com/auth/cloud-platform" 155 "https://www.googleapis.com/auth/cloud-platform"
134 ] 156 ]
135 }, 157 },
136 "analyzeEntities": { 158 "analyzeEntities": {
137 "description": "Finds named entities (currently proper names and common nouns) in the text\nalong with entity types, salience, mentions for each entity, and\nother properties.", 159 "description": "Finds named entities (currently proper names and common nouns) in the text\nalong with entity types, salience, mentions for each entity, and\nother properties.",
138 "httpMethod": "POST", 160 "httpMethod": "POST",
139 "id": "language.documents.analyzeEntities", 161 "id": "language.documents.analyzeEntities",
140 "parameterOrder": [], 162 "parameterOrder": [],
141 "parameters": {}, 163 "parameters": {},
142 "path": "v1beta2/documents:analyzeEntities", 164 "path": "v1beta2/documents:analyzeEntities",
143 "request": { 165 "request": {
144 "$ref": "AnalyzeEntitiesRequest" 166 "$ref": "AnalyzeEntitiesRequest"
145 }, 167 },
146 "response": { 168 "response": {
147 "$ref": "AnalyzeEntitiesResponse" 169 "$ref": "AnalyzeEntitiesResponse"
148 }, 170 },
149 "scopes": [ 171 "scopes": [
172 "https://www.googleapis.com/auth/cloud-language",
150 "https://www.googleapis.com/auth/cloud-platform" 173 "https://www.googleapis.com/auth/cloud-platform"
151 ] 174 ]
152 }, 175 },
153 "analyzeSyntax": { 176 "analyzeSyntax": {
154 "description": "Analyzes the syntax of the text and provides sentence boundaries and\ntokenization along with part of speech tags, dependenc y trees, and other\nproperties.", 177 "description": "Analyzes the syntax of the text and provides sentence boundaries and\ntokenization along with part of speech tags, dependenc y trees, and other\nproperties.",
155 "httpMethod": "POST", 178 "httpMethod": "POST",
156 "id": "language.documents.analyzeSyntax", 179 "id": "language.documents.analyzeSyntax",
157 "parameterOrder": [], 180 "parameterOrder": [],
158 "parameters": {}, 181 "parameters": {},
159 "path": "v1beta2/documents:analyzeSyntax", 182 "path": "v1beta2/documents:analyzeSyntax",
160 "request": { 183 "request": {
161 "$ref": "AnalyzeSyntaxRequest" 184 "$ref": "AnalyzeSyntaxRequest"
162 }, 185 },
163 "response": { 186 "response": {
164 "$ref": "AnalyzeSyntaxResponse" 187 "$ref": "AnalyzeSyntaxResponse"
165 }, 188 },
166 "scopes": [ 189 "scopes": [
190 "https://www.googleapis.com/auth/cloud-language",
167 "https://www.googleapis.com/auth/cloud-platform" 191 "https://www.googleapis.com/auth/cloud-platform"
168 ] 192 ]
169 }, 193 },
170 "analyzeSentiment": { 194 "analyzeSentiment": {
171 "description": "Analyzes the sentiment of the provided text. ", 195 "description": "Analyzes the sentiment of the provided text. ",
172 "httpMethod": "POST", 196 "httpMethod": "POST",
173 "id": "language.documents.analyzeSentiment", 197 "id": "language.documents.analyzeSentiment",
174 "parameterOrder": [], 198 "parameterOrder": [],
175 "parameters": {}, 199 "parameters": {},
176 "path": "v1beta2/documents:analyzeSentiment", 200 "path": "v1beta2/documents:analyzeSentiment",
177 "request": { 201 "request": {
178 "$ref": "AnalyzeSentimentRequest" 202 "$ref": "AnalyzeSentimentRequest"
179 }, 203 },
180 "response": { 204 "response": {
181 "$ref": "AnalyzeSentimentResponse" 205 "$ref": "AnalyzeSentimentResponse"
182 }, 206 },
183 "scopes": [ 207 "scopes": [
184 "https://www.googleapis.com/auth/cloud-platform" 208 "https://www.googleapis.com/auth/cloud-language",
185 ]
186 },
187 "annotateText": {
188 "description": "A convenience method that provides all synta x, sentiment, entity, and\nclassification features in one call.",
189 "httpMethod": "POST",
190 "id": "language.documents.annotateText",
191 "parameterOrder": [],
192 "parameters": {},
193 "path": "v1beta2/documents:annotateText",
194 "request": {
195 "$ref": "AnnotateTextRequest"
196 },
197 "response": {
198 "$ref": "AnnotateTextResponse"
199 },
200 "scopes": [
201 "https://www.googleapis.com/auth/cloud-platform" 209 "https://www.googleapis.com/auth/cloud-platform"
202 ] 210 ]
203 } 211 }
204 } 212 }
205 } 213 }
206 }, 214 },
207 "revision": "20170619", 215 "revision": "20170726",
208 "rootUrl": "https://language.googleapis.com/", 216 "rootUrl": "https://language.googleapis.com/",
209 "schemas": { 217 "schemas": {
210 "Status": { 218 "AnalyzeSyntaxResponse": {
211 "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RP C APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is desig ned to be:\n\n- Simple to use and understand for most users\n- Flexible enough t o meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pie ces of data: error code, error message,\nand error details. The error code shoul d be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message tha t helps\ndevelopers *understand* and *resolve* the error. If a localized user-fa cing\nerror message is needed, put the localized message in the error details or \nlocalize it in the client. The optional error details may contain arbitrary\ni nformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` that can be used for common error conditions.\n\n# Lang uage mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` mess age is\nexposed in different client libraries and different wire protocols, it c an be\nmapped differently. For example, it will likely be mapped to some excepti ons\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n \nThe error model and the `Status` message can be used in a variety of\nenvironm ents, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n - Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step m ay\n have a `Status` message for error reporting.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\ n- Asynchronous operations. If an API call embeds asynchronous operation\n re sults in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are store d in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.", 219 "description": "The syntax analysis response message.",
212 "id": "Status", 220 "id": "AnalyzeSyntaxResponse",
213 "properties": { 221 "properties": {
214 "details": { 222 "language": {
215 "description": "A list of messages that carry the error deta ils. There will be a\ncommon set of message types for APIs to use.", 223 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automat ically-detected language.\nSee Document.language field for more details.",
224 "type": "string"
225 },
226 "sentences": {
227 "description": "Sentences in the input document.",
216 "items": { 228 "items": {
217 "additionalProperties": { 229 "$ref": "Sentence"
218 "description": "Properties of the object. Contains f ield @type with type URL.",
219 "type": "any"
220 },
221 "type": "object"
222 }, 230 },
223 "type": "array" 231 "type": "array"
224 }, 232 },
225 "code": { 233 "tokens": {
226 "description": "The status code, which should be an enum val ue of google.rpc.Code.", 234 "description": "Tokens, along with their syntactic informati on, in the input document.",
227 "format": "int32", 235 "items": {
228 "type": "integer" 236 "$ref": "Token"
229 }, 237 },
230 "message": { 238 "type": "array"
231 "description": "A developer-facing error message, which shou ld be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.",
232 "type": "string"
233 } 239 }
234 }, 240 },
235 "type": "object" 241 "type": "object"
236 }, 242 },
237 "EntityMention": { 243 "Entity": {
238 "description": "Represents a mention for an entity in the text. Curr ently, proper noun\nmentions are supported.", 244 "description": "Represents a phrase in the text that is a known enti ty, such as\na person, an organization, or location. The API associates informat ion, such\nas salience and mentions, with entities.",
239 "id": "EntityMention", 245 "id": "Entity",
240 "properties": { 246 "properties": {
241 "sentiment": { 247 "name": {
242 "$ref": "Sentiment", 248 "description": "The representative name for the entity.",
243 "description": "For calls to AnalyzeEntitySentiment or if\nA nnotateTextRequest.Features.extract_entity_sentiment is set to\ntrue, this field will contain the sentiment expressed for this mention of\nthe entity in the pro vided document." 249 "type": "string"
244 },
245 "text": {
246 "$ref": "TextSpan",
247 "description": "The mention text."
248 }, 250 },
249 "type": { 251 "type": {
250 "description": "The type of the entity mention.", 252 "description": "The entity type.",
251 "enum": [ 253 "enum": [
252 "TYPE_UNKNOWN", 254 "UNKNOWN",
253 "PROPER", 255 "PERSON",
254 "COMMON" 256 "LOCATION",
257 "ORGANIZATION",
258 "EVENT",
259 "WORK_OF_ART",
260 "CONSUMER_GOOD",
261 "OTHER"
255 ], 262 ],
256 "enumDescriptions": [ 263 "enumDescriptions": [
257 "Unknown", 264 "Unknown",
258 "Proper name", 265 "Person",
259 "Common noun (or noun compound)" 266 "Location",
267 "Organization",
268 "Event",
269 "Work of art",
270 "Consumer goods",
271 "Other types"
260 ], 272 ],
261 "type": "string" 273 "type": "string"
274 },
275 "metadata": {
276 "additionalProperties": {
277 "type": "string"
278 },
279 "description": "Metadata associated with the entity.\n\nCurr ently, Wikipedia URLs and Knowledge Graph MIDs are provided, if\navailable. The associated keys are \"wikipedia_url\" and \"mid\", respectively.",
280 "type": "object"
281 },
282 "salience": {
283 "description": "The salience score associated with the entit y in the [0, 1.0] range.\n\nThe salience score for an entity provides informatio n about the\nimportance or centrality of that entity to the entire document text .\nScores closer to 0 are less salient, while scores closer to 1.0 are highly\ns alient.",
284 "format": "float",
285 "type": "number"
286 },
287 "sentiment": {
288 "$ref": "Sentiment",
289 "description": "For calls to AnalyzeEntitySentiment or if\nA nnotateTextRequest.Features.extract_entity_sentiment is set to\ntrue, this field will contain the aggregate sentiment expressed for this\nentity in the provided document."
290 },
291 "mentions": {
292 "description": "The mentions of this entity in the input doc ument. The API currently\nsupports proper noun mentions.",
293 "items": {
294 "$ref": "EntityMention"
295 },
296 "type": "array"
262 } 297 }
263 }, 298 },
264 "type": "object" 299 "type": "object"
265 }, 300 },
266 "Features": { 301 "AnnotateTextRequest": {
267 "description": "All available features for sentiment, syntax, and se mantic analysis.\nSetting each one to true will enable that specific analysis fo r the input.", 302 "description": "The request message for the text annotation API, whi ch can perform multiple\nanalysis types (sentiment, entities, and syntax) in one call.",
268 "id": "Features", 303 "id": "AnnotateTextRequest",
269 "properties": {
270 "extractSyntax": {
271 "description": "Extract syntax information.",
272 "type": "boolean"
273 },
274 "extractDocumentSentiment": {
275 "description": "Extract document-level sentiment.",
276 "type": "boolean"
277 },
278 "extractEntitySentiment": {
279 "description": "Extract entities and their associated sentim ent.",
280 "type": "boolean"
281 },
282 "extractEntities": {
283 "description": "Extract entities.",
284 "type": "boolean"
285 }
286 },
287 "type": "object"
288 },
289 "Sentence": {
290 "description": "Represents a sentence in the input document.",
291 "id": "Sentence",
292 "properties": {
293 "text": {
294 "$ref": "TextSpan",
295 "description": "The sentence text."
296 },
297 "sentiment": {
298 "$ref": "Sentiment",
299 "description": "For calls to AnalyzeSentiment or if\nAnnotat eTextRequest.Features.extract_document_sentiment is set to\ntrue, this field wil l contain the sentiment for the sentence."
300 }
301 },
302 "type": "object"
303 },
304 "Document": {
305 "description": "#################################################### ############ #\n\nRepresents the input to API methods.",
306 "id": "Document",
307 "properties": {
308 "gcsContentUri": {
309 "description": "The Google Cloud Storage URI where the file content is located.\nThis URI must be of the form: gs://bucket_name/object_name. For more\ndetails, see https://cloud.google.com/storage/docs/reference-uris.\nN OTE: Cloud Storage object versioning is not supported.",
310 "type": "string"
311 },
312 "language": {
313 "description": "The language of the document (if not specifi ed, the language is\nautomatically detected). Both ISO and BCP-47 language codes are\naccepted.<br>\n[Language Support](/natural-language/docs/languages)\nlists currently supported languages for each API method.\nIf the language (either spe cified by the caller or automatically detected)\nis not supported by the called API method, an `INVALID_ARGUMENT` error\nis returned.",
314 "type": "string"
315 },
316 "type": {
317 "description": "Required. If the type is not set or is `TYPE _UNSPECIFIED`,\nreturns an `INVALID_ARGUMENT` error.",
318 "enum": [
319 "TYPE_UNSPECIFIED",
320 "PLAIN_TEXT",
321 "HTML"
322 ],
323 "enumDescriptions": [
324 "The content type is not specified.",
325 "Plain text",
326 "HTML"
327 ],
328 "type": "string"
329 },
330 "content": {
331 "description": "The content of the input in string format.",
332 "type": "string"
333 }
334 },
335 "type": "object"
336 },
337 "Sentiment": {
338 "description": "Represents the feeling associated with the entire te xt or entities in\nthe text.",
339 "id": "Sentiment",
340 "properties": {
341 "score": {
342 "description": "Sentiment score between -1.0 (negative senti ment) and 1.0\n(positive sentiment).",
343 "format": "float",
344 "type": "number"
345 },
346 "magnitude": {
347 "description": "A non-negative number in the [0, +inf) range , which represents\nthe absolute magnitude of sentiment regardless of score (pos itive or\nnegative).",
348 "format": "float",
349 "type": "number"
350 }
351 },
352 "type": "object"
353 },
354 "AnalyzeEntitiesRequest": {
355 "description": "The entity analysis request message.",
356 "id": "AnalyzeEntitiesRequest",
357 "properties": { 304 "properties": {
358 "encodingType": { 305 "encodingType": {
359 "description": "The encoding type used by the API to calcula te offsets.", 306 "description": "The encoding type used by the API to calcula te offsets.",
360 "enum": [ 307 "enum": [
361 "NONE", 308 "NONE",
362 "UTF8", 309 "UTF8",
363 "UTF16", 310 "UTF16",
364 "UTF32" 311 "UTF32"
365 ], 312 ],
366 "enumDescriptions": [ 313 "enumDescriptions": [
367 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", 314 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
368 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are example s of languages\nthat use this encoding natively.", 315 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are example s of languages\nthat use this encoding natively.",
369 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript a re examples of\nlanguages that use this encoding natively.", 316 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript a re examples of\nlanguages that use this encoding natively.",
370 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." 317 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
371 ], 318 ],
372 "type": "string" 319 "type": "string"
373 }, 320 },
374 "document": { 321 "document": {
375 "$ref": "Document", 322 "$ref": "Document",
376 "description": "Input document." 323 "description": "Input document."
324 },
325 "features": {
326 "$ref": "Features",
327 "description": "The enabled features."
377 } 328 }
378 }, 329 },
379 "type": "object" 330 "type": "object"
380 }, 331 },
381 "AnalyzeEntitySentimentResponse": { 332 "AnnotateTextResponse": {
382 "description": "The entity-level sentiment analysis response message .", 333 "description": "The text annotations response message.",
383 "id": "AnalyzeEntitySentimentResponse", 334 "id": "AnnotateTextResponse",
384 "properties": { 335 "properties": {
336 "documentSentiment": {
337 "$ref": "Sentiment",
338 "description": "The overall sentiment for the document. Popu lated if the user enables\nAnnotateTextRequest.Features.extract_document_sentime nt."
339 },
385 "language": { 340 "language": {
386 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automat ically-detected language.\nSee Document.language field for more details.", 341 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automat ically-detected language.\nSee Document.language field for more details.",
387 "type": "string" 342 "type": "string"
388 }, 343 },
344 "sentences": {
345 "description": "Sentences in the input document. Populated i f the user enables\nAnnotateTextRequest.Features.extract_syntax.",
346 "items": {
347 "$ref": "Sentence"
348 },
349 "type": "array"
350 },
351 "tokens": {
352 "description": "Tokens, along with their syntactic informati on, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.F eatures.extract_syntax.",
353 "items": {
354 "$ref": "Token"
355 },
356 "type": "array"
357 },
389 "entities": { 358 "entities": {
390 "description": "The recognized entities in the input documen t with associated sentiments.", 359 "description": "Entities, along with their semantic informat ion, in the input document.\nPopulated if the user enables\nAnnotateTextRequest. Features.extract_entities.",
391 "items": { 360 "items": {
392 "$ref": "Entity" 361 "$ref": "Entity"
393 }, 362 },
394 "type": "array" 363 "type": "array"
395 } 364 }
396 }, 365 },
397 "type": "object" 366 "type": "object"
398 }, 367 },
399 "AnalyzeEntitySentimentRequest": { 368 "AnalyzeSentimentRequest": {
400 "description": "The entity-level sentiment analysis request message. ", 369 "description": "The sentiment analysis request message.",
401 "id": "AnalyzeEntitySentimentRequest", 370 "id": "AnalyzeSentimentRequest",
402 "properties": { 371 "properties": {
403 "encodingType": { 372 "encodingType": {
404 "description": "The encoding type used by the API to calcula te offsets.", 373 "description": "The encoding type used by the API to calcula te sentence offsets for the\nsentence sentiment.",
405 "enum": [ 374 "enum": [
406 "NONE", 375 "NONE",
407 "UTF8", 376 "UTF8",
408 "UTF16", 377 "UTF16",
409 "UTF32" 378 "UTF32"
410 ], 379 ],
411 "enumDescriptions": [ 380 "enumDescriptions": [
412 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", 381 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
413 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are example s of languages\nthat use this encoding natively.", 382 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are example s of languages\nthat use this encoding natively.",
414 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript a re examples of\nlanguages that use this encoding natively.", 383 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript a re examples of\nlanguages that use this encoding natively.",
415 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." 384 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
416 ], 385 ],
417 "type": "string" 386 "type": "string"
418 }, 387 },
419 "document": { 388 "document": {
420 "$ref": "Document", 389 "$ref": "Document",
421 "description": "Input document." 390 "description": "Input document."
422 } 391 }
423 }, 392 },
424 "type": "object" 393 "type": "object"
425 }, 394 },
395 "DependencyEdge": {
396 "description": "Represents dependency parse tree information for a t oken.",
397 "id": "DependencyEdge",
398 "properties": {
399 "label": {
400 "description": "The parse label for the token.",
401 "enum": [
402 "UNKNOWN",
403 "ABBREV",
404 "ACOMP",
405 "ADVCL",
406 "ADVMOD",
407 "AMOD",
408 "APPOS",
409 "ATTR",
410 "AUX",
411 "AUXPASS",
412 "CC",
413 "CCOMP",
414 "CONJ",
415 "CSUBJ",
416 "CSUBJPASS",
417 "DEP",
418 "DET",
419 "DISCOURSE",
420 "DOBJ",
421 "EXPL",
422 "GOESWITH",
423 "IOBJ",
424 "MARK",
425 "MWE",
426 "MWV",
427 "NEG",
428 "NN",
429 "NPADVMOD",
430 "NSUBJ",
431 "NSUBJPASS",
432 "NUM",
433 "NUMBER",
434 "P",
435 "PARATAXIS",
436 "PARTMOD",
437 "PCOMP",
438 "POBJ",
439 "POSS",
440 "POSTNEG",
441 "PRECOMP",
442 "PRECONJ",
443 "PREDET",
444 "PREF",
445 "PREP",
446 "PRONL",
447 "PRT",
448 "PS",
449 "QUANTMOD",
450 "RCMOD",
451 "RCMODREL",
452 "RDROP",
453 "REF",
454 "REMNANT",
455 "REPARANDUM",
456 "ROOT",
457 "SNUM",
458 "SUFF",
459 "TMOD",
460 "TOPIC",
461 "VMOD",
462 "VOCATIVE",
463 "XCOMP",
464 "SUFFIX",
465 "TITLE",
466 "ADVPHMOD",
467 "AUXCAUS",
468 "AUXVV",
469 "DTMOD",
470 "FOREIGN",
471 "KW",
472 "LIST",
473 "NOMC",
474 "NOMCSUBJ",
475 "NOMCSUBJPASS",
476 "NUMC",
477 "COP",
478 "DISLOCATED"
479 ],
480 "enumDescriptions": [
481 "Unknown",
482 "Abbreviation modifier",
483 "Adjectival complement",
484 "Adverbial clause modifier",
485 "Adverbial modifier",
486 "Adjectival modifier of an NP",
487 "Appositional modifier of an NP",
488 "Attribute dependent of a copular verb",
489 "Auxiliary (non-main) verb",
490 "Passive auxiliary",
491 "Coordinating conjunction",
492 "Clausal complement of a verb or adjective",
493 "Conjunct",
494 "Clausal subject",
495 "Clausal passive subject",
496 "Dependency (unable to determine)",
497 "Determiner",
498 "Discourse",
499 "Direct object",
500 "Expletive",
501 "Goes with (part of a word in a text not well edited)",
502 "Indirect object",
503 "Marker (word introducing a subordinate clause)",
504 "Multi-word expression",
505 "Multi-word verbal expression",
506 "Negation modifier",
507 "Noun compound modifier",
508 "Noun phrase used as an adverbial modifier",
509 "Nominal subject",
510 "Passive nominal subject",
511 "Numeric modifier of a noun",
512 "Element of compound number",
513 "Punctuation mark",
514 "Parataxis relation",
515 "Participial modifier",
516 "The complement of a preposition is a clause",
517 "Object of a preposition",
518 "Possession modifier",
519 "Postverbal negative particle",
520 "Predicate complement",
521 "Preconjunt",
522 "Predeterminer",
523 "Prefix",
524 "Prepositional modifier",
525 "The relationship between a verb and verbal morpheme",
526 "Particle",
527 "Associative or possessive marker",
528 "Quantifier phrase modifier",
529 "Relative clause modifier",
530 "Complementizer in relative clause",
531 "Ellipsis without a preceding predicate",
532 "Referent",
533 "Remnant",
534 "Reparandum",
535 "Root",
536 "Suffix specifying a unit of number",
537 "Suffix",
538 "Temporal modifier",
539 "Topic marker",
540 "Clause headed by an infinite form of the verb that modi fies a noun",
541 "Vocative",
542 "Open clausal complement",
543 "Name suffix",
544 "Name title",
545 "Adverbial phrase modifier",
546 "Causative auxiliary",
547 "Helper auxiliary",
548 "Rentaishi (Prenominal modifier)",
549 "Foreign words",
550 "Keyword",
551 "List for chains of comparable items",
552 "Nominalized clause",
553 "Nominalized clausal subject",
554 "Nominalized clausal passive",
555 "Compound of numeric modifier",
556 "Copula",
557 "Dislocated relation (for fronted/topicalized elements)"
558 ],
559 "type": "string"
560 },
561 "headTokenIndex": {
562 "description": "Represents the head of this token in the dep endency tree.\nThis is the index of the token which has an arc going to this tok en.\nThe index is the position of the token in the array of tokens returned\nby the API method. If this token is a root token, then the\n`head_token_index` is i ts own index.",
563 "format": "int32",
564 "type": "integer"
565 }
566 },
567 "type": "object"
568 },
569 "Token": {
570 "description": "Represents the smallest syntactic building block of the text.",
571 "id": "Token",
572 "properties": {
573 "partOfSpeech": {
574 "$ref": "PartOfSpeech",
575 "description": "Parts of speech tag for this token."
576 },
577 "dependencyEdge": {
578 "$ref": "DependencyEdge",
579 "description": "Dependency tree parse for this token."
580 },
581 "text": {
582 "$ref": "TextSpan",
583 "description": "The token text."
584 },
585 "lemma": {
586 "description": "[Lemma](https://en.wikipedia.org/wiki/Lemma_ %28morphology%29) of the token.",
587 "type": "string"
588 }
589 },
590 "type": "object"
591 },
592 "TextSpan": {
593 "description": "Represents an output piece of text.",
594 "id": "TextSpan",
595 "properties": {
596 "beginOffset": {
597 "description": "The API calculates the beginning offset of t he content in the original\ndocument according to the EncodingType specified in the API request.",
598 "format": "int32",
599 "type": "integer"
600 },
601 "content": {
602 "description": "The content of the output text.",
603 "type": "string"
604 }
605 },
606 "type": "object"
607 },
608 "Status": {
609 "description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RP C APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is desig ned to be:\n\n- Simple to use and understand for most users\n- Flexible enough t o meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pie ces of data: error code, error message,\nand error details. The error code shoul d be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message tha t helps\ndevelopers *understand* and *resolve* the error. If a localized user-fa cing\nerror message is needed, put the localized message in the error details or \nlocalize it in the client. The optional error details may contain arbitrary\ni nformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` that can be used for common error conditions.\n\n# Lang uage mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` mess age is\nexposed in different client libraries and different wire protocols, it c an be\nmapped differently. For example, it will likely be mapped to some excepti ons\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n \nThe error model and the `Status` message can be used in a variety of\nenvironm ents, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n - Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step m ay\n have a `Status` message for error reporting.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\ n- Asynchronous operations. If an API call embeds asynchronous operation\n re sults in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are store d in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.",
610 "id": "Status",
611 "properties": {
612 "details": {
613 "description": "A list of messages that carry the error deta ils. There is a common set of\nmessage types for APIs to use.",
614 "items": {
615 "additionalProperties": {
616 "description": "Properties of the object. Contains f ield @type with type URL.",
617 "type": "any"
618 },
619 "type": "object"
620 },
621 "type": "array"
622 },
623 "code": {
624 "description": "The status code, which should be an enum val ue of google.rpc.Code.",
625 "format": "int32",
626 "type": "integer"
627 },
628 "message": {
629 "description": "A developer-facing error message, which shou ld be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.",
630 "type": "string"
631 }
632 },
633 "type": "object"
634 },
635 "Features": {
636 "description": "All available features for sentiment, syntax, and se mantic analysis.\nSetting each one to true will enable that specific analysis fo r the input.",
637 "id": "Features",
638 "properties": {
639 "extractEntities": {
640 "description": "Extract entities.",
641 "type": "boolean"
642 },
643 "extractEntitySentiment": {
644 "description": "Extract entities and their associated sentim ent.",
645 "type": "boolean"
646 },
647 "extractDocumentSentiment": {
648 "description": "Extract document-level sentiment.",
649 "type": "boolean"
650 },
651 "extractSyntax": {
652 "description": "Extract syntax information.",
653 "type": "boolean"
654 }
655 },
656 "type": "object"
657 },
658 "EntityMention": {
659 "description": "Represents a mention for an entity in the text. Curr ently, proper noun\nmentions are supported.",
660 "id": "EntityMention",
661 "properties": {
662 "type": {
663 "description": "The type of the entity mention.",
664 "enum": [
665 "TYPE_UNKNOWN",
666 "PROPER",
667 "COMMON"
668 ],
669 "enumDescriptions": [
670 "Unknown",
671 "Proper name",
672 "Common noun (or noun compound)"
673 ],
674 "type": "string"
675 },
676 "text": {
677 "$ref": "TextSpan",
678 "description": "The mention text."
679 },
680 "sentiment": {
681 "$ref": "Sentiment",
682 "description": "For calls to AnalyzeEntitySentiment or if\nA nnotateTextRequest.Features.extract_entity_sentiment is set to\ntrue, this field will contain the sentiment expressed for this mention of\nthe entity in the pro vided document."
683 }
684 },
685 "type": "object"
686 },
687 "Sentence": {
688 "description": "Represents a sentence in the input document.",
689 "id": "Sentence",
690 "properties": {
691 "text": {
692 "$ref": "TextSpan",
693 "description": "The sentence text."
694 },
695 "sentiment": {
696 "$ref": "Sentiment",
697 "description": "For calls to AnalyzeSentiment or if\nAnnotat eTextRequest.Features.extract_document_sentiment is set to\ntrue, this field wil l contain the sentiment for the sentence."
698 }
699 },
700 "type": "object"
701 },
702 "Document": {
703 "description": "#################################################### ############ #\n\nRepresents the input to API methods.",
704 "id": "Document",
705 "properties": {
706 "language": {
707 "description": "The language of the document (if not specifi ed, the language is\nautomatically detected). Both ISO and BCP-47 language codes are\naccepted.<br>\n[Language Support](/natural-language/docs/languages)\nlists currently supported languages for each API method.\nIf the language (either spe cified by the caller or automatically detected)\nis not supported by the called API method, an `INVALID_ARGUMENT` error\nis returned.",
708 "type": "string"
709 },
710 "content": {
711 "description": "The content of the input in string format.",
712 "type": "string"
713 },
714 "type": {
715 "description": "Required. If the type is not set or is `TYPE _UNSPECIFIED`,\nreturns an `INVALID_ARGUMENT` error.",
716 "enum": [
717 "TYPE_UNSPECIFIED",
718 "PLAIN_TEXT",
719 "HTML"
720 ],
721 "enumDescriptions": [
722 "The content type is not specified.",
723 "Plain text",
724 "HTML"
725 ],
726 "type": "string"
727 },
728 "gcsContentUri": {
729 "description": "The Google Cloud Storage URI where the file content is located.\nThis URI must be of the form: gs://bucket_name/object_name. For more\ndetails, see https://cloud.google.com/storage/docs/reference-uris.\nN OTE: Cloud Storage object versioning is not supported.",
730 "type": "string"
731 }
732 },
733 "type": "object"
734 },
735 "Sentiment": {
736 "description": "Represents the feeling associated with the entire te xt or entities in\nthe text.",
737 "id": "Sentiment",
738 "properties": {
739 "score": {
740 "description": "Sentiment score between -1.0 (negative senti ment) and 1.0\n(positive sentiment).",
741 "format": "float",
742 "type": "number"
743 },
744 "magnitude": {
745 "description": "A non-negative number in the [0, +inf) range , which represents\nthe absolute magnitude of sentiment regardless of score (pos itive or\nnegative).",
746 "format": "float",
747 "type": "number"
748 }
749 },
750 "type": "object"
751 },
752 "AnalyzeEntitiesRequest": {
753 "description": "The entity analysis request message.",
754 "id": "AnalyzeEntitiesRequest",
755 "properties": {
756 "encodingType": {
757 "description": "The encoding type used by the API to calcula te offsets.",
758 "enum": [
759 "NONE",
760 "UTF8",
761 "UTF16",
762 "UTF32"
763 ],
764 "enumDescriptions": [
765 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
766 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are example s of languages\nthat use this encoding natively.",
767 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript a re examples of\nlanguages that use this encoding natively.",
768 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
769 ],
770 "type": "string"
771 },
772 "document": {
773 "$ref": "Document",
774 "description": "Input document."
775 }
776 },
777 "type": "object"
778 },
779 "AnalyzeEntitySentimentResponse": {
780 "description": "The entity-level sentiment analysis response message .",
781 "id": "AnalyzeEntitySentimentResponse",
782 "properties": {
783 "language": {
784 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automat ically-detected language.\nSee Document.language field for more details.",
785 "type": "string"
786 },
787 "entities": {
788 "description": "The recognized entities in the input documen t with associated sentiments.",
789 "items": {
790 "$ref": "Entity"
791 },
792 "type": "array"
793 }
794 },
795 "type": "object"
796 },
797 "AnalyzeEntitySentimentRequest": {
798 "description": "The entity-level sentiment analysis request message. ",
799 "id": "AnalyzeEntitySentimentRequest",
800 "properties": {
801 "encodingType": {
802 "description": "The encoding type used by the API to calcula te offsets.",
803 "enum": [
804 "NONE",
805 "UTF8",
806 "UTF16",
807 "UTF32"
808 ],
809 "enumDescriptions": [
810 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
811 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are example s of languages\nthat use this encoding natively.",
812 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript a re examples of\nlanguages that use this encoding natively.",
813 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
814 ],
815 "type": "string"
816 },
817 "document": {
818 "$ref": "Document",
819 "description": "Input document."
820 }
821 },
822 "type": "object"
823 },
426 "PartOfSpeech": { 824 "PartOfSpeech": {
427 "description": "Represents part of speech information for a token.", 825 "description": "Represents part of speech information for a token.",
428 "id": "PartOfSpeech", 826 "id": "PartOfSpeech",
429 "properties": { 827 "properties": {
430 "mood": {
431 "description": "The grammatical mood.",
432 "enum": [
433 "MOOD_UNKNOWN",
434 "CONDITIONAL_MOOD",
435 "IMPERATIVE",
436 "INDICATIVE",
437 "INTERROGATIVE",
438 "JUSSIVE",
439 "SUBJUNCTIVE"
440 ],
441 "enumDescriptions": [
442 "Mood is not applicable in the analyzed language or is n ot predicted.",
443 "Conditional",
444 "Imperative",
445 "Indicative",
446 "Interrogative",
447 "Jussive",
448 "Subjunctive"
449 ],
450 "type": "string"
451 },
452 "tag": {
453 "description": "The part of speech tag.",
454 "enum": [
455 "UNKNOWN",
456 "ADJ",
457 "ADP",
458 "ADV",
459 "CONJ",
460 "DET",
461 "NOUN",
462 "NUM",
463 "PRON",
464 "PRT",
465 "PUNCT",
466 "VERB",
467 "X",
468 "AFFIX"
469 ],
470 "enumDescriptions": [
471 "Unknown",
472 "Adjective",
473 "Adposition (preposition and postposition)",
474 "Adverb",
475 "Conjunction",
476 "Determiner",
477 "Noun (common and proper)",
478 "Cardinal number",
479 "Pronoun",
480 "Particle or other function word",
481 "Punctuation",
482 "Verb (all tenses and modes)",
483 "Other: foreign words, typos, abbreviations",
484 "Affix"
485 ],
486 "type": "string"
487 },
488 "gender": {
489 "description": "The grammatical gender.",
490 "enum": [
491 "GENDER_UNKNOWN",
492 "FEMININE",
493 "MASCULINE",
494 "NEUTER"
495 ],
496 "enumDescriptions": [
497 "Gender is not applicable in the analyzed language or is not predicted.",
498 "Feminine",
499 "Masculine",
500 "Neuter"
501 ],
502 "type": "string"
503 },
504 "person": { 828 "person": {
505 "description": "The grammatical person.", 829 "description": "The grammatical person.",
506 "enum": [ 830 "enum": [
507 "PERSON_UNKNOWN", 831 "PERSON_UNKNOWN",
508 "FIRST", 832 "FIRST",
509 "SECOND", 833 "SECOND",
510 "THIRD", 834 "THIRD",
511 "REFLEXIVE_PERSON" 835 "REFLEXIVE_PERSON"
512 ], 836 ],
513 "enumDescriptions": [ 837 "enumDescriptions": [
(...skipping 165 matching lines...)
679 "IMPERFECTIVE", 1003 "IMPERFECTIVE",
680 "PROGRESSIVE" 1004 "PROGRESSIVE"
681 ], 1005 ],
682 "enumDescriptions": [ 1006 "enumDescriptions": [
683 "Aspect is not applicable in the analyzed language or is not predicted.", 1007 "Aspect is not applicable in the analyzed language or is not predicted.",
684 "Perfective", 1008 "Perfective",
685 "Imperfective", 1009 "Imperfective",
686 "Progressive" 1010 "Progressive"
687 ], 1011 ],
688 "type": "string" 1012 "type": "string"
1013 },
1014 "mood": {
1015 "description": "The grammatical mood.",
1016 "enum": [
1017 "MOOD_UNKNOWN",
1018 "CONDITIONAL_MOOD",
1019 "IMPERATIVE",
1020 "INDICATIVE",
1021 "INTERROGATIVE",
1022 "JUSSIVE",
1023 "SUBJUNCTIVE"
1024 ],
1025 "enumDescriptions": [
1026 "Mood is not applicable in the analyzed language or is n ot predicted.",
1027 "Conditional",
1028 "Imperative",
1029 "Indicative",
1030 "Interrogative",
1031 "Jussive",
1032 "Subjunctive"
1033 ],
1034 "type": "string"
1035 },
1036 "tag": {
1037 "description": "The part of speech tag.",
1038 "enum": [
1039 "UNKNOWN",
1040 "ADJ",
1041 "ADP",
1042 "ADV",
1043 "CONJ",
1044 "DET",
1045 "NOUN",
1046 "NUM",
1047 "PRON",
1048 "PRT",
1049 "PUNCT",
1050 "VERB",
1051 "X",
1052 "AFFIX"
1053 ],
1054 "enumDescriptions": [
1055 "Unknown",
1056 "Adjective",
1057 "Adposition (preposition and postposition)",
1058 "Adverb",
1059 "Conjunction",
1060 "Determiner",
1061 "Noun (common and proper)",
1062 "Cardinal number",
1063 "Pronoun",
1064 "Particle or other function word",
1065 "Punctuation",
1066 "Verb (all tenses and modes)",
1067 "Other: foreign words, typos, abbreviations",
1068 "Affix"
1069 ],
1070 "type": "string"
1071 },
1072 "gender": {
1073 "description": "The grammatical gender.",
1074 "enum": [
1075 "GENDER_UNKNOWN",
1076 "FEMININE",
1077 "MASCULINE",
1078 "NEUTER"
1079 ],
1080 "enumDescriptions": [
1081 "Gender is not applicable in the analyzed language or is not predicted.",
1082 "Feminine",
1083 "Masculine",
1084 "Neuter"
1085 ],
1086 "type": "string"
689 } 1087 }
690 }, 1088 },
691 "type": "object" 1089 "type": "object"
692 }, 1090 },
693 "AnalyzeSyntaxRequest": { 1091 "AnalyzeSyntaxRequest": {
694 "description": "The syntax analysis request message.", 1092 "description": "The syntax analysis request message.",
695 "id": "AnalyzeSyntaxRequest", 1093 "id": "AnalyzeSyntaxRequest",
696 "properties": { 1094 "properties": {
697 "encodingType": { 1095 "encodingType": {
698 "description": "The encoding type used by the API to calcula te offsets.", 1096 "description": "The encoding type used by the API to calcula te offsets.",
699 "enum": [ 1097 "enum": [
700 "NONE", 1098 "NONE",
701 "UTF8", 1099 "UTF8",
702 "UTF16",
703 "UTF32"
704 ],
705 "enumDescriptions": [
706 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
707 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are example s of languages\nthat use this encoding natively.",
708 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript a re examples of\nlanguages that use this encoding natively.",
709 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
710 ],
711 "type": "string"
712 },
713 "document": {
714 "$ref": "Document",
715 "description": "Input document."
716 }
717 },
718 "type": "object"
719 },
720 "AnalyzeSentimentResponse": {
721 "description": "The sentiment analysis response message.",
722 "id": "AnalyzeSentimentResponse",
723 "properties": {
724 "documentSentiment": {
725 "$ref": "Sentiment",
726 "description": "The overall sentiment of the input document. "
727 },
728 "language": {
729 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automat ically-detected language.\nSee Document.language field for more details.",
730 "type": "string"
731 },
732 "sentences": {
733 "description": "The sentiment for all the sentences in the d ocument.",
734 "items": {
735 "$ref": "Sentence"
736 },
737 "type": "array"
738 }
739 },
740 "type": "object"
741 },
742 "AnalyzeEntitiesResponse": {
743 "description": "The entity analysis response message.",
744 "id": "AnalyzeEntitiesResponse",
745 "properties": {
746 "language": {
747 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automat ically-detected language.\nSee Document.language field for more details.",
748 "type": "string"
749 },
750 "entities": {
751 "description": "The recognized entities in the input documen t.",
752 "items": {
753 "$ref": "Entity"
754 },
755 "type": "array"
756 }
757 },
758 "type": "object"
759 },
760 "Entity": {
761 "description": "Represents a phrase in the text that is a known enti ty, such as\na person, an organization, or location. The API associates informat ion, such\nas salience and mentions, with entities.",
762 "id": "Entity",
763 "properties": {
764 "sentiment": {
765 "$ref": "Sentiment",
766 "description": "For calls to AnalyzeEntitySentiment or if\nA nnotateTextRequest.Features.extract_entity_sentiment is set to\ntrue, this field will contain the aggregate sentiment expressed for this\nentity in the provided document."
767 },
768 "mentions": {
769 "description": "The mentions of this entity in the input doc ument. The API currently\nsupports proper noun mentions.",
770 "items": {
771 "$ref": "EntityMention"
772 },
773 "type": "array"
774 },
775 "name": {
776 "description": "The representative name for the entity.",
777 "type": "string"
778 },
779 "salience": {
780 "description": "The salience score associated with the entit y in the [0, 1.0] range.\n\nThe salience score for an entity provides informatio n about the\nimportance or centrality of that entity to the entire document text .\nScores closer to 0 are less salient, while scores closer to 1.0 are highly\ns alient.",
781 "format": "float",
782 "type": "number"
783 },
784 "type": {
785 "description": "The entity type.",
786 "enum": [
787 "UNKNOWN",
788 "PERSON",
789 "LOCATION",
790 "ORGANIZATION",
791 "EVENT",
792 "WORK_OF_ART",
793 "CONSUMER_GOOD",
794 "OTHER"
795 ],
796 "enumDescriptions": [
797 "Unknown",
798 "Person",
799 "Location",
800 "Organization",
801 "Event",
802 "Work of art",
803 "Consumer goods",
804 "Other types"
805 ],
806 "type": "string"
807 },
808 "metadata": {
809 "additionalProperties": {
810 "type": "string"
811 },
812 "description": "Metadata associated with the entity.\n\nCurr ently, Wikipedia URLs and Knowledge Graph MIDs are provided, if\navailable. The associated keys are \"wikipedia_url\" and \"mid\", respectively.",
813 "type": "object"
814 }
815 },
816 "type": "object"
817 },
818 "AnalyzeSyntaxResponse": {
819 "description": "The syntax analysis response message.",
820 "id": "AnalyzeSyntaxResponse",
821 "properties": {
822 "language": {
823 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automat ically-detected language.\nSee Document.language field for more details.",
824 "type": "string"
825 },
826 "sentences": {
827 "description": "Sentences in the input document.",
828 "items": {
829 "$ref": "Sentence"
830 },
831 "type": "array"
832 },
833 "tokens": {
834 "description": "Tokens, along with their syntactic informati on, in the input document.",
835 "items": {
836 "$ref": "Token"
837 },
838 "type": "array"
839 }
840 },
841 "type": "object"
842 },
843 "AnnotateTextRequest": {
844 "description": "The request message for the text annotation API, whi ch can perform multiple\nanalysis types (sentiment, entities, and syntax) in one call.",
845 "id": "AnnotateTextRequest",
846 "properties": {
847 "encodingType": {
848 "description": "The encoding type used by the API to calcula te offsets.",
849 "enum": [
850 "NONE",
851 "UTF8",
852 "UTF16",
853 "UTF32"
854 ],
855 "enumDescriptions": [
856 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
857 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are example s of languages\nthat use this encoding natively.",
858 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript a re examples of\nlanguages that use this encoding natively.",
859 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
860 ],
861 "type": "string"
862 },
863 "document": {
864 "$ref": "Document",
865 "description": "Input document."
866 },
867 "features": {
868 "$ref": "Features",
869 "description": "The enabled features."
870 }
871 },
872 "type": "object"
873 },
874 "AnalyzeSentimentRequest": {
875 "description": "The sentiment analysis request message.",
876 "id": "AnalyzeSentimentRequest",
877 "properties": {
878 "encodingType": {
879 "description": "The encoding type used by the API to calculate sentence offsets for the\nsentence sentiment.",
880 "enum": [
881 "NONE",
882 "UTF8",
883 "UTF16", 1100 "UTF16",
884 "UTF32" 1101 "UTF32"
885 ], 1102 ],
886 "enumDescriptions": [ 1103 "enumDescriptions": [
887 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.", 1104 "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
888 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.", 1105 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.",
889 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.", 1106 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.",
890 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively." 1107 "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
891 ], 1108 ],
892 "type": "string" 1109 "type": "string"
893 }, 1110 },
894 "document": { 1111 "document": {
895 "$ref": "Document", 1112 "$ref": "Document",
896 "description": "Input document." 1113 "description": "Input document."
897 } 1114 }
898 }, 1115 },
899 "type": "object" 1116 "type": "object"
900 }, 1117 },
901 "AnnotateTextResponse": { 1118 "AnalyzeSentimentResponse": {
902 "description": "The text annotations response message.", 1119 "description": "The sentiment analysis response message.",
903 "id": "AnnotateTextResponse", 1120 "id": "AnalyzeSentimentResponse",
904 "properties": { 1121 "properties": {
905 "entities": { 1122 "sentences": {
906 "description": "Entities, along with their semantic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_entities.", 1123 "description": "The sentiment for all the sentences in the document.",
907 "items": { 1124 "items": {
908 "$ref": "Entity" 1125 "$ref": "Sentence"
909 }, 1126 },
910 "type": "array" 1127 "type": "array"
911 }, 1128 },
912 "documentSentiment": { 1129 "documentSentiment": {
913 "$ref": "Sentiment", 1130 "$ref": "Sentiment",
914 "description": "The overall sentiment for the document. Populated if the user enables\nAnnotateTextRequest.Features.extract_document_sentiment." 1131 "description": "The overall sentiment of the input document."
915 }, 1132 },
916 "language": { 1133 "language": {
917 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee Document.language field for more details.", 1134 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee Document.language field for more details.",
918 "type": "string" 1135 "type": "string"
1136 }
1137 },
1138 "type": "object"
1139 },
1140 "AnalyzeEntitiesResponse": {
1141 "description": "The entity analysis response message.",
1142 "id": "AnalyzeEntitiesResponse",
1143 "properties": {
1144 "language": {
1145 "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee Document.language field for more details.",
1146 "type": "string"
919 }, 1147 },
920 "sentences": { 1148 "entities": {
921 "description": "Sentences in the input document. Populated if the user enables\nAnnotateTextRequest.Features.extract_syntax.", 1149 "description": "The recognized entities in the input document.",
922 "items": { 1150 "items": {
923 "$ref": "Sentence" 1151 "$ref": "Entity"
924 },
925 "type": "array"
926 },
927 "tokens": {
928 "description": "Tokens, along with their syntactic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_syntax.",
929 "items": {
930 "$ref": "Token"
931 }, 1152 },
932 "type": "array" 1153 "type": "array"
933 } 1154 }
934 }, 1155 },
935 "type": "object" 1156 "type": "object"
936 },
937 "DependencyEdge": {
938 "description": "Represents dependency parse tree information for a token.",
939 "id": "DependencyEdge",
940 "properties": {
941 "headTokenIndex": {
942 "description": "Represents the head of this token in the dependency tree.\nThis is the index of the token which has an arc going to this token.\nThe index is the position of the token in the array of tokens returned\nby the API method. If this token is a root token, then the\n`head_token_index` is its own index.",
943 "format": "int32",
944 "type": "integer"
945 },
946 "label": {
947 "description": "The parse label for the token.",
948 "enum": [
949 "UNKNOWN",
950 "ABBREV",
951 "ACOMP",
952 "ADVCL",
953 "ADVMOD",
954 "AMOD",
955 "APPOS",
956 "ATTR",
957 "AUX",
958 "AUXPASS",
959 "CC",
960 "CCOMP",
961 "CONJ",
962 "CSUBJ",
963 "CSUBJPASS",
964 "DEP",
965 "DET",
966 "DISCOURSE",
967 "DOBJ",
968 "EXPL",
969 "GOESWITH",
970 "IOBJ",
971 "MARK",
972 "MWE",
973 "MWV",
974 "NEG",
975 "NN",
976 "NPADVMOD",
977 "NSUBJ",
978 "NSUBJPASS",
979 "NUM",
980 "NUMBER",
981 "P",
982 "PARATAXIS",
983 "PARTMOD",
984 "PCOMP",
985 "POBJ",
986 "POSS",
987 "POSTNEG",
988 "PRECOMP",
989 "PRECONJ",
990 "PREDET",
991 "PREF",
992 "PREP",
993 "PRONL",
994 "PRT",
995 "PS",
996 "QUANTMOD",
997 "RCMOD",
998 "RCMODREL",
999 "RDROP",
1000 "REF",
1001 "REMNANT",
1002 "REPARANDUM",
1003 "ROOT",
1004 "SNUM",
1005 "SUFF",
1006 "TMOD",
1007 "TOPIC",
1008 "VMOD",
1009 "VOCATIVE",
1010 "XCOMP",
1011 "SUFFIX",
1012 "TITLE",
1013 "ADVPHMOD",
1014 "AUXCAUS",
1015 "AUXVV",
1016 "DTMOD",
1017 "FOREIGN",
1018 "KW",
1019 "LIST",
1020 "NOMC",
1021 "NOMCSUBJ",
1022 "NOMCSUBJPASS",
1023 "NUMC",
1024 "COP",
1025 "DISLOCATED"
1026 ],
1027 "enumDescriptions": [
1028 "Unknown",
1029 "Abbreviation modifier",
1030 "Adjectival complement",
1031 "Adverbial clause modifier",
1032 "Adverbial modifier",
1033 "Adjectival modifier of an NP",
1034 "Appositional modifier of an NP",
1035 "Attribute dependent of a copular verb",
1036 "Auxiliary (non-main) verb",
1037 "Passive auxiliary",
1038 "Coordinating conjunction",
1039 "Clausal complement of a verb or adjective",
1040 "Conjunct",
1041 "Clausal subject",
1042 "Clausal passive subject",
1043 "Dependency (unable to determine)",
1044 "Determiner",
1045 "Discourse",
1046 "Direct object",
1047 "Expletive",
1048 "Goes with (part of a word in a text not well edited)",
1049 "Indirect object",
1050 "Marker (word introducing a subordinate clause)",
1051 "Multi-word expression",
1052 "Multi-word verbal expression",
1053 "Negation modifier",
1054 "Noun compound modifier",
1055 "Noun phrase used as an adverbial modifier",
1056 "Nominal subject",
1057 "Passive nominal subject",
1058 "Numeric modifier of a noun",
1059 "Element of compound number",
1060 "Punctuation mark",
1061 "Parataxis relation",
1062 "Participial modifier",
1063 "The complement of a preposition is a clause",
1064 "Object of a preposition",
1065 "Possession modifier",
1066 "Postverbal negative particle",
1067 "Predicate complement",
1068 "Preconjunct",
1069 "Predeterminer",
1070 "Prefix",
1071 "Prepositional modifier",
1072 "The relationship between a verb and verbal morpheme",
1073 "Particle",
1074 "Associative or possessive marker",
1075 "Quantifier phrase modifier",
1076 "Relative clause modifier",
1077 "Complementizer in relative clause",
1078 "Ellipsis without a preceding predicate",
1079 "Referent",
1080 "Remnant",
1081 "Reparandum",
1082 "Root",
1083 "Suffix specifying a unit of number",
1084 "Suffix",
1085 "Temporal modifier",
1086 "Topic marker",
1087 "Clause headed by an infinite form of the verb that modifies a noun",
1088 "Vocative",
1089 "Open clausal complement",
1090 "Name suffix",
1091 "Name title",
1092 "Adverbial phrase modifier",
1093 "Causative auxiliary",
1094 "Helper auxiliary",
1095 "Rentaishi (Prenominal modifier)",
1096 "Foreign words",
1097 "Keyword",
1098 "List for chains of comparable items",
1099 "Nominalized clause",
1100 "Nominalized clausal subject",
1101 "Nominalized clausal passive",
1102 "Compound of numeric modifier",
1103 "Copula",
1104 "Dislocated relation (for fronted/topicalized elements)"
1105 ],
1106 "type": "string"
1107 }
1108 },
1109 "type": "object"
1110 },
1111 "TextSpan": {
1112 "description": "Represents an output piece of text.",
1113 "id": "TextSpan",
1114 "properties": {
1115 "beginOffset": {
1116 "description": "The API calculates the beginning offset of the content in the original\ndocument according to the EncodingType specified in the API request.",
1117 "format": "int32",
1118 "type": "integer"
1119 },
1120 "content": {
1121 "description": "The content of the output text.",
1122 "type": "string"
1123 }
1124 },
1125 "type": "object"
1126 },
1127 "Token": {
1128 "description": "Represents the smallest syntactic building block of the text.",
1129 "id": "Token",
1130 "properties": {
1131 "partOfSpeech": {
1132 "$ref": "PartOfSpeech",
1133 "description": "Parts of speech tag for this token."
1134 },
1135 "text": {
1136 "$ref": "TextSpan",
1137 "description": "The token text."
1138 },
1139 "dependencyEdge": {
1140 "$ref": "DependencyEdge",
1141 "description": "Dependency tree parse for this token."
1142 },
1143 "lemma": {
1144 "description": "[Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.",
1145 "type": "string"
1146 }
1147 },
1148 "type": "object"
1149 } 1157 }
1150 }, 1158 },
1151 "servicePath": "", 1159 "servicePath": "",
1152 "title": "Google Cloud Natural Language API", 1160 "title": "Google Cloud Natural Language API",
1153 "version": "v1beta2" 1161 "version": "v1beta2"
1154 } 1162 }
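A note on how the schemas in this hunk fit together: AnnotateTextRequest carries a Document, the Features to enable, and an EncodingType; the response returns entities (Entity, with salience, mentions, and metadata), documentSentiment, sentences, and tokens (Token, each with a TextSpan, lemma, partOfSpeech, and DependencyEdge). Below is a minimal sketch in Python with google-api-python-client; it assumes the documents.annotateText method and the camelCase JSON field names defined elsewhere in this discovery document (not visible in this hunk), plus a placeholder API key.

    # Minimal sketch; "YOUR_API_KEY" and the chosen feature flags are
    # illustrative assumptions, not values taken from this diff.
    from googleapiclient.discovery import build

    service = build("language", "v1beta2", developerKey="YOUR_API_KEY")

    # AnnotateTextRequest: one Document, the Features to enable, and an
    # EncodingType so that TextSpan.beginOffset values are meaningful.
    body = {
        "document": {"type": "PLAIN_TEXT",
                     "content": "Google was founded in Mountain View."},
        "features": {"extractEntities": True,
                     "extractDocumentSentiment": True,
                     "extractSyntax": True},
        "encodingType": "UTF8",
    }
    response = service.documents().annotateText(body=body).execute()

    # Entity: name, type, salience in [0, 1.0], mentions, and metadata
    # keys such as "wikipedia_url" and "mid" when available.
    for entity in response.get("entities", []):
        print(entity["name"], entity["type"], entity.get("salience"))

    # documentSentiment is populated because extractDocumentSentiment is set.
    print(response.get("documentSentiment"))

    # Token: lemma plus DependencyEdge (headTokenIndex and label); a root
    # token's headTokenIndex is its own index.
    for token in response.get("tokens", []):
        edge = token["dependencyEdge"]
        print(token["lemma"], edge["headTokenIndex"], edge["label"])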