Chromium Code Reviews

Unified Diff: discovery/googleapis/language__v1.json

Issue 2695743002: Api-roll 45: 2017-02-13 (Closed)
Patch Set: reverted local changes to pubspec file (created 3 years, 10 months ago)
Index: discovery/googleapis/language__v1.json
diff --git a/discovery/googleapis/language__v1.json b/discovery/googleapis/language__v1.json
index cee2ed0e030d88590b190c4145016573081f06e8..0ac71ceb656e947469c803bdb28eea7c47e05675 100644
--- a/discovery/googleapis/language__v1.json
+++ b/discovery/googleapis/language__v1.json
@@ -25,35 +25,21 @@
"ownerDomain": "google.com",
"ownerName": "Google",
"parameters": {
- "access_token": {
- "description": "OAuth access token.",
- "location": "query",
- "type": "string"
- },
- "prettyPrint": {
- "default": "true",
- "description": "Returns response with indentations and line breaks.",
- "location": "query",
- "type": "boolean"
- },
- "key": {
- "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
- "location": "query",
- "type": "string"
- },
- "quotaUser": {
- "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
+ "callback": {
+ "description": "JSONP",
"location": "query",
"type": "string"
},
- "pp": {
- "default": "true",
- "description": "Pretty-print response.",
- "location": "query",
- "type": "boolean"
- },
- "fields": {
- "description": "Selector specifying which fields to include in a partial response.",
+ "$.xgafv": {
+ "description": "V1 error format.",
+ "enum": [
+ "1",
+ "2"
+ ],
+ "enumDescriptions": [
+ "v1 error format",
+ "v2 error format"
+ ],
"location": "query",
"type": "string"
},
@@ -73,112 +59,126 @@
"location": "query",
"type": "string"
},
- "$.xgafv": {
- "description": "V1 error format.",
- "enum": [
- "1",
- "2"
- ],
- "enumDescriptions": [
- "v1 error format",
- "v2 error format"
- ],
+ "access_token": {
+ "description": "OAuth access token.",
"location": "query",
"type": "string"
},
- "callback": {
- "description": "JSONP",
+ "key": {
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
"location": "query",
"type": "string"
},
- "oauth_token": {
- "description": "OAuth 2.0 token for the current user.",
+ "quotaUser": {
+ "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.",
"location": "query",
"type": "string"
},
- "uploadType": {
- "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").",
+ "pp": {
+ "default": "true",
+ "description": "Pretty-print response.",
"location": "query",
- "type": "string"
+ "type": "boolean"
},
"bearer_token": {
"description": "OAuth bearer token.",
"location": "query",
"type": "string"
},
+ "oauth_token": {
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query",
+ "type": "string"
+ },
"upload_protocol": {
"description": "Upload protocol for media (e.g. \"raw\", \"multipart\").",
"location": "query",
"type": "string"
+ },
+ "prettyPrint": {
+ "default": "true",
+ "description": "Returns response with indentations and line breaks.",
+ "location": "query",
+ "type": "boolean"
+ },
+ "uploadType": {
+ "description": "Legacy upload protocol for media (e.g. \"media\", \"multipart\").",
+ "location": "query",
+ "type": "string"
+ },
+ "fields": {
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query",
+ "type": "string"
}
},
"protocol": "rest",
"resources": {
"documents": {
"methods": {
- "analyzeSentiment": {
- "description": "Analyzes the sentiment of the provided text.",
+ "annotateText": {
+ "description": "A convenience method that provides all the features that analyzeSentiment,\nanalyzeEntities, and analyzeSyntax provide in one call.",
"httpMethod": "POST",
- "id": "language.documents.analyzeSentiment",
+ "id": "language.documents.annotateText",
"parameterOrder": [],
"parameters": {},
- "path": "v1/documents:analyzeSentiment",
+ "path": "v1/documents:annotateText",
"request": {
- "$ref": "AnalyzeSentimentRequest"
+ "$ref": "AnnotateTextRequest"
},
"response": {
- "$ref": "AnalyzeSentimentResponse"
+ "$ref": "AnnotateTextResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "analyzeSyntax": {
- "description": "Analyzes the syntax of the text and provides sentence boundaries and\ntokenization along with part of speech tags, dependency trees, and other\nproperties.",
+ "analyzeEntities": {
+ "description": "Finds named entities (currently finds proper names) in the text,\nentity types, salience, mentions for each entity, and other properties.",
"httpMethod": "POST",
- "id": "language.documents.analyzeSyntax",
+ "id": "language.documents.analyzeEntities",
"parameterOrder": [],
"parameters": {},
- "path": "v1/documents:analyzeSyntax",
+ "path": "v1/documents:analyzeEntities",
"request": {
- "$ref": "AnalyzeSyntaxRequest"
+ "$ref": "AnalyzeEntitiesRequest"
},
"response": {
- "$ref": "AnalyzeSyntaxResponse"
+ "$ref": "AnalyzeEntitiesResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "annotateText": {
- "description": "A convenience method that provides all the features that analyzeSentiment,\nanalyzeEntities, and analyzeSyntax provide in one call.",
+ "analyzeSyntax": {
+ "description": "Analyzes the syntax of the text and provides sentence boundaries and\ntokenization along with part of speech tags, dependency trees, and other\nproperties.",
"httpMethod": "POST",
- "id": "language.documents.annotateText",
+ "id": "language.documents.analyzeSyntax",
"parameterOrder": [],
"parameters": {},
- "path": "v1/documents:annotateText",
+ "path": "v1/documents:analyzeSyntax",
"request": {
- "$ref": "AnnotateTextRequest"
+ "$ref": "AnalyzeSyntaxRequest"
},
"response": {
- "$ref": "AnnotateTextResponse"
+ "$ref": "AnalyzeSyntaxResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
]
},
- "analyzeEntities": {
- "description": "Finds named entities (currently finds proper names) in the text,\nentity types, salience, mentions for each entity, and other properties.",
+ "analyzeSentiment": {
+ "description": "Analyzes the sentiment of the provided text.",
"httpMethod": "POST",
- "id": "language.documents.analyzeEntities",
+ "id": "language.documents.analyzeSentiment",
"parameterOrder": [],
"parameters": {},
- "path": "v1/documents:analyzeEntities",
+ "path": "v1/documents:analyzeSentiment",
"request": {
- "$ref": "AnalyzeEntitiesRequest"
+ "$ref": "AnalyzeSentimentRequest"
},
"response": {
- "$ref": "AnalyzeEntitiesResponse"
+ "$ref": "AnalyzeSentimentResponse"
},
"scopes": [
"https://www.googleapis.com/auth/cloud-platform"
@@ -187,37 +187,27 @@
}
}
},
- "revision": "20170103",
+ "revision": "20170206",
"rootUrl": "https://language.googleapis.com/",
"schemas": {
- "Document": {
- "description": "################################################################ #\n\nRepresents the input to API methods.",
- "id": "Document",
+ "Token": {
+ "description": "Represents the smallest syntactic building block of the text.",
+ "id": "Token",
"properties": {
- "language": {
- "description": "The language of the document (if not specified, the language is\nautomatically detected). Both ISO and BCP-47 language codes are\naccepted.<br>\n**Current Language Restrictions:**\n\n * Only English, Spanish, and Japanese textual content are supported.\nIf the language (either specified by the caller or automatically detected)\nis not supported by the called API method, an `INVALID_ARGUMENT` error\nis returned.",
- "type": "string"
+ "partOfSpeech": {
+ "$ref": "PartOfSpeech",
+ "description": "Parts of speech tag for this token."
},
- "gcsContentUri": {
- "description": "The Google Cloud Storage URI where the file content is located.\nThis URI must be of the form: gs://bucket_name/object_name. For more\ndetails, see https://cloud.google.com/storage/docs/reference-uris.\nNOTE: Cloud Storage object versioning is not supported.",
- "type": "string"
+ "text": {
+ "$ref": "TextSpan",
+ "description": "The token text."
},
- "type": {
- "description": "Required. If the type is not set or is `TYPE_UNSPECIFIED`,\nreturns an `INVALID_ARGUMENT` error.",
- "enum": [
- "TYPE_UNSPECIFIED",
- "PLAIN_TEXT",
- "HTML"
- ],
- "enumDescriptions": [
- "The content type is not specified.",
- "Plain text",
- "HTML"
- ],
- "type": "string"
+ "dependencyEdge": {
+ "$ref": "DependencyEdge",
+ "description": "Dependency tree parse for this token."
},
- "content": {
- "description": "The content of the input in string format.",
+ "lemma": {
+ "description": "[Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.",
"type": "string"
}
},
@@ -243,11 +233,6 @@
"description": "The `Status` type defines a logical error model that is suitable for different\nprogramming environments, including REST APIs and RPC APIs. It is used by\n[gRPC](https://github.com/grpc). The error model is designed to be:\n\n- Simple to use and understand for most users\n- Flexible enough to meet unexpected needs\n\n# Overview\n\nThe `Status` message contains three pieces of data: error code, error message,\nand error details. The error code should be an enum value of\ngoogle.rpc.Code, but it may accept additional error codes if needed. The\nerror message should be a developer-facing English message that helps\ndevelopers *understand* and *resolve* the error. If a localized user-facing\nerror message is needed, put the localized message in the error details or\nlocalize it in the client. The optional error details may contain arbitrary\ninformation about the error. There is a predefined set of error detail types\nin the package `google.rpc` which can be used for common error conditions.\n\n# Language mapping\n\nThe `Status` message is the logical representation of the error model, but it\nis not necessarily the actual wire format. When the `Status` message is\nexposed in different client libraries and different wire protocols, it can be\nmapped differently. For example, it will likely be mapped to some exceptions\nin Java, but more likely mapped to some error codes in C.\n\n# Other uses\n\nThe error model and the `Status` message can be used in a variety of\nenvironments, either with or without APIs, to provide a\nconsistent developer experience across different environments.\n\nExample uses of this error model include:\n\n- Partial errors. If a service needs to return partial errors to the client,\n it may embed the `Status` in the normal response to indicate the partial\n errors.\n\n- Workflow errors. A typical workflow has multiple steps. Each step may\n have a `Status` message for error reporting purpose.\n\n- Batch operations. If a client uses batch request and batch response, the\n `Status` message should be used directly inside batch response, one for\n each error sub-response.\n\n- Asynchronous operations. If an API call embeds asynchronous operation\n results in its response, the status of those operations should be\n represented directly using the `Status` message.\n\n- Logging. If some API errors are stored in logs, the message `Status` could\n be used directly after any stripping needed for security/privacy reasons.",
"id": "Status",
"properties": {
- "code": {
- "description": "The status code, which should be an enum value of google.rpc.Code.",
- "format": "int32",
- "type": "integer"
- },
"details": {
"description": "A list of messages that carry the error details. There will be a\ncommon set of message types for APIs to use.",
"items": {
@@ -259,6 +244,11 @@
},
"type": "array"
},
+ "code": {
+ "description": "The status code, which should be an enum value of google.rpc.Code.",
+ "format": "int32",
+ "type": "integer"
+ },
"message": {
"description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\ngoogle.rpc.Status.details field, or localized by the client.",
"type": "string"
@@ -266,33 +256,6 @@
},
"type": "object"
},
- "AnalyzeEntitiesRequest": {
- "description": "The entity analysis request message.",
- "id": "AnalyzeEntitiesRequest",
- "properties": {
- "document": {
- "$ref": "Document",
- "description": "Input document."
- },
- "encodingType": {
- "description": "The encoding type used by the API to calculate offsets.",
- "enum": [
- "NONE",
- "UTF8",
- "UTF16",
- "UTF32"
- ],
- "enumDescriptions": [
- "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
- "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.",
- "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.",
- "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
- ],
- "type": "string"
- }
- },
- "type": "object"
- },
"EntityMention": {
"description": "Represents a mention for an entity in the text. Currently, proper noun\nmentions are supported.",
"id": "EntityMention",
@@ -318,289 +281,77 @@
},
"type": "object"
},
- "AnalyzeSentimentRequest": {
- "description": "The sentiment analysis request message.",
- "id": "AnalyzeSentimentRequest",
+ "Features": {
+ "description": "All available features for sentiment, syntax, and semantic analysis.\nSetting each one to true will enable that specific analysis for the input.",
+ "id": "Features",
"properties": {
- "document": {
- "$ref": "Document",
- "description": "Input document. Currently, `analyzeSentiment` only supports English text\n(Document.language=\"EN\")."
+ "extractSyntax": {
+ "description": "Extract syntax information.",
+ "type": "boolean"
},
- "encodingType": {
- "description": "The encoding type used by the API to calculate sentence offsets.",
- "enum": [
- "NONE",
- "UTF8",
- "UTF16",
- "UTF32"
- ],
- "enumDescriptions": [
- "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
- "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.",
- "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.",
- "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
- ],
- "type": "string"
+ "extractDocumentSentiment": {
+ "description": "Extract document-level sentiment.",
+ "type": "boolean"
+ },
+ "extractEntities": {
+ "description": "Extract entities.",
+ "type": "boolean"
}
},
"type": "object"
},
- "AnalyzeSentimentResponse": {
- "description": "The sentiment analysis response message.",
- "id": "AnalyzeSentimentResponse",
+ "Document": {
+ "description": "################################################################ #\n\nRepresents the input to API methods.",
+ "id": "Document",
"properties": {
- "documentSentiment": {
- "$ref": "Sentiment",
- "description": "The overall sentiment of the input document."
- },
"language": {
- "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.",
+ "description": "The language of the document (if not specified, the language is\nautomatically detected). Both ISO and BCP-47 language codes are\naccepted.<br>\n**Current Language Restrictions:**\n\n * Only English, Spanish, and Japanese textual content are supported.\nIf the language (either specified by the caller or automatically detected)\nis not supported by the called API method, an `INVALID_ARGUMENT` error\nis returned.",
"type": "string"
},
- "sentences": {
- "description": "The sentiment for all the sentences in the document.",
- "items": {
- "$ref": "Sentence"
- },
- "type": "array"
- }
- },
- "type": "object"
- },
- "AnalyzeSyntaxRequest": {
- "description": "The syntax analysis request message.",
- "id": "AnalyzeSyntaxRequest",
- "properties": {
- "document": {
- "$ref": "Document",
- "description": "Input document."
- },
- "encodingType": {
- "description": "The encoding type used by the API to calculate offsets.",
+ "type": {
+ "description": "Required. If the type is not set or is `TYPE_UNSPECIFIED`,\nreturns an `INVALID_ARGUMENT` error.",
"enum": [
- "NONE",
- "UTF8",
- "UTF16",
- "UTF32"
+ "TYPE_UNSPECIFIED",
+ "PLAIN_TEXT",
+ "HTML"
],
"enumDescriptions": [
- "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
- "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.",
- "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.",
- "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
+ "The content type is not specified.",
+ "Plain text",
+ "HTML"
],
"type": "string"
- }
- },
- "type": "object"
- },
- "DependencyEdge": {
- "description": "Represents dependency parse tree information for a token. (For more\ninformation on dependency labels, see\nhttp://www.aclweb.org/anthology/P13-2017",
- "id": "DependencyEdge",
- "properties": {
- "headTokenIndex": {
- "description": "Represents the head of this token in the dependency tree.\nThis is the index of the token which has an arc going to this token.\nThe index is the position of the token in the array of tokens returned\nby the API method. If this token is a root token, then the\n`head_token_index` is its own index.",
- "format": "int32",
- "type": "integer"
},
- "label": {
- "description": "The parse label for the token.",
- "enum": [
- "UNKNOWN",
- "ABBREV",
- "ACOMP",
- "ADVCL",
- "ADVMOD",
- "AMOD",
- "APPOS",
- "ATTR",
- "AUX",
- "AUXPASS",
- "CC",
- "CCOMP",
- "CONJ",
- "CSUBJ",
- "CSUBJPASS",
- "DEP",
- "DET",
- "DISCOURSE",
- "DOBJ",
- "EXPL",
- "GOESWITH",
- "IOBJ",
- "MARK",
- "MWE",
- "MWV",
- "NEG",
- "NN",
- "NPADVMOD",
- "NSUBJ",
- "NSUBJPASS",
- "NUM",
- "NUMBER",
- "P",
- "PARATAXIS",
- "PARTMOD",
- "PCOMP",
- "POBJ",
- "POSS",
- "POSTNEG",
- "PRECOMP",
- "PRECONJ",
- "PREDET",
- "PREF",
- "PREP",
- "PRONL",
- "PRT",
- "PS",
- "QUANTMOD",
- "RCMOD",
- "RCMODREL",
- "RDROP",
- "REF",
- "REMNANT",
- "REPARANDUM",
- "ROOT",
- "SNUM",
- "SUFF",
- "TMOD",
- "TOPIC",
- "VMOD",
- "VOCATIVE",
- "XCOMP",
- "SUFFIX",
- "TITLE",
- "ADVPHMOD",
- "AUXCAUS",
- "AUXVV",
- "DTMOD",
- "FOREIGN",
- "KW",
- "LIST",
- "NOMC",
- "NOMCSUBJ",
- "NOMCSUBJPASS",
- "NUMC",
- "COP",
- "DISLOCATED"
- ],
- "enumDescriptions": [
- "Unknown",
- "Abbreviation modifier",
- "Adjectival complement",
- "Adverbial clause modifier",
- "Adverbial modifier",
- "Adjectival modifier of an NP",
- "Appositional modifier of an NP",
- "Attribute dependent of a copular verb",
- "Auxiliary (non-main) verb",
- "Passive auxiliary",
- "Coordinating conjunction",
- "Clausal complement of a verb or adjective",
- "Conjunct",
- "Clausal subject",
- "Clausal passive subject",
- "Dependency (unable to determine)",
- "Determiner",
- "Discourse",
- "Direct object",
- "Expletive",
- "Goes with (part of a word in a text not well edited)",
- "Indirect object",
- "Marker (word introducing a subordinate clause)",
- "Multi-word expression",
- "Multi-word verbal expression",
- "Negation modifier",
- "Noun compound modifier",
- "Noun phrase used as an adverbial modifier",
- "Nominal subject",
- "Passive nominal subject",
- "Numeric modifier of a noun",
- "Element of compound number",
- "Punctuation mark",
- "Parataxis relation",
- "Participial modifier",
- "The complement of a preposition is a clause",
- "Object of a preposition",
- "Possession modifier",
- "Postverbal negative particle",
- "Predicate complement",
- "Preconjunt",
- "Predeterminer",
- "Prefix",
- "Prepositional modifier",
- "The relationship between a verb and verbal morpheme",
- "Particle",
- "Associative or possessive marker",
- "Quantifier phrase modifier",
- "Relative clause modifier",
- "Complementizer in relative clause",
- "Ellipsis without a preceding predicate",
- "Referent",
- "Remnant",
- "Reparandum",
- "Root",
- "Suffix specifying a unit of number",
- "Suffix",
- "Temporal modifier",
- "Topic marker",
- "Clause headed by an infinite form of the verb that modifies a noun",
- "Vocative",
- "Open clausal complement",
- "Name suffix",
- "Name title",
- "Adverbial phrase modifier",
- "Causative auxiliary",
- "Helper auxiliary",
- "Rentaishi (Prenominal modifier)",
- "Foreign words",
- "Keyword",
- "List for chains of comparable items",
- "Nominalized clause",
- "Nominalized clausal subject",
- "Nominalized clausal passive",
- "Compound of numeric modifier",
- "Copula",
- "Dislocated relation (for fronted/topicalized elements)"
- ],
+ "content": {
+ "description": "The content of the input in string format.",
+ "type": "string"
+ },
+ "gcsContentUri": {
+ "description": "The Google Cloud Storage URI where the file content is located.\nThis URI must be of the form: gs://bucket_name/object_name. For more\ndetails, see https://cloud.google.com/storage/docs/reference-uris.\nNOTE: Cloud Storage object versioning is not supported.",
"type": "string"
}
},
"type": "object"
},
- "AnalyzeSyntaxResponse": {
- "description": "The syntax analysis response message.",
- "id": "AnalyzeSyntaxResponse",
+ "Sentence": {
+ "description": "Represents a sentence in the input document.",
+ "id": "Sentence",
"properties": {
- "language": {
- "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.",
- "type": "string"
- },
- "tokens": {
- "description": "Tokens, along with their syntactic information, in the input document.",
- "items": {
- "$ref": "Token"
- },
- "type": "array"
+ "text": {
+ "$ref": "TextSpan",
+ "description": "The sentence text."
},
- "sentences": {
- "description": "Sentences in the input document.",
- "items": {
- "$ref": "Sentence"
- },
- "type": "array"
+ "sentiment": {
+ "$ref": "Sentiment",
+ "description": "For calls to AnalyzeSentiment or if\nAnnotateTextRequest.Features.extract_document_sentiment is set to\ntrue, this field will contain the sentiment for the sentence."
}
},
"type": "object"
},
- "AnnotateTextRequest": {
- "description": "The request message for the text annotation API, which can perform multiple\nanalysis types (sentiment, entities, and syntax) in one call.",
- "id": "AnnotateTextRequest",
+ "AnalyzeEntitiesRequest": {
+ "description": "The entity analysis request message.",
+ "id": "AnalyzeEntitiesRequest",
"properties": {
- "document": {
- "$ref": "Document",
- "description": "Input document."
- },
"encodingType": {
"description": "The encoding type used by the API to calculate offsets.",
"enum": [
@@ -617,43 +368,26 @@
],
"type": "string"
},
- "features": {
- "$ref": "Features",
- "description": "The enabled features."
- }
- },
- "type": "object"
- },
- "Sentence": {
- "description": "Represents a sentence in the input document.",
- "id": "Sentence",
- "properties": {
- "text": {
- "$ref": "TextSpan",
- "description": "The sentence text."
- },
- "sentiment": {
- "$ref": "Sentiment",
- "description": "For calls to AnalyzeSentiment or if\nAnnotateTextRequest.Features.extract_document_sentiment is set to\ntrue, this field will contain the sentiment for the sentence."
+ "document": {
+ "$ref": "Document",
+ "description": "Input document."
}
},
"type": "object"
},
- "Features": {
- "description": "All available features for sentiment, syntax, and semantic analysis.\nSetting each one to true will enable that specific analysis for the input.",
- "id": "Features",
+ "Sentiment": {
+ "description": "Represents the feeling associated with the entire text or entities in\nthe text.",
+ "id": "Sentiment",
"properties": {
- "extractDocumentSentiment": {
- "description": "Extract document-level sentiment.",
- "type": "boolean"
- },
- "extractEntities": {
- "description": "Extract entities.",
- "type": "boolean"
+ "score": {
+ "description": "Sentiment score between -1.0 (negative sentiment) and 1.0\n(positive sentiment).",
+ "format": "float",
+ "type": "number"
},
- "extractSyntax": {
- "description": "Extract syntax information.",
- "type": "boolean"
+ "magnitude": {
+ "description": "A non-negative number in the [0, +inf) range, which represents\nthe absolute magnitude of sentiment regardless of score (positive or\nnegative).",
+ "format": "float",
+ "type": "number"
}
},
"type": "object"
@@ -662,94 +396,6 @@
"description": "Represents part of speech information for a token. Parts of speech\nare as defined in\nhttp://www.lrec-conf.org/proceedings/lrec2012/pdf/274_Paper.pdf",
"id": "PartOfSpeech",
"properties": {
- "aspect": {
- "description": "The grammatical aspect.",
- "enum": [
- "ASPECT_UNKNOWN",
- "PERFECTIVE",
- "IMPERFECTIVE",
- "PROGRESSIVE"
- ],
- "enumDescriptions": [
- "Aspect is not applicable in the analyzed language or is not predicted.",
- "Perfective",
- "Imperfective",
- "Progressive"
- ],
- "type": "string"
- },
- "gender": {
- "description": "The grammatical gender.",
- "enum": [
- "GENDER_UNKNOWN",
- "FEMININE",
- "MASCULINE",
- "NEUTER"
- ],
- "enumDescriptions": [
- "Gender is not applicable in the analyzed language or is not predicted.",
- "Feminine",
- "Masculine",
- "Neuter"
- ],
- "type": "string"
- },
- "person": {
- "description": "The grammatical person.",
- "enum": [
- "PERSON_UNKNOWN",
- "FIRST",
- "SECOND",
- "THIRD",
- "REFLEXIVE_PERSON"
- ],
- "enumDescriptions": [
- "Person is not applicable in the analyzed language or is not predicted.",
- "First",
- "Second",
- "Third",
- "Reflexive"
- ],
- "type": "string"
- },
- "case": {
- "description": "The grammatical case.",
- "enum": [
- "CASE_UNKNOWN",
- "ACCUSATIVE",
- "ADVERBIAL",
- "COMPLEMENTIVE",
- "DATIVE",
- "GENITIVE",
- "INSTRUMENTAL",
- "LOCATIVE",
- "NOMINATIVE",
- "OBLIQUE",
- "PARTITIVE",
- "PREPOSITIONAL",
- "REFLEXIVE_CASE",
- "RELATIVE_CASE",
- "VOCATIVE"
- ],
- "enumDescriptions": [
- "Case is not applicable in the analyzed language or is not predicted.",
- "Accusative",
- "Adverbial",
- "Complementive",
- "Dative",
- "Genitive",
- "Instrumental",
- "Locative",
- "Nominative",
- "Oblique",
- "Partitive",
- "Prepositional",
- "Reflexive",
- "Relative",
- "Vocative"
- ],
- "type": "string"
- },
"form": {
"description": "The grammatical form.",
"enum": [
@@ -782,39 +428,51 @@
],
"type": "string"
},
- "tense": {
- "description": "The grammatical tense.",
+ "number": {
+ "description": "The grammatical number.",
"enum": [
- "TENSE_UNKNOWN",
- "CONDITIONAL_TENSE",
- "FUTURE",
- "PAST",
- "PRESENT",
- "IMPERFECT",
- "PLUPERFECT"
+ "NUMBER_UNKNOWN",
+ "SINGULAR",
+ "PLURAL",
+ "DUAL"
],
"enumDescriptions": [
- "Tense is not applicable in the analyzed language or is not predicted.",
- "Conditional",
- "Future",
- "Past",
- "Present",
- "Imperfect",
- "Pluperfect"
+ "Number is not applicable in the analyzed language or is not predicted.",
+ "Singular",
+ "Plural",
+ "Dual"
],
"type": "string"
},
- "proper": {
- "description": "The grammatical properness.",
+ "voice": {
+ "description": "The grammatical voice.",
"enum": [
- "PROPER_UNKNOWN",
- "PROPER",
- "NOT_PROPER"
+ "VOICE_UNKNOWN",
+ "ACTIVE",
+ "CAUSATIVE",
+ "PASSIVE"
],
"enumDescriptions": [
- "Proper is not applicable in the analyzed language or is not predicted.",
- "Proper",
- "Not proper"
+ "Voice is not applicable in the analyzed language or is not predicted.",
+ "Active",
+ "Causative",
+ "Passive"
+ ],
+ "type": "string"
+ },
+ "aspect": {
+ "description": "The grammatical aspect.",
+ "enum": [
+ "ASPECT_UNKNOWN",
+ "PERFECTIVE",
+ "IMPERFECTIVE",
+ "PROGRESSIVE"
+ ],
+ "enumDescriptions": [
+ "Aspect is not applicable in the analyzed language or is not predicted.",
+ "Perfective",
+ "Imperfective",
+ "Progressive"
],
"type": "string"
},
@@ -876,87 +534,194 @@
],
"type": "string"
},
- "number": {
- "description": "The grammatical number.",
+ "gender": {
+ "description": "The grammatical gender.",
"enum": [
- "NUMBER_UNKNOWN",
- "SINGULAR",
- "PLURAL",
- "DUAL"
+ "GENDER_UNKNOWN",
+ "FEMININE",
+ "MASCULINE",
+ "NEUTER"
],
"enumDescriptions": [
- "Number is not applicable in the analyzed language or is not predicted.",
- "Singular",
- "Plural",
- "Dual"
+ "Gender is not applicable in the analyzed language or is not predicted.",
+ "Feminine",
+ "Masculine",
+ "Neuter"
],
"type": "string"
},
- "reciprocity": {
- "description": "The grammatical reciprocity.",
+ "person": {
+ "description": "The grammatical person.",
"enum": [
- "RECIPROCITY_UNKNOWN",
- "RECIPROCAL",
- "NON_RECIPROCAL"
+ "PERSON_UNKNOWN",
+ "FIRST",
+ "SECOND",
+ "THIRD",
+ "REFLEXIVE_PERSON"
],
"enumDescriptions": [
- "Reciprocity is not applicable in the analyzed language or is not\npredicted.",
- "Reciprocal",
- "Non-reciprocal"
+ "Person is not applicable in the analyzed language or is not predicted.",
+ "First",
+ "Second",
+ "Third",
+ "Reflexive"
],
"type": "string"
},
- "voice": {
- "description": "The grammatical voice.",
+ "proper": {
+ "description": "The grammatical properness.",
"enum": [
- "VOICE_UNKNOWN",
- "ACTIVE",
- "CAUSATIVE",
- "PASSIVE"
+ "PROPER_UNKNOWN",
+ "PROPER",
+ "NOT_PROPER"
],
"enumDescriptions": [
- "Voice is not applicable in the analyzed language or is not predicted.",
- "Active",
- "Causative",
- "Passive"
+ "Proper is not applicable in the analyzed language or is not predicted.",
+ "Proper",
+ "Not proper"
+ ],
+ "type": "string"
+ },
+ "case": {
+ "description": "The grammatical case.",
+ "enum": [
+ "CASE_UNKNOWN",
+ "ACCUSATIVE",
+ "ADVERBIAL",
+ "COMPLEMENTIVE",
+ "DATIVE",
+ "GENITIVE",
+ "INSTRUMENTAL",
+ "LOCATIVE",
+ "NOMINATIVE",
+ "OBLIQUE",
+ "PARTITIVE",
+ "PREPOSITIONAL",
+ "REFLEXIVE_CASE",
+ "RELATIVE_CASE",
+ "VOCATIVE"
+ ],
+ "enumDescriptions": [
+ "Case is not applicable in the analyzed language or is not predicted.",
+ "Accusative",
+ "Adverbial",
+ "Complementive",
+ "Dative",
+ "Genitive",
+ "Instrumental",
+ "Locative",
+ "Nominative",
+ "Oblique",
+ "Partitive",
+ "Prepositional",
+ "Reflexive",
+ "Relative",
+ "Vocative"
+ ],
+ "type": "string"
+ },
+ "tense": {
+ "description": "The grammatical tense.",
+ "enum": [
+ "TENSE_UNKNOWN",
+ "CONDITIONAL_TENSE",
+ "FUTURE",
+ "PAST",
+ "PRESENT",
+ "IMPERFECT",
+ "PLUPERFECT"
+ ],
+ "enumDescriptions": [
+ "Tense is not applicable in the analyzed language or is not predicted.",
+ "Conditional",
+ "Future",
+ "Past",
+ "Present",
+ "Imperfect",
+ "Pluperfect"
+ ],
+ "type": "string"
+ },
+ "reciprocity": {
+ "description": "The grammatical reciprocity.",
+ "enum": [
+ "RECIPROCITY_UNKNOWN",
+ "RECIPROCAL",
+ "NON_RECIPROCAL"
+ ],
+ "enumDescriptions": [
+ "Reciprocity is not applicable in the analyzed language or is not\npredicted.",
+ "Reciprocal",
+ "Non-reciprocal"
+ ],
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "AnalyzeSyntaxRequest": {
+ "description": "The syntax analysis request message.",
+ "id": "AnalyzeSyntaxRequest",
+ "properties": {
+ "encodingType": {
+ "description": "The encoding type used by the API to calculate offsets.",
+ "enum": [
+ "NONE",
+ "UTF8",
+ "UTF16",
+ "UTF32"
+ ],
+ "enumDescriptions": [
+ "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
+ "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.",
+ "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.",
+ "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
],
"type": "string"
- }
- },
- "type": "object"
- },
- "AnnotateTextResponse": {
- "description": "The text annotations response message.",
- "id": "AnnotateTextResponse",
- "properties": {
- "entities": {
- "description": "Entities, along with their semantic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_entities.",
- "items": {
- "$ref": "Entity"
- },
- "type": "array"
},
+ "document": {
+ "$ref": "Document",
+ "description": "Input document."
+ }
+ },
+ "type": "object"
+ },
+ "AnalyzeSentimentResponse": {
+ "description": "The sentiment analysis response message.",
+ "id": "AnalyzeSentimentResponse",
+ "properties": {
"documentSentiment": {
"$ref": "Sentiment",
- "description": "The overall sentiment for the document. Populated if the user enables\nAnnotateTextRequest.Features.extract_document_sentiment."
+ "description": "The overall sentiment of the input document."
},
"language": {
"description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.",
"type": "string"
},
- "tokens": {
- "description": "Tokens, along with their syntactic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_syntax.",
+ "sentences": {
+ "description": "The sentiment for all the sentences in the document.",
"items": {
- "$ref": "Token"
+ "$ref": "Sentence"
},
"type": "array"
- },
- "sentences": {
- "description": "Sentences in the input document. Populated if the user enables\nAnnotateTextRequest.Features.extract_syntax.",
+ }
+ },
+ "type": "object"
+ },
+ "AnalyzeEntitiesResponse": {
+ "description": "The entity analysis response message.",
+ "id": "AnalyzeEntitiesResponse",
+ "properties": {
+ "entities": {
+ "description": "The recognized entities in the input document.",
"items": {
- "$ref": "Sentence"
+ "$ref": "Entity"
},
"type": "array"
+ },
+ "language": {
+ "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.",
+ "type": "string"
}
},
"type": "object"
@@ -965,18 +730,6 @@
"description": "Represents a phrase in the text that is a known entity, such as\na person, an organization, or location. The API associates information, such\nas salience and mentions, with entities.",
"id": "Entity",
"properties": {
- "metadata": {
- "additionalProperties": {
- "type": "string"
- },
- "description": "Metadata associated with the entity.\n\nCurrently, Wikipedia URLs and Knowledge Graph MIDs are provided, if\navailable. The associated keys are \"wikipedia_url\" and \"mid\", respectively.",
- "type": "object"
- },
- "salience": {
- "description": "The salience score associated with the entity in the [0, 1.0] range.\n\nThe salience score for an entity provides information about the\nimportance or centrality of that entity to the entire document text.\nScores closer to 0 are less salient, while scores closer to 1.0 are highly\nsalient.",
- "format": "float",
- "type": "number"
- },
"type": {
"description": "The entity type.",
"enum": [
@@ -1001,6 +754,18 @@
],
"type": "string"
},
+ "metadata": {
+ "additionalProperties": {
+ "type": "string"
+ },
+ "description": "Metadata associated with the entity.\n\nCurrently, Wikipedia URLs and Knowledge Graph MIDs are provided, if\navailable. The associated keys are \"wikipedia_url\" and \"mid\", respectively.",
+ "type": "object"
+ },
+ "salience": {
+ "description": "The salience score associated with the entity in the [0, 1.0] range.\n\nThe salience score for an entity provides information about the\nimportance or centrality of that entity to the entire document text.\nScores closer to 0 are less salient, while scores closer to 1.0 are highly\nsalient.",
+ "format": "float",
+ "type": "number"
+ },
"mentions": {
"description": "The mentions of this entity in the input document. The API currently\nsupports proper noun mentions.",
"items": {
@@ -1015,59 +780,294 @@
},
"type": "object"
},
- "Sentiment": {
- "description": "Represents the feeling associated with the entire text or entities in\nthe text.",
- "id": "Sentiment",
+ "AnalyzeSyntaxResponse": {
+ "description": "The syntax analysis response message.",
+ "id": "AnalyzeSyntaxResponse",
"properties": {
- "score": {
- "description": "Sentiment score between -1.0 (negative sentiment) and 1.0\n(positive sentiment).",
- "format": "float",
- "type": "number"
+ "sentences": {
+ "description": "Sentences in the input document.",
+ "items": {
+ "$ref": "Sentence"
+ },
+ "type": "array"
},
- "magnitude": {
- "description": "A non-negative number in the [0, +inf) range, which represents\nthe absolute magnitude of sentiment regardless of score (positive or\nnegative).",
- "format": "float",
- "type": "number"
+ "tokens": {
+ "description": "Tokens, along with their syntactic information, in the input document.",
+ "items": {
+ "$ref": "Token"
+ },
+ "type": "array"
+ },
+ "language": {
+ "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.",
+ "type": "string"
}
},
"type": "object"
},
- "Token": {
- "description": "Represents the smallest syntactic building block of the text.",
- "id": "Token",
+ "AnnotateTextRequest": {
+ "description": "The request message for the text annotation API, which can perform multiple\nanalysis types (sentiment, entities, and syntax) in one call.",
+ "id": "AnnotateTextRequest",
"properties": {
- "text": {
- "$ref": "TextSpan",
- "description": "The token text."
- },
- "partOfSpeech": {
- "$ref": "PartOfSpeech",
- "description": "Parts of speech tag for this token."
+ "encodingType": {
+ "description": "The encoding type used by the API to calculate offsets.",
+ "enum": [
+ "NONE",
+ "UTF8",
+ "UTF16",
+ "UTF32"
+ ],
+ "enumDescriptions": [
+ "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
+ "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.",
+ "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.",
+ "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
+ ],
+ "type": "string"
},
- "dependencyEdge": {
- "$ref": "DependencyEdge",
- "description": "Dependency tree parse for this token."
+ "document": {
+ "$ref": "Document",
+ "description": "Input document."
},
- "lemma": {
- "description": "[Lemma](https://en.wikipedia.org/wiki/Lemma_%28morphology%29) of the token.",
- "type": "string"
+ "features": {
+ "$ref": "Features",
+ "description": "The enabled features."
}
},
"type": "object"
},
- "AnalyzeEntitiesResponse": {
- "description": "The entity analysis response message.",
- "id": "AnalyzeEntitiesResponse",
+ "AnnotateTextResponse": {
+ "description": "The text annotations response message.",
+ "id": "AnnotateTextResponse",
"properties": {
+ "documentSentiment": {
+ "$ref": "Sentiment",
+ "description": "The overall sentiment for the document. Populated if the user enables\nAnnotateTextRequest.Features.extract_document_sentiment."
+ },
+ "language": {
+ "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.",
+ "type": "string"
+ },
+ "sentences": {
+ "description": "Sentences in the input document. Populated if the user enables\nAnnotateTextRequest.Features.extract_syntax.",
+ "items": {
+ "$ref": "Sentence"
+ },
+ "type": "array"
+ },
+ "tokens": {
+ "description": "Tokens, along with their syntactic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_syntax.",
+ "items": {
+ "$ref": "Token"
+ },
+ "type": "array"
+ },
"entities": {
- "description": "The recognized entities in the input document.",
+ "description": "Entities, along with their semantic information, in the input document.\nPopulated if the user enables\nAnnotateTextRequest.Features.extract_entities.",
"items": {
"$ref": "Entity"
},
"type": "array"
+ }
+ },
+ "type": "object"
+ },
+ "AnalyzeSentimentRequest": {
+ "description": "The sentiment analysis request message.",
+ "id": "AnalyzeSentimentRequest",
+ "properties": {
+ "encodingType": {
+ "description": "The encoding type used by the API to calculate sentence offsets.",
+ "enum": [
+ "NONE",
+ "UTF8",
+ "UTF16",
+ "UTF32"
+ ],
+ "enumDescriptions": [
+ "If `EncodingType` is not specified, encoding-dependent information (such as\n`begin_offset`) will be set at `-1`.",
+ "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-8 encoding of the input. C++ and Go are examples of languages\nthat use this encoding natively.",
+ "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-16 encoding of the input. Java and Javascript are examples of\nlanguages that use this encoding natively.",
+ "Encoding-dependent information (such as `begin_offset`) is calculated based\non the UTF-32 encoding of the input. Python is an example of a language\nthat uses this encoding natively."
+ ],
+ "type": "string"
},
- "language": {
- "description": "The language of the text, which will be the same as the language specified\nin the request or, if not specified, the automatically-detected language.\nSee `Document.language` field for more details.",
+ "document": {
+ "$ref": "Document",
+ "description": "Input document. Currently, `analyzeSentiment` only supports English text\n(Document.language=\"EN\")."
+ }
+ },
+ "type": "object"
+ },
+ "DependencyEdge": {
+ "description": "Represents dependency parse tree information for a token. (For more\ninformation on dependency labels, see\nhttp://www.aclweb.org/anthology/P13-2017",
+ "id": "DependencyEdge",
+ "properties": {
+ "headTokenIndex": {
+ "description": "Represents the head of this token in the dependency tree.\nThis is the index of the token which has an arc going to this token.\nThe index is the position of the token in the array of tokens returned\nby the API method. If this token is a root token, then the\n`head_token_index` is its own index.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "label": {
+ "description": "The parse label for the token.",
+ "enum": [
+ "UNKNOWN",
+ "ABBREV",
+ "ACOMP",
+ "ADVCL",
+ "ADVMOD",
+ "AMOD",
+ "APPOS",
+ "ATTR",
+ "AUX",
+ "AUXPASS",
+ "CC",
+ "CCOMP",
+ "CONJ",
+ "CSUBJ",
+ "CSUBJPASS",
+ "DEP",
+ "DET",
+ "DISCOURSE",
+ "DOBJ",
+ "EXPL",
+ "GOESWITH",
+ "IOBJ",
+ "MARK",
+ "MWE",
+ "MWV",
+ "NEG",
+ "NN",
+ "NPADVMOD",
+ "NSUBJ",
+ "NSUBJPASS",
+ "NUM",
+ "NUMBER",
+ "P",
+ "PARATAXIS",
+ "PARTMOD",
+ "PCOMP",
+ "POBJ",
+ "POSS",
+ "POSTNEG",
+ "PRECOMP",
+ "PRECONJ",
+ "PREDET",
+ "PREF",
+ "PREP",
+ "PRONL",
+ "PRT",
+ "PS",
+ "QUANTMOD",
+ "RCMOD",
+ "RCMODREL",
+ "RDROP",
+ "REF",
+ "REMNANT",
+ "REPARANDUM",
+ "ROOT",
+ "SNUM",
+ "SUFF",
+ "TMOD",
+ "TOPIC",
+ "VMOD",
+ "VOCATIVE",
+ "XCOMP",
+ "SUFFIX",
+ "TITLE",
+ "ADVPHMOD",
+ "AUXCAUS",
+ "AUXVV",
+ "DTMOD",
+ "FOREIGN",
+ "KW",
+ "LIST",
+ "NOMC",
+ "NOMCSUBJ",
+ "NOMCSUBJPASS",
+ "NUMC",
+ "COP",
+ "DISLOCATED"
+ ],
+ "enumDescriptions": [
+ "Unknown",
+ "Abbreviation modifier",
+ "Adjectival complement",
+ "Adverbial clause modifier",
+ "Adverbial modifier",
+ "Adjectival modifier of an NP",
+ "Appositional modifier of an NP",
+ "Attribute dependent of a copular verb",
+ "Auxiliary (non-main) verb",
+ "Passive auxiliary",
+ "Coordinating conjunction",
+ "Clausal complement of a verb or adjective",
+ "Conjunct",
+ "Clausal subject",
+ "Clausal passive subject",
+ "Dependency (unable to determine)",
+ "Determiner",
+ "Discourse",
+ "Direct object",
+ "Expletive",
+ "Goes with (part of a word in a text not well edited)",
+ "Indirect object",
+ "Marker (word introducing a subordinate clause)",
+ "Multi-word expression",
+ "Multi-word verbal expression",
+ "Negation modifier",
+ "Noun compound modifier",
+ "Noun phrase used as an adverbial modifier",
+ "Nominal subject",
+ "Passive nominal subject",
+ "Numeric modifier of a noun",
+ "Element of compound number",
+ "Punctuation mark",
+ "Parataxis relation",
+ "Participial modifier",
+ "The complement of a preposition is a clause",
+ "Object of a preposition",
+ "Possession modifier",
+ "Postverbal negative particle",
+ "Predicate complement",
+ "Preconjunt",
+ "Predeterminer",
+ "Prefix",
+ "Prepositional modifier",
+ "The relationship between a verb and verbal morpheme",
+ "Particle",
+ "Associative or possessive marker",
+ "Quantifier phrase modifier",
+ "Relative clause modifier",
+ "Complementizer in relative clause",
+ "Ellipsis without a preceding predicate",
+ "Referent",
+ "Remnant",
+ "Reparandum",
+ "Root",
+ "Suffix specifying a unit of number",
+ "Suffix",
+ "Temporal modifier",
+ "Topic marker",
+ "Clause headed by an infinite form of the verb that modifies a noun",
+ "Vocative",
+ "Open clausal complement",
+ "Name suffix",
+ "Name title",
+ "Adverbial phrase modifier",
+ "Causative auxiliary",
+ "Helper auxiliary",
+ "Rentaishi (Prenominal modifier)",
+ "Foreign words",
+ "Keyword",
+ "List for chains of comparable items",
+ "Nominalized clause",
+ "Nominalized clausal subject",
+ "Nominalized clausal passive",
+ "Compound of numeric modifier",
+ "Copula",
+ "Dislocated relation (for fronted/topicalized elements)"
+ ],
"type": "string"
}
},
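
For reviewers who want to sanity-check the rolled surface, below is a minimal sketch of calling the v1 documents:analyzeSentiment method described in this discovery document. It assumes an API key (read from a hypothetical API_KEY environment variable) and uses only the request/response field names that appear in the AnalyzeSentimentRequest, Document, and Sentiment schemas above; it is an illustration against the raw REST endpoint, not part of the generated Dart client.

    # Minimal sketch: POST to the v1 documents:analyzeSentiment endpoint defined
    # in this discovery document. Assumes an API key in the API_KEY environment
    # variable; field names ("document", "type", "content", "encodingType") come
    # from the AnalyzeSentimentRequest / Document schemas in the diff above.
    import json
    import os
    import urllib.request

    API_KEY = os.environ["API_KEY"]  # hypothetical: supply your own key
    URL = ("https://language.googleapis.com/v1/documents:analyzeSentiment"
           f"?key={API_KEY}")

    body = {
        "document": {
            "type": "PLAIN_TEXT",
            "content": "Enthusiastic reviewers make API rolls painless.",
        },
        # UTF-8 offsets, per the encodingType enum in the schema.
        "encodingType": "UTF8",
    }

    req = urllib.request.Request(
        URL,
        data=json.dumps(body).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )

    with urllib.request.urlopen(req) as resp:
        result = json.load(resp)

    # AnalyzeSentimentResponse: documentSentiment.score is in [-1.0, 1.0] and
    # documentSentiment.magnitude is non-negative (see the Sentiment schema).
    print(result["documentSentiment"]["score"],
          result["documentSentiment"]["magnitude"])

The other methods in this roll (analyzeEntities, analyzeSyntax, annotateText) take the same POST shape against their respective v1/documents:... paths, with annotateText additionally accepting the Features object shown in the diff.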
