OLD | NEW |
1 // This is a generated file (see the discoveryapis_generator project). | 1 // This is a generated file (see the discoveryapis_generator project). |
2 | 2 |
3 library googleapis.bigquery.v2; | 3 library googleapis.bigquery.v2; |
4 | 4 |
5 import 'dart:core' as core; | 5 import 'dart:core' as core; |
6 import 'dart:collection' as collection; | 6 import 'dart:collection' as collection; |
7 import 'dart:async' as async; | 7 import 'dart:async' as async; |
8 import 'dart:convert' as convert; | 8 import 'dart:convert' as convert; |
9 | 9 |
10 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; | 10 import 'package:_discoveryapis_commons/_discoveryapis_commons.dart' as commons; |
(...skipping 1989 matching lines...)
2000 _json["message"] = message; | 2000 _json["message"] = message; |
2001 } | 2001 } |
2002 if (reason != null) { | 2002 if (reason != null) { |
2003 _json["reason"] = reason; | 2003 _json["reason"] = reason; |
2004 } | 2004 } |
2005 return _json; | 2005 return _json; |
2006 } | 2006 } |
2007 } | 2007 } |
2008 | 2008 |
2009 class ExplainQueryStage { | 2009 class ExplainQueryStage { |
| 2010 /** Milliseconds the average shard spent on CPU-bound tasks. */ |
| 2011 core.String computeMsAvg; |
| 2012 /** Milliseconds the slowest shard spent on CPU-bound tasks. */ |
| 2013 core.String computeMsMax; |
2010 /** Relative amount of time the average shard spent on CPU-bound tasks. */ | 2014 /** Relative amount of time the average shard spent on CPU-bound tasks. */ |
2011 core.double computeRatioAvg; | 2015 core.double computeRatioAvg; |
2012 /** Relative amount of time the slowest shard spent on CPU-bound tasks. */ | 2016 /** Relative amount of time the slowest shard spent on CPU-bound tasks. */ |
2013 core.double computeRatioMax; | 2017 core.double computeRatioMax; |
2014 /** Unique ID for stage within plan. */ | 2018 /** Unique ID for stage within plan. */ |
2015 core.String id; | 2019 core.String id; |
2016 /** Human-readable name for stage. */ | 2020 /** Human-readable name for stage. */ |
2017 core.String name; | 2021 core.String name; |
| 2022 /** Milliseconds the average shard spent reading input. */ |
| 2023 core.String readMsAvg; |
| 2024 /** Milliseconds the slowest shard spent reading input. */ |
| 2025 core.String readMsMax; |
2018 /** Relative amount of time the average shard spent reading input. */ | 2026 /** Relative amount of time the average shard spent reading input. */ |
2019 core.double readRatioAvg; | 2027 core.double readRatioAvg; |
2020 /** Relative amount of time the slowest shard spent reading input. */ | 2028 /** Relative amount of time the slowest shard spent reading input. */ |
2021 core.double readRatioMax; | 2029 core.double readRatioMax; |
2022 /** Number of records read into the stage. */ | 2030 /** Number of records read into the stage. */ |
2023 core.String recordsRead; | 2031 core.String recordsRead; |
2024 /** Number of records written by the stage. */ | 2032 /** Number of records written by the stage. */ |
2025 core.String recordsWritten; | 2033 core.String recordsWritten; |
| 2034 /** Total number of bytes written to shuffle. */ |
| 2035 core.String shuffleOutputBytes; |
| 2036 /** Total number of bytes written to shuffle and spilled to disk. */ |
| 2037 core.String shuffleOutputBytesSpilled; |
2026 /** Current status for the stage. */ | 2038 /** Current status for the stage. */ |
2027 core.String status; | 2039 core.String status; |
2028 /** | 2040 /** |
2029 * List of operations within the stage in dependency order (approximately | 2041 * List of operations within the stage in dependency order (approximately |
2030 * chronological). | 2042 * chronological). |
2031 */ | 2043 */ |
2032 core.List<ExplainQueryStep> steps; | 2044 core.List<ExplainQueryStep> steps; |
| 2045 /** Milliseconds the average shard spent waiting to be scheduled. */ |
| 2046 core.String waitMsAvg; |
| 2047 /** Milliseconds the slowest shard spent waiting to be scheduled. */ |
| 2048 core.String waitMsMax; |
2033 /** | 2049 /** |
2034 * Relative amount of time the average shard spent waiting to be scheduled. | 2050 * Relative amount of time the average shard spent waiting to be scheduled. |
2035 */ | 2051 */ |
2036 core.double waitRatioAvg; | 2052 core.double waitRatioAvg; |
2037 /** | 2053 /** |
2038 * Relative amount of time the slowest shard spent waiting to be scheduled. | 2054 * Relative amount of time the slowest shard spent waiting to be scheduled. |
2039 */ | 2055 */ |
2040 core.double waitRatioMax; | 2056 core.double waitRatioMax; |
| 2057 /** Milliseconds the average shard spent on writing output. */ |
| 2058 core.String writeMsAvg; |
| 2059 /** Milliseconds the slowest shard spent on writing output. */ |
| 2060 core.String writeMsMax; |
2041 /** Relative amount of time the average shard spent on writing output. */ | 2061 /** Relative amount of time the average shard spent on writing output. */ |
2042 core.double writeRatioAvg; | 2062 core.double writeRatioAvg; |
2043 /** Relative amount of time the slowest shard spent on writing output. */ | 2063 /** Relative amount of time the slowest shard spent on writing output. */ |
2044 core.double writeRatioMax; | 2064 core.double writeRatioMax; |
2045 | 2065 |
2046 ExplainQueryStage(); | 2066 ExplainQueryStage(); |
2047 | 2067 |
2048 ExplainQueryStage.fromJson(core.Map _json) { | 2068 ExplainQueryStage.fromJson(core.Map _json) { |
| 2069 if (_json.containsKey("computeMsAvg")) { |
| 2070 computeMsAvg = _json["computeMsAvg"]; |
| 2071 } |
| 2072 if (_json.containsKey("computeMsMax")) { |
| 2073 computeMsMax = _json["computeMsMax"]; |
| 2074 } |
2049 if (_json.containsKey("computeRatioAvg")) { | 2075 if (_json.containsKey("computeRatioAvg")) { |
2050 computeRatioAvg = _json["computeRatioAvg"]; | 2076 computeRatioAvg = _json["computeRatioAvg"]; |
2051 } | 2077 } |
2052 if (_json.containsKey("computeRatioMax")) { | 2078 if (_json.containsKey("computeRatioMax")) { |
2053 computeRatioMax = _json["computeRatioMax"]; | 2079 computeRatioMax = _json["computeRatioMax"]; |
2054 } | 2080 } |
2055 if (_json.containsKey("id")) { | 2081 if (_json.containsKey("id")) { |
2056 id = _json["id"]; | 2082 id = _json["id"]; |
2057 } | 2083 } |
2058 if (_json.containsKey("name")) { | 2084 if (_json.containsKey("name")) { |
2059 name = _json["name"]; | 2085 name = _json["name"]; |
2060 } | 2086 } |
| 2087 if (_json.containsKey("readMsAvg")) { |
| 2088 readMsAvg = _json["readMsAvg"]; |
| 2089 } |
| 2090 if (_json.containsKey("readMsMax")) { |
| 2091 readMsMax = _json["readMsMax"]; |
| 2092 } |
2061 if (_json.containsKey("readRatioAvg")) { | 2093 if (_json.containsKey("readRatioAvg")) { |
2062 readRatioAvg = _json["readRatioAvg"]; | 2094 readRatioAvg = _json["readRatioAvg"]; |
2063 } | 2095 } |
2064 if (_json.containsKey("readRatioMax")) { | 2096 if (_json.containsKey("readRatioMax")) { |
2065 readRatioMax = _json["readRatioMax"]; | 2097 readRatioMax = _json["readRatioMax"]; |
2066 } | 2098 } |
2067 if (_json.containsKey("recordsRead")) { | 2099 if (_json.containsKey("recordsRead")) { |
2068 recordsRead = _json["recordsRead"]; | 2100 recordsRead = _json["recordsRead"]; |
2069 } | 2101 } |
2070 if (_json.containsKey("recordsWritten")) { | 2102 if (_json.containsKey("recordsWritten")) { |
2071 recordsWritten = _json["recordsWritten"]; | 2103 recordsWritten = _json["recordsWritten"]; |
2072 } | 2104 } |
| 2105 if (_json.containsKey("shuffleOutputBytes")) { |
| 2106 shuffleOutputBytes = _json["shuffleOutputBytes"]; |
| 2107 } |
| 2108 if (_json.containsKey("shuffleOutputBytesSpilled")) { |
| 2109 shuffleOutputBytesSpilled = _json["shuffleOutputBytesSpilled"]; |
| 2110 } |
2073 if (_json.containsKey("status")) { | 2111 if (_json.containsKey("status")) { |
2074 status = _json["status"]; | 2112 status = _json["status"]; |
2075 } | 2113 } |
2076 if (_json.containsKey("steps")) { | 2114 if (_json.containsKey("steps")) { |
2077 steps = _json["steps"].map((value) => new ExplainQueryStep.fromJson(value)).toList(); | 2115 steps = _json["steps"].map((value) => new ExplainQueryStep.fromJson(value)).toList(); |
2078 } | 2116 } |
| 2117 if (_json.containsKey("waitMsAvg")) { |
| 2118 waitMsAvg = _json["waitMsAvg"]; |
| 2119 } |
| 2120 if (_json.containsKey("waitMsMax")) { |
| 2121 waitMsMax = _json["waitMsMax"]; |
| 2122 } |
2079 if (_json.containsKey("waitRatioAvg")) { | 2123 if (_json.containsKey("waitRatioAvg")) { |
2080 waitRatioAvg = _json["waitRatioAvg"]; | 2124 waitRatioAvg = _json["waitRatioAvg"]; |
2081 } | 2125 } |
2082 if (_json.containsKey("waitRatioMax")) { | 2126 if (_json.containsKey("waitRatioMax")) { |
2083 waitRatioMax = _json["waitRatioMax"]; | 2127 waitRatioMax = _json["waitRatioMax"]; |
2084 } | 2128 } |
| 2129 if (_json.containsKey("writeMsAvg")) { |
| 2130 writeMsAvg = _json["writeMsAvg"]; |
| 2131 } |
| 2132 if (_json.containsKey("writeMsMax")) { |
| 2133 writeMsMax = _json["writeMsMax"]; |
| 2134 } |
2085 if (_json.containsKey("writeRatioAvg")) { | 2135 if (_json.containsKey("writeRatioAvg")) { |
2086 writeRatioAvg = _json["writeRatioAvg"]; | 2136 writeRatioAvg = _json["writeRatioAvg"]; |
2087 } | 2137 } |
2088 if (_json.containsKey("writeRatioMax")) { | 2138 if (_json.containsKey("writeRatioMax")) { |
2089 writeRatioMax = _json["writeRatioMax"]; | 2139 writeRatioMax = _json["writeRatioMax"]; |
2090 } | 2140 } |
2091 } | 2141 } |
2092 | 2142 |
2093 core.Map<core.String, core.Object> toJson() { | 2143 core.Map<core.String, core.Object> toJson() { |
2094 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 2144 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
| 2145 if (computeMsAvg != null) { |
| 2146 _json["computeMsAvg"] = computeMsAvg; |
| 2147 } |
| 2148 if (computeMsMax != null) { |
| 2149 _json["computeMsMax"] = computeMsMax; |
| 2150 } |
2095 if (computeRatioAvg != null) { | 2151 if (computeRatioAvg != null) { |
2096 _json["computeRatioAvg"] = computeRatioAvg; | 2152 _json["computeRatioAvg"] = computeRatioAvg; |
2097 } | 2153 } |
2098 if (computeRatioMax != null) { | 2154 if (computeRatioMax != null) { |
2099 _json["computeRatioMax"] = computeRatioMax; | 2155 _json["computeRatioMax"] = computeRatioMax; |
2100 } | 2156 } |
2101 if (id != null) { | 2157 if (id != null) { |
2102 _json["id"] = id; | 2158 _json["id"] = id; |
2103 } | 2159 } |
2104 if (name != null) { | 2160 if (name != null) { |
2105 _json["name"] = name; | 2161 _json["name"] = name; |
2106 } | 2162 } |
| 2163 if (readMsAvg != null) { |
| 2164 _json["readMsAvg"] = readMsAvg; |
| 2165 } |
| 2166 if (readMsMax != null) { |
| 2167 _json["readMsMax"] = readMsMax; |
| 2168 } |
2107 if (readRatioAvg != null) { | 2169 if (readRatioAvg != null) { |
2108 _json["readRatioAvg"] = readRatioAvg; | 2170 _json["readRatioAvg"] = readRatioAvg; |
2109 } | 2171 } |
2110 if (readRatioMax != null) { | 2172 if (readRatioMax != null) { |
2111 _json["readRatioMax"] = readRatioMax; | 2173 _json["readRatioMax"] = readRatioMax; |
2112 } | 2174 } |
2113 if (recordsRead != null) { | 2175 if (recordsRead != null) { |
2114 _json["recordsRead"] = recordsRead; | 2176 _json["recordsRead"] = recordsRead; |
2115 } | 2177 } |
2116 if (recordsWritten != null) { | 2178 if (recordsWritten != null) { |
2117 _json["recordsWritten"] = recordsWritten; | 2179 _json["recordsWritten"] = recordsWritten; |
2118 } | 2180 } |
| 2181 if (shuffleOutputBytes != null) { |
| 2182 _json["shuffleOutputBytes"] = shuffleOutputBytes; |
| 2183 } |
| 2184 if (shuffleOutputBytesSpilled != null) { |
| 2185 _json["shuffleOutputBytesSpilled"] = shuffleOutputBytesSpilled; |
| 2186 } |
2119 if (status != null) { | 2187 if (status != null) { |
2120 _json["status"] = status; | 2188 _json["status"] = status; |
2121 } | 2189 } |
2122 if (steps != null) { | 2190 if (steps != null) { |
2123 _json["steps"] = steps.map((value) => (value).toJson()).toList(); | 2191 _json["steps"] = steps.map((value) => (value).toJson()).toList(); |
2124 } | 2192 } |
| 2193 if (waitMsAvg != null) { |
| 2194 _json["waitMsAvg"] = waitMsAvg; |
| 2195 } |
| 2196 if (waitMsMax != null) { |
| 2197 _json["waitMsMax"] = waitMsMax; |
| 2198 } |
2125 if (waitRatioAvg != null) { | 2199 if (waitRatioAvg != null) { |
2126 _json["waitRatioAvg"] = waitRatioAvg; | 2200 _json["waitRatioAvg"] = waitRatioAvg; |
2127 } | 2201 } |
2128 if (waitRatioMax != null) { | 2202 if (waitRatioMax != null) { |
2129 _json["waitRatioMax"] = waitRatioMax; | 2203 _json["waitRatioMax"] = waitRatioMax; |
2130 } | 2204 } |
| 2205 if (writeMsAvg != null) { |
| 2206 _json["writeMsAvg"] = writeMsAvg; |
| 2207 } |
| 2208 if (writeMsMax != null) { |
| 2209 _json["writeMsMax"] = writeMsMax; |
| 2210 } |
2131 if (writeRatioAvg != null) { | 2211 if (writeRatioAvg != null) { |
2132 _json["writeRatioAvg"] = writeRatioAvg; | 2212 _json["writeRatioAvg"] = writeRatioAvg; |
2133 } | 2213 } |
2134 if (writeRatioMax != null) { | 2214 if (writeRatioMax != null) { |
2135 _json["writeRatioMax"] = writeRatioMax; | 2215 _json["writeRatioMax"] = writeRatioMax; |
2136 } | 2216 } |
2137 return _json; | 2217 return _json; |
2138 } | 2218 } |
2139 } | 2219 } |
2140 | 2220 |
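The new ExplainQueryStage fields above add absolute per-stage counters (computeMs*, readMs*, waitMs*, writeMs*, shuffleOutputBytes*) alongside the existing relative ratios. A minimal sketch, not part of the generated file, of how client code might read them; it assumes this library is consumed as package:googleapis/bigquery/v2.dart, and the stage map is an illustrative fragment of the kind of data typically surfaced in a completed job's statistics.query.queryPlan:

import 'package:googleapis/bigquery/v2.dart' as bigquery;

void main() {
  var stage = new bigquery.ExplainQueryStage.fromJson({
    "id": "1",
    "name": "S00: Input",
    "computeMsAvg": "824",        // int64 values arrive as strings
    "computeMsMax": "1317",
    "waitMsAvg": "12",
    "waitMsMax": "40",
    "shuffleOutputBytes": "167936",
    "computeRatioAvg": 0.62,      // pre-existing relative metric
  });

  // The *Ms* and shuffle fields are serialized int64s, so parse before doing math.
  var computeAvg = int.parse(stage.computeMsAvg);
  var computeMax = int.parse(stage.computeMsMax);
  print('compute skew: ${computeMax - computeAvg} ms '
      '(ratioAvg=${stage.computeRatioAvg})');
  print('shuffle output: ${stage.shuffleOutputBytes} bytes');
}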
(...skipping 78 matching lines...)
2219 * Bigtable, specify "BIGTABLE". | 2299 * Bigtable, specify "BIGTABLE". |
2220 */ | 2300 */ |
2221 core.String sourceFormat; | 2301 core.String sourceFormat; |
2222 /** | 2302 /** |
2223 * [Required] The fully-qualified URIs that point to your data in Google | 2303 * [Required] The fully-qualified URIs that point to your data in Google |
2224 * Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard | 2304 * Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard |
2225 * character and it must come after the 'bucket' name. Size limits related to | 2305 * character and it must come after the 'bucket' name. Size limits related to |
2226 * load jobs apply to external data sources. For Google Cloud Bigtable URIs: | 2306 * load jobs apply to external data sources. For Google Cloud Bigtable URIs: |
2227 * Exactly one URI can be specified and it has to be a fully specified and valid | 2307 * Exactly one URI can be specified and it has to be a fully specified and valid
2228 * HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore | 2308 * HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore |
2229 * backups, exactly one URI can be specified, and it must end with | 2309 * backups, exactly one URI can be specified. Also, the '*' wildcard character |
2230 * '.backup_info'. Also, the '*' wildcard character is not allowed. | 2310 * is not allowed. |
2231 */ | 2311 */ |
2232 core.List<core.String> sourceUris; | 2312 core.List<core.String> sourceUris; |
2233 | 2313 |
2234 ExternalDataConfiguration(); | 2314 ExternalDataConfiguration(); |
2235 | 2315 |
2236 ExternalDataConfiguration.fromJson(core.Map _json) { | 2316 ExternalDataConfiguration.fromJson(core.Map _json) { |
2237 if (_json.containsKey("autodetect")) { | 2317 if (_json.containsKey("autodetect")) { |
2238 autodetect = _json["autodetect"]; | 2318 autodetect = _json["autodetect"]; |
2239 } | 2319 } |
2240 if (_json.containsKey("bigtableOptions")) { | 2320 if (_json.containsKey("bigtableOptions")) { |
(...skipping 616 matching lines...)
2857 /** | 2937 /** |
2858 * [Deprecated] The inline schema. For CSV schemas, specify as | 2938 * [Deprecated] The inline schema. For CSV schemas, specify as |
2859 * "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, | 2939 * "Field1:Type1[,Field2:Type2]*". For example, "foo:STRING, bar:INTEGER, |
2860 * baz:FLOAT". | 2940 * baz:FLOAT". |
2861 */ | 2941 */ |
2862 core.String schemaInline; | 2942 core.String schemaInline; |
2863 /** [Deprecated] The format of the schemaInline property. */ | 2943 /** [Deprecated] The format of the schemaInline property. */ |
2864 core.String schemaInlineFormat; | 2944 core.String schemaInlineFormat; |
2865 /** | 2945 /** |
2866 * [Experimental] Allows the schema of the destination table to be updated as | 2946 * [Experimental] Allows the schema of the destination table to be updated as
2867 * a side effect of the load job. Schema update options are supported in two | 2947 * a side effect of the load job if a schema is autodetected or supplied in |
2868 * cases: when writeDisposition is WRITE_APPEND; when writeDisposition is | 2948 * the job configuration. Schema update options are supported in two cases: |
| 2949 * when writeDisposition is WRITE_APPEND; when writeDisposition is |
2869 * WRITE_TRUNCATE and the destination table is a partition of a table, | 2950 * WRITE_TRUNCATE and the destination table is a partition of a table, |
2870 * specified by partition decorators. For normal tables, WRITE_TRUNCATE will | 2951 * specified by partition decorators. For normal tables, WRITE_TRUNCATE will |
2871 * always overwrite the schema. One or more of the following values are | 2952 * always overwrite the schema. One or more of the following values are |
2872 * specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the | 2953 * specified: ALLOW_FIELD_ADDITION: allow adding a nullable field to the |
2873 * schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the | 2954 * schema. ALLOW_FIELD_RELAXATION: allow relaxing a required field in the |
2874 * original schema to nullable. | 2955 * original schema to nullable. |
2875 */ | 2956 */ |
2876 core.List<core.String> schemaUpdateOptions; | 2957 core.List<core.String> schemaUpdateOptions; |
2877 /** | 2958 /** |
2878 * [Optional] The number of rows at the top of a CSV file that BigQuery will | 2959 * [Optional] The number of rows at the top of a CSV file that BigQuery will |
2879 * skip when loading the data. The default value is 0. This property is useful | 2960 * skip when loading the data. The default value is 0. This property is useful |
2880 * if you have header rows in the file that should be skipped. | 2961 * if you have header rows in the file that should be skipped. |
2881 */ | 2962 */ |
2882 core.int skipLeadingRows; | 2963 core.int skipLeadingRows; |
2883 /** | 2964 /** |
2884 * [Optional] The format of the data files. For CSV files, specify "CSV". For | 2965 * [Optional] The format of the data files. For CSV files, specify "CSV". For |
2885 * datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, | 2966 * datastore backups, specify "DATASTORE_BACKUP". For newline-delimited JSON, |
2886 * specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". The default | 2967 * specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". The default |
2887 * value is CSV. | 2968 * value is CSV. |
2888 */ | 2969 */ |
2889 core.String sourceFormat; | 2970 core.String sourceFormat; |
2890 /** | 2971 /** |
2891 * [Required] The fully-qualified URIs that point to your data in Google | 2972 * [Required] The fully-qualified URIs that point to your data in Google |
2892 * Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard | 2973 * Cloud. For Google Cloud Storage URIs: Each URI can contain one '*' wildcard |
2893 * character and it must come after the 'bucket' name. Size limits related to | 2974 * character and it must come after the 'bucket' name. Size limits related to |
2894 * load jobs apply to external data sources. For Google Cloud Bigtable URIs: | 2975 * load jobs apply to external data sources. For Google Cloud Bigtable URIs: |
2895 * Exactly one URI can be specified and it has to be a fully specified and valid | 2976 * Exactly one URI can be specified and it has to be a fully specified and valid
2896 * HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore | 2977 * HTTPS URL for a Google Cloud Bigtable table. For Google Cloud Datastore |
2897 * backups: Exactly one URI can be specified, and it must end with | 2978 * backups: Exactly one URI can be specified. Also, the '*' wildcard character |
2898 * '.backup_info'. Also, the '*' wildcard character is not allowed. | 2979 * is not allowed. |
2899 */ | 2980 */ |
2900 core.List<core.String> sourceUris; | 2981 core.List<core.String> sourceUris; |
2901 /** | 2982 /** |
| 2983 * [Experimental] If specified, configures time-based partitioning for the |
| 2984 * destination table. |
| 2985 */ |
| 2986 TimePartitioning timePartitioning; |
| 2987 /** |
2902 * [Optional] Specifies the action that occurs if the destination table | 2988 * [Optional] Specifies the action that occurs if the destination table |
2903 * already exists. The following values are supported: WRITE_TRUNCATE: If the | 2989 * already exists. The following values are supported: WRITE_TRUNCATE: If the |
2904 * table already exists, BigQuery overwrites the table data. WRITE_APPEND: If | 2990 * table already exists, BigQuery overwrites the table data. WRITE_APPEND: If |
2905 * the table already exists, BigQuery appends the data to the table. | 2991 * the table already exists, BigQuery appends the data to the table. |
2906 * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' | 2992 * WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' |
2907 * error is returned in the job result. The default value is WRITE_APPEND. | 2993 * error is returned in the job result. The default value is WRITE_APPEND. |
2908 * Each action is atomic and only occurs if BigQuery is able to complete the | 2994 * Each action is atomic and only occurs if BigQuery is able to complete the |
2909 * job successfully. Creation, truncation and append actions occur as one | 2995 * job successfully. Creation, truncation and append actions occur as one |
2910 * atomic update upon job completion. | 2996 * atomic update upon job completion. |
2911 */ | 2997 */ |
(...skipping 52 matching lines...)
2964 } | 3050 } |
2965 if (_json.containsKey("skipLeadingRows")) { | 3051 if (_json.containsKey("skipLeadingRows")) { |
2966 skipLeadingRows = _json["skipLeadingRows"]; | 3052 skipLeadingRows = _json["skipLeadingRows"]; |
2967 } | 3053 } |
2968 if (_json.containsKey("sourceFormat")) { | 3054 if (_json.containsKey("sourceFormat")) { |
2969 sourceFormat = _json["sourceFormat"]; | 3055 sourceFormat = _json["sourceFormat"]; |
2970 } | 3056 } |
2971 if (_json.containsKey("sourceUris")) { | 3057 if (_json.containsKey("sourceUris")) { |
2972 sourceUris = _json["sourceUris"]; | 3058 sourceUris = _json["sourceUris"]; |
2973 } | 3059 } |
| 3060 if (_json.containsKey("timePartitioning")) { |
| 3061 timePartitioning = new TimePartitioning.fromJson(_json["timePartitioning"]); |
| 3062 } |
2974 if (_json.containsKey("writeDisposition")) { | 3063 if (_json.containsKey("writeDisposition")) { |
2975 writeDisposition = _json["writeDisposition"]; | 3064 writeDisposition = _json["writeDisposition"]; |
2976 } | 3065 } |
2977 } | 3066 } |
2978 | 3067 |
2979 core.Map<core.String, core.Object> toJson() { | 3068 core.Map<core.String, core.Object> toJson() { |
2980 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); | 3069 final core.Map<core.String, core.Object> _json = new core.Map<core.String, core.Object>(); |
2981 if (allowJaggedRows != null) { | 3070 if (allowJaggedRows != null) { |
2982 _json["allowJaggedRows"] = allowJaggedRows; | 3071 _json["allowJaggedRows"] = allowJaggedRows; |
2983 } | 3072 } |
(...skipping 44 matching lines...)
3028 } | 3117 } |
3029 if (skipLeadingRows != null) { | 3118 if (skipLeadingRows != null) { |
3030 _json["skipLeadingRows"] = skipLeadingRows; | 3119 _json["skipLeadingRows"] = skipLeadingRows; |
3031 } | 3120 } |
3032 if (sourceFormat != null) { | 3121 if (sourceFormat != null) { |
3033 _json["sourceFormat"] = sourceFormat; | 3122 _json["sourceFormat"] = sourceFormat; |
3034 } | 3123 } |
3035 if (sourceUris != null) { | 3124 if (sourceUris != null) { |
3036 _json["sourceUris"] = sourceUris; | 3125 _json["sourceUris"] = sourceUris; |
3037 } | 3126 } |
| 3127 if (timePartitioning != null) { |
| 3128 _json["timePartitioning"] = (timePartitioning).toJson(); |
| 3129 } |
3038 if (writeDisposition != null) { | 3130 if (writeDisposition != null) { |
3039 _json["writeDisposition"] = writeDisposition; | 3131 _json["writeDisposition"] = writeDisposition; |
3040 } | 3132 } |
3041 return _json; | 3133 return _json; |
3042 } | 3134 } |
3043 } | 3135 } |
3044 | 3136 |
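JobConfigurationLoad now accepts the experimental timePartitioning field, serialized by the symmetric fromJson/toJson guards above. A minimal sketch, not part of the generated file, of building such a load configuration; TimePartitioning and TableReference are defined elsewhere in this file, and the member names used here (type, expirationMs, destinationTable, projectId, datasetId, tableId) are assumed from the BigQuery v2 API surface rather than shown in this excerpt:

import 'package:googleapis/bigquery/v2.dart' as bigquery;

void main() {
  var load = new bigquery.JobConfigurationLoad()
    ..sourceUris = ["gs://my-bucket/exports/events-*.json"]  // one '*' after the bucket name
    ..sourceFormat = "NEWLINE_DELIMITED_JSON"
    ..writeDisposition = "WRITE_APPEND"
    ..schemaUpdateOptions = ["ALLOW_FIELD_ADDITION"]
    ..timePartitioning = (new bigquery.TimePartitioning()
      ..type = "DAY"
      ..expirationMs = "2592000000")   // 30 days, int64 as string (assumed field names)
    ..destinationTable = (new bigquery.TableReference()
      ..projectId = "my-project"
      ..datasetId = "my_dataset"
      ..tableId = "events");

  // toJson() produces the map sent in jobs.insert; timePartitioning is nested.
  print(load.toJson());
}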
3045 class JobConfigurationQuery { | 3137 class JobConfigurationQuery { |
3046 /** | 3138 /** |
3047 * [Optional] If true and query uses legacy SQL dialect, allows the query to | 3139 * [Optional] If true and query uses legacy SQL dialect, allows the query to |
(...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3124 */ | 3216 */ |
3125 core.List<core.String> schemaUpdateOptions; | 3217 core.List<core.String> schemaUpdateOptions; |
3126 /** | 3218 /** |
3127 * [Optional] If querying an external data source outside of BigQuery, | 3219 * [Optional] If querying an external data source outside of BigQuery, |
3128 * describes the data format, location and other properties of the data | 3220 * describes the data format, location and other properties of the data |
3129 * source. By defining these properties, the data source can then be queried | 3221 * source. By defining these properties, the data source can then be queried |
3130 * as if it were a standard BigQuery table. | 3222 * as if it were a standard BigQuery table. |
3131 */ | 3223 */ |
3132 core.Map<core.String, ExternalDataConfiguration> tableDefinitions; | 3224 core.Map<core.String, ExternalDataConfiguration> tableDefinitions; |
3133 /** | 3225 /** |
| 3226 * [Experimental] If specified, configures time-based partitioning for the |
| 3227 * destination table. |
| 3228 */ |
| 3229 TimePartitioning timePartitioning; |
| 3230 /** |
3134 * Specifies whether to use BigQuery's legacy SQL dialect for this query. The | 3231 * Specifies whether to use BigQuery's legacy SQL dialect for this query. The |
3135 * default value is true. If set to false, the query will use BigQuery's | 3232 * default value is true. If set to false, the query will use BigQuery's |
3136 * standard SQL: https://cloud.google.com/bigquery/sql-reference/ When | 3233 * standard SQL: https://cloud.google.com/bigquery/sql-reference/ When |
3137 * useLegacySql is set to false, the value of flattenResults is ignored; query | 3234 * useLegacySql is set to false, the value of flattenResults is ignored; query |
3138 * will be run as if flattenResults is false. | 3235 * will be run as if flattenResults is false. |
3139 */ | 3236 */ |
3140 core.bool useLegacySql; | 3237 core.bool useLegacySql; |
3141 /** | 3238 /** |
3142 * [Optional] Whether to look for the result in the query cache. The query | 3239 * [Optional] Whether to look for the result in the query cache. The query |
3143 * cache is a best-effort cache that will be flushed whenever tables in the | 3240 * cache is a best-effort cache that will be flushed whenever tables in the |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3200 } | 3297 } |
3201 if (_json.containsKey("queryParameters")) { | 3298 if (_json.containsKey("queryParameters")) { |
3202 queryParameters = _json["queryParameters"].map((value) => new QueryParameter.fromJson(value)).toList(); | 3299 queryParameters = _json["queryParameters"].map((value) => new QueryParameter.fromJson(value)).toList(); |
3203 } | 3300 } |
3204 if (_json.containsKey("schemaUpdateOptions")) { | 3301 if (_json.containsKey("schemaUpdateOptions")) { |
3205 schemaUpdateOptions = _json["schemaUpdateOptions"]; | 3302 schemaUpdateOptions = _json["schemaUpdateOptions"]; |
3206 } | 3303 } |
3207 if (_json.containsKey("tableDefinitions")) { | 3304 if (_json.containsKey("tableDefinitions")) { |
3208 tableDefinitions = commons.mapMap<core.Map<core.String, core.Object>, ExternalDataConfiguration>(_json["tableDefinitions"], (core.Map<core.String, core.Object> item) => new ExternalDataConfiguration.fromJson(item)); | 3305 tableDefinitions = commons.mapMap<core.Map<core.String, core.Object>, ExternalDataConfiguration>(_json["tableDefinitions"], (core.Map<core.String, core.Object> item) => new ExternalDataConfiguration.fromJson(item)); |
3209 } | 3306 } |
| 3307 if (_json.containsKey("timePartitioning")) { |
| 3308 timePartitioning = new TimePartitioning.fromJson(_json["timePartitioning"]); |
| 3309 } |
3210 if (_json.containsKey("useLegacySql")) { | 3310 if (_json.containsKey("useLegacySql")) { |
3211 useLegacySql = _json["useLegacySql"]; | 3311 useLegacySql = _json["useLegacySql"]; |
3212 } | 3312 } |
3213 if (_json.containsKey("useQueryCache")) { | 3313 if (_json.containsKey("useQueryCache")) { |
3214 useQueryCache = _json["useQueryCache"]; | 3314 useQueryCache = _json["useQueryCache"]; |
3215 } | 3315 } |
3216 if (_json.containsKey("userDefinedFunctionResources")) { | 3316 if (_json.containsKey("userDefinedFunctionResources")) { |
3217 userDefinedFunctionResources = _json["userDefinedFunctionResources"].map((value) => new UserDefinedFunctionResource.fromJson(value)).toList(); | 3317 userDefinedFunctionResources = _json["userDefinedFunctionResources"].map((value) => new UserDefinedFunctionResource.fromJson(value)).toList(); |
3218 } | 3318 } |
3219 if (_json.containsKey("writeDisposition")) { | 3319 if (_json.containsKey("writeDisposition")) { |
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3258 } | 3358 } |
3259 if (queryParameters != null) { | 3359 if (queryParameters != null) { |
3260 _json["queryParameters"] = queryParameters.map((value) => (value).toJson()
).toList(); | 3360 _json["queryParameters"] = queryParameters.map((value) => (value).toJson()
).toList(); |
3261 } | 3361 } |
3262 if (schemaUpdateOptions != null) { | 3362 if (schemaUpdateOptions != null) { |
3263 _json["schemaUpdateOptions"] = schemaUpdateOptions; | 3363 _json["schemaUpdateOptions"] = schemaUpdateOptions; |
3264 } | 3364 } |
3265 if (tableDefinitions != null) { | 3365 if (tableDefinitions != null) { |
3266 _json["tableDefinitions"] = commons.mapMap<ExternalDataConfiguration, core
.Map<core.String, core.Object>>(tableDefinitions, (ExternalDataConfiguration ite
m) => (item).toJson()); | 3366 _json["tableDefinitions"] = commons.mapMap<ExternalDataConfiguration, core
.Map<core.String, core.Object>>(tableDefinitions, (ExternalDataConfiguration ite
m) => (item).toJson()); |
3267 } | 3367 } |
| 3368 if (timePartitioning != null) { |
| 3369 _json["timePartitioning"] = (timePartitioning).toJson(); |
| 3370 } |
3268 if (useLegacySql != null) { | 3371 if (useLegacySql != null) { |
3269 _json["useLegacySql"] = useLegacySql; | 3372 _json["useLegacySql"] = useLegacySql; |
3270 } | 3373 } |
3271 if (useQueryCache != null) { | 3374 if (useQueryCache != null) { |
3272 _json["useQueryCache"] = useQueryCache; | 3375 _json["useQueryCache"] = useQueryCache; |
3273 } | 3376 } |
3274 if (userDefinedFunctionResources != null) { | 3377 if (userDefinedFunctionResources != null) { |
3275 _json["userDefinedFunctionResources"] = userDefinedFunctionResources.map((
value) => (value).toJson()).toList(); | 3378 _json["userDefinedFunctionResources"] = userDefinedFunctionResources.map((
value) => (value).toJson()).toList(); |
3276 } | 3379 } |
3277 if (writeDisposition != null) { | 3380 if (writeDisposition != null) { |
(...skipping 2099 matching lines...)
5377 } | 5480 } |
5378 if (useLegacySql != null) { | 5481 if (useLegacySql != null) { |
5379 _json["useLegacySql"] = useLegacySql; | 5482 _json["useLegacySql"] = useLegacySql; |
5380 } | 5483 } |
5381 if (userDefinedFunctionResources != null) { | 5484 if (userDefinedFunctionResources != null) { |
5382 _json["userDefinedFunctionResources"] = userDefinedFunctionResources.map((
value) => (value).toJson()).toList(); | 5485 _json["userDefinedFunctionResources"] = userDefinedFunctionResources.map((
value) => (value).toJson()).toList(); |
5383 } | 5486 } |
5384 return _json; | 5487 return _json; |
5385 } | 5488 } |
5386 } | 5489 } |
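JobConfigurationQuery gains the same experimental timePartitioning field, and the null guards above mean a field is only emitted when it was set. A minimal sketch, not part of the generated file, of the resulting round trip; the query member and the TimePartitioning field name type are assumed from the BigQuery v2 API surface rather than shown in this excerpt:

import 'package:googleapis/bigquery/v2.dart' as bigquery;

void main() {
  var queryConfig = new bigquery.JobConfigurationQuery()
    ..query = "SELECT event_date, COUNT(*) c FROM my_dataset.events GROUP BY 1"
    ..useLegacySql = false
    ..writeDisposition = "WRITE_TRUNCATE"
    ..timePartitioning = (new bigquery.TimePartitioning()..type = "DAY");

  // Round trip: keys absent from the serialized map stay null on the decoded object.
  var decoded = new bigquery.JobConfigurationQuery.fromJson(queryConfig.toJson());
  print(decoded.timePartitioning.type);   // DAY
  print(decoded.useQueryCache);           // null: never set, so never serialized
}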