{
"clientLibrary": {
"name": "cloud.google.com/go/bigquery/storage/apiv1beta2",
"version": "1.55.0",
"language": "GO",
"apis": [
{
"id": "google.cloud.bigquery.storage.v1beta2",
"version": "v1beta2"
}
]
},
"snippets": [
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_CreateReadSession_sync",
"title": "bigquerystorage CreateReadSession Sample",
"description": "CreateReadSession creates a new read session. A read session divides the contents of a\nBigQuery table into one or more streams, which can then be used to read\ndata from the table. The read session also specifies properties of the\ndata to be read, such as a list of columns or a push-down filter describing\nthe rows to be returned.\n\nA particular row can be read by at most one stream. When the caller has\nreached the end of each stream in the session, then all the data in the\ntable has been read.\n\nData is assigned to each stream such that roughly the same number of\nrows can be read from each stream. Because the server-side unit for\nassigning data is collections of rows, the API does not guarantee that\neach stream will return the same number of rows. Additionally, the\nlimits are enforced based on the number of pre-filtered rows, so some\nfilters can lead to lopsided assignments.\n\nRead sessions automatically expire 6 hours after they are created and do\nnot require manual clean-up by the caller.",
"file": "BigQueryReadClient/CreateReadSession/main.go",
"language": "GO",
"clientMethod": {
"shortName": "CreateReadSession",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient.CreateReadSession",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.CreateReadSessionRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "*storagepb.ReadSession",
"client": {
"shortName": "BigQueryReadClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient"
},
"method": {
"shortName": "CreateReadSession",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.CreateReadSession",
"service": {
"shortName": "BigQueryRead",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryRead_SplitReadStream_sync",
"title": "bigquerystorage SplitReadStream Sample",
"description": "SplitReadStream splits a given `ReadStream` into two `ReadStream` objects. These\n`ReadStream` objects are referred to as the primary and the residual\nstreams of the split. The original `ReadStream` can still be read from in\nthe same manner as before. Both of the returned `ReadStream` objects can\nalso be read from, and the rows returned by both child streams will be\nthe same as the rows read from the original stream.\n\nMoreover, the two child streams will be allocated back-to-back in the\noriginal `ReadStream`. Concretely, it is guaranteed that for streams\noriginal, primary, and residual, that original[0-j] = primary[0-j] and\noriginal[j-n] = residual[0-m] once the streams have been read to\ncompletion.",
"file": "BigQueryReadClient/SplitReadStream/main.go",
"language": "GO",
"clientMethod": {
"shortName": "SplitReadStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient.SplitReadStream",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.SplitReadStreamRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "*storagepb.SplitReadStreamResponse",
"client": {
"shortName": "BigQueryReadClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryReadClient"
},
"method": {
"shortName": "SplitReadStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead.SplitReadStream",
"service": {
"shortName": "BigQueryRead",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryRead"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_AppendRows_sync",
"title": "bigquerystorage AppendRows Sample",
"description": "AppendRows appends data to the given stream.\n\nIf `offset` is specified, the `offset` is checked against the end of\nstream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an\nattempt is made to append to an offset beyond the current end of the stream\nor `ALREADY_EXISTS` if user provides an `offset` that has already been\nwritten to. User can retry with adjusted offset within the same RPC\nstream. If `offset` is not specified, append happens at the end of the\nstream.\n\nThe response contains the offset at which the append happened. Responses\nare received in the same order in which requests are sent. There will be\none response for each successful request. If the `offset` is not set in\nresponse, it means append didn't happen due to some errors. If one request\nfails, all the subsequent requests will also fail until a success request\nis made again.\n\nIf the stream is of `PENDING` type, data will only be available for read\noperations after the stream is committed.\n\n\nThis method is not supported for the REST transport.",
"file": "BigQueryWriteClient/AppendRows/main.go",
"language": "GO",
"clientMethod": {
"shortName": "AppendRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.AppendRows",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "storagepb.BigQueryWrite_AppendRowsClient",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "AppendRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.AppendRows",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 69,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_BatchCommitWriteStreams_sync",
"title": "bigquerystorage BatchCommitWriteStreams Sample",
"description": "BatchCommitWriteStreams atomically commits a group of `PENDING` streams that belong to the same\n`parent` table.\nStreams must be finalized before commit and cannot be committed multiple\ntimes. Once a stream is committed, data in the stream becomes available\nfor read operations.",
"file": "BigQueryWriteClient/BatchCommitWriteStreams/main.go",
"language": "GO",
"clientMethod": {
"shortName": "BatchCommitWriteStreams",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.BatchCommitWriteStreams",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.BatchCommitWriteStreamsRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "*storagepb.BatchCommitWriteStreamsResponse",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "BatchCommitWriteStreams",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.BatchCommitWriteStreams",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_CreateWriteStream_sync",
"title": "bigquerystorage CreateWriteStream Sample",
"description": "CreateWriteStream creates a write stream to the given table.\nAdditionally, every table has a special COMMITTED stream named '_default'\nto which data can be written. This stream doesn't need to be created using\nCreateWriteStream. It is a stream that can be used simultaneously by any\nnumber of clients. Data written to this stream is considered committed as\nsoon as an acknowledgement is received.",
"file": "BigQueryWriteClient/CreateWriteStream/main.go",
"language": "GO",
"clientMethod": {
"shortName": "CreateWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.CreateWriteStream",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.CreateWriteStreamRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "*storagepb.WriteStream",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "CreateWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.CreateWriteStream",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_FinalizeWriteStream_sync",
"title": "bigquerystorage FinalizeWriteStream Sample",
"description": "FinalizeWriteStream finalizes a write stream so that no new data can be appended to the\nstream. Finalize is not supported on the '_default' stream.",
"file": "BigQueryWriteClient/FinalizeWriteStream/main.go",
"language": "GO",
"clientMethod": {
"shortName": "FinalizeWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.FinalizeWriteStream",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.FinalizeWriteStreamRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "*storagepb.FinalizeWriteStreamResponse",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "FinalizeWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.FinalizeWriteStream",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_FlushRows_sync",
"title": "bigquerystorage FlushRows Sample",
"description": "FlushRows flushes rows to a BUFFERED stream.\nIf users are appending rows to BUFFERED stream, flush operation is\nrequired in order for the rows to become available for reading. A\nFlush operation flushes up to any previously flushed offset in a BUFFERED\nstream, to the offset specified in the request.\nFlush is not supported on the _default stream, since it is not BUFFERED.",
"file": "BigQueryWriteClient/FlushRows/main.go",
"language": "GO",
"clientMethod": {
"shortName": "FlushRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.FlushRows",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.FlushRowsRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "*storagepb.FlushRowsResponse",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "FlushRows",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.FlushRows",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
},
{
"regionTag": "bigquerystorage_v1beta2_generated_BigQueryWrite_GetWriteStream_sync",
"title": "bigquerystorage GetWriteStream Sample",
"description": "GetWriteStream gets a write stream.",
"file": "BigQueryWriteClient/GetWriteStream/main.go",
"language": "GO",
"clientMethod": {
"shortName": "GetWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient.GetWriteStream",
"parameters": [
{
"type": "context.Context",
"name": "ctx"
},
{
"type": "storagepb.GetWriteStreamRequest",
"name": "req"
},
{
"type": "...gax.CallOption",
"name": "opts"
}
],
"resultType": "*storagepb.WriteStream",
"client": {
"shortName": "BigQueryWriteClient",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient"
},
"method": {
"shortName": "GetWriteStream",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite.GetWriteStream",
"service": {
"shortName": "BigQueryWrite",
"fullName": "google.cloud.bigquery.storage.v1beta2.BigQueryWrite"
}
}
},
"origin": "API_DEFINITION",
"segments": [
{
"start": 18,
"end": 53,
"type": "FULL"
}
]
}
]
}