/*
Package dbsql implements the Go driver for Databricks SQL.

# Usage

Clients should use the database/sql package in conjunction with the driver:

	import (
		"database/sql"
		"log"

		_ "github.com/databricks/databricks-sql-go"
	)

	func main() {
		db, err := sql.Open("databricks", "token:<token>@<hostname>:<port>/<endpoint_path>")
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()
	}

# Connection via DSN (Data Source Name)

Use sql.Open() to create a database handle via a data source name string:

	db, err := sql.Open("databricks", "<dsn_string>")

The DSN format is:

	token:[my_token]@[hostname]:[port]/[endpoint http path]?param=value

Supported optional connection parameters can be specified in param=value and include:

  - catalog: Sets the initial catalog name in the session
  - schema: Sets the initial schema name in the session
  - maxRows: Sets the maximum number of rows fetched per request. Default is 100000
  - timeout: Adds a timeout (in seconds) for server query execution. Default is no timeout
  - userAgentEntry: Used to identify partners. Set as a string with format <isv-name+product-name>
  - useCloudFetch: Used to enable cloud fetch for the query execution. Default is false
  - maxDownloadThreads: Sets the maximum number of concurrent workers for cloud fetch. Default is 10
  - authType: Specifies the desired authentication type. Valid values are: Pat, OauthM2M, OauthU2M
  - accessToken: Personal access token. Required if authType is set to Pat
  - clientID: Specifies the client ID to use with OauthM2M
  - clientSecret: Specifies the client secret to use with OauthM2M

Supported optional session parameters can be specified in param=value and include:

  - ansi_mode: (Boolean string). Session statements will adhere to the rules defined by the ANSI SQL specification
  - timezone: (e.g. "America/Los_Angeles"). Sets the timezone of the session
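
For example, a DSN that sets an initial namespace and a one-minute timeout might look like the following (all values are placeholders):

	token:<my_token>@<hostname>:443/<endpoint http path>?catalog=main&schema=default&timeout=60&maxRows=10000
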
# Connection via new connector object

Use sql.OpenDB() to create a database handle via a new connector object created with dbsql.NewConnector():

	import (
		"database/sql"
		"log"

		dbsql "github.com/databricks/databricks-sql-go"
	)

	func main() {
		connector, err := dbsql.NewConnector(
			dbsql.WithServerHostname(<hostname>),
			dbsql.WithPort(<port>),
			dbsql.WithHTTPPath(<http_path>),
			dbsql.WithAccessToken(<my_token>),
		)
		if err != nil {
			log.Fatal(err)
		}

		db := sql.OpenDB(connector)
		defer db.Close()
		...
	}

Supported functional options include:

  - WithServerHostname(<hostname> string): Sets the server hostname. The hostname can be prefixed with "http:" or "https:" to specify a protocol to use. Mandatory
  - WithPort(<port> int): Sets the server port. Mandatory
  - WithAccessToken(<my_token> string): Sets the personal access token. Mandatory
  - WithHTTPPath(<http_path> string): Sets the endpoint to the warehouse. Mandatory
  - WithInitialNamespace(<catalog> string, <schema> string): Sets the catalog and schema names in the session. Optional
  - WithMaxRows(<max_rows> int): Sets the maximum number of rows fetched per request. Default is 100000. Optional
  - WithSessionParams(<params_map> map[string]string): Sets session parameters including "timezone" and "ansi_mode". Optional
  - WithTimeout(<timeout> time.Duration): Adds a timeout for server query execution. Default is no timeout. Optional
  - WithUserAgentEntry(<isv-name+product-name> string): Used to identify partners. Optional
  - WithCloudFetch(<enabled> bool): Used to enable cloud fetch for the query execution. Default is false. Optional
  - WithMaxDownloadThreads(<num_threads> int): Sets the maximum number of concurrent workers for cloud fetch. Default is 10. Optional
  - WithAuthenticator(<authenticator> auth.Authenticator): Sets up authentication. Required if neither an access token nor client credentials are provided
  - WithClientCredentials(<clientID> string, <clientSecret> string): Sets up OAuth M2M authentication; see the sketch after this list
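
As a minimal sketch, a connector using OAuth M2M client credentials instead of a personal access token could be built like this (all values are placeholders):

	connector, err := dbsql.NewConnector(
		dbsql.WithServerHostname(<hostname>),
		dbsql.WithPort(<port>),
		dbsql.WithHTTPPath(<http_path>),
		dbsql.WithClientCredentials(<client_id>, <client_secret>),
	)
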
# Query cancellation and timeout

Cancelling a query via context cancellation or timeout is supported.

	// Set up context timeout
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Execute query. Query will be cancelled after 30 seconds if still running
	res, err := db.ExecContext(ctx, "CREATE TABLE example(id int, message string)")
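
When the deadline fires, the returned error should include the context error in its chain. A minimal sketch for telling a timeout apart from other failures (assuming the driver preserves the wrapped context error):

	if err != nil {
		if errors.Is(err, context.DeadlineExceeded) {
			log.Println("query cancelled by timeout")
		} else {
			log.Fatal(err)
		}
	}
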
# CorrelationId and ConnId

Use the driverctx package under driverctx/ctx.go to add CorrelationId and ConnId to the context.
CorrelationId and ConnId make it convenient to parse logs and create metrics.

**Connection Id**
Internal id to track what happens under a connection. Connections can be reused, so this id tracks across queries.

**Query Id**
Internal id to track what happens under a query. Useful because the same query can be used with multiple connections.

**Correlation Id**
External id, such as a request ID, to track what happens under a request. Useful to track multiple connections in the same request.

	ctx := dbsqlctx.NewContextWithCorrelationId(context.Background(), "workflow-example")
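
Queries executed with this context will then carry the correlation id through the driver's logs, for example:

	rows, err := db.QueryContext(ctx, `select * from sometable`)
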
# Logging

Use the logger package under logger.go to set up logging (from zerolog).
By default, the logging level is `warn`. If you want to disable logging, use `disabled`.
You can also use Track() and Duration() to log the elapsed time of anything being tracked.

	import (
		"context"
		"database/sql"
		"log"
		"os"

		dbsqlctx "github.com/databricks/databricks-sql-go/driverctx"
		dbsqllog "github.com/databricks/databricks-sql-go/logger"
	)

	func main() {
		// Optional. Set the logging level with SetLogLevel()
		if err := dbsqllog.SetLogLevel("debug"); err != nil {
			log.Fatal(err)
		}

		// Optional. Set logging output with SetLogOutput()
		// Default is os.Stderr. If running in a terminal, the logger will use ConsoleWriter to prettify logs
		dbsqllog.SetLogOutput(os.Stdout)

		// Optional. Set a correlation id with NewContextWithCorrelationId
		ctx := dbsqlctx.NewContextWithCorrelationId(context.Background(), "workflow-example")

		// Optional. Track time spent and log elapsed time
		msg, start := dbsqllog.Track("Run Main")
		defer dbsqllog.Duration(msg, start)

		db, err := sql.Open("databricks", "<dsn_string>")
		...
	}

The resulting log may look like this:

	{"level":"debug","connId":"01ed6545-5669-1ec7-8c7e-6d8a1ea0ab16","corrId":"workflow-example","queryId":"01ed6545-57cc-188a-bfc5-d9c0eaf8e189","time":1668558402,"message":"Run Main elapsed time: 1.298712292s"}

# Programmatically Retrieving Connection and Query Id

Use the driverctx package under driverctx/ctx.go to add callbacks to the query context that receive the connection id and query id.

	import (
		"context"
		"fmt"

		"github.com/databricks/databricks-sql-go/driverctx"
	)

	func main() {
		...
		qidCallback := func(id string) {
			fmt.Println("query id: " + id)
		}

		connIdCallback := func(id string) {
			fmt.Println("connection id: " + id)
		}

		ctx := context.Background()
		ctx = driverctx.NewContextWithQueryIdCallback(ctx, qidCallback)
		ctx = driverctx.NewContextWithConnIdCallback(ctx, connIdCallback)

		rows, err1 := db.QueryContext(ctx, `select * from sometable`)
		...
	}

# Query parameters

Passing parameters to a query is supported when run against servers with version DBR 14.1 and above.

	// Named parameters:
	p := dbsql.Parameter{Name: "p_bool", Value: true}
	rows, err := db.QueryContext(ctx, `select * from sometable where condition=:p_bool`, p)

	// Positional parameters - both dbsql.Parameter values and plain values can be used:
	rows, err := db.Query(`select *, ? from sometable where field=?`, dbsql.Parameter{Value: "123.456"}, "another parameter")

For complex types, you can specify the SQL type using the Type field of dbsql.Parameter. If this field is set, the Value field MUST be set to a string.
Please note that named and positional parameters cannot be used together in a single query.
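
As an illustrative sketch of a typed parameter (the constant name dbsql.SqlDecimal is an assumption here; consult the driver's SqlType constants for the exact identifier):

	// Sketch only: dbsql.SqlDecimal is an assumed SqlType constant name.
	// When Type is set, Value must be a string.
	p := dbsql.Parameter{Name: "p_amount", Type: dbsql.SqlDecimal, Value: "1234.56"}
	rows, err := db.QueryContext(ctx, `select * from sometable where amount = :p_amount`, p)
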
# Staging Ingestion

The Go driver supports staging operations. To use a staging operation, you must first update the context with a list of folders that the driver is allowed to access.

	ctx := driverctx.NewContextWithStagingInfo(context.Background(), []string{"staging/"})

After doing so, you can execute staging operations by passing this context to ExecContext():

	_, err1 := db.ExecContext(ctx, `PUT 'staging/file.csv' INTO '/Volumes/main/staging_test/e2etests/file.csv' OVERWRITE`)
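
Retrieving or deleting a staged file follows the same pattern; a minimal sketch, assuming GET and REMOVE staging operations against the same volume path:

	// Download a staged file into an allowed local folder
	_, err2 := db.ExecContext(ctx, `GET '/Volumes/main/staging_test/e2etests/file.csv' TO 'staging/file_copy.csv'`)

	// Remove the staged file
	_, err3 := db.ExecContext(ctx, `REMOVE '/Volumes/main/staging_test/e2etests/file.csv'`)
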
# Errors

There are three error types exposed via dbsql/errors:

	DBDriverError - An error in the Go driver. Example: unimplemented functionality, invalid driver state, errors processing a server response, etc.

	DBRequestError - An error that is caused by an invalid request. Example: permission denied, invalid http path or other connection parameter, resource not available, etc.

	DBExecutionError - Any error that occurs after the SQL statement has been accepted, such as a SQL syntax error, a missing table, etc.

Each type has a corresponding sentinel value which can be used with errors.Is() to determine if one of the types is present in an error chain:

	DriverError
	RequestError
	ExecutionError

Example usage:

	import (
		"errors"
		"fmt"

		dbsqlerr "github.com/databricks/databricks-sql-go/errors"
	)

	func main() {
		...
		_, err := db.ExecContext(ctx, `Select id from range(100)`)
		if err != nil {
			if errors.Is(err, dbsqlerr.ExecutionError) {
				var execErr dbsqlerr.DBExecutionError
				if ok := errors.As(err, &execErr); ok {
					fmt.Printf("%s, corrId: %s, connId: %s, queryId: %s, sqlState: %s, isRetryable: %t, retryAfter: %f seconds",
						execErr.Error(),
						execErr.CorrelationId(),
						execErr.ConnectionId(),
						execErr.QueryId(),
						execErr.SqlState(),
						execErr.IsRetryable(),
						execErr.RetryAfter().Seconds(),
					)
				}
			}
			...
		}
		...
	}

See the documentation for dbsql/errors for more information.

# Retrieving Arrow Batches

The driver supports retrieving Apache Arrow record batches.
To work with record batches, use sql.Conn.Raw() to access the underlying driver connection and retrieve a driver.Rows instance.
The driver exposes two public interfaces for working with record batches in the rows sub-package:

	type Rows interface {
		GetArrowBatches(context.Context) (ArrowBatchIterator, error)
	}

	type ArrowBatchIterator interface {
		// Retrieve the next arrow.Record.
		// Will return io.EOF if there are no more records.
		Next() (arrow.Record, error)

		// Return true if the iterator contains more batches, false otherwise.
		HasNext() bool

		// Release any resources in use by the iterator.
		Close()
	}

The driver.Rows instance retrieved using Conn.Raw() can be converted to a Databricks Rows instance via a type assertion; then use GetArrowBatches() to retrieve a batch iterator.
If the ArrowBatchIterator is not closed, it will leak resources such as the underlying connection.
Calling code must call Release() on records returned by ArrowBatchIterator.Next().

Example usage:

	import (
		"context"
		"database/sql"
		"database/sql/driver"
		"log"

		dbsqlrows "github.com/databricks/databricks-sql-go/rows"
	)

	func main() {
		...
		db := sql.OpenDB(connector)
		defer db.Close()

		ctx := context.Background()

		conn, _ := db.Conn(ctx)
		defer conn.Close()

		query := `select * from main.default.taxi_trip_data`

		var rows driver.Rows
		var err error
		err = conn.Raw(func(d interface{}) error {
			rows, err = d.(driver.QueryerContext).QueryContext(ctx, query, nil)
			return err
		})
		if err != nil {
			log.Fatalf("unable to run the query. err: %v", err)
		}
		defer rows.Close()

		batches, err := rows.(dbsqlrows.Rows).GetArrowBatches(ctx)
		if err != nil {
			log.Fatalf("unable to get arrow batches. err: %v", err)
		}
		// Close the iterator so it does not leak the underlying connection
		defer batches.Close()

		var iBatch, nRows int
		for batches.HasNext() {
			b, err := batches.Next()
			if err != nil {
				log.Fatalf("Failure retrieving batch. err: %v", err)
			}

			log.Printf("batch %v: nRecords=%v\n", iBatch, b.NumRows())
			iBatch += 1
			nRows += int(b.NumRows())

			// Release the record once it has been processed
			b.Release()
		}
		log.Printf("NRows: %v\n", nRows)
	}

# Supported Data Types

	=======================================
	Databricks Type          --> Golang Type
	=======================================
	BOOLEAN                  --> bool
	TINYINT                  --> int8
	SMALLINT                 --> int16
	INT                      --> int32
	BIGINT                   --> int64
	FLOAT                    --> float32
	DOUBLE                   --> float64
	VOID                     --> nil
	STRING                   --> string
	DATE                     --> time.Time
	TIMESTAMP                --> time.Time
	DECIMAL(p,s)             --> sql.RawBytes
	BINARY                   --> sql.RawBytes
	ARRAY<elementType>       --> sql.RawBytes
	STRUCT                   --> sql.RawBytes
	MAP<keyType, valueType>  --> sql.RawBytes
	INTERVAL (year-month)    --> string
	INTERVAL (day-time)      --> string
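
As a minimal sketch of scanning primitive and temporal columns (the query is illustrative; a plain []byte is used for the DECIMAL column because sql.RawBytes is not allowed with Row.Scan):

	var (
		flag bool
		n    int64
		ts   time.Time
		dec  []byte // DECIMAL arrives as raw bytes
	)
	row := db.QueryRow(`select true, bigint(42), current_timestamp(), cast(1.5 as decimal(3,1))`)
	if err := row.Scan(&flag, &n, &ts, &dec); err != nil {
		log.Fatal(err)
	}
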
For ARRAY, STRUCT, and MAP types, rows.Scan can read the value into sql.RawBytes as a JSON string, which can then be unmarshalled into Go arrays, maps, and structs. For example:

	type structVal struct {
		StringField string `json:"string_field"`
		ArrayField  []int  `json:"array_field"`
	}

	type row struct {
		arrayVal  []int
		mapVal    map[string]int
		structVal structVal
	}

	res := []row{}

	for rows.Next() {
		r := row{}
		tempArray := []byte{}
		tempStruct := []byte{}
		tempMap := []byte{}

		if err := rows.Scan(&tempArray, &tempMap, &tempStruct); err != nil {
			log.Fatal(err)
		}
		if err := json.Unmarshal(tempArray, &r.arrayVal); err != nil {
			log.Fatal(err)
		}
		if err := json.Unmarshal(tempMap, &r.mapVal); err != nil {
			log.Fatal(err)
		}
		if err := json.Unmarshal(tempStruct, &r.structVal); err != nil {
			log.Fatal(err)
		}
		res = append(res, r)
	}

May generate the following row:

	{arrayVal:[1,2,3] mapVal:{"key1":1} structVal:{"string_field":"string_val","array_field":[4,5,6]}}
*/
package dbsql