// Copyright (c) 2012 The gocql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocql

import (
	"errors"
	"fmt"
	"io"
	"sync"
	"time"
)

// Session is the interface used by users to interact with the database.
//
// It's safe for concurrent use by multiple goroutines and a typical usage
// scenario is to have one global session object to interact with the
// whole Cassandra cluster.
//
// This type extends the Node interface by adding a convenient query builder
// and automatically sets a default consistency level on all operations
// that do not have a consistency level set.
type Session struct {
	Node     Node
	cons     Consistency
	pageSize int
	prefetch float64
	trace    Tracer
	mu       sync.RWMutex
	cfg      ClusterConfig
}

// NewSession wraps an existing Node.
func NewSession(c *clusterImpl) *Session {
	return &Session{Node: c, cons: Quorum, prefetch: 0.25, cfg: c.cfg}
}

// SetConsistency sets the default consistency level for this session. This
// setting can also be changed on a per-query basis and the default value
// is Quorum.
func (s *Session) SetConsistency(cons Consistency) {
	s.mu.Lock()
	s.cons = cons
	s.mu.Unlock()
}

// SetPageSize sets the default page size for this session. A value <= 0 will
// disable paging. This setting can also be changed on a per-query basis.
func (s *Session) SetPageSize(n int) {
	s.mu.Lock()
	s.pageSize = n
	s.mu.Unlock()
}

// SetPrefetch sets the default threshold for pre-fetching new pages. If
// there are only p*pageSize rows remaining, the next page will be requested
// automatically. This value can also be changed on a per-query basis and
// the default value is 0.25.
func (s *Session) SetPrefetch(p float64) {
	s.mu.Lock()
	s.prefetch = p
	s.mu.Unlock()
}

// SetTrace sets the default tracer for this session. This setting can also
// be changed on a per-query basis.
func (s *Session) SetTrace(trace Tracer) {
	s.mu.Lock()
	s.trace = trace
	s.mu.Unlock()
}
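
// exampleConfigureSession is an illustrative sketch, not part of the original
// file: it shows how the session-wide defaults above might typically be set
// right after the session is created. The page size of 100 and the trace
// writer w are assumptions made only for this example.
func exampleConfigureSession(s *Session, w io.Writer) {
	// Lower the default consistency for read-heavy workloads.
	s.SetConsistency(One)
	// Fetch results in pages of 100 rows and request the next page once
	// only a quarter of the current page is left unread.
	s.SetPageSize(100)
	s.SetPrefetch(0.25)
	// Emit trace output for every query to w (NewTraceWriter is defined below).
	s.SetTrace(NewTraceWriter(s, w))
}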

// Query generates a new query object for interacting with the database.
// Further details of the query may be tweaked using the resulting query
// value before the query is executed.
func (s *Session) Query(stmt string, values ...interface{}) *Query {
	s.mu.RLock()
	qry := &Query{stmt: stmt, values: values, cons: s.cons,
		session: s, pageSize: s.pageSize, trace: s.trace,
		prefetch: s.prefetch, rt: s.cfg.RetryPolicy}
	s.mu.RUnlock()
	return qry
}
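
// exampleInsert is an illustrative sketch, not part of the original file: it
// shows a query being built with a per-query consistency override and then
// executed without returning rows. The "kv" table and its columns are
// assumptions made only for this example.
func exampleInsert(s *Session, key, value string) error {
	return s.Query(`INSERT INTO kv (key, value) VALUES (?, ?)`, key, value).
		Consistency(Quorum).
		Exec()
}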

// Close closes all connections. The session is unusable after this
// operation.
func (s *Session) Close() {
	s.Node.Close()
}

func (s *Session) executeQuery(qry *Query) *Iter {
	var itr *Iter
	count := 0
	for count <= qry.rt.NumRetries {
		conn := s.Node.Pick(nil)
		// Assign the unavailable error to the iterator
		if conn == nil {
			itr = &Iter{err: ErrUnavailable}
			break
		}
		itr = conn.executeQuery(qry)
		// Exit the loop if the query was successful
		if itr.err == nil {
			break
		}
		count++
	}
	return itr
}

// ExecuteBatch executes a batch operation and returns nil if successful;
// otherwise an error is returned describing the failure.
func (s *Session) ExecuteBatch(batch *Batch) error {
	// Prevent the execution of the batch if it exceeds the limit.
	// Currently batches have a limit of 65536 queries.
	// https://datastax-oss.atlassian.net/browse/JAVA-229
	if len(batch.Entries) > 65536 {
		return ErrTooManyStmts
	}
	var err error
	count := 0
	for count <= batch.rt.NumRetries {
		conn := s.Node.Pick(nil)
		// Assign the unavailable error and break the loop
		if conn == nil {
			err = ErrUnavailable
			break
		}
		err = conn.executeBatch(batch)
		// Exit the loop if the operation executed correctly
		if err == nil {
			break
		}
		count++
	}
	return err
}

// Query represents a CQL statement that can be executed.
type Query struct {
	stmt      string
	values    []interface{}
	cons      Consistency
	pageSize  int
	pageState []byte
	prefetch  float64
	trace     Tracer
	session   *Session
	rt        RetryPolicy
}

// Consistency sets the consistency level for this query. If no consistency
// level has been set, the default consistency level of the cluster
// is used.
func (q *Query) Consistency(c Consistency) *Query {
	q.cons = c
	return q
}

// Trace enables tracing of this query. Look at the documentation of the
// Tracer interface to learn more about tracing.
func (q *Query) Trace(trace Tracer) *Query {
	q.trace = trace
	return q
}

// PageSize will tell the iterator to fetch the result in pages of size n.
// This is useful for iterating over large result sets, but setting the
// page size too low might decrease performance. This feature is only
// available in Cassandra 2 and onwards.
func (q *Query) PageSize(n int) *Query {
	q.pageSize = n
	return q
}

// Prefetch sets the threshold for pre-fetching new pages. If there are
// only p*pageSize rows remaining, the next page will be requested
// automatically.
func (q *Query) Prefetch(p float64) *Query {
	q.prefetch = p
	return q
}

// RetryPolicy sets the policy to use when retrying the query.
func (q *Query) RetryPolicy(r RetryPolicy) *Query {
	q.rt = r
	return q
}

// Exec executes the query without returning any rows.
func (q *Query) Exec() error {
	iter := q.session.executeQuery(q)
	return iter.err
}

// Iter executes the query and returns an iterator capable of iterating
// over all results.
func (q *Query) Iter() *Iter {
	return q.session.executeQuery(q)
}

// Scan executes the query, copies the columns of the first selected
// row into the values pointed at by dest and discards the rest. If no rows
// were selected, ErrNotFound is returned.
func (q *Query) Scan(dest ...interface{}) error {
	iter := q.Iter()
	if iter.err != nil {
		return iter.err
	}
	if len(iter.rows) == 0 {
		return ErrNotFound
	}
	iter.Scan(dest...)
	return iter.Close()
}
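
// exampleLookupUser is an illustrative sketch, not part of the original file:
// it fetches a single row with Query.Scan. The "users" table and its columns
// are assumptions made only for this example.
func exampleLookupUser(s *Session, id int) (string, error) {
	var name string
	err := s.Query(`SELECT name FROM users WHERE id = ?`, id).
		Consistency(One).
		Scan(&name)
	if err == ErrNotFound {
		// No matching row; callers can treat this case separately.
		return "", err
	}
	return name, err
}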

// ScanCAS executes a lightweight transaction (i.e. an UPDATE or INSERT
// statement containing an IF clause). If the transaction fails because
// the existing values did not match, the previous values will be stored
// in dest.
func (q *Query) ScanCAS(dest ...interface{}) (applied bool, err error) {
	iter := q.Iter()
	if iter.err != nil {
		return false, iter.err
	}
	if len(iter.rows) == 0 {
		return false, ErrNotFound
	}
	if len(iter.Columns()) > 1 {
		dest = append([]interface{}{&applied}, dest...)
		iter.Scan(dest...)
	} else {
		iter.Scan(&applied)
	}
	return applied, iter.Close()
}
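
// exampleRegisterUser is an illustrative sketch, not part of the original
// file: a compare-and-set insert using ScanCAS. The "users" table is an
// assumption, as is the shape of the conflict result (the applied flag
// followed by the existing row's columns).
func exampleRegisterUser(s *Session, id int, name string) (bool, string, error) {
	var (
		prevID   int
		prevName string
	)
	applied, err := s.Query(`INSERT INTO users (id, name) VALUES (?, ?) IF NOT EXISTS`,
		id, name).ScanCAS(&prevID, &prevName)
	// When applied is false, prevName holds the name already stored for id.
	return applied, prevName, err
}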

// Iter represents an iterator that can be used to iterate over all rows that
// were returned by a query. The iterator might send additional queries to the
// database during the iteration if paging was enabled.
type Iter struct {
	err     error
	pos     int
	rows    [][][]byte
	columns []ColumnInfo
	next    *nextIter
}

// Columns returns the name and type of the selected columns.
func (iter *Iter) Columns() []ColumnInfo {
	return iter.columns
}

// Scan consumes the next row of the iterator and copies the columns of the
// current row into the values pointed at by dest. Use nil as a dest value
// to skip the corresponding column. Scan might send additional queries
// to the database to retrieve the next set of rows if paging was enabled.
//
// Scan returns true if the row was successfully unmarshaled or false if the
// end of the result set was reached or if an error occurred. Close should
// be called afterwards to retrieve any potential errors.
func (iter *Iter) Scan(dest ...interface{}) bool {
	if iter.err != nil {
		return false
	}
	if iter.pos >= len(iter.rows) {
		if iter.next != nil {
			*iter = *iter.next.fetch()
			return iter.Scan(dest...)
		}
		return false
	}
	if iter.next != nil && iter.pos == iter.next.pos {
		go iter.next.fetch()
	}
	if len(dest) != len(iter.columns) {
		iter.err = errors.New("count mismatch")
		return false
	}
	for i := 0; i < len(iter.columns); i++ {
		if dest[i] == nil {
			continue
		}
		err := Unmarshal(iter.columns[i].TypeInfo, iter.rows[iter.pos][i], dest[i])
		if err != nil {
			iter.err = err
			return false
		}
	}
	iter.pos++
	return true
}

// Close closes the iterator and returns any errors that happened during
// the query or the iteration.
func (iter *Iter) Close() error {
	return iter.err
}
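
// exampleCollectTitles is an illustrative sketch, not part of the original
// file: it iterates over a paged result set with Iter, Scan and Close. The
// "events" table and its title column are assumptions for this example.
func exampleCollectTitles(s *Session) ([]string, error) {
	var (
		titles []string
		title  string
	)
	iter := s.Query(`SELECT title FROM events`).PageSize(100).Iter()
	for iter.Scan(&title) {
		titles = append(titles, title)
	}
	// Close reports any error that occurred during the query or iteration.
	if err := iter.Close(); err != nil {
		return nil, err
	}
	return titles, nil
}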

// nextIter fetches the next page of a paged query exactly once and caches
// the resulting iterator.
type nextIter struct {
	qry  Query
	pos  int
	once sync.Once
	next *Iter
}

func (n *nextIter) fetch() *Iter {
	n.once.Do(func() {
		n.next = n.qry.session.executeQuery(&n.qry)
	})
	return n.next
}

type Batch struct {
	Type    BatchType
	Entries []BatchEntry
	Cons    Consistency
	rt      RetryPolicy
}

// NewBatch creates a new batch operation without defaults from the cluster.
func NewBatch(typ BatchType) *Batch {
	return &Batch{Type: typ}
}

// NewBatch creates a new batch operation using defaults defined in the cluster.
func (s *Session) NewBatch(typ BatchType) *Batch {
	return &Batch{Type: typ, rt: s.cfg.RetryPolicy}
}

// Query adds the query to the batch operation.
func (b *Batch) Query(stmt string, args ...interface{}) {
	b.Entries = append(b.Entries, BatchEntry{Stmt: stmt, Args: args})
}

// RetryPolicy sets the retry policy to use when executing the batch operation.
func (b *Batch) RetryPolicy(r RetryPolicy) *Batch {
	b.rt = r
	return b
}
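
// exampleRenameUser is an illustrative sketch, not part of the original file:
// it groups related writes into a logged batch and executes it through the
// session. The "users" and "users_by_name" tables are assumptions made only
// for this example.
func exampleRenameUser(s *Session, id int, oldName, newName string) error {
	batch := s.NewBatch(LoggedBatch)
	batch.Query(`UPDATE users SET name = ? WHERE id = ?`, newName, id)
	batch.Query(`DELETE FROM users_by_name WHERE name = ?`, oldName)
	batch.Query(`INSERT INTO users_by_name (name, id) VALUES (?, ?)`, newName, id)
	return s.ExecuteBatch(batch)
}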

type BatchType int

const (
	LoggedBatch   BatchType = 0
	UnloggedBatch BatchType = 1
	CounterBatch  BatchType = 2
)

type BatchEntry struct {
	Stmt string
	Args []interface{}
}

type Consistency int

const (
	Any Consistency = 1 + iota
	One
	Two
	Three
	Quorum
	All
	LocalQuorum
	EachQuorum
	Serial
	LocalSerial
)

var consistencyNames = []string{
	0:           "default",
	Any:         "any",
	One:         "one",
	Two:         "two",
	Three:       "three",
	Quorum:      "quorum",
	All:         "all",
	LocalQuorum: "localquorum",
	EachQuorum:  "eachquorum",
	Serial:      "serial",
	LocalSerial: "localserial",
}

func (c Consistency) String() string {
	return consistencyNames[c]
}

type ColumnInfo struct {
	Keyspace string
	Table    string
	Name     string
	TypeInfo *TypeInfo
}

// Tracer is the interface implemented by query tracers. Tracers have the
// ability to obtain a detailed event log of all events that happened during
// the execution of a query from Cassandra. Gathering this information might
// be essential for debugging and optimizing queries, but this feature should
// not be used on production systems with very high load.
type Tracer interface {
	Trace(traceId []byte)
}

type traceWriter struct {
	session *Session
	w       io.Writer
	mu      sync.Mutex
}

// NewTraceWriter returns a simple Tracer implementation that outputs
// the event log in a textual format.
func NewTraceWriter(session *Session, w io.Writer) Tracer {
	return traceWriter{session: session, w: w}
}
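
// exampleTraceQuery is an illustrative sketch, not part of the original file:
// it attaches a trace writer to a single query so that the query's event log
// is printed to w. The "kv" table is an assumption for this example.
func exampleTraceQuery(s *Session, w io.Writer) error {
	return s.Query(`SELECT key FROM kv LIMIT 1`).
		Trace(NewTraceWriter(s, w)).
		Exec()
}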

func (t traceWriter) Trace(traceId []byte) {
	var (
		coordinator string
		duration    int
	)
	t.session.Query(`SELECT coordinator, duration
			FROM system_traces.sessions
			WHERE session_id = ?`, traceId).
		Consistency(One).Scan(&coordinator, &duration)

	iter := t.session.Query(`SELECT event_id, activity, source, source_elapsed
			FROM system_traces.events
			WHERE session_id = ?`, traceId).
		Consistency(One).Iter()
	var (
		timestamp time.Time
		activity  string
		source    string
		elapsed   int
	)
	t.mu.Lock()
	defer t.mu.Unlock()
	fmt.Fprintf(t.w, "Tracing session %016x (coordinator: %s, duration: %v):\n",
		traceId, coordinator, time.Duration(duration)*time.Microsecond)
	for iter.Scan(&timestamp, &activity, &source, &elapsed) {
		fmt.Fprintf(t.w, "%s: %s (source: %s, elapsed: %d)\n",
			timestamp.Format("2006/01/02 15:04:05.999999"), activity, source, elapsed)
	}
	if err := iter.Close(); err != nil {
		fmt.Fprintln(t.w, "Error:", err)
	}
}

type Error struct {
	Code    int
	Message string
}

func (e Error) Error() string {
	return e.Message
}

var (
	ErrNotFound     = errors.New("not found")
	ErrUnavailable  = errors.New("unavailable")
	ErrProtocol     = errors.New("protocol error")
	ErrUnsupported  = errors.New("feature not supported")
	ErrTooManyStmts = errors.New("too many statements")
)