@@ -8,91 +8,65 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
-	"github.com/prometheus/client_golang/prometheus"
 	"github.com/twmb/franz-go/pkg/kadm"
 	"github.com/twmb/franz-go/pkg/kerr"
 	"github.com/twmb/franz-go/pkg/kgo"
 	"github.com/twmb/franz-go/pkg/kmsg"
-
-	"github.com/grafana/loki/v3/pkg/kafka"
-	"github.com/grafana/loki/v3/pkg/kafka/client"
 )
 
+// An OffsetManager manages commit offsets for a Consumer Group.
 type OffsetManager interface {
-	Topic() string
-	ConsumerGroup() string
-
+	// LastCommittedOffset returns the last committed offset for the partition.
 	LastCommittedOffset(ctx context.Context, partition int32) (int64, error)
+
+	// PartitionOffset returns the last produced offset for the partition.
 	PartitionOffset(ctx context.Context, partition int32, position SpecialOffset) (int64, error)
+
+	// NextOffset returns the first offset after t. If there are no offsets
+	// after t, it returns the latest offset instead.
 	NextOffset(ctx context.Context, partition int32, t time.Time) (int64, error)
+
+	// Commits the offset for the partition.
 	Commit(ctx context.Context, partition int32, offset int64) error
 }
 
+// Compile time check that KafkaOffsetManager implements OffsetManager.
 var _ OffsetManager = &KafkaOffsetManager{}
 
+// KafkaOffsetManager implements the [OffsetManager] interface.
 type KafkaOffsetManager struct {
-	client      *kgo.Client
-	adminClient *kadm.Client
-	cfg         kafka.Config
-	instanceID  string
-	logger      log.Logger
+	admin         *kadm.Client
+	client        *kgo.Client
+	topic         string
+	consumerGroup string
+	logger        log.Logger
 }
 
+// NewKafkaOffsetManager creates a new KafkaOffsetManager.
 func NewKafkaOffsetManager(
-	cfg kafka.Config,
-	instanceID string,
-	logger log.Logger,
-	reg prometheus.Registerer,
-) (*KafkaOffsetManager, error) {
-	// Create a new Kafka client for the partition manager.
-	c, err := client.NewReaderClient("partition-manager", cfg, log.With(logger, "component", "kafka-client"), reg)
-	if err != nil {
-		return nil, fmt.Errorf("creating kafka client: %w", err)
-	}
-
-	return newKafkaOffsetManager(
-		c,
-		cfg,
-		instanceID,
-		logger,
-	), nil
-}
-
-// newKafkaReader creates a new KafkaReader instance
-func newKafkaOffsetManager(
 	client *kgo.Client,
-	cfg kafka.Config,
-	instanceID string,
+	topic string,
+	consumerGroup string,
 	logger log.Logger,
 ) *KafkaOffsetManager {
 	return &KafkaOffsetManager{
-		client:      client,
-		adminClient: kadm.NewClient(client),
-		cfg:         cfg,
-		instanceID:  instanceID,
-		logger:      logger,
+		admin:         kadm.NewClient(client),
+		client:        client,
+		topic:         topic,
+		consumerGroup: consumerGroup,
+		logger:        log.With(logger, "topic", topic, "consumer_group", consumerGroup),
 	}
 }
 
-// Topic returns the topic being read
-func (r *KafkaOffsetManager) Topic() string {
-	return r.cfg.Topic
-}
-
-func (r *KafkaOffsetManager) ConsumerGroup() string {
-	return r.cfg.GetConsumerGroup(r.instanceID)
-}
-
-// NextOffset returns the first offset after the timestamp t. If the partition
-// does not have an offset after t, it returns the current end offset.
-func (r *KafkaOffsetManager) NextOffset(ctx context.Context, partition int32, t time.Time) (int64, error) {
-	resp, err := r.adminClient.ListOffsetsAfterMilli(ctx, t.UnixMilli(), r.cfg.Topic)
+// NextOffset implements the [OffsetManager] interface.
+func (m *KafkaOffsetManager) NextOffset(ctx context.Context, partition int32, t time.Time) (int64, error) {
+	resp, err := m.admin.ListOffsetsAfterMilli(ctx, t.UnixMilli(), m.topic)
 	if err != nil {
 		return 0, err
 	}
 	// If a topic does not exist, a special -1 partition for each non-existing
 	// topic is added to the response.
-	partitions := resp[r.cfg.Topic]
+	partitions := resp[m.topic]
 	if special, ok := partitions[-1]; ok {
 		return 0, special.Err
 	}
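With the kafka.Config, instance ID, and Prometheus registerer dropped from the constructor, the caller now owns the *kgo.Client and passes the topic and consumer group explicitly. A minimal, package-internal sketch of the new wiring follows; the function name, broker address, topic, and consumer group are illustrative placeholders, not values taken from this PR, and error handling is trimmed.

// Illustrative wiring only; broker address, topic, and group are made up.
func exampleWireOffsetManager(ctx context.Context, logger log.Logger) error {
	// The caller builds the Kafka client; the manager no longer constructs
	// one from kafka.Config and a Prometheus registerer.
	client, err := kgo.NewClient(kgo.SeedBrokers("localhost:9092"))
	if err != nil {
		return err
	}
	defer client.Close()

	mgr := NewKafkaOffsetManager(client, "loki.push", "ingester-rf1", logger)

	// Resume from the first offset produced after one hour ago.
	offset, err := mgr.NextOffset(ctx, 0, time.Now().Add(-time.Hour))
	if err != nil {
		return err
	}
	level.Info(logger).Log("msg", "resuming partition 0", "offset", offset)
	return nil
}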
@@ -108,16 +82,16 @@ func (r *KafkaOffsetManager) NextOffset(ctx context.Context, partition int32, t
 	return listed.Offset, nil
 }
 
-// LastCommittedOffset retrieves the last committed offset for this partition
-func (r *KafkaOffsetManager) LastCommittedOffset(ctx context.Context, partitionID int32) (int64, error) {
+// LastCommittedOffset implements the [OffsetManager] interface.
+func (m *KafkaOffsetManager) LastCommittedOffset(ctx context.Context, partitionID int32) (int64, error) {
 	req := kmsg.NewPtrOffsetFetchRequest()
 	req.Topics = []kmsg.OffsetFetchRequestTopic{{
-		Topic:      r.cfg.Topic,
+		Topic:      m.topic,
 		Partitions: []int32{partitionID},
 	}}
-	req.Group = r.ConsumerGroup()
+	req.Group = m.consumerGroup
 
-	resps := r.client.RequestSharded(ctx, req)
+	resps := m.client.RequestSharded(ctx, req)
 
 	// Since we issued a request for only 1 partition, we expect exactly 1 response.
 	if expected, actual := 1, len(resps); actual != expected {
@@ -139,7 +113,7 @@ func (r *KafkaOffsetManager) LastCommittedOffset(ctx context.Context, partitionI
 	if len(fetchRes.Groups) != 1 ||
 		len(fetchRes.Groups[0].Topics) != 1 ||
 		len(fetchRes.Groups[0].Topics[0].Partitions) != 1 {
-		level.Debug(r.logger).Log(
+		level.Debug(m.logger).Log(
 			"msg", "malformed response, setting to start offset",
 		)
 		return int64(KafkaStartOffset), nil
@@ -153,14 +127,14 @@ func (r *KafkaOffsetManager) LastCommittedOffset(ctx context.Context, partitionI
 	return partition.Offset, nil
 }
 
-// FetchPartitionOffset retrieves the offset for a specific position
-func (r *KafkaOffsetManager) PartitionOffset(ctx context.Context, partitionID int32, position SpecialOffset) (int64, error) {
+// PartitionOffset implements the [OffsetManager] interface.
+func (m *KafkaOffsetManager) PartitionOffset(ctx context.Context, partitionID int32, position SpecialOffset) (int64, error) {
 	partitionReq := kmsg.NewListOffsetsRequestTopicPartition()
 	partitionReq.Partition = partitionID
 	partitionReq.Timestamp = int64(position)
 
 	topicReq := kmsg.NewListOffsetsRequestTopic()
-	topicReq.Topic = r.cfg.Topic
+	topicReq.Topic = m.topic
 	topicReq.Partitions = []kmsg.ListOffsetsRequestTopicPartition{partitionReq}
 
 	req := kmsg.NewPtrListOffsetsRequest()
@@ -169,7 +143,7 @@ func (r *KafkaOffsetManager) PartitionOffset(ctx context.Context, partitionID in
 
 	// Even if we share the same client, other in-flight requests are not canceled once this context is canceled
 	// (or its deadline is exceeded). We've verified it with a unit test.
-	resps := r.client.RequestSharded(ctx, req)
+	resps := m.client.RequestSharded(ctx, req)
 
 	// Since we issued a request for only 1 partition, we expect exactly 1 response.
 	if len(resps) != 1 {
@@ -199,22 +173,18 @@ func (r *KafkaOffsetManager) PartitionOffset(ctx context.Context, partitionID in
 	return partition.Offset, nil
 }
 
-// Commit commits an offset to the consumer group
-func (r *KafkaOffsetManager) Commit(ctx context.Context, partitionID int32, offset int64) error {
-	admin := kadm.NewClient(r.client)
-
+// Commit implements the [OffsetManager] interface.
+func (m *KafkaOffsetManager) Commit(ctx context.Context, partitionID int32, offset int64) error {
 	// Commit the last consumed offset.
 	toCommit := kadm.Offsets{}
-	toCommit.AddOffset(r.cfg.Topic, partitionID, offset, -1)
-
-	committed, err := admin.CommitOffsets(ctx, r.ConsumerGroup(), toCommit)
+	toCommit.AddOffset(m.topic, partitionID, offset, -1)
+	committed, err := m.admin.CommitOffsets(ctx, m.consumerGroup, toCommit)
 	if err != nil {
 		return err
 	} else if !committed.Ok() {
 		return committed.Error()
 	}
-
-	committedOffset, _ := committed.Lookup(r.cfg.Topic, partitionID)
-	level.Debug(r.logger).Log("msg", "last commit offset successfully committed to Kafka", "offset", committedOffset.At)
+	committedOffset, _ := committed.Lookup(m.topic, partitionID)
+	level.Debug(m.logger).Log("msg", "last commit offset successfully committed to Kafka", "offset", committedOffset.At)
 	return nil
 }
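Commit and LastCommittedOffset form a pair: one writes the group's offset for a partition via the reused kadm client, the other reads it back. A small sketch of that cycle is below, assuming it lives in the same package; the partition number and offset are arbitrary example values.

// Illustrative commit/read-back cycle; partition and offset values are made up.
func exampleCommitAndReadBack(ctx context.Context, mgr *KafkaOffsetManager) error {
	// Commit the last offset consumed from partition 3.
	if err := mgr.Commit(ctx, 3, 42); err != nil {
		return err
	}
	// A later reader in the same consumer group resumes from the committed offset.
	committed, err := mgr.LastCommittedOffset(ctx, 3)
	if err != nil {
		return err
	}
	// Expect 42 here, assuming no other group member committed in between.
	_ = committed
	return nil
}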