Skip to content

Commit 2c75b16

Browse files
authored Jul 24, 2024
Merge branch 'master' into feat-mq-kpush
2 parents dbbb9a3 + 59d5072 commit 2c75b16

25 files changed

+1081
-1462
lines changed
 

‎dq/consumer.go

+5-4
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@ import (
1212

1313
const (
1414
expiration = 3600 // seconds
15-
guardValue = "1"
1615
tolerance = time.Minute * 30
1716
)
1817

@@ -45,17 +44,19 @@ func NewConsumer(c DqConf) Consumer {
4544
func (c *consumerCluster) Consume(consume Consume) {
4645
guardedConsume := func(body []byte) {
4746
key := hash.Md5Hex(body)
48-
body, ok := c.unwrap(body)
47+
taskBody, ok := c.unwrap(body)
4948
if !ok {
5049
logx.Errorf("discarded: %q", string(body))
5150
return
5251
}
5352

54-
ok, err := c.red.SetnxEx(key, guardValue, expiration)
53+
redisLock := redis.NewRedisLock(c.red, key)
54+
redisLock.SetExpire(expiration)
55+
ok, err := redisLock.Acquire()
5556
if err != nil {
5657
logx.Error(err)
5758
} else if ok {
58-
consume(body)
59+
consume(taskBody)
5960
}
6061
}
6162

‎dq/consumernode.go

+19-7
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
package dq
22

33
import (
4+
"errors"
45
"time"
56

67
"github.com/beanstalkd/go-beanstalk"
@@ -59,14 +60,25 @@ func (c *consumerNode) consumeEvents(consume Consume) {
5960
}
6061

6162
// the error can only be beanstalk.NameError or beanstalk.ConnError
62-
switch cerr := err.(type) {
63-
case beanstalk.ConnError:
64-
switch cerr.Err {
65-
case beanstalk.ErrTimeout:
63+
var cerr beanstalk.ConnError
64+
switch {
65+
case errors.As(err, &cerr):
66+
switch {
67+
case errors.Is(cerr.Err, beanstalk.ErrTimeout):
6668
// timeout error on timeout, just continue the loop
67-
case beanstalk.ErrBadChar, beanstalk.ErrBadFormat, beanstalk.ErrBuried, beanstalk.ErrDeadline,
68-
beanstalk.ErrDraining, beanstalk.ErrEmpty, beanstalk.ErrInternal, beanstalk.ErrJobTooBig,
69-
beanstalk.ErrNoCRLF, beanstalk.ErrNotFound, beanstalk.ErrNotIgnored, beanstalk.ErrTooLong:
69+
case
70+
errors.Is(cerr.Err, beanstalk.ErrBadChar),
71+
errors.Is(cerr.Err, beanstalk.ErrBadFormat),
72+
errors.Is(cerr.Err, beanstalk.ErrBuried),
73+
errors.Is(cerr.Err, beanstalk.ErrDeadline),
74+
errors.Is(cerr.Err, beanstalk.ErrDraining),
75+
errors.Is(cerr.Err, beanstalk.ErrEmpty),
76+
errors.Is(cerr.Err, beanstalk.ErrInternal),
77+
errors.Is(cerr.Err, beanstalk.ErrJobTooBig),
78+
errors.Is(cerr.Err, beanstalk.ErrNoCRLF),
79+
errors.Is(cerr.Err, beanstalk.ErrNotFound),
80+
errors.Is(cerr.Err, beanstalk.ErrNotIgnored),
81+
errors.Is(cerr.Err, beanstalk.ErrTooLong):
7082
// won't reset
7183
logx.Error(err)
7284
default:

‎dq/producer.go

+24-20
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,8 @@
11
package dq
22

33
import (
4-
"bytes"
54
"log"
65
"math/rand"
7-
"strconv"
86
"strings"
97
"time"
108

@@ -25,15 +23,21 @@ type (
2523
Close() error
2624
Delay(body []byte, delay time.Duration) (string, error)
2725
Revoke(ids string) error
26+
27+
at(body []byte, at time.Time) (string, error)
28+
delay(body []byte, delay time.Duration) (string, error)
2829
}
2930

3031
producerCluster struct {
3132
nodes []Producer
3233
}
3334
)
3435

36+
var rng *rand.Rand
37+
3538
func init() {
36-
rand.Seed(time.Now().UnixNano())
39+
source := rand.NewSource(time.Now().UnixNano())
40+
rng = rand.New(source)
3741
}
3842

3943
func NewProducer(beanstalks []Beanstalk) Producer {
@@ -56,10 +60,8 @@ func NewProducer(beanstalks []Beanstalk) Producer {
5660
}
5761

5862
func (p *producerCluster) At(body []byte, at time.Time) (string, error) {
59-
wrapped := p.wrap(body, at)
60-
return p.insert(func(node Producer) (string, error) {
61-
return node.At(wrapped, at)
62-
})
63+
wrapped := wrap(body, at)
64+
return p.at(wrapped, at)
6365
}
6466

6567
func (p *producerCluster) Close() error {
@@ -73,10 +75,8 @@ func (p *producerCluster) Close() error {
7375
}
7476

7577
func (p *producerCluster) Delay(body []byte, delay time.Duration) (string, error) {
76-
wrapped := p.wrap(body, time.Now().Add(delay))
77-
return p.insert(func(node Producer) (string, error) {
78-
return node.Delay(wrapped, delay)
79-
})
78+
wrapped := wrap(body, time.Now().Add(delay))
79+
return p.delay(wrapped, delay)
8080
}
8181

8282
func (p *producerCluster) Revoke(ids string) error {
@@ -98,17 +98,29 @@ func (p *producerCluster) Revoke(ids string) error {
9898
return be.Err()
9999
}
100100

101+
func (p *producerCluster) at(body []byte, at time.Time) (string, error) {
102+
return p.insert(func(node Producer) (string, error) {
103+
return node.at(body, at)
104+
})
105+
}
106+
101107
func (p *producerCluster) cloneNodes() []Producer {
102108
return append([]Producer(nil), p.nodes...)
103109
}
104110

111+
func (p *producerCluster) delay(body []byte, delay time.Duration) (string, error) {
112+
return p.insert(func(node Producer) (string, error) {
113+
return node.delay(body, delay)
114+
})
115+
}
116+
105117
func (p *producerCluster) getWriteNodes() []Producer {
106118
if len(p.nodes) <= replicaNodes {
107119
return p.nodes
108120
}
109121

110122
nodes := p.cloneNodes()
111-
rand.Shuffle(len(nodes), func(i, j int) {
123+
rng.Shuffle(len(nodes), func(i, j int) {
112124
nodes[i], nodes[j] = nodes[j], nodes[i]
113125
})
114126
return nodes[:replicaNodes]
@@ -156,11 +168,3 @@ func (p *producerCluster) insert(fn func(node Producer) (string, error)) (string
156168

157169
return "", be.Err()
158170
}
159-
160-
func (p *producerCluster) wrap(body []byte, at time.Time) []byte {
161-
var builder bytes.Buffer
162-
builder.WriteString(strconv.FormatInt(at.UnixNano(), 10))
163-
builder.WriteByte(timeSep)
164-
builder.Write(body)
165-
return builder.Bytes()
166-
}

‎dq/producernode.go

+55-33
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,7 @@ import (
88
"time"
99

1010
"github.com/beanstalkd/go-beanstalk"
11+
"github.com/zeromicro/go-zero/core/logx"
1112
)
1213

1314
var ErrTimeBeforeNow = errors.New("can't schedule task to past time")
@@ -27,46 +28,15 @@ func NewProducerNode(endpoint, tube string) Producer {
2728
}
2829

2930
func (p *producerNode) At(body []byte, at time.Time) (string, error) {
30-
now := time.Now()
31-
if at.Before(now) {
32-
return "", ErrTimeBeforeNow
33-
}
34-
35-
duration := at.Sub(now)
36-
return p.Delay(body, duration)
31+
return p.at(wrap(body, at), at)
3732
}
3833

3934
func (p *producerNode) Close() error {
4035
return p.conn.Close()
4136
}
4237

4338
func (p *producerNode) Delay(body []byte, delay time.Duration) (string, error) {
44-
conn, err := p.conn.get()
45-
if err != nil {
46-
return "", err
47-
}
48-
49-
id, err := conn.Put(body, PriNormal, delay, defaultTimeToRun)
50-
if err == nil {
51-
return fmt.Sprintf("%s/%s/%d", p.endpoint, p.tube, id), nil
52-
}
53-
54-
// the error can only be beanstalk.NameError or beanstalk.ConnError
55-
// just return when the error is beanstalk.NameError, don't reset
56-
switch cerr := err.(type) {
57-
case beanstalk.ConnError:
58-
switch cerr.Err {
59-
case beanstalk.ErrBadChar, beanstalk.ErrBadFormat, beanstalk.ErrBuried, beanstalk.ErrDeadline,
60-
beanstalk.ErrDraining, beanstalk.ErrEmpty, beanstalk.ErrInternal, beanstalk.ErrJobTooBig,
61-
beanstalk.ErrNoCRLF, beanstalk.ErrNotFound, beanstalk.ErrNotIgnored, beanstalk.ErrTooLong:
62-
// won't reset
63-
default:
64-
// beanstalk.ErrOOM, beanstalk.ErrTimeout, beanstalk.ErrUnknown and other errors
65-
p.conn.reset()
66-
}
67-
}
68-
69-
return "", err
39+
return p.delay(wrap(body, time.Now().Add(delay)), delay)
7040
}
7141

7242
func (p *producerNode) Revoke(jointId string) error {
@@ -96,3 +66,55 @@ func (p *producerNode) Revoke(jointId string) error {
9666
// if not in this beanstalk, ignore
9767
return nil
9868
}
69+
70+
func (p *producerNode) at(body []byte, at time.Time) (string, error) {
71+
now := time.Now()
72+
if at.Before(now) {
73+
return "", ErrTimeBeforeNow
74+
}
75+
76+
duration := at.Sub(now)
77+
return p.delay(body, duration)
78+
}
79+
80+
func (p *producerNode) delay(body []byte, delay time.Duration) (string, error) {
81+
conn, err := p.conn.get()
82+
if err != nil {
83+
return "", err
84+
}
85+
86+
id, err := conn.Put(body, PriNormal, delay, defaultTimeToRun)
87+
if err == nil {
88+
return fmt.Sprintf("%s/%s/%d", p.endpoint, p.tube, id), nil
89+
}
90+
91+
// the error can only be beanstalk.NameError or beanstalk.ConnError
92+
// just return when the error is beanstalk.NameError, don't reset
93+
var cerr beanstalk.ConnError
94+
switch {
95+
case errors.As(err, &cerr):
96+
switch {
97+
case
98+
errors.Is(cerr.Err, beanstalk.ErrBadChar),
99+
errors.Is(cerr.Err, beanstalk.ErrBadFormat),
100+
errors.Is(cerr.Err, beanstalk.ErrBuried),
101+
errors.Is(cerr.Err, beanstalk.ErrDeadline),
102+
errors.Is(cerr.Err, beanstalk.ErrDraining),
103+
errors.Is(cerr.Err, beanstalk.ErrEmpty),
104+
errors.Is(cerr.Err, beanstalk.ErrInternal),
105+
errors.Is(cerr.Err, beanstalk.ErrJobTooBig),
106+
errors.Is(cerr.Err, beanstalk.ErrNoCRLF),
107+
errors.Is(cerr.Err, beanstalk.ErrNotFound),
108+
errors.Is(cerr.Err, beanstalk.ErrNotIgnored),
109+
errors.Is(cerr.Err, beanstalk.ErrTooLong):
110+
// won't reset
111+
default:
112+
// beanstalk.ErrOOM, beanstalk.ErrTimeout, beanstalk.ErrUnknown and other errors
113+
p.conn.reset()
114+
}
115+
default:
116+
logx.Error(err)
117+
}
118+
119+
return "", err
120+
}

‎dq/wrapper.go

+15
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
package dq
2+
3+
import (
4+
"bytes"
5+
"strconv"
6+
"time"
7+
)
8+
9+
func wrap(body []byte, at time.Time) []byte {
10+
var builder bytes.Buffer
11+
builder.WriteString(strconv.FormatInt(at.UnixNano(), 10))
12+
builder.WriteByte(timeSep)
13+
builder.Write(body)
14+
return builder.Bytes()
15+
}
File renamed without changes.

‎example/dq/producer/node/producer.go

+20
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
package main
2+
3+
import (
4+
"fmt"
5+
"strconv"
6+
"time"
7+
8+
"github.com/zeromicro/go-queue/dq"
9+
)
10+
11+
func main() {
12+
producer := dq.NewProducerNode("localhost:11300", "tube")
13+
14+
for i := 1000; i < 1005; i++ {
15+
_, err := producer.Delay([]byte(strconv.Itoa(i)), time.Second*5)
16+
if err != nil {
17+
fmt.Println(err)
18+
}
19+
}
20+
}

‎example/kq/consumer/queue.go

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
package main
22

33
import (
4+
"context"
45
"fmt"
56

67
"github.com/zeromicro/go-queue/kq"
@@ -11,7 +12,7 @@ func main() {
1112
var c kq.KqConf
1213
conf.MustLoad("config.yaml", &c)
1314

14-
q := kq.MustNewQueue(c, kq.WithHandle(func(k, v string) error {
15+
q := kq.MustNewQueue(c, kq.WithHandle(func(ctx context.Context, k, v string) error {
1516
fmt.Printf("=> %s\n", v)
1617
return nil
1718
}))

‎example/kq/producer/produce.go

-2
Original file line numberDiff line numberDiff line change
@@ -41,12 +41,10 @@ func main() {
4141
log.Fatal(err)
4242
}
4343

44-
fmt.Println(string(body))
4544
if err := pusher.Push(context.Background(), string(body)); err != nil {
4645
log.Fatal(err)
4746
}
4847

49-
fmt.Println(string(body))
5048
if err := pusher.KPush(context.Background(), "test", string(body)); err != nil {
5149
log.Fatal(err)
5250
}

‎example/natsq/consumer/consumer.go

+60
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,60 @@
1+
package main
2+
3+
import (
4+
"fmt"
5+
6+
"github.com/zeromicro/go-queue/natsq"
7+
)
8+
9+
type MyConsumer struct {
10+
Channel string
11+
}
12+
13+
func (c *MyConsumer) HandleMessage(m *natsq.Msg) error {
14+
fmt.Printf("%s Received %s's a message: %s\n", c.Channel, m.Subject, string(m.Data))
15+
return nil
16+
}
17+
18+
func main() {
19+
20+
mc1 := &MyConsumer{Channel: "vipUpgrade"}
21+
mc2 := &MyConsumer{Channel: "taskFinish"}
22+
23+
c := &natsq.NatsConfig{
24+
ServerUri: "nats://127.0.0.1:4222",
25+
}
26+
27+
//JetMode
28+
// cq := []*natsq.ConsumerQueue{
29+
// {
30+
// Consumer: mc1,
31+
// QueueName: "vipUpgrade",
32+
// StreamName: "ccc",
33+
// Subjects: []string{"ddd", "eee"},
34+
// },
35+
// {
36+
// Consumer: mc2,
37+
// QueueName: "taskFinish",
38+
// StreamName: "ccc",
39+
// Subjects: []string{"ccc", "eee"},
40+
// },
41+
// }
42+
//q := natsq.MustNewConsumerManager(c, cq, natsq.NatJetMode)
43+
44+
//DefaultMode
45+
cq := []*natsq.ConsumerQueue{
46+
{
47+
Consumer: mc1,
48+
QueueName: "vipUpgrade",
49+
Subjects: []string{"ddd", "eee"},
50+
},
51+
{
52+
Consumer: mc2,
53+
QueueName: "taskFinish",
54+
Subjects: []string{"ccc", "eee"},
55+
},
56+
}
57+
q := natsq.MustNewConsumerManager(c, cq, natsq.NatDefaultMode)
58+
q.Start()
59+
defer q.Stop()
60+
}

‎example/natsq/publisher/publisher.go

+68
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
package main
2+
3+
import (
4+
"log"
5+
"math/rand"
6+
"time"
7+
8+
"github.com/nats-io/nats.go/jetstream"
9+
"github.com/zeromicro/go-queue/natsq"
10+
)
11+
12+
func main() {
13+
14+
c := natsq.NatsConfig{
15+
ServerUri: "127.0.0.1:4222",
16+
}
17+
18+
// Default Mode
19+
p, _ := natsq.NewDefaultProducer(&c)
20+
for i := 0; i < 3; i++ {
21+
payload := randBody()
22+
err := p.Publish(randSub(), payload)
23+
if err != nil {
24+
log.Fatalf("Error publishing message: %v", err)
25+
} else {
26+
log.Printf("Published message: %s", string(payload))
27+
}
28+
}
29+
p.Close()
30+
31+
// JetMode
32+
j, _ := natsq.NewJetProducer(&c)
33+
j.CreateOrUpdateStream(jetstream.StreamConfig{
34+
Name: "ccc",
35+
Subjects: []string{"ccc", "ddd", "eee"},
36+
Storage: jetstream.FileStorage,
37+
NoAck: false,
38+
})
39+
for i := 0; i < 3; i++ {
40+
payload := randBody()
41+
err := j.Publish(randSub(), payload)
42+
if err != nil {
43+
log.Fatalf("Error publishing message: %v", err)
44+
} else {
45+
log.Printf("Published message: %s", string(payload))
46+
}
47+
}
48+
j.Close()
49+
}
50+
51+
func randSub() string {
52+
source := rand.NewSource(time.Now().UnixNano())
53+
// 创建一个新的随机数生成器
54+
rng := rand.New(source)
55+
strings := []string{"ccc", "ddd", "eee"}
56+
randomIndex := rng.Intn(len(strings))
57+
return strings[randomIndex]
58+
}
59+
60+
func randBody() []byte {
61+
charSet := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
62+
length := 10
63+
result := make([]byte, length)
64+
for i := range result {
65+
result[i] = charSet[rand.Intn(len(charSet))]
66+
}
67+
return result
68+
}

‎example/stan/publisher/producer.go

+3-2
Original file line numberDiff line numberDiff line change
@@ -44,12 +44,13 @@ func main() {
4444
}
4545

4646
func randSub() string {
47-
rand.Seed(time.Now().UnixNano())
47+
source := rand.NewSource(time.Now().UnixNano())
48+
rng := rand.New(source)
4849
charSet := "abc"
4950
length := 1
5051
result := make([]byte, length)
5152
for i := range result {
52-
result[i] = charSet[rand.Intn(len(charSet))]
53+
result[i] = charSet[rng.Intn(len(charSet))]
5354
}
5455
return string(result)
5556
}

‎go.mod

+58-5
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,66 @@
11
module github.com/zeromicro/go-queue
22

3-
go 1.16
3+
go 1.20
44

55
require (
66
github.com/beanstalkd/go-beanstalk v0.2.0
7+
github.com/nats-io/nats.go v1.34.1
8+
github.com/nats-io/stan.go v0.10.4
9+
github.com/rabbitmq/amqp091-go v1.10.0
10+
github.com/segmentio/kafka-go v0.4.47
11+
github.com/stretchr/testify v1.9.0
12+
github.com/zeromicro/go-zero v1.6.6
13+
go.opentelemetry.io/otel v1.19.0
14+
)
15+
16+
require (
17+
github.com/beorn7/perks v1.0.1 // indirect
18+
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
19+
github.com/cespare/xxhash/v2 v2.2.0 // indirect
20+
github.com/davecgh/go-spew v1.1.1 // indirect
21+
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
22+
github.com/fatih/color v1.17.0 // indirect
23+
github.com/go-logr/logr v1.3.0 // indirect
24+
github.com/go-logr/stdr v1.2.2 // indirect
25+
github.com/gogo/protobuf v1.3.2 // indirect
26+
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect
27+
github.com/klauspost/compress v1.17.9 // indirect
28+
github.com/mattn/go-colorable v0.1.13 // indirect
29+
github.com/mattn/go-isatty v0.0.20 // indirect
30+
github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
731
github.com/nats-io/nats-server/v2 v2.9.15 // indirect
832
github.com/nats-io/nats-streaming-server v0.25.3 // indirect
9-
github.com/nats-io/stan.go v0.10.4
10-
github.com/rabbitmq/amqp091-go v1.8.0
11-
github.com/segmentio/kafka-go v0.4.38
12-
github.com/zeromicro/go-zero v1.4.3
33+
github.com/nats-io/nkeys v0.4.7 // indirect
34+
github.com/nats-io/nuid v1.0.1 // indirect
35+
github.com/openzipkin/zipkin-go v0.4.2 // indirect
36+
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
37+
github.com/pierrec/lz4/v4 v4.1.21 // indirect
38+
github.com/pmezard/go-difflib v1.0.0 // indirect
39+
github.com/prometheus/client_golang v1.18.0 // indirect
40+
github.com/prometheus/client_model v0.5.0 // indirect
41+
github.com/prometheus/common v0.45.0 // indirect
42+
github.com/prometheus/procfs v0.12.0 // indirect
43+
github.com/redis/go-redis/v9 v9.5.3 // indirect
44+
github.com/spaolacci/murmur3 v1.1.0 // indirect
45+
go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect
46+
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
47+
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
48+
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect
49+
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 // indirect
50+
go.opentelemetry.io/otel/exporters/zipkin v1.19.0 // indirect
51+
go.opentelemetry.io/otel/metric v1.19.0 // indirect
52+
go.opentelemetry.io/otel/sdk v1.19.0 // indirect
53+
go.opentelemetry.io/otel/trace v1.19.0 // indirect
54+
go.opentelemetry.io/proto/otlp v1.0.0 // indirect
55+
go.uber.org/automaxprocs v1.5.3 // indirect
56+
golang.org/x/crypto v0.24.0 // indirect
57+
golang.org/x/net v0.26.0 // indirect
58+
golang.org/x/sys v0.21.0 // indirect
59+
golang.org/x/text v0.16.0 // indirect
60+
google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
61+
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
62+
google.golang.org/grpc v1.64.0 // indirect
63+
google.golang.org/protobuf v1.34.2 // indirect
64+
gopkg.in/yaml.v2 v2.4.0 // indirect
65+
gopkg.in/yaml.v3 v3.0.1 // indirect
1366
)

‎go.sum

+122-1,339
Large diffs are not rendered by default.

‎kq/config.go

+14-13
Original file line numberDiff line numberDiff line change
@@ -9,17 +9,18 @@ const (
99

1010
type KqConf struct {
1111
service.ServiceConf
12-
Brokers []string
13-
Group string
14-
Topic string
15-
CaFile string `json:",optional"`
16-
Offset string `json:",options=first|last,default=last"`
17-
Conns int `json:",default=1"`
18-
Consumers int `json:",default=8"`
19-
Processors int `json:",default=8"`
20-
MinBytes int `json:",default=10240"` // 10K
21-
MaxBytes int `json:",default=10485760"` // 10M
22-
Username string `json:",optional"`
23-
Password string `json:",optional"`
24-
ForceCommit bool `json:",default=true"`
12+
Brokers []string
13+
Group string
14+
Topic string
15+
CaFile string `json:",optional"`
16+
Offset string `json:",options=first|last,default=last"`
17+
Conns int `json:",default=1"`
18+
Consumers int `json:",default=8"`
19+
Processors int `json:",default=8"`
20+
MinBytes int `json:",default=10240"` // 10K
21+
MaxBytes int `json:",default=10485760"` // 10M
22+
Username string `json:",optional"`
23+
Password string `json:",optional"`
24+
ForceCommit bool `json:",default=true"`
25+
CommitInOrder bool `json:",default=false"`
2526
}

‎kq/internal/message.go

+34
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
package internal
2+
3+
import "github.com/segmentio/kafka-go"
4+
5+
type Message struct {
6+
*kafka.Message
7+
}
8+
9+
func NewMessage(msg *kafka.Message) *Message {
10+
return &Message{Message: msg}
11+
}
12+
13+
func (m *Message) GetHeader(key string) string {
14+
for _, h := range m.Headers {
15+
if h.Key == key {
16+
return string(h.Value)
17+
}
18+
}
19+
return ""
20+
}
21+
22+
func (m *Message) SetHeader(key, val string) {
23+
// Ensure uniqueness of keys
24+
for i := 0; i < len(m.Headers); i++ {
25+
if m.Headers[i].Key == key {
26+
m.Headers = append(m.Headers[:i], m.Headers[i+1:]...)
27+
i--
28+
}
29+
}
30+
m.Headers = append(m.Headers, kafka.Header{
31+
Key: key,
32+
Value: []byte(val),
33+
})
34+
}

‎kq/internal/message_test.go

+57
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
package internal
2+
3+
import (
4+
"testing"
5+
6+
"github.com/segmentio/kafka-go"
7+
"github.com/stretchr/testify/assert"
8+
)
9+
10+
func TestMessageGetHeader(t *testing.T) {
11+
testCases := []struct {
12+
name string
13+
msg *Message
14+
key string
15+
expected string
16+
}{
17+
{
18+
name: "exists",
19+
msg: &Message{
20+
Message: &kafka.Message{Headers: []kafka.Header{
21+
{Key: "foo", Value: []byte("bar")},
22+
}}},
23+
key: "foo",
24+
expected: "bar",
25+
},
26+
{
27+
name: "not exists",
28+
msg: &Message{Message: &kafka.Message{Headers: []kafka.Header{}}},
29+
key: "foo",
30+
expected: "",
31+
},
32+
}
33+
34+
for _, tc := range testCases {
35+
t.Run(tc.name, func(t *testing.T) {
36+
result := tc.msg.GetHeader(tc.key)
37+
assert.Equal(t, tc.expected, result)
38+
})
39+
}
40+
}
41+
42+
func TestMessageSetHeader(t *testing.T) {
43+
msg := &Message{Message: &kafka.Message{Headers: []kafka.Header{
44+
{Key: "foo", Value: []byte("bar")}},
45+
}}
46+
47+
msg.SetHeader("foo", "bar2")
48+
msg.SetHeader("foo2", "bar2")
49+
msg.SetHeader("foo2", "bar3")
50+
msg.SetHeader("foo3", "bar4")
51+
52+
assert.ElementsMatch(t, msg.Headers, []kafka.Header{
53+
{Key: "foo", Value: []byte("bar2")},
54+
{Key: "foo2", Value: []byte("bar3")},
55+
{Key: "foo3", Value: []byte("bar4")},
56+
})
57+
}

‎kq/internal/trace.go

+35
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
package internal
2+
3+
import "go.opentelemetry.io/otel/propagation"
4+
5+
var _ propagation.TextMapCarrier = (*MessageCarrier)(nil)
6+
7+
// MessageCarrier injects and extracts traces from a types.Message.
8+
type MessageCarrier struct {
9+
msg *Message
10+
}
11+
12+
// NewMessageCarrier returns a new MessageCarrier.
13+
func NewMessageCarrier(msg *Message) MessageCarrier {
14+
return MessageCarrier{msg: msg}
15+
}
16+
17+
// Get returns the value associated with the passed key.
18+
func (m MessageCarrier) Get(key string) string {
19+
return m.msg.GetHeader(key)
20+
}
21+
22+
// Set stores the key-value pair.
23+
func (m MessageCarrier) Set(key string, value string) {
24+
m.msg.SetHeader(key, value)
25+
}
26+
27+
// Keys lists the keys stored in this carrier.
28+
func (m MessageCarrier) Keys() []string {
29+
out := make([]string, len(m.msg.Headers))
30+
for i, h := range m.msg.Headers {
31+
out[i] = h.Key
32+
}
33+
34+
return out
35+
}

‎kq/internal/trace_test.go

+93
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,93 @@
1+
package internal
2+
3+
import (
4+
"testing"
5+
6+
"github.com/segmentio/kafka-go"
7+
"github.com/stretchr/testify/assert"
8+
)
9+
10+
func TestMessageCarrierGet(t *testing.T) {
11+
testCases := []struct {
12+
name string
13+
carrier MessageCarrier
14+
key string
15+
expected string
16+
}{
17+
{
18+
name: "exists",
19+
carrier: NewMessageCarrier(&Message{&kafka.Message{Headers: []kafka.Header{
20+
{Key: "foo", Value: []byte("bar")},
21+
}}}),
22+
key: "foo",
23+
expected: "bar",
24+
},
25+
{
26+
name: "not exists",
27+
carrier: NewMessageCarrier(&Message{&kafka.Message{Headers: []kafka.Header{}}}),
28+
key: "foo",
29+
expected: "",
30+
},
31+
}
32+
33+
for _, tc := range testCases {
34+
t.Run(tc.name, func(t *testing.T) {
35+
result := tc.carrier.Get(tc.key)
36+
assert.Equal(t, tc.expected, result)
37+
})
38+
}
39+
}
40+
41+
func TestMessageCarrierSet(t *testing.T) {
42+
msg := Message{&kafka.Message{Headers: []kafka.Header{
43+
{Key: "foo", Value: []byte("bar")},
44+
}}}
45+
carrier := MessageCarrier{msg: &msg}
46+
47+
carrier.Set("foo", "bar2")
48+
carrier.Set("foo2", "bar2")
49+
carrier.Set("foo2", "bar3")
50+
carrier.Set("foo3", "bar4")
51+
52+
assert.ElementsMatch(t, carrier.msg.Headers, []kafka.Header{
53+
{Key: "foo", Value: []byte("bar2")},
54+
{Key: "foo2", Value: []byte("bar3")},
55+
{Key: "foo3", Value: []byte("bar4")},
56+
})
57+
}
58+
59+
func TestMessageCarrierKeys(t *testing.T) {
60+
testCases := []struct {
61+
name string
62+
carrier MessageCarrier
63+
expected []string
64+
}{
65+
{
66+
name: "one",
67+
carrier: MessageCarrier{msg: &Message{&kafka.Message{Headers: []kafka.Header{
68+
{Key: "foo", Value: []byte("bar")},
69+
}}}},
70+
expected: []string{"foo"},
71+
},
72+
{
73+
name: "none",
74+
carrier: MessageCarrier{msg: &Message{&kafka.Message{Headers: []kafka.Header{}}}},
75+
expected: []string{},
76+
},
77+
{
78+
name: "many",
79+
carrier: MessageCarrier{msg: &Message{&kafka.Message{Headers: []kafka.Header{
80+
{Key: "foo", Value: []byte("bar")},
81+
{Key: "baz", Value: []byte("quux")},
82+
}}}},
83+
expected: []string{"foo", "baz"},
84+
},
85+
}
86+
87+
for _, tc := range testCases {
88+
t.Run(tc.name, func(t *testing.T) {
89+
result := tc.carrier.Keys()
90+
assert.Equal(t, tc.expected, result)
91+
})
92+
}
93+
}

‎kq/pusher.go

+28-4
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,10 @@ import (
66
"time"
77

88
"github.com/segmentio/kafka-go"
9+
"github.com/zeromicro/go-queue/kq/internal"
910
"github.com/zeromicro/go-zero/core/executors"
1011
"github.com/zeromicro/go-zero/core/logx"
12+
"go.opentelemetry.io/otel"
1113
)
1214

1315
type (
@@ -26,6 +28,9 @@ type (
2628
// executors.ChunkExecutor options
2729
chunkSize int
2830
flushInterval time.Duration
31+
32+
// syncPush is used to enable sync push
33+
syncPush bool
2934
}
3035
)
3136

@@ -46,6 +51,16 @@ func NewPusher(addrs []string, topic string, opts ...PushOption) *Pusher {
4651
// apply kafka.Writer options
4752
producer.AllowAutoTopicCreation = options.allowAutoTopicCreation
4853

54+
pusher := &Pusher{
55+
producer: producer,
56+
topic: topic,
57+
}
58+
59+
// if syncPush is true, return the pusher directly
60+
if options.syncPush {
61+
return pusher
62+
}
63+
4964
// apply ChunkExecutor options
5065
var chunkOpts []executors.ChunkOption
5166
if options.chunkSize > 0 {
@@ -55,10 +70,6 @@ func NewPusher(addrs []string, topic string, opts ...PushOption) *Pusher {
5570
chunkOpts = append(chunkOpts, executors.WithFlushInterval(options.flushInterval))
5671
}
5772

58-
pusher := &Pusher{
59-
producer: producer,
60-
topic: topic,
61-
}
6273
pusher.executor = executors.NewChunkExecutor(func(tasks []interface{}) {
6374
chunk := make([]kafka.Message, len(tasks))
6475
for i := range tasks {
@@ -105,6 +116,12 @@ func (p *Pusher) Push(ctx context.Context, v string) error {
105116
Key: []byte(strconv.FormatInt(time.Now().UnixNano(), 10)), // current timestamp
106117
Value: []byte(v),
107118
}
119+
120+
// wrap message into message carrier
121+
mc := internal.NewMessageCarrier(internal.NewMessage(&msg))
122+
// inject trace context into message
123+
otel.GetTextMapPropagator().Inject(ctx, mc)
124+
108125
if p.executor != nil {
109126
return p.executor.Add(msg, len(v))
110127
} else {
@@ -132,3 +149,10 @@ func WithAllowAutoTopicCreation() PushOption {
132149
options.allowAutoTopicCreation = true
133150
}
134151
}
152+
153+
// WithSyncPush enables the Pusher to push messages synchronously.
154+
func WithSyncPush() PushOption {
155+
return func(options *pushOptions) {
156+
options.syncPush = true
157+
}
158+
}

‎kq/queue.go

+95-31
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@ import (
44
"context"
55
"crypto/tls"
66
"crypto/x509"
7+
"errors"
78
"io"
89
"log"
910
"os"
@@ -14,12 +15,16 @@ import (
1415
_ "github.com/segmentio/kafka-go/lz4"
1516
"github.com/segmentio/kafka-go/sasl/plain"
1617
_ "github.com/segmentio/kafka-go/snappy"
18+
"github.com/zeromicro/go-queue/kq/internal"
19+
"github.com/zeromicro/go-zero/core/contextx"
20+
"github.com/zeromicro/go-zero/core/logc"
1721
"github.com/zeromicro/go-zero/core/logx"
1822
"github.com/zeromicro/go-zero/core/queue"
1923
"github.com/zeromicro/go-zero/core/service"
2024
"github.com/zeromicro/go-zero/core/stat"
2125
"github.com/zeromicro/go-zero/core/threading"
2226
"github.com/zeromicro/go-zero/core/timex"
27+
"go.opentelemetry.io/otel"
2328
)
2429

2530
const (
@@ -29,12 +34,12 @@ const (
2934
)
3035

3136
type (
32-
ConsumeHandle func(key, value string) error
37+
ConsumeHandle func(ctx context.Context, key, value string) error
3338

34-
ConsumeErrorHandler func(msg kafka.Message, err error)
39+
ConsumeErrorHandler func(ctx context.Context, msg kafka.Message, err error)
3540

3641
ConsumeHandler interface {
37-
Consume(key, value string) error
42+
Consume(ctx context.Context, key, value string) error
3843
}
3944

4045
queueOptions struct {
@@ -54,6 +59,7 @@ type (
5459
channel chan kafka.Message
5560
producerRoutines *threading.RoutineGroup
5661
consumerRoutines *threading.RoutineGroup
62+
commitRunner *threading.StableRunner[kafka.Message, kafka.Message]
5763
metrics *stat.Metrics
5864
errorHandler ConsumeErrorHandler
5965
}
@@ -143,7 +149,7 @@ func newKafkaQueue(c KqConf, handler ConsumeHandler, options queueOptions) queue
143149
}
144150
consumer := kafka.NewReader(readerConfig)
145151

146-
return &kafkaQueue{
152+
q := &kafkaQueue{
147153
c: c,
148154
consumer: consumer,
149155
handler: handler,
@@ -153,25 +159,51 @@ func newKafkaQueue(c KqConf, handler ConsumeHandler, options queueOptions) queue
153159
metrics: options.metrics,
154160
errorHandler: options.errorHandler,
155161
}
162+
if c.CommitInOrder {
163+
q.commitRunner = threading.NewStableRunner(func(msg kafka.Message) kafka.Message {
164+
if err := q.consumeOne(context.Background(), string(msg.Key), string(msg.Value)); err != nil {
165+
if q.errorHandler != nil {
166+
q.errorHandler(context.Background(), msg, err)
167+
}
168+
}
169+
170+
return msg
171+
})
172+
}
173+
174+
return q
156175
}
157176

158177
func (q *kafkaQueue) Start() {
159-
q.startConsumers()
160-
q.startProducers()
178+
if q.c.CommitInOrder {
179+
go q.commitInOrder()
161180

162-
q.producerRoutines.Wait()
163-
close(q.channel)
164-
q.consumerRoutines.Wait()
181+
if err := q.consume(func(msg kafka.Message) {
182+
if e := q.commitRunner.Push(msg); e != nil {
183+
logx.Error(e)
184+
}
185+
}); err != nil {
186+
logx.Error(err)
187+
}
188+
} else {
189+
q.startConsumers()
190+
q.startProducers()
191+
q.producerRoutines.Wait()
192+
close(q.channel)
193+
q.consumerRoutines.Wait()
194+
195+
logx.Infof("Consumer %s is closed", q.c.Name)
196+
}
165197
}
166198

167199
func (q *kafkaQueue) Stop() {
168200
q.consumer.Close()
169201
logx.Close()
170202
}
171203

172-
func (q *kafkaQueue) consumeOne(key, val string) error {
204+
func (q *kafkaQueue) consumeOne(ctx context.Context, key, val string) error {
173205
startTime := timex.Now()
174-
err := q.handler.Consume(key, val)
206+
err := q.handler.Consume(ctx, key, val)
175207
q.metrics.Add(stat.Task{
176208
Duration: timex.Since(startTime),
177209
})
@@ -182,18 +214,25 @@ func (q *kafkaQueue) startConsumers() {
182214
for i := 0; i < q.c.Processors; i++ {
183215
q.consumerRoutines.Run(func() {
184216
for msg := range q.channel {
185-
if err := q.consumeOne(string(msg.Key), string(msg.Value)); err != nil {
217+
// wrap message into message carrier
218+
mc := internal.NewMessageCarrier(internal.NewMessage(&msg))
219+
// extract trace context from message
220+
ctx := otel.GetTextMapPropagator().Extract(context.Background(), mc)
221+
// remove deadline and error control
222+
ctx = contextx.ValueOnlyFrom(ctx)
223+
224+
if err := q.consumeOne(ctx, string(msg.Key), string(msg.Value)); err != nil {
186225
if q.errorHandler != nil {
187-
q.errorHandler(msg, err)
226+
q.errorHandler(ctx, msg, err)
188227
}
189228

190229
if !q.c.ForceCommit {
191230
continue
192231
}
193232
}
194233

195-
if err := q.consumer.CommitMessages(context.Background(), msg); err != nil {
196-
logx.Errorf("commit failed, error: %v", err)
234+
if err := q.consumer.CommitMessages(ctx, msg); err != nil {
235+
logc.Errorf(ctx, "commit failed, error: %v", err)
197236
}
198237
}
199238
})
@@ -202,25 +241,50 @@ func (q *kafkaQueue) startConsumers() {
202241

203242
func (q *kafkaQueue) startProducers() {
204243
for i := 0; i < q.c.Consumers; i++ {
244+
i := i
205245
q.producerRoutines.Run(func() {
206-
for {
207-
msg, err := q.consumer.FetchMessage(context.Background())
208-
// io.EOF means consumer closed
209-
// io.ErrClosedPipe means committing messages on the consumer,
210-
// kafka will refire the messages on uncommitted messages, ignore
211-
if err == io.EOF || err == io.ErrClosedPipe {
212-
return
213-
}
214-
if err != nil {
215-
logx.Errorf("Error on reading message, %q", err.Error())
216-
continue
217-
}
246+
if err := q.consume(func(msg kafka.Message) {
218247
q.channel <- msg
248+
}); err != nil {
249+
logx.Infof("Consumer %s-%d is closed, error: %q", q.c.Name, i, err.Error())
250+
return
219251
}
220252
})
221253
}
222254
}
223255

256+
func (q *kafkaQueue) consume(handle func(msg kafka.Message)) error {
257+
for {
258+
msg, err := q.consumer.FetchMessage(context.Background())
259+
// io.EOF means consumer closed
260+
// io.ErrClosedPipe means committing messages on the consumer,
261+
// kafka will refire the messages on uncommitted messages, ignore
262+
if err == io.EOF || errors.Is(err, io.ErrClosedPipe) {
263+
return err
264+
}
265+
if err != nil {
266+
logx.Errorf("Error on reading message, %q", err.Error())
267+
continue
268+
}
269+
270+
handle(msg)
271+
}
272+
}
273+
274+
func (q *kafkaQueue) commitInOrder() {
275+
for {
276+
msg, err := q.commitRunner.Get()
277+
if err != nil {
278+
logx.Error(err)
279+
return
280+
}
281+
282+
if err := q.consumer.CommitMessages(context.Background(), msg); err != nil {
283+
logx.Errorf("commit failed, error: %v", err)
284+
}
285+
}
286+
}
287+
224288
func (q kafkaQueues) Start() {
225289
for _, each := range q.queues {
226290
q.group.Add(each)
@@ -272,8 +336,8 @@ type innerConsumeHandler struct {
272336
handle ConsumeHandle
273337
}
274338

// Consume adapts the wrapped ConsumeHandle function to the ConsumeHandler
// interface by delegating directly to it.
func (ch innerConsumeHandler) Consume(ctx context.Context, k, v string) error {
	return ch.handle(ctx, k, v)
}
278342

279343
func ensureQueueOptions(c KqConf, options *queueOptions) {
@@ -290,8 +354,8 @@ func ensureQueueOptions(c KqConf, options *queueOptions) {
290354
options.metrics = stat.NewMetrics(c.Name)
291355
}
292356
if options.errorHandler == nil {
293-
options.errorHandler = func(msg kafka.Message, err error) {
294-
logx.Errorf("consume: %s, error: %v", string(msg.Value), err)
357+
options.errorHandler = func(ctx context.Context, msg kafka.Message, err error) {
358+
logc.Errorf(ctx, "consume: %s, error: %v", string(msg.Value), err)
295359
}
296360
}
297361
}

‎natsq/config.go

+11
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,11 @@
1+
package natsq
2+
3+
import (
4+
"github.com/nats-io/nats.go"
5+
)
6+
7+
// NatsConfig carries the settings used to open a NATS connection.
type NatsConfig struct {
	ServerUri  string        // NATS server URI, e.g. "nats://127.0.0.1:4222"
	ClientName string        // NOTE(review): not referenced by visible connect calls — confirm intended use
	Options    []nats.Option // extra options passed through to nats.Connect
}

‎natsq/consumer.go

+174
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,174 @@
1+
package natsq
2+
3+
import (
4+
"context"
5+
"errors"
6+
"log"
7+
"sync"
8+
9+
"github.com/nats-io/nats.go"
10+
"github.com/nats-io/nats.go/jetstream"
11+
"github.com/zeromicro/go-zero/core/logx"
12+
"github.com/zeromicro/go-zero/core/queue"
13+
)
14+
15+
const (
	// NatDefaultMode consumes via core NATS queue subscriptions.
	NatDefaultMode = iota
	// NatJetMode consumes via JetStream pull consumers.
	NatJetMode
)

type (
	// Msg is the minimal message handed to consumer handlers.
	Msg struct {
		Subject string // subject the message arrived on
		Data    []byte // raw payload
	}

	// ConsumeHandle is the function form of a message handler.
	ConsumeHandle func(m *Msg) error

	// ConsumeHandler Consumer interface, used to define the methods required by the consumer
	ConsumeHandler interface {
		HandleMessage(m *Msg) error
	}

	// ConsumerQueue Consumer queue, used to maintain the relationship between a consumer queue
	ConsumerQueue struct {
		StreamName string                     // stream name (required when mode is NatJetMode)
		QueueName  string                     // queue name
		Subjects   []string                   // Subscribe subject
		Consumer   ConsumeHandler             // consumer object
		JetOption  []jetstream.PullConsumeOpt // Jetstream configuration
	}

	// ConsumerManager Consumer manager for managing multiple consumer queues
	ConsumerManager struct {
		mutex    sync.RWMutex    // read-write lock guarding queues
		conn     *nats.Conn      // nats connect
		mode     uint            // nats mode (NatDefaultMode or NatJetMode)
		queues   []ConsumerQueue // consumer queue list
		options  []nats.Option   // Connection configuration items
		doneChan chan struct{}   // close channel; closing it releases Start and subscribers
	}
)
52+
53+
// MustNewConsumerManager creates a new ConsumerManager instance.
54+
// It connects to NATS server, registers the provided consumer queues, and returns the ConsumerManager.
55+
// If any error occurs during the process, it logs the error and continues.
56+
func MustNewConsumerManager(cfg *NatsConfig, cq []*ConsumerQueue, mode uint) queue.MessageQueue {
57+
sc, err := nats.Connect(cfg.ServerUri, cfg.Options...)
58+
if err != nil {
59+
logx.Errorf("failed to connect nats, error: %v", err)
60+
}
61+
cm := &ConsumerManager{
62+
conn: sc,
63+
options: cfg.Options,
64+
mode: mode,
65+
doneChan: make(chan struct{}),
66+
}
67+
if len(cq) == 0 {
68+
logx.Errorf("failed consumerQueue register to nats, error: cq len is 0")
69+
}
70+
for _, item := range cq {
71+
err = cm.registerQueue(item)
72+
if err != nil {
73+
logx.Errorf("failed to register nats, error: %v", err)
74+
}
75+
}
76+
77+
return cm
78+
}
79+
80+
// Start starts consuming messages from all the registered consumer queues.
81+
// It launches a goroutine for each consumer queue to subscribe and process messages.
82+
// The method blocks until the doneChan is closed.
83+
func (cm *ConsumerManager) Start() {
84+
cm.mutex.RLock()
85+
defer cm.mutex.RUnlock()
86+
87+
if len(cm.queues) == 0 {
88+
logx.Errorf("no consumer queues found")
89+
}
90+
for _, consumerQueue := range cm.queues {
91+
go cm.subscribe(consumerQueue)
92+
}
93+
<-cm.doneChan
94+
}
95+
96+
// Stop closes the NATS connection and stops the ConsumerManager.
97+
func (cm *ConsumerManager) Stop() {
98+
if cm.conn != nil {
99+
cm.conn.Close()
100+
}
101+
}
102+
103+
// registerQueue registers a new consumer queue with the ConsumerManager.
104+
// It validates the required fields of the ConsumerQueue and adds it to the list of queues.
105+
// If any required field is missing, it returns an error.
106+
func (cm *ConsumerManager) registerQueue(queue *ConsumerQueue) error {
107+
cm.mutex.Lock()
108+
defer cm.mutex.Unlock()
109+
110+
if cm.mode == NatJetMode && queue.StreamName == "" {
111+
return errors.New("stream name is required")
112+
}
113+
114+
if queue.QueueName == "" {
115+
return errors.New("queue name is required")
116+
}
117+
if len(queue.Subjects) == 0 {
118+
return errors.New("subject is required")
119+
}
120+
if queue.Consumer == nil {
121+
return errors.New("consumer is required")
122+
}
123+
124+
cm.queues = append(cm.queues, *queue)
125+
return nil
126+
}
127+
128+
// subscribe subscribes to the specified consumer queue and starts processing messages.
129+
// If the NATS mode is NatJetMode, it creates a JetStream consumer and consumes messages using the provided options.
130+
// If the NATS mode is NatDefaultMode, it subscribes to the specified subjects using the queue name.
131+
// The method blocks until the doneChan is closed.
132+
func (cm *ConsumerManager) subscribe(queue ConsumerQueue) {
133+
ctx := context.Background()
134+
if cm.mode == NatJetMode {
135+
js, _ := jetstream.New(cm.conn)
136+
stream, err := js.Stream(ctx, "ccc")
137+
if err != nil {
138+
log.Fatalf("Error creating stream: %v", err)
139+
return
140+
}
141+
consumer, _ := stream.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{
142+
Name: queue.QueueName,
143+
AckPolicy: jetstream.AckExplicitPolicy,
144+
FilterSubjects: queue.Subjects,
145+
})
146+
consContext, subErr := consumer.Consume(func(msg jetstream.Msg) {
147+
err := queue.Consumer.HandleMessage(&Msg{Subject: msg.Subject(), Data: msg.Data()})
148+
if err != nil {
149+
logx.Errorf("error handling message: %v", err.Error())
150+
} else {
151+
msg.Ack()
152+
}
153+
}, queue.JetOption...)
154+
if subErr != nil {
155+
logx.Errorf("error subscribing to queue %s: %v", queue.QueueName, subErr.Error())
156+
return
157+
}
158+
defer consContext.Stop()
159+
}
160+
if cm.mode == NatDefaultMode {
161+
for _, subject := range queue.Subjects {
162+
cm.conn.QueueSubscribe(subject, queue.QueueName, func(m *nats.Msg) {
163+
err := queue.Consumer.HandleMessage(&Msg{Subject: m.Subject, Data: m.Data})
164+
if err != nil {
165+
logx.Errorf("error handling message: %v", err.Error())
166+
} else {
167+
m.Ack()
168+
}
169+
})
170+
}
171+
}
172+
173+
<-cm.doneChan
174+
}

‎natsq/producer.go

+88
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,88 @@
1+
package natsq
2+
3+
import (
4+
"context"
5+
6+
"github.com/nats-io/nats.go"
7+
"github.com/nats-io/nats.go/jetstream"
8+
)
9+
10+
// DefaultProducer publishes messages over a core (non-JetStream) NATS
// connection.
type DefaultProducer struct {
	conn *nats.Conn // underlying NATS connection
}
13+
14+
// NewDefaultProducer creates a new default NATS producer.
15+
// It takes a NatsConfig as input and returns a pointer to a DefaultProducer and an error.
16+
// It connects to the NATS server using the provided configuration.
17+
func NewDefaultProducer(c *NatsConfig) (*DefaultProducer, error) {
18+
sc, err := nats.Connect(c.ServerUri, c.Options...)
19+
if err != nil {
20+
return nil, err
21+
}
22+
23+
return &DefaultProducer{
24+
conn: sc,
25+
}, nil
26+
}
27+
28+
// Publish sends data on the given subject over the core NATS connection and
// returns any publish error.
func (p *DefaultProducer) Publish(subject string, data []byte) error {
	return p.conn.Publish(subject, data)
}
33+
34+
// Close closes the NATS connection of the default producer.
35+
func (p *DefaultProducer) Close() {
36+
if p.conn != nil {
37+
p.conn.Close()
38+
}
39+
}
40+
41+
// JetProducer publishes messages through a NATS JetStream context.
type JetProducer struct {
	conn *nats.Conn          // underlying NATS connection
	js   jetstream.JetStream // JetStream context created from conn
	// NOTE(review): storing a context in a struct is discouraged in Go;
	// passing ctx per call would be preferable — kept for interface
	// compatibility.
	ctx context.Context
}
46+
47+
// NewJetProducer creates a new JetStream producer.
48+
// It takes a NatsConfig as input and returns a pointer to a JetProducer and an error.
49+
// It connects to the NATS server using the provided configuration and creates a new JetStream context.
50+
func NewJetProducer(c *NatsConfig) (*JetProducer, error) {
51+
sc, err := nats.Connect(c.ServerUri, c.Options...)
52+
if err != nil {
53+
return nil, err
54+
}
55+
js, err := jetstream.New(sc)
56+
if err != nil {
57+
return nil, err
58+
}
59+
return &JetProducer{
60+
conn: sc,
61+
js: js,
62+
}, nil
63+
}
64+
65+
// CreateOrUpdateStream creates or updates a JetStream stream with the specified configuration.
66+
// It takes a jetstream.StreamConfig as input and returns an error if the operation fails.
67+
func (j *JetProducer) CreateOrUpdateStream(config jetstream.StreamConfig) error {
68+
_, err := j.js.CreateOrUpdateStream(j.ctx, config)
69+
if err != nil {
70+
return err
71+
}
72+
return nil
73+
}
74+
75+
// Publish publishes a message with the specified subject and data using the JetStream producer.
76+
// It takes a subject string and data byte slice as input and returns an error if the publish fails.
77+
func (j *JetProducer) Publish(subject string, data []byte) error {
78+
_, err := j.js.Publish(j.ctx, subject, data)
79+
if err != nil {
80+
return err
81+
}
82+
return nil
83+
}
84+
85+
// Close closes the NATS connection of the JetStream producer.
86+
func (j *JetProducer) Close() {
87+
j.conn.Close()
88+
}

‎readme.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ Kafka Pub/Sub framework
5353

5454
### consumer example
5555

56-
config.json
56+
config.yaml
5757
```yaml
5858
Name: kq
5959
Brokers:

0 commit comments

Comments
 (0)
Please sign in to comment.