
Commit 2b3c4c2

authored Jul 24, 2024
Merge branch 'master' into master
2 parents ff74599 + 5af862f commit 2b3c4c2

25 files changed: +1155 −1484 lines

‎dq/consumer.go (+5 −4)

@@ -12,7 +12,6 @@ import (
 
 const (
 	expiration = 3600 // seconds
-	guardValue = "1"
 	tolerance  = time.Minute * 30
 )
 
@@ -45,17 +44,19 @@ func NewConsumer(c DqConf) Consumer {
 func (c *consumerCluster) Consume(consume Consume) {
 	guardedConsume := func(body []byte) {
 		key := hash.Md5Hex(body)
-		body, ok := c.unwrap(body)
+		taskBody, ok := c.unwrap(body)
 		if !ok {
 			logx.Errorf("discarded: %q", string(body))
 			return
 		}
 
-		ok, err := c.red.SetnxEx(key, guardValue, expiration)
+		redisLock := redis.NewRedisLock(c.red, key)
+		redisLock.SetExpire(expiration)
+		ok, err := redisLock.Acquire()
 		if err != nil {
 			logx.Error(err)
 		} else if ok {
-			consume(body)
+			consume(taskBody)
 		}
 	}
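The change above replaces the raw `SetnxEx` guard with go-zero's `redis.NewRedisLock`. A minimal sketch of that dedup pattern in isolation (the Redis address and key are placeholders):

```go
package main

import (
	"fmt"

	"github.com/zeromicro/go-zero/core/stores/redis"
)

func main() {
	// placeholder address; the consumer gets its *redis.Redis from DqConf
	store := redis.MustNewRedis(redis.RedisConf{Host: "localhost:6379", Type: "node"})

	// one lock per task-body hash, same as key := hash.Md5Hex(body) above
	lock := redis.NewRedisLock(store, "task-md5-key")
	lock.SetExpire(3600) // seconds, matching the expiration constant

	ok, err := lock.Acquire()
	if err != nil {
		fmt.Println(err)
		return
	}
	if ok {
		fmt.Println("lock acquired: first consumer wins, process the task")
	} else {
		fmt.Println("lock held elsewhere: task already being handled")
	}
}
```

Note that the consumer never calls `Release`: holding the lock until it expires is what suppresses duplicate deliveries of the same task inside the expiration window.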

‎dq/consumernode.go (+19 −7)

@@ -1,6 +1,7 @@
 package dq
 
 import (
+	"errors"
 	"time"
 
 	"github.com/beanstalkd/go-beanstalk"
@@ -59,14 +60,25 @@ func (c *consumerNode) consumeEvents(consume Consume) {
 		}
 
 		// the error can only be beanstalk.NameError or beanstalk.ConnError
-		switch cerr := err.(type) {
-		case beanstalk.ConnError:
-			switch cerr.Err {
-			case beanstalk.ErrTimeout:
+		var cerr beanstalk.ConnError
+		switch {
+		case errors.As(err, &cerr):
+			switch {
+			case errors.Is(cerr.Err, beanstalk.ErrTimeout):
 				// timeout error on timeout, just continue the loop
-			case beanstalk.ErrBadChar, beanstalk.ErrBadFormat, beanstalk.ErrBuried, beanstalk.ErrDeadline,
-				beanstalk.ErrDraining, beanstalk.ErrEmpty, beanstalk.ErrInternal, beanstalk.ErrJobTooBig,
-				beanstalk.ErrNoCRLF, beanstalk.ErrNotFound, beanstalk.ErrNotIgnored, beanstalk.ErrTooLong:
+			case
+				errors.Is(cerr.Err, beanstalk.ErrBadChar),
+				errors.Is(cerr.Err, beanstalk.ErrBadFormat),
+				errors.Is(cerr.Err, beanstalk.ErrBuried),
+				errors.Is(cerr.Err, beanstalk.ErrDeadline),
+				errors.Is(cerr.Err, beanstalk.ErrDraining),
+				errors.Is(cerr.Err, beanstalk.ErrEmpty),
+				errors.Is(cerr.Err, beanstalk.ErrInternal),
+				errors.Is(cerr.Err, beanstalk.ErrJobTooBig),
+				errors.Is(cerr.Err, beanstalk.ErrNoCRLF),
+				errors.Is(cerr.Err, beanstalk.ErrNotFound),
+				errors.Is(cerr.Err, beanstalk.ErrNotIgnored),
+				errors.Is(cerr.Err, beanstalk.ErrTooLong):
 				// won't reset
 				logx.Error(err)
 			default:
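The `errors.As`/`errors.Is` rewrite matters once errors arrive wrapped: a plain type switch only matches the concrete top-level type, while `errors.As` walks the wrap chain. A small demonstration:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/beanstalkd/go-beanstalk"
)

func main() {
	// a ConnError wrapped by a caller, as fmt.Errorf("...: %w", err) produces
	err := fmt.Errorf("consume failed: %w", beanstalk.ConnError{
		Op:  "reserve",
		Err: beanstalk.ErrTimeout,
	})

	// old style: the type assertion misses the wrapped ConnError
	if _, ok := err.(beanstalk.ConnError); !ok {
		fmt.Println("type switch/assertion: no match")
	}

	// new style: errors.As unwraps until it finds one
	var cerr beanstalk.ConnError
	if errors.As(err, &cerr) && errors.Is(cerr.Err, beanstalk.ErrTimeout) {
		fmt.Println("errors.As match:", cerr.Err)
	}
}
```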

‎dq/producer.go (+24 −20)

@@ -1,10 +1,8 @@
 package dq
 
 import (
-	"bytes"
 	"log"
 	"math/rand"
-	"strconv"
 	"strings"
 	"time"
 
@@ -25,15 +23,21 @@ type (
 		Close() error
 		Delay(body []byte, delay time.Duration) (string, error)
 		Revoke(ids string) error
+
+		at(body []byte, at time.Time) (string, error)
+		delay(body []byte, delay time.Duration) (string, error)
 	}
 
 	producerCluster struct {
 		nodes []Producer
 	}
 )
 
+var rng *rand.Rand
+
 func init() {
-	rand.Seed(time.Now().UnixNano())
+	source := rand.NewSource(time.Now().UnixNano())
+	rng = rand.New(source)
 }
 
 func NewProducer(beanstalks []Beanstalk) Producer {
@@ -56,10 +60,8 @@ func NewProducer(beanstalks []Beanstalk) Producer {
 }
 
 func (p *producerCluster) At(body []byte, at time.Time) (string, error) {
-	wrapped := p.wrap(body, at)
-	return p.insert(func(node Producer) (string, error) {
-		return node.At(wrapped, at)
-	})
+	wrapped := wrap(body, at)
+	return p.at(wrapped, at)
 }
 
 func (p *producerCluster) Close() error {
@@ -73,10 +75,8 @@ func (p *producerCluster) Close() error {
 }
 
 func (p *producerCluster) Delay(body []byte, delay time.Duration) (string, error) {
-	wrapped := p.wrap(body, time.Now().Add(delay))
-	return p.insert(func(node Producer) (string, error) {
-		return node.Delay(wrapped, delay)
-	})
+	wrapped := wrap(body, time.Now().Add(delay))
+	return p.delay(wrapped, delay)
 }
 
 func (p *producerCluster) Revoke(ids string) error {
@@ -98,17 +98,29 @@ func (p *producerCluster) Revoke(ids string) error {
 	return be.Err()
 }
 
+func (p *producerCluster) at(body []byte, at time.Time) (string, error) {
+	return p.insert(func(node Producer) (string, error) {
+		return node.at(body, at)
+	})
+}
+
 func (p *producerCluster) cloneNodes() []Producer {
 	return append([]Producer(nil), p.nodes...)
 }
 
+func (p *producerCluster) delay(body []byte, delay time.Duration) (string, error) {
+	return p.insert(func(node Producer) (string, error) {
+		return node.delay(body, delay)
+	})
+}
+
 func (p *producerCluster) getWriteNodes() []Producer {
 	if len(p.nodes) <= replicaNodes {
 		return p.nodes
 	}
 
 	nodes := p.cloneNodes()
-	rand.Shuffle(len(nodes), func(i, j int) {
+	rng.Shuffle(len(nodes), func(i, j int) {
 		nodes[i], nodes[j] = nodes[j], nodes[i]
 	})
 	return nodes[:replicaNodes]
@@ -156,11 +168,3 @@ func (p *producerCluster) insert(fn func(node Producer) (string, error)) (string
 
 	return "", be.Err()
 }
-
-func (p *producerCluster) wrap(body []byte, at time.Time) []byte {
-	var builder bytes.Buffer
-	builder.WriteString(strconv.FormatInt(at.UnixNano(), 10))
-	builder.WriteByte(timeSep)
-	builder.Write(body)
-	return builder.Bytes()
-}
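`rand.Seed` is deprecated as of Go 1.20, which this commit adopts in go.mod, so the producer now keeps its own `*rand.Rand` seeded once at init. The pattern in isolation:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// seeded once, mirroring the producer's init()
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))

func main() {
	nodes := []string{"a", "b", "c", "d"}
	// shuffle and take a prefix, as getWriteNodes does with replicaNodes
	rng.Shuffle(len(nodes), func(i, j int) {
		nodes[i], nodes[j] = nodes[j], nodes[i]
	})
	fmt.Println(nodes[:2])
}
```

One caveat of the swap: unlike the package-level `rand` functions, a `*rand.Rand` is not safe for concurrent use, so concurrent callers would need their own generator or a lock.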

‎dq/producernode.go (+55 −33)

@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"github.com/beanstalkd/go-beanstalk"
+	"github.com/zeromicro/go-zero/core/logx"
 )
 
 var ErrTimeBeforeNow = errors.New("can't schedule task to past time")
@@ -27,46 +28,15 @@ func NewProducerNode(endpoint, tube string) Producer {
 }
 
 func (p *producerNode) At(body []byte, at time.Time) (string, error) {
-	now := time.Now()
-	if at.Before(now) {
-		return "", ErrTimeBeforeNow
-	}
-
-	duration := at.Sub(now)
-	return p.Delay(body, duration)
+	return p.at(wrap(body, at), at)
 }
 
 func (p *producerNode) Close() error {
	return p.conn.Close()
 }
 
 func (p *producerNode) Delay(body []byte, delay time.Duration) (string, error) {
-	conn, err := p.conn.get()
-	if err != nil {
-		return "", err
-	}
-
-	id, err := conn.Put(body, PriNormal, delay, defaultTimeToRun)
-	if err == nil {
-		return fmt.Sprintf("%s/%s/%d", p.endpoint, p.tube, id), nil
-	}
-
-	// the error can only be beanstalk.NameError or beanstalk.ConnError
-	// just return when the error is beanstalk.NameError, don't reset
-	switch cerr := err.(type) {
-	case beanstalk.ConnError:
-		switch cerr.Err {
-		case beanstalk.ErrBadChar, beanstalk.ErrBadFormat, beanstalk.ErrBuried, beanstalk.ErrDeadline,
-			beanstalk.ErrDraining, beanstalk.ErrEmpty, beanstalk.ErrInternal, beanstalk.ErrJobTooBig,
-			beanstalk.ErrNoCRLF, beanstalk.ErrNotFound, beanstalk.ErrNotIgnored, beanstalk.ErrTooLong:
-			// won't reset
-		default:
-			// beanstalk.ErrOOM, beanstalk.ErrTimeout, beanstalk.ErrUnknown and other errors
-			p.conn.reset()
-		}
-	}
-
-	return "", err
+	return p.delay(wrap(body, time.Now().Add(delay)), delay)
 }
 
 func (p *producerNode) Revoke(jointId string) error {
@@ -96,3 +66,55 @@ func (p *producerNode) Revoke(jointId string) error {
 	// if not in this beanstalk, ignore
 	return nil
 }
+
+func (p *producerNode) at(body []byte, at time.Time) (string, error) {
+	now := time.Now()
+	if at.Before(now) {
+		return "", ErrTimeBeforeNow
+	}
+
+	duration := at.Sub(now)
+	return p.delay(body, duration)
+}
+
+func (p *producerNode) delay(body []byte, delay time.Duration) (string, error) {
+	conn, err := p.conn.get()
+	if err != nil {
+		return "", err
+	}
+
+	id, err := conn.Put(body, PriNormal, delay, defaultTimeToRun)
+	if err == nil {
+		return fmt.Sprintf("%s/%s/%d", p.endpoint, p.tube, id), nil
+	}
+
+	// the error can only be beanstalk.NameError or beanstalk.ConnError
+	// just return when the error is beanstalk.NameError, don't reset
+	var cerr beanstalk.ConnError
+	switch {
+	case errors.As(err, &cerr):
+		switch {
+		case
+			errors.Is(cerr.Err, beanstalk.ErrBadChar),
+			errors.Is(cerr.Err, beanstalk.ErrBadFormat),
+			errors.Is(cerr.Err, beanstalk.ErrBuried),
+			errors.Is(cerr.Err, beanstalk.ErrDeadline),
+			errors.Is(cerr.Err, beanstalk.ErrDraining),
+			errors.Is(cerr.Err, beanstalk.ErrEmpty),
+			errors.Is(cerr.Err, beanstalk.ErrInternal),
+			errors.Is(cerr.Err, beanstalk.ErrJobTooBig),
+			errors.Is(cerr.Err, beanstalk.ErrNoCRLF),
+			errors.Is(cerr.Err, beanstalk.ErrNotFound),
+			errors.Is(cerr.Err, beanstalk.ErrNotIgnored),
+			errors.Is(cerr.Err, beanstalk.ErrTooLong):
+			// won't reset
+		default:
+			// beanstalk.ErrOOM, beanstalk.ErrTimeout, beanstalk.ErrUnknown and other errors
+			p.conn.reset()
+		}
+	default:
+		logx.Error(err)
+	}
+
+	return "", err
+}

‎dq/wrapper.go (+15)

@@ -0,0 +1,15 @@
+package dq
+
+import (
+	"bytes"
+	"strconv"
+	"time"
+)
+
+func wrap(body []byte, at time.Time) []byte {
+	var builder bytes.Buffer
+	builder.WriteString(strconv.FormatInt(at.UnixNano(), 10))
+	builder.WriteByte(timeSep)
+	builder.Write(body)
+	return builder.Bytes()
+}

File renamed without changes.
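The extracted `wrap` prefixes the payload with its scheduled time, giving the wire format `<unix-nanos><sep><body>`. A sketch of the output (the separator byte here is a stand-in for the package's `timeSep` constant):

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
	"time"
)

const timeSep = '/' // stand-in for dq's timeSep

func wrap(body []byte, at time.Time) []byte {
	var builder bytes.Buffer
	builder.WriteString(strconv.FormatInt(at.UnixNano(), 10))
	builder.WriteByte(timeSep)
	builder.Write(body)
	return builder.Bytes()
}

func main() {
	at := time.Now().Add(5 * time.Second)
	fmt.Printf("%s\n", wrap([]byte("task-payload"), at))
	// prints something like: 1721800000000000000/task-payload
}
```

On the consuming side, `unwrap` in dq/consumer.go strips this prefix and uses the timestamp, together with the `tolerance` constant, to decide whether a task should be discarded.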

‎example/dq/producer/node/producer.go (+20)

@@ -0,0 +1,20 @@
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/zeromicro/go-queue/dq"
+)
+
+func main() {
+	producer := dq.NewProducerNode("localhost:11300", "tube")
+
+	for i := 1000; i < 1005; i++ {
+		_, err := producer.Delay([]byte(strconv.Itoa(i)), time.Second*5)
+		if err != nil {
+			fmt.Println(err)
+		}
+	}
+}

‎example/kq/consumer/queue.go (+2 −1)

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/zeromicro/go-queue/kq"
@@ -11,7 +12,7 @@ func main() {
 	var c kq.KqConf
 	conf.MustLoad("config.yaml", &c)
 
-	q := kq.MustNewQueue(c, kq.WithHandle(func(k, v string) error {
+	q := kq.MustNewQueue(c, kq.WithHandle(func(ctx context.Context, k, v string) error {
 		fmt.Printf("=> %s\n", v)
 		return nil
 	}))

‎example/kq/producer/produce.go (+6 −2)

@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"log"
@@ -40,8 +41,11 @@ func main() {
 		log.Fatal(err)
 	}
 
-	fmt.Println(string(body))
-	if err := pusher.Push(string(body)); err != nil {
+	if err := pusher.Push(context.Background(), string(body)); err != nil {
+		log.Fatal(err)
+	}
+
+	if err := pusher.KPush(context.Background(), "test", string(body)); err != nil {
 		log.Fatal(err)
 	}
 }

‎example/natsq/consumer/consumer.go (+60)

@@ -0,0 +1,60 @@
+package main
+
+import (
+	"fmt"
+
+	"github.com/zeromicro/go-queue/natsq"
+)
+
+type MyConsumer struct {
+	Channel string
+}
+
+func (c *MyConsumer) HandleMessage(m *natsq.Msg) error {
+	fmt.Printf("%s received a message on %s: %s\n", c.Channel, m.Subject, string(m.Data))
+	return nil
+}
+
+func main() {
+	mc1 := &MyConsumer{Channel: "vipUpgrade"}
+	mc2 := &MyConsumer{Channel: "taskFinish"}
+
+	c := &natsq.NatsConfig{
+		ServerUri: "nats://127.0.0.1:4222",
+	}
+
+	// JetMode
+	// cq := []*natsq.ConsumerQueue{
+	// 	{
+	// 		Consumer:   mc1,
+	// 		QueueName:  "vipUpgrade",
+	// 		StreamName: "ccc",
+	// 		Subjects:   []string{"ddd", "eee"},
+	// 	},
+	// 	{
+	// 		Consumer:   mc2,
+	// 		QueueName:  "taskFinish",
+	// 		StreamName: "ccc",
+	// 		Subjects:   []string{"ccc", "eee"},
+	// 	},
+	// }
+	// q := natsq.MustNewConsumerManager(c, cq, natsq.NatJetMode)
+
+	// DefaultMode
+	cq := []*natsq.ConsumerQueue{
+		{
+			Consumer:  mc1,
+			QueueName: "vipUpgrade",
+			Subjects:  []string{"ddd", "eee"},
+		},
+		{
+			Consumer:  mc2,
+			QueueName: "taskFinish",
+			Subjects:  []string{"ccc", "eee"},
+		},
+	}
+	q := natsq.MustNewConsumerManager(c, cq, natsq.NatDefaultMode)
+	defer q.Stop()
+	q.Start()
+}

‎example/natsq/publisher/publisher.go (+68)

@@ -0,0 +1,68 @@
+package main
+
+import (
+	"log"
+	"math/rand"
+	"time"
+
+	"github.com/nats-io/nats.go/jetstream"
+	"github.com/zeromicro/go-queue/natsq"
+)
+
+func main() {
+	c := natsq.NatsConfig{
+		ServerUri: "127.0.0.1:4222",
+	}
+
+	// Default Mode
+	p, _ := natsq.NewDefaultProducer(&c)
+	for i := 0; i < 3; i++ {
+		payload := randBody()
+		err := p.Publish(randSub(), payload)
+		if err != nil {
+			log.Fatalf("Error publishing message: %v", err)
+		} else {
+			log.Printf("Published message: %s", string(payload))
+		}
+	}
+	p.Close()
+
+	// JetMode
+	j, _ := natsq.NewJetProducer(&c)
+	j.CreateOrUpdateStream(jetstream.StreamConfig{
+		Name:     "ccc",
+		Subjects: []string{"ccc", "ddd", "eee"},
+		Storage:  jetstream.FileStorage,
+		NoAck:    false,
+	})
+	for i := 0; i < 3; i++ {
+		payload := randBody()
+		err := j.Publish(randSub(), payload)
+		if err != nil {
+			log.Fatalf("Error publishing message: %v", err)
+		} else {
+			log.Printf("Published message: %s", string(payload))
+		}
+	}
+	j.Close()
+}
+
+func randSub() string {
+	source := rand.NewSource(time.Now().UnixNano())
+	// create a new random number generator
+	rng := rand.New(source)
+	strings := []string{"ccc", "ddd", "eee"}
+	randomIndex := rng.Intn(len(strings))
+	return strings[randomIndex]
+}
+
+func randBody() []byte {
+	charSet := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+	length := 10
+	result := make([]byte, length)
+	for i := range result {
+		result[i] = charSet[rand.Intn(len(charSet))]
+	}
+	return result
+}

‎example/stan/publisher/producer.go (+3 −2)

@@ -44,12 +44,13 @@ func main() {
 }
 
 func randSub() string {
-	rand.Seed(time.Now().UnixNano())
+	source := rand.NewSource(time.Now().UnixNano())
+	rng := rand.New(source)
 	charSet := "abc"
 	length := 1
 	result := make([]byte, length)
 	for i := range result {
-		result[i] = charSet[rand.Intn(len(charSet))]
+		result[i] = charSet[rng.Intn(len(charSet))]
 	}
 	return string(result)
 }

‎go.mod (+58 −5)

@@ -1,13 +1,66 @@
 module github.com/zeromicro/go-queue
 
-go 1.16
+go 1.20
 
 require (
 	github.com/beanstalkd/go-beanstalk v0.2.0
+	github.com/nats-io/nats.go v1.34.1
+	github.com/nats-io/stan.go v0.10.4
+	github.com/rabbitmq/amqp091-go v1.10.0
+	github.com/segmentio/kafka-go v0.4.47
+	github.com/stretchr/testify v1.9.0
+	github.com/zeromicro/go-zero v1.6.6
+	go.opentelemetry.io/otel v1.19.0
+)
+
+require (
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/fatih/color v1.17.0 // indirect
+	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect
+	github.com/klauspost/compress v1.17.9 // indirect
+	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-isatty v0.0.20 // indirect
+	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
 	github.com/nats-io/nats-server/v2 v2.9.15 // indirect
 	github.com/nats-io/nats-streaming-server v0.25.3 // indirect
-	github.com/nats-io/stan.go v0.10.4
-	github.com/rabbitmq/amqp091-go v1.8.0
-	github.com/segmentio/kafka-go v0.4.38
-	github.com/zeromicro/go-zero v1.4.3
+	github.com/nats-io/nkeys v0.4.7 // indirect
+	github.com/nats-io/nuid v1.0.1 // indirect
+	github.com/openzipkin/zipkin-go v0.4.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
+	github.com/pierrec/lz4/v4 v4.1.21 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_golang v1.18.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.45.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
+	github.com/redis/go-redis/v9 v9.5.3 // indirect
+	github.com/spaolacci/murmur3 v1.1.0 // indirect
+	go.opentelemetry.io/otel/exporters/jaeger v1.17.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.19.0 // indirect
+	go.opentelemetry.io/otel/exporters/zipkin v1.19.0 // indirect
+	go.opentelemetry.io/otel/metric v1.19.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.19.0 // indirect
+	go.opentelemetry.io/otel/trace v1.19.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
+	go.uber.org/automaxprocs v1.5.3 // indirect
+	golang.org/x/crypto v0.24.0 // indirect
+	golang.org/x/net v0.26.0 // indirect
+	golang.org/x/sys v0.21.0 // indirect
+	golang.org/x/text v0.16.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
+	google.golang.org/grpc v1.64.0 // indirect
+	google.golang.org/protobuf v1.34.2 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 )

‎go.sum (+122 −1,339)

Large diffs are not rendered by default.

‎kq/config.go (+14 −12)

@@ -9,16 +9,18 @@ const (
 
 type KqConf struct {
 	service.ServiceConf
-	Brokers     []string
-	Group       string
-	Topic       string
-	Offset      string `json:",options=first|last,default=last"`
-	Conns       int    `json:",default=1"`
-	Consumers   int    `json:",default=8"`
-	Processors  int    `json:",default=8"`
-	MinBytes    int    `json:",default=10240"`    // 10K
-	MaxBytes    int    `json:",default=10485760"` // 10M
-	Username    string `json:",optional"`
-	Password    string `json:",optional"`
-	ForceCommit bool   `json:",default=true"`
+	Brokers       []string
+	Group         string
+	Topic         string
+	CaFile        string `json:",optional"`
+	Offset        string `json:",options=first|last,default=last"`
+	Conns         int    `json:",default=1"`
+	Consumers     int    `json:",default=8"`
+	Processors    int    `json:",default=8"`
+	MinBytes      int    `json:",default=10240"`    // 10K
+	MaxBytes      int    `json:",default=10485760"` // 10M
+	Username      string `json:",optional"`
+	Password      string `json:",optional"`
+	ForceCommit   bool   `json:",default=true"`
+	CommitInOrder bool   `json:",default=false"`
 }
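Both additions slot into the consumer YAML that the readme documents; a sketch with illustrative values (`CaFile` and `CommitInOrder` are the new fields):

```yaml
Name: kq
Brokers:
  - 127.0.0.1:19092
Group: adhoc
Topic: kq
Offset: last
Consumers: 8
Processors: 8
CaFile: /etc/kafka/ca.pem  # optional: dial brokers over TLS with this CA bundle
CommitInOrder: true        # route offset commits through the in-order runner
```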

‎kq/internal/message.go (+34)

@@ -0,0 +1,34 @@
+package internal
+
+import "github.com/segmentio/kafka-go"
+
+type Message struct {
+	*kafka.Message
+}
+
+func NewMessage(msg *kafka.Message) *Message {
+	return &Message{Message: msg}
+}
+
+func (m *Message) GetHeader(key string) string {
+	for _, h := range m.Headers {
+		if h.Key == key {
+			return string(h.Value)
+		}
+	}
+	return ""
+}
+
+func (m *Message) SetHeader(key, val string) {
+	// ensure uniqueness of keys
+	for i := 0; i < len(m.Headers); i++ {
+		if m.Headers[i].Key == key {
+			m.Headers = append(m.Headers[:i], m.Headers[i+1:]...)
+			i--
+		}
+	}
+	m.Headers = append(m.Headers, kafka.Header{
+		Key:   key,
+		Value: []byte(val),
+	})
+}

‎kq/internal/message_test.go (+57)

@@ -0,0 +1,57 @@
+package internal
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMessageGetHeader(t *testing.T) {
+	testCases := []struct {
+		name     string
+		msg      *Message
+		key      string
+		expected string
+	}{
+		{
+			name: "exists",
+			msg: &Message{
+				Message: &kafka.Message{Headers: []kafka.Header{
+					{Key: "foo", Value: []byte("bar")},
+				}}},
+			key:      "foo",
+			expected: "bar",
+		},
+		{
+			name:     "not exists",
+			msg:      &Message{Message: &kafka.Message{Headers: []kafka.Header{}}},
+			key:      "foo",
+			expected: "",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			result := tc.msg.GetHeader(tc.key)
+			assert.Equal(t, tc.expected, result)
+		})
+	}
+}
+
+func TestMessageSetHeader(t *testing.T) {
+	msg := &Message{Message: &kafka.Message{Headers: []kafka.Header{
+		{Key: "foo", Value: []byte("bar")}},
+	}}
+
+	msg.SetHeader("foo", "bar2")
+	msg.SetHeader("foo2", "bar2")
+	msg.SetHeader("foo2", "bar3")
+	msg.SetHeader("foo3", "bar4")
+
+	assert.ElementsMatch(t, msg.Headers, []kafka.Header{
+		{Key: "foo", Value: []byte("bar2")},
+		{Key: "foo2", Value: []byte("bar3")},
+		{Key: "foo3", Value: []byte("bar4")},
+	})
+}

‎kq/internal/trace.go (+35)

@@ -0,0 +1,35 @@
+package internal
+
+import "go.opentelemetry.io/otel/propagation"
+
+var _ propagation.TextMapCarrier = (*MessageCarrier)(nil)
+
+// MessageCarrier injects and extracts traces from a Message.
+type MessageCarrier struct {
+	msg *Message
+}
+
+// NewMessageCarrier returns a new MessageCarrier.
+func NewMessageCarrier(msg *Message) MessageCarrier {
+	return MessageCarrier{msg: msg}
+}
+
+// Get returns the value associated with the passed key.
+func (m MessageCarrier) Get(key string) string {
+	return m.msg.GetHeader(key)
+}
+
+// Set stores the key-value pair.
+func (m MessageCarrier) Set(key string, value string) {
+	m.msg.SetHeader(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (m MessageCarrier) Keys() []string {
+	out := make([]string, len(m.msg.Headers))
+	for i, h := range m.msg.Headers {
+		out[i] = h.Key
+	}
+
+	return out
+}
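`MessageCarrier` is what lets OpenTelemetry propagators read and write Kafka headers. A self-contained sketch of the Inject/Extract round-trip (it inlines an equivalent carrier, since `kq/internal` is not importable from outside this module, and uses the Baggage propagator so it runs without a tracer; kq itself goes through `otel.GetTextMapPropagator()`):

```go
package main

import (
	"context"
	"fmt"

	"github.com/segmentio/kafka-go"
	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
)

// headerCarrier mirrors internal.MessageCarrier over kafka.Message headers.
type headerCarrier struct{ msg *kafka.Message }

func (c headerCarrier) Get(key string) string {
	for _, h := range c.msg.Headers {
		if h.Key == key {
			return string(h.Value)
		}
	}
	return ""
}

func (c headerCarrier) Set(key, value string) {
	c.msg.Headers = append(c.msg.Headers, kafka.Header{Key: key, Value: []byte(value)})
}

func (c headerCarrier) Keys() []string {
	keys := make([]string, 0, len(c.msg.Headers))
	for _, h := range c.msg.Headers {
		keys = append(keys, h.Key)
	}
	return keys
}

func main() {
	prop := propagation.Baggage{} // stand-in for the configured global propagator

	// producer side: attach context data and inject it into the message headers
	member, _ := baggage.NewMember("tenant", "acme")
	bag, _ := baggage.New(member)
	ctx := baggage.ContextWithBaggage(context.Background(), bag)

	msg := kafka.Message{Value: []byte("payload")}
	prop.Inject(ctx, headerCarrier{&msg})

	// consumer side: rebuild a context from those same headers
	ctx2 := prop.Extract(context.Background(), headerCarrier{&msg})
	fmt.Println(baggage.FromContext(ctx2).Member("tenant").Value()) // acme
}
```

The same mechanics carry `traceparent` headers when a trace propagator and an active span are present, which is how pusher.go and queue.go below link producer and consumer spans.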

‎kq/internal/trace_test.go (+93)

@@ -0,0 +1,93 @@
+package internal
+
+import (
+	"testing"
+
+	"github.com/segmentio/kafka-go"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestMessageCarrierGet(t *testing.T) {
+	testCases := []struct {
+		name     string
+		carrier  MessageCarrier
+		key      string
+		expected string
+	}{
+		{
+			name: "exists",
+			carrier: NewMessageCarrier(&Message{&kafka.Message{Headers: []kafka.Header{
+				{Key: "foo", Value: []byte("bar")},
+			}}}),
+			key:      "foo",
+			expected: "bar",
+		},
+		{
+			name:     "not exists",
+			carrier:  NewMessageCarrier(&Message{&kafka.Message{Headers: []kafka.Header{}}}),
+			key:      "foo",
+			expected: "",
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			result := tc.carrier.Get(tc.key)
+			assert.Equal(t, tc.expected, result)
+		})
+	}
+}
+
+func TestMessageCarrierSet(t *testing.T) {
+	msg := Message{&kafka.Message{Headers: []kafka.Header{
+		{Key: "foo", Value: []byte("bar")},
+	}}}
+	carrier := MessageCarrier{msg: &msg}
+
+	carrier.Set("foo", "bar2")
+	carrier.Set("foo2", "bar2")
+	carrier.Set("foo2", "bar3")
+	carrier.Set("foo3", "bar4")
+
+	assert.ElementsMatch(t, carrier.msg.Headers, []kafka.Header{
+		{Key: "foo", Value: []byte("bar2")},
+		{Key: "foo2", Value: []byte("bar3")},
+		{Key: "foo3", Value: []byte("bar4")},
+	})
+}
+
+func TestMessageCarrierKeys(t *testing.T) {
+	testCases := []struct {
+		name     string
+		carrier  MessageCarrier
+		expected []string
+	}{
+		{
+			name: "one",
+			carrier: MessageCarrier{msg: &Message{&kafka.Message{Headers: []kafka.Header{
+				{Key: "foo", Value: []byte("bar")},
+			}}}},
+			expected: []string{"foo"},
+		},
+		{
+			name:     "none",
+			carrier:  MessageCarrier{msg: &Message{&kafka.Message{Headers: []kafka.Header{}}}},
+			expected: []string{},
+		},
+		{
+			name: "many",
+			carrier: MessageCarrier{msg: &Message{&kafka.Message{Headers: []kafka.Header{
+				{Key: "foo", Value: []byte("bar")},
+				{Key: "baz", Value: []byte("quux")},
+			}}}},
+			expected: []string{"foo", "baz"},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			result := tc.carrier.Keys()
+			assert.Equal(t, tc.expected, result)
+		})
+	}
+}

‎kq/pusher.go (+76 −27)

@@ -6,109 +6,158 @@ import (
 	"time"
 
 	"github.com/segmentio/kafka-go"
+	"github.com/zeromicro/go-queue/kq/internal"
 	"github.com/zeromicro/go-zero/core/executors"
 	"github.com/zeromicro/go-zero/core/logx"
+	"go.opentelemetry.io/otel"
 )
 
 type (
-	PushOption func(options *chunkOptions)
+	PushOption func(options *pushOptions)
 
 	Pusher struct {
-		produer  *kafka.Writer
+		producer *kafka.Writer
 		topic    string
 		executor *executors.ChunkExecutor
 	}
 
-	chunkOptions struct {
+	pushOptions struct {
+		// kafka.Writer options
+		allowAutoTopicCreation bool
+
+		// executors.ChunkExecutor options
 		chunkSize     int
 		flushInterval time.Duration
+
+		// syncPush is used to enable sync push
+		syncPush bool
 	}
 )
 
+// NewPusher returns a Pusher with the given Kafka addresses and topic.
 func NewPusher(addrs []string, topic string, opts ...PushOption) *Pusher {
 	producer := &kafka.Writer{
 		Addr:        kafka.TCP(addrs...),
 		Topic:       topic,
 		Balancer:    &kafka.LeastBytes{},
 		Compression: kafka.Snappy,
 	}
+
+	var options pushOptions
+	for _, opt := range opts {
+		opt(&options)
+	}
+
+	// apply kafka.Writer options
+	producer.AllowAutoTopicCreation = options.allowAutoTopicCreation
+
 	pusher := &Pusher{
-		produer: producer,
-		topic:   topic,
+		producer: producer,
+		topic:    topic,
+	}
+
+	// if syncPush is true, return the pusher directly
+	if options.syncPush {
+		return pusher
+	}
+
+	// apply ChunkExecutor options
+	var chunkOpts []executors.ChunkOption
+	if options.chunkSize > 0 {
+		chunkOpts = append(chunkOpts, executors.WithChunkBytes(options.chunkSize))
+	}
+	if options.flushInterval > 0 {
+		chunkOpts = append(chunkOpts, executors.WithFlushInterval(options.flushInterval))
 	}
+
 	pusher.executor = executors.NewChunkExecutor(func(tasks []interface{}) {
 		chunk := make([]kafka.Message, len(tasks))
 		for i := range tasks {
 			chunk[i] = tasks[i].(kafka.Message)
 		}
-		if err := pusher.produer.WriteMessages(context.Background(), chunk...); err != nil {
+		if err := pusher.producer.WriteMessages(context.Background(), chunk...); err != nil {
 			logx.Error(err)
 		}
-	}, newOptions(opts)...)
+	}, chunkOpts...)
 
 	return pusher
 }
 
+// Close closes the Pusher and releases any resources used by it.
 func (p *Pusher) Close() error {
 	if p.executor != nil {
 		p.executor.Flush()
 	}
 
-	return p.produer.Close()
+	return p.producer.Close()
 }
 
+// Name returns the name of the Kafka topic that the Pusher is sending messages to.
 func (p *Pusher) Name() string {
 	return p.topic
 }
 
-func (p *Pusher) Push(v string) error {
+// KPush sends a message with the given key to the Kafka topic.
+func (p *Pusher) KPush(ctx context.Context, k, v string) error {
 	msg := kafka.Message{
-		Key:   []byte(strconv.FormatInt(time.Now().UnixNano(), 10)),
+		Key:   []byte(k), // caller-provided key
 		Value: []byte(v),
 	}
 	if p.executor != nil {
 		return p.executor.Add(msg, len(v))
 	} else {
-		return p.produer.WriteMessages(context.Background(), msg)
+		return p.producer.WriteMessages(ctx, msg)
 	}
 }
 
-func (p *Pusher) PushWithKey(k, v string) error {
+// Push sends a message to the Kafka topic, keyed by the current timestamp.
+func (p *Pusher) Push(ctx context.Context, v string) error {
+	return p.PushWithKey(ctx, strconv.FormatInt(time.Now().UnixNano(), 10), v)
+}
+
+// PushWithKey sends a message with the given key to the Kafka topic.
+func (p *Pusher) PushWithKey(ctx context.Context, key, v string) error {
 	msg := kafka.Message{
-		Key:   []byte(k),
+		Key:   []byte(key),
 		Value: []byte(v),
 	}
+
+	// wrap message into message carrier
+	mc := internal.NewMessageCarrier(internal.NewMessage(&msg))
+	// inject trace context into message
+	otel.GetTextMapPropagator().Inject(ctx, mc)
+
 	if p.executor != nil {
 		return p.executor.Add(msg, len(v))
 	} else {
-		return p.produer.WriteMessages(context.Background(), msg)
+		return p.producer.WriteMessages(ctx, msg)
	}
 }
 
+// WithChunkSize customizes the Pusher with the given chunk size.
 func WithChunkSize(chunkSize int) PushOption {
-	return func(options *chunkOptions) {
+	return func(options *pushOptions) {
 		options.chunkSize = chunkSize
 	}
 }
 
+// WithFlushInterval customizes the Pusher with the given flush interval.
 func WithFlushInterval(interval time.Duration) PushOption {
-	return func(options *chunkOptions) {
+	return func(options *pushOptions) {
 		options.flushInterval = interval
 	}
 }
 
-func newOptions(opts []PushOption) []executors.ChunkOption {
-	var options chunkOptions
-	for _, opt := range opts {
-		opt(&options)
-	}
-
-	var chunkOpts []executors.ChunkOption
-	if options.chunkSize > 0 {
-		chunkOpts = append(chunkOpts, executors.WithChunkBytes(options.chunkSize))
-	}
-	if options.flushInterval > 0 {
-		chunkOpts = append(chunkOpts, executors.WithFlushInterval(options.flushInterval))
-	}
-	return chunkOpts
-}
+// WithAllowAutoTopicCreation allows the Pusher to create the given topic if it does not exist.
+func WithAllowAutoTopicCreation() PushOption {
+	return func(options *pushOptions) {
+		options.allowAutoTopicCreation = true
+	}
+}
+
+// WithSyncPush enables the Pusher to push messages synchronously.
+func WithSyncPush() PushOption {
+	return func(options *pushOptions) {
+		options.syncPush = true
	}
+}
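With the reworked options, the producer can now skip batching entirely. A sketch using the new knobs (the broker address is a placeholder):

```go
package main

import (
	"context"
	"log"

	"github.com/zeromicro/go-queue/kq"
)

func main() {
	pusher := kq.NewPusher(
		[]string{"127.0.0.1:19092"}, // placeholder broker
		"kq",
		kq.WithSyncPush(),               // no ChunkExecutor: each Push writes immediately
		kq.WithAllowAutoTopicCreation(), // let the writer create the topic if missing
	)
	defer pusher.Close()

	// with sync push, write errors surface on the call itself
	if err := pusher.Push(context.Background(), "hello"); err != nil {
		log.Fatal(err)
	}
}
```

The trade-off is the usual one: `WithSyncPush` gives per-message error reporting and no batching latency, while the default chunked path amortizes broker round-trips.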

‎kq/queue.go (+115 −31)

@@ -2,21 +2,29 @@ package kq
 
 import (
 	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
 	"io"
 	"log"
+	"os"
 	"time"
 
 	"github.com/segmentio/kafka-go"
 	_ "github.com/segmentio/kafka-go/gzip"
 	_ "github.com/segmentio/kafka-go/lz4"
 	"github.com/segmentio/kafka-go/sasl/plain"
 	_ "github.com/segmentio/kafka-go/snappy"
+	"github.com/zeromicro/go-queue/kq/internal"
+	"github.com/zeromicro/go-zero/core/contextx"
+	"github.com/zeromicro/go-zero/core/logc"
 	"github.com/zeromicro/go-zero/core/logx"
 	"github.com/zeromicro/go-zero/core/queue"
 	"github.com/zeromicro/go-zero/core/service"
 	"github.com/zeromicro/go-zero/core/stat"
 	"github.com/zeromicro/go-zero/core/threading"
 	"github.com/zeromicro/go-zero/core/timex"
+	"go.opentelemetry.io/otel"
 )
 
 const (
@@ -26,12 +34,12 @@ const (
 )
 
 type (
-	ConsumeHandle func(key, value string) error
+	ConsumeHandle func(ctx context.Context, key, value string) error
 
-	ConsumeErrorHandler func(msg kafka.Message, err error)
+	ConsumeErrorHandler func(ctx context.Context, msg kafka.Message, err error)
 
 	ConsumeHandler interface {
-		Consume(key, value string) error
+		Consume(ctx context.Context, key, value string) error
 	}
 
 	queueOptions struct {
@@ -51,6 +59,7 @@ type (
 		channel          chan kafka.Message
 		producerRoutines *threading.RoutineGroup
 		consumerRoutines *threading.RoutineGroup
+		commitRunner     *threading.StableRunner[kafka.Message, kafka.Message]
 		metrics          *stat.Metrics
 		errorHandler     ConsumeErrorHandler
 	}
@@ -121,9 +130,26 @@ func newKafkaQueue(c KqConf, handler ConsumeHandler, options queueOptions) queue
 			},
 		}
 	}
+	if len(c.CaFile) > 0 {
+		caCert, err := os.ReadFile(c.CaFile)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		caCertPool := x509.NewCertPool()
+		ok := caCertPool.AppendCertsFromPEM(caCert)
+		if !ok {
+			log.Fatal(err)
+		}
+
+		readerConfig.Dialer.TLS = &tls.Config{
+			RootCAs:            caCertPool,
+			InsecureSkipVerify: true,
+		}
+	}
 	consumer := kafka.NewReader(readerConfig)
 
-	return &kafkaQueue{
+	q := &kafkaQueue{
 		c:        c,
 		consumer: consumer,
 		handler:  handler,
@@ -133,25 +159,51 @@ func newKafkaQueue(c KqConf, handler ConsumeHandler, options queueOptions) queue
 		metrics:      options.metrics,
 		errorHandler: options.errorHandler,
 	}
+	if c.CommitInOrder {
+		q.commitRunner = threading.NewStableRunner(func(msg kafka.Message) kafka.Message {
+			if err := q.consumeOne(context.Background(), string(msg.Key), string(msg.Value)); err != nil {
+				if q.errorHandler != nil {
+					q.errorHandler(context.Background(), msg, err)
+				}
+			}
+
+			return msg
+		})
+	}
+
+	return q
 }
 
 func (q *kafkaQueue) Start() {
-	q.startConsumers()
-	q.startProducers()
+	if q.c.CommitInOrder {
+		go q.commitInOrder()
 
-	q.producerRoutines.Wait()
-	close(q.channel)
-	q.consumerRoutines.Wait()
+		if err := q.consume(func(msg kafka.Message) {
+			if e := q.commitRunner.Push(msg); e != nil {
+				logx.Error(e)
+			}
+		}); err != nil {
+			logx.Error(err)
+		}
+	} else {
+		q.startConsumers()
+		q.startProducers()
+		q.producerRoutines.Wait()
+		close(q.channel)
+		q.consumerRoutines.Wait()
+
+		logx.Infof("Consumer %s is closed", q.c.Name)
+	}
 }
 
 func (q *kafkaQueue) Stop() {
 	q.consumer.Close()
 	logx.Close()
 }
 
-func (q *kafkaQueue) consumeOne(key, val string) error {
+func (q *kafkaQueue) consumeOne(ctx context.Context, key, val string) error {
 	startTime := timex.Now()
-	err := q.handler.Consume(key, val)
+	err := q.handler.Consume(ctx, key, val)
 	q.metrics.Add(stat.Task{
 		Duration: timex.Since(startTime),
 	})
@@ -162,18 +214,25 @@ func (q *kafkaQueue) startConsumers() {
 	for i := 0; i < q.c.Processors; i++ {
 		q.consumerRoutines.Run(func() {
 			for msg := range q.channel {
-				if err := q.consumeOne(string(msg.Key), string(msg.Value)); err != nil {
+				// wrap message into message carrier
+				mc := internal.NewMessageCarrier(internal.NewMessage(&msg))
+				// extract trace context from message
+				ctx := otel.GetTextMapPropagator().Extract(context.Background(), mc)
+				// remove deadline and error control
+				ctx = contextx.ValueOnlyFrom(ctx)
+
+				if err := q.consumeOne(ctx, string(msg.Key), string(msg.Value)); err != nil {
 					if q.errorHandler != nil {
-						q.errorHandler(msg, err)
+						q.errorHandler(ctx, msg, err)
 					}
 
 					if !q.c.ForceCommit {
 						continue
 					}
 				}
 
-				if err := q.consumer.CommitMessages(context.Background(), msg); err != nil {
-					logx.Errorf("commit failed, error: %v", err)
+				if err := q.consumer.CommitMessages(ctx, msg); err != nil {
+					logc.Errorf(ctx, "commit failed, error: %v", err)
 				}
 			}
 		})
@@ -182,25 +241,50 @@ func (q *kafkaQueue) startConsumers() {
 
 func (q *kafkaQueue) startProducers() {
 	for i := 0; i < q.c.Consumers; i++ {
+		i := i
 		q.producerRoutines.Run(func() {
-			for {
-				msg, err := q.consumer.FetchMessage(context.Background())
-				// io.EOF means consumer closed
-				// io.ErrClosedPipe means committing messages on the consumer,
-				// kafka will refire the messages on uncommitted messages, ignore
-				if err == io.EOF || err == io.ErrClosedPipe {
-					return
-				}
-				if err != nil {
-					logx.Errorf("Error on reading message, %q", err.Error())
-					continue
-				}
+			if err := q.consume(func(msg kafka.Message) {
 				q.channel <- msg
+			}); err != nil {
+				logx.Infof("Consumer %s-%d is closed, error: %q", q.c.Name, i, err.Error())
+				return
 			}
 		})
 	}
 }
 
+func (q *kafkaQueue) consume(handle func(msg kafka.Message)) error {
+	for {
+		msg, err := q.consumer.FetchMessage(context.Background())
+		// io.EOF means consumer closed
+		// io.ErrClosedPipe means committing messages on the consumer,
+		// kafka will refire the messages on uncommitted messages, ignore
+		if err == io.EOF || errors.Is(err, io.ErrClosedPipe) {
+			return err
+		}
+		if err != nil {
+			logx.Errorf("Error on reading message, %q", err.Error())
+			continue
+		}
+
+		handle(msg)
+	}
+}
+
+func (q *kafkaQueue) commitInOrder() {
+	for {
+		msg, err := q.commitRunner.Get()
+		if err != nil {
+			logx.Error(err)
+			return
+		}
+
+		if err := q.consumer.CommitMessages(context.Background(), msg); err != nil {
+			logx.Errorf("commit failed, error: %v", err)
+		}
+	}
+}
+
 func (q kafkaQueues) Start() {
 	for _, each := range q.queues {
 		q.group.Add(each)
@@ -252,8 +336,8 @@ type innerConsumeHandler struct {
 	handle ConsumeHandle
 }
 
-func (ch innerConsumeHandler) Consume(k, v string) error {
-	return ch.handle(k, v)
+func (ch innerConsumeHandler) Consume(ctx context.Context, k, v string) error {
+	return ch.handle(ctx, k, v)
 }
 
 func ensureQueueOptions(c KqConf, options *queueOptions) {
@@ -270,8 +354,8 @@ func ensureQueueOptions(c KqConf, options *queueOptions) {
 		options.metrics = stat.NewMetrics(c.Name)
 	}
 	if options.errorHandler == nil {
-		options.errorHandler = func(msg kafka.Message, err error) {
-			logx.Errorf("consume: %s, error: %v", string(msg.Value), err)
+		options.errorHandler = func(ctx context.Context, msg kafka.Message, err error) {
+			logc.Errorf(ctx, "consume: %s, error: %v", string(msg.Value), err)
 		}
 	}
 }
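Putting the context-aware surface together, a consumer sketch (the config path is hypothetical, and `WithErrorHandler` is assumed to be the package's existing option for wiring the error callback):

```go
package main

import (
	"context"
	"fmt"

	"github.com/segmentio/kafka-go"
	"github.com/zeromicro/go-queue/kq"
	"github.com/zeromicro/go-zero/core/conf"
)

func main() {
	var c kq.KqConf
	conf.MustLoad("config.yaml", &c) // hypothetical config file

	q := kq.MustNewQueue(c,
		kq.WithHandle(func(ctx context.Context, k, v string) error {
			// ctx carries the trace context extracted from the message headers
			fmt.Printf("=> %s\n", v)
			return nil
		}),
		kq.WithErrorHandler(func(ctx context.Context, msg kafka.Message, err error) {
			fmt.Printf("consume %s failed: %v\n", string(msg.Value), err)
		}),
	)
	defer q.Stop()
	q.Start()
}
```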

‎natsq/config.go (+11)

@@ -0,0 +1,11 @@
+package natsq
+
+import (
+	"github.com/nats-io/nats.go"
+)
+
+type NatsConfig struct {
+	ServerUri  string
+	ClientName string
+	Options    []nats.Option
+}

‎natsq/consumer.go (+174)

@@ -0,0 +1,174 @@
+package natsq
+
+import (
+	"context"
+	"errors"
+	"log"
+	"sync"
+
+	"github.com/nats-io/nats.go"
+	"github.com/nats-io/nats.go/jetstream"
+	"github.com/zeromicro/go-zero/core/logx"
+	"github.com/zeromicro/go-zero/core/queue"
+)
+
+const (
+	NatDefaultMode = iota
+	NatJetMode
+)
+
+type (
+	Msg struct {
+		Subject string
+		Data    []byte
+	}
+
+	ConsumeHandle func(m *Msg) error
+
+	// ConsumeHandler defines the methods a consumer must implement
+	ConsumeHandler interface {
+		HandleMessage(m *Msg) error
+	}
+
+	// ConsumerQueue binds a consumer to its queue, stream, and subjects
+	ConsumerQueue struct {
+		StreamName string                     // stream name
+		QueueName  string                     // queue name
+		Subjects   []string                   // subscribed subjects
+		Consumer   ConsumeHandler             // consumer object
+		JetOption  []jetstream.PullConsumeOpt // JetStream configuration
+	}
+
+	// ConsumerManager manages multiple consumer queues
+	ConsumerManager struct {
+		mutex    sync.RWMutex    // read-write lock
+		conn     *nats.Conn      // nats connection
+		mode     uint            // nats mode
+		queues   []ConsumerQueue // consumer queue list
+		options  []nats.Option   // connection options
+		doneChan chan struct{}   // close channel
+	}
+)
+
+// MustNewConsumerManager creates a new ConsumerManager instance.
+// It connects to the NATS server, registers the provided consumer queues, and returns the ConsumerManager.
+// If any error occurs during the process, it logs the error and continues.
+func MustNewConsumerManager(cfg *NatsConfig, cq []*ConsumerQueue, mode uint) queue.MessageQueue {
+	sc, err := nats.Connect(cfg.ServerUri, cfg.Options...)
+	if err != nil {
+		logx.Errorf("failed to connect nats, error: %v", err)
+	}
+	cm := &ConsumerManager{
+		conn:     sc,
+		options:  cfg.Options,
+		mode:     mode,
+		doneChan: make(chan struct{}),
+	}
+	if len(cq) == 0 {
+		logx.Errorf("failed to register consumerQueue to nats, error: cq len is 0")
+	}
+	for _, item := range cq {
+		err = cm.registerQueue(item)
+		if err != nil {
+			logx.Errorf("failed to register nats, error: %v", err)
+		}
+	}
+
+	return cm
+}
+
+// Start starts consuming messages from all the registered consumer queues.
+// It launches a goroutine for each consumer queue to subscribe and process messages.
+// The method blocks until the doneChan is closed.
+func (cm *ConsumerManager) Start() {
+	cm.mutex.RLock()
+	defer cm.mutex.RUnlock()
+
+	if len(cm.queues) == 0 {
+		logx.Errorf("no consumer queues found")
+	}
+	for _, consumerQueue := range cm.queues {
+		go cm.subscribe(consumerQueue)
+	}
+	<-cm.doneChan
+}
+
+// Stop closes the NATS connection and stops the ConsumerManager.
+func (cm *ConsumerManager) Stop() {
+	if cm.conn != nil {
+		cm.conn.Close()
+	}
+}
+
+// registerQueue registers a new consumer queue with the ConsumerManager.
+// It validates the required fields of the ConsumerQueue and adds it to the list of queues.
+// If any required field is missing, it returns an error.
+func (cm *ConsumerManager) registerQueue(queue *ConsumerQueue) error {
+	cm.mutex.Lock()
+	defer cm.mutex.Unlock()
+
+	if cm.mode == NatJetMode && queue.StreamName == "" {
+		return errors.New("stream name is required")
+	}
+
+	if queue.QueueName == "" {
+		return errors.New("queue name is required")
+	}
+	if len(queue.Subjects) == 0 {
+		return errors.New("subject is required")
+	}
+	if queue.Consumer == nil {
+		return errors.New("consumer is required")
+	}
+
+	cm.queues = append(cm.queues, *queue)
+	return nil
+}
+
+// subscribe subscribes to the specified consumer queue and starts processing messages.
+// If the NATS mode is NatJetMode, it creates a JetStream consumer and consumes messages using the provided options.
+// If the NATS mode is NatDefaultMode, it subscribes to the specified subjects using the queue name.
+// The method blocks until the doneChan is closed.
+func (cm *ConsumerManager) subscribe(queue ConsumerQueue) {
+	ctx := context.Background()
+	if cm.mode == NatJetMode {
+		js, _ := jetstream.New(cm.conn)
+		stream, err := js.Stream(ctx, "ccc")
+		if err != nil {
+			log.Fatalf("Error creating stream: %v", err)
+			return
+		}
+		consumer, _ := stream.CreateOrUpdateConsumer(ctx, jetstream.ConsumerConfig{
+			Name:           queue.QueueName,
+			AckPolicy:      jetstream.AckExplicitPolicy,
+			FilterSubjects: queue.Subjects,
+		})
+		consContext, subErr := consumer.Consume(func(msg jetstream.Msg) {
+			err := queue.Consumer.HandleMessage(&Msg{Subject: msg.Subject(), Data: msg.Data()})
+			if err != nil {
+				logx.Errorf("error handling message: %v", err.Error())
+			} else {
+				msg.Ack()
+			}
+		}, queue.JetOption...)
+		if subErr != nil {
+			logx.Errorf("error subscribing to queue %s: %v", queue.QueueName, subErr.Error())
+			return
+		}
+		defer consContext.Stop()
+	}
+	if cm.mode == NatDefaultMode {
+		for _, subject := range queue.Subjects {
+			cm.conn.QueueSubscribe(subject, queue.QueueName, func(m *nats.Msg) {
+				err := queue.Consumer.HandleMessage(&Msg{Subject: m.Subject, Data: m.Data})
+				if err != nil {
+					logx.Errorf("error handling message: %v", err.Error())
+				} else {
+					m.Ack()
+				}
+			})
+		}
+	}
+
+	<-cm.doneChan
+}

‎natsq/producer.go (+88)

@@ -0,0 +1,88 @@
+package natsq
+
+import (
+	"context"
+
+	"github.com/nats-io/nats.go"
+	"github.com/nats-io/nats.go/jetstream"
+)
+
+type DefaultProducer struct {
+	conn *nats.Conn
+}
+
+// NewDefaultProducer creates a new default NATS producer.
+// It takes a NatsConfig as input and returns a pointer to a DefaultProducer and an error.
+// It connects to the NATS server using the provided configuration.
+func NewDefaultProducer(c *NatsConfig) (*DefaultProducer, error) {
+	sc, err := nats.Connect(c.ServerUri, c.Options...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &DefaultProducer{
+		conn: sc,
+	}, nil
+}
+
+// Publish publishes a message with the specified subject and data using the default NATS producer.
+// It takes a subject string and data byte slice as input and returns an error if the publish fails.
+func (p *DefaultProducer) Publish(subject string, data []byte) error {
+	return p.conn.Publish(subject, data)
+}
+
+// Close closes the NATS connection of the default producer.
+func (p *DefaultProducer) Close() {
+	if p.conn != nil {
+		p.conn.Close()
+	}
+}
+
+type JetProducer struct {
+	conn *nats.Conn
+	js   jetstream.JetStream
+	ctx  context.Context
+}
+
+// NewJetProducer creates a new JetStream producer.
+// It takes a NatsConfig as input and returns a pointer to a JetProducer and an error.
+// It connects to the NATS server using the provided configuration and creates a new JetStream context.
+func NewJetProducer(c *NatsConfig) (*JetProducer, error) {
+	sc, err := nats.Connect(c.ServerUri, c.Options...)
+	if err != nil {
+		return nil, err
+	}
+	js, err := jetstream.New(sc)
+	if err != nil {
+		return nil, err
+	}
+	return &JetProducer{
+		conn: sc,
+		js:   js,
+	}, nil
+}
+
+// CreateOrUpdateStream creates or updates a JetStream stream with the specified configuration.
+// It takes a jetstream.StreamConfig as input and returns an error if the operation fails.
+func (j *JetProducer) CreateOrUpdateStream(config jetstream.StreamConfig) error {
+	_, err := j.js.CreateOrUpdateStream(j.ctx, config)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Publish publishes a message with the specified subject and data using the JetStream producer.
+// It takes a subject string and data byte slice as input and returns an error if the publish fails.
+func (j *JetProducer) Publish(subject string, data []byte) error {
+	_, err := j.js.Publish(j.ctx, subject, data)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Close closes the NATS connection of the JetStream producer.
+func (j *JetProducer) Close() {
+	j.conn.Close()
+}

‎readme.md (+1 −1)

@@ -53,7 +53,7 @@ Kafka Pub/Sub framework
 
 ### consumer example
 
-config.json
+config.yaml
 ```yaml
 Name: kq
 Brokers:
