|
3 | 3 | # Released under the MIT License. |
4 | 4 | # Copyright, 2025, by Samuel Williams. |
5 | 5 |
|
| 6 | +require "async/variable" |
6 | 7 | require "async/redis/cluster_client" |
7 | 8 | require "sus/fixtures/async" |
8 | 9 | require "securerandom" |
|
29 | 30 |
|
it "can subscribe to sharded channels and receive messages" do
	received_message = nil
	# Resolved by the subscriber once SSUBSCRIBE is established, so the
	# publisher cannot SPUBLISH before anyone is listening:
	ready = Async::Variable.new
	
	# Set up the subscriber using the cluster client's ssubscribe method:
	subscriber_task = Async do
		cluster.ssubscribe(shard_channel) do |context|
			# The block is entered only after the subscription is active:
			ready.resolve
			
			type, name, message = context.listen
			
			expect(type).to be == "smessage"
			expect(name).to be == shard_channel
			received_message = message
		end
	end
	
	# Set up the publisher:
	publisher_task = Async do
		ready.wait
		
		# SPUBLISH must be issued on the node that owns the channel's hash
		# slot, so route through the cluster's slot lookup:
		slot = cluster.slot_for(shard_channel)
		publisher_client = cluster.client_for(slot)
		publisher_client.call("SPUBLISH", shard_channel, shard_message)
	end
	
	publisher_task.wait
	# The subscriber task finishes after handling one message, so waiting on
	# it (rather than sleeping) deterministically orders the assertion:
	subscriber_task.wait
	
	expect(received_message).to be == shard_message
end
85 | 62 |
|
86 | 63 | it "distributes sharded messages across cluster nodes" do |
|
94 | 71 | ] |
95 | 72 |
|
96 | 73 | # Find channels that map to different slots/nodes |
97 | | - channel_slots = channels.map {|ch| [ch, cluster.slot_for(ch)]} |
| 74 | + channel_slots = channels.map {|channel| [channel, cluster.slot_for(channel)]} |
98 | 75 | unique_slots = channel_slots.map(&:last).uniq |
99 | 76 |
|
100 | 77 | # We should have channels distributed across different slots |
101 | 78 | expect(unique_slots.size).to be > 1 |
102 | 79 |
|
103 | 80 | received_messages = [] |
104 | | - condition = Async::Condition.new |
| 81 | + ready = Async::Variable.new |
105 | 82 | subscriber_count = 0 |
106 | 83 | target_count = channels.size |
107 | 84 |
|
108 | 85 | # Set up subscribers for each channel |
109 | 86 | subscriber_tasks = channels.map do |channel| |
110 | | - reactor.async do |
| 87 | + Async do |
111 | 88 | slot = cluster.slot_for(channel) |
112 | 89 | client = cluster.client_for(slot) |
113 | 90 |
|
114 | 91 | client.ssubscribe(channel) do |context| |
115 | 92 | subscriber_count += 1 |
116 | | - condition.signal if subscriber_count == target_count |
| 93 | + ready.resolve if subscriber_count == target_count |
117 | 94 |
|
118 | 95 | type, name, message = context.listen |
119 | 96 | received_messages << {channel: name, message: message, slot: slot} |
|
122 | 99 | end |
123 | 100 |
|
124 | 101 | # Set up publisher |
125 | | - publisher_task = reactor.async do |
126 | | - condition.wait # Wait for all subscribers |
| 102 | + publisher_task = Async do |
| 103 | + ready.wait # Wait for all subscribers |
127 | 104 |
|
128 | 105 | channels.each_with_index do |channel, index| |
129 | 106 | slot = cluster.slot_for(channel) |
130 | 107 | client = cluster.client_for(slot) |
131 | 108 |
|
132 | | - begin |
133 | | - client.call("SPUBLISH", channel, "message-#{index}") |
134 | | - rescue => error |
135 | | - Console.warn("SPUBLISH failed for #{channel}: #{error}") |
136 | | - # Clean up and skip if SPUBLISH not available |
137 | | - subscriber_tasks.each(&:stop) |
138 | | - return |
139 | | - end |
| 109 | + client.call("SPUBLISH", channel, "message-#{index}") |
140 | 110 | end |
141 | 111 | end |
142 | 112 |
|
143 | 113 | publisher_task.wait |
144 | | - sleep(0.1) # Allow time for message delivery |
145 | | - subscriber_tasks.each(&:stop) |
| 114 | + subscriber_tasks.each(&:wait) |
146 | 115 |
|
147 | 116 | # Verify we received messages for different channels |
148 | 117 | expect(received_messages.size).to be == channels.size |
|
197 | 166 |
|
198 | 167 | # Publish to sharded channel |
199 | 168 | shard_client = cluster.client_for(shard_slot) |
200 | | - begin |
201 | | - shard_client.call("SPUBLISH", shard_channel, "sharded message") |
202 | | - rescue => error |
203 | | - Console.warn("SPUBLISH not available: #{error}") |
204 | | - regular_task.stop |
205 | | - shard_task.stop |
206 | | - return |
207 | | - end |
| 169 | + shard_client.call("SPUBLISH", shard_channel, "sharded message") |
208 | 170 | end |
209 | 171 |
|
210 | 172 | publisher_task.wait |
211 | | - sleep(0.1) # Allow time for message delivery |
212 | | - regular_task.stop |
213 | | - shard_task.stop |
| 173 | + regular_task.wait |
| 174 | + shard_task.wait |
214 | 175 |
|
215 | 176 | # Should have received both messages |
216 | 177 | expect(received_messages.size).to be == 2 |
|
276 | 237 | publisher_client.publish(channel, "unified regular") |
277 | 238 |
|
278 | 239 | # Publish sharded message |
279 | | - begin |
280 | | - publisher_client.call("SPUBLISH", shard_channel, "unified sharded") |
281 | | - rescue => error |
282 | | - Console.warn("SPUBLISH not available: #{error}") |
283 | | - # Skip sharded part if not available |
284 | | - end |
| 240 | + publisher_client.call("SPUBLISH", shard_channel, "unified sharded") |
285 | 241 | end |
286 | 242 |
|
287 | 243 | publisher_task.wait |
288 | | - sleep(0.1) # Allow message delivery |
289 | | - subscriber_task.stop |
| 244 | + subscriber_task.wait |
290 | 245 |
|
291 | 246 | # Should receive both message types on same context |
292 | 247 | expect(received_messages.size).to be == 2 |
|
0 commit comments