@@ -454,29 +454,6 @@ type streamTracker struct {
 	failed atomic.Int32
 }
 
-// TODO taken from Cortex, see if we can refactor out an usable interface.
-type pushTracker struct {
-	streamsPending atomic.Int32
-	streamsFailed  atomic.Int32
-	done           chan struct{}
-	err            chan error
-}
-
-// doneWithResult records the result of a stream push.
-// If err is nil, the stream push is considered successful.
-// If err is not nil, the stream push is considered failed.
-func (p *pushTracker) doneWithResult(err error) {
-	if err == nil {
-		if p.streamsPending.Dec() == 0 {
-			p.done <- struct{}{}
-		}
-	} else {
-		if p.streamsFailed.Inc() == 1 {
-			p.err <- err
-		}
-	}
-}
-
 func (d *Distributor) waitSimulatedLatency(ctx context.Context, tenantID string, start time.Time) {
 	latency := d.validator.SimulatedPushLatency(tenantID)
 	if latency > 0 {
@@ -754,10 +731,7 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe
 	const maxExpectedReplicationSet = 5 // typical replication factor 3 plus one for inactive plus one for luck
 	var descs [maxExpectedReplicationSet]ring.InstanceDesc
 
-	tracker := pushTracker{
-		done: make(chan struct{}, 1), // buffer avoids blocking if caller terminates - sendSamples() only sends once on each
-		err:  make(chan error, 1),
-	}
+	tracker := newBasicPushTracker()
 	streamsToWrite := 0
 	if d.cfg.IngesterEnabled {
 		streamsToWrite += len(streams)
@@ -766,15 +740,15 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe
 		streamsToWrite += len(streams)
 	}
 	// We must correctly set streamsPending before beginning any writes to ensure we don't have a race between finishing all of one path before starting the other.
-	tracker.streamsPending.Store(int32(streamsToWrite))
+	tracker.Add(int32(streamsToWrite))
 
 	if d.cfg.KafkaEnabled {
 		subring, err := d.partitionRing.PartitionRing().ShuffleShard(tenantID, d.validator.IngestionPartitionsTenantShardSize(tenantID))
 		if err != nil {
 			return nil, err
 		}
 		// We don't need to create a new context like the ingester writes, because we don't return unless all writes have succeeded.
-		d.sendStreamsToKafka(ctx, streams, tenantID, &tracker, subring)
+		d.sendStreamsToKafka(ctx, streams, tenantID, tracker, subring)
 	}
 
 	if d.cfg.IngesterEnabled {
@@ -823,7 +797,7 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe
 		case d.ingesterTasks <- pushIngesterTask{
 			ingester:      ingester,
 			streamTracker: samples,
-			pushTracker:   &tracker,
+			pushTracker:   tracker,
 			ctx:           localCtx,
 			cancel:        cancel,
 		}:
@@ -833,14 +807,11 @@ func (d *Distributor) PushWithResolver(ctx context.Context, req *logproto.PushRe
 		}
 	}
 
-	select {
-	case err := <-tracker.err:
+	if err := tracker.Wait(ctx); err != nil {
 		return nil, err
-	case <-tracker.done:
-		return &logproto.PushResponse{}, validationErr
-	case <-ctx.Done():
-		return nil, ctx.Err()
 	}
+
+	return &logproto.PushResponse{}, nil
 }
 
 // missingEnforcedLabels returns true if the stream is missing any of the required labels.
@@ -1135,7 +1106,7 @@ func (d *Distributor) truncateLines(vContext validationContext, stream *logproto
 
 type pushIngesterTask struct {
 	streamTracker []*streamTracker
-	pushTracker   *pushTracker
+	pushTracker   PushTracker
 	ingester      ring.InstanceDesc
 	ctx           context.Context
 	cancel        context.CancelFunc
@@ -1172,12 +1143,12 @@ func (d *Distributor) sendStreams(task pushIngesterTask) {
 			if task.streamTracker[i].failed.Inc() <= int32(task.streamTracker[i].maxFailures) {
 				continue
 			}
-			task.pushTracker.doneWithResult(err)
+			task.pushTracker.Done(err)
 		} else {
 			if task.streamTracker[i].succeeded.Inc() != int32(task.streamTracker[i].minSuccess) {
 				continue
 			}
-			task.pushTracker.doneWithResult(nil)
+			task.pushTracker.Done(nil)
 		}
 	}
 }
@@ -1209,14 +1180,14 @@ func (d *Distributor) sendStreamsErr(ctx context.Context, ingester ring.Instance
 	return err
 }
 
-func (d *Distributor) sendStreamsToKafka(ctx context.Context, streams []KeyedStream, tenant string, tracker *pushTracker, subring *ring.PartitionRing) {
+func (d *Distributor) sendStreamsToKafka(ctx context.Context, streams []KeyedStream, tenant string, tracker PushTracker, subring *ring.PartitionRing) {
 	for _, s := range streams {
 		go func(s KeyedStream) {
 			err := d.sendStreamToKafka(ctx, s, tenant, subring)
 			if err != nil {
 				err = fmt.Errorf("failed to write stream to kafka: %w", err)
 			}
-			tracker.doneWithResult(err)
+			tracker.Done(err)
 		}(s)
 	}
 }