Skip to content

Commit 9cbc446

Browse files
committed
refactor: drop extra input finalizers
Simplify controllers and add migrations to clear finalizers which are no longer needed. See cosi-project/runtime#633. Signed-off-by: Andrey Smirnov <[email protected]>
1 parent 66c7d43 commit 9cbc446

14 files changed

+167
-316
lines changed

internal/backend/runtime/omni/controllers/helpers/helpers.go

Lines changed: 0 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -174,64 +174,6 @@ func ClearUserLabels(res resource.Resource) {
174174
})
175175
}
176176

177-
// HandleInputOptions optional args for HandleInput.
178-
type HandleInputOptions struct {
179-
id string
180-
}
181-
182-
// HandleInputOption optional arg for HandleInput.
183-
type HandleInputOption func(*HandleInputOptions)
184-
185-
// WithID maps the resource using another id.
186-
func WithID(id string) HandleInputOption {
187-
return func(hio *HandleInputOptions) {
188-
hio.id = id
189-
}
190-
}
191-
192-
// HandleInput reads the additional input resource and automatically manages finalizers.
193-
// By default maps the resource using same id.
194-
func HandleInput[T generic.ResourceWithRD, S generic.ResourceWithRD](ctx context.Context, r controller.ReaderWriter, finalizer string, main S, opts ...HandleInputOption) (T, error) {
195-
var zero T
196-
197-
options := HandleInputOptions{
198-
id: main.Metadata().ID(),
199-
}
200-
201-
for _, o := range opts {
202-
o(&options)
203-
}
204-
205-
res, err := safe.ReaderGetByID[T](ctx, r, options.id)
206-
if err != nil {
207-
if state.IsNotFoundError(err) {
208-
return zero, nil
209-
}
210-
211-
return zero, err
212-
}
213-
214-
if res.Metadata().Phase() == resource.PhaseTearingDown || main.Metadata().Phase() == resource.PhaseTearingDown {
215-
if err := r.RemoveFinalizer(ctx, res.Metadata(), finalizer); err != nil && !state.IsNotFoundError(err) {
216-
return zero, err
217-
}
218-
219-
if res.Metadata().Phase() == resource.PhaseTearingDown {
220-
return zero, nil
221-
}
222-
223-
return res, nil
224-
}
225-
226-
if !res.Metadata().Finalizers().Has(finalizer) {
227-
if err := r.AddFinalizer(ctx, res.Metadata(), finalizer); err != nil {
228-
return zero, err
229-
}
230-
}
231-
232-
return res, nil
233-
}
234-
235177
// GetTalosClient for the machine id.
236178
// Automatically pick secure or insecure client.
237179
func GetTalosClient[T interface {

internal/backend/runtime/omni/controllers/omni/bmc_config.go

Lines changed: 9 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -10,39 +10,38 @@ import (
1010

1111
"github.com/cosi-project/runtime/pkg/controller"
1212
"github.com/cosi-project/runtime/pkg/controller/generic/qtransform"
13+
"github.com/cosi-project/runtime/pkg/safe"
14+
"github.com/cosi-project/runtime/pkg/state"
1315
"github.com/siderolabs/gen/xerrors"
1416
"go.uber.org/zap"
1517

1618
"github.com/siderolabs/omni/client/pkg/omni/resources/infra"
1719
"github.com/siderolabs/omni/client/pkg/omni/resources/omni"
1820
"github.com/siderolabs/omni/client/pkg/omni/resources/siderolink"
19-
"github.com/siderolabs/omni/internal/backend/runtime/omni/controllers/helpers"
2021
)
2122

2223
// BMCConfigController manages BMCConfig resource lifecycle.
2324
type BMCConfigController = qtransform.QController[*omni.InfraMachineBMCConfig, *infra.BMCConfig]
2425

2526
// NewBMCConfigController initializes BMCConfigController.
2627
func NewBMCConfigController() *BMCConfigController {
27-
name := "BMCConfigController"
28-
2928
return qtransform.NewQController(
3029
qtransform.Settings[*omni.InfraMachineBMCConfig, *infra.BMCConfig]{
31-
Name: name,
30+
Name: "BMCConfigController",
3231
MapMetadataFunc: func(config *omni.InfraMachineBMCConfig) *infra.BMCConfig {
3332
return infra.NewBMCConfig(config.Metadata().ID())
3433
},
3534
UnmapMetadataFunc: func(config *infra.BMCConfig) *omni.InfraMachineBMCConfig {
3635
return omni.NewInfraMachineBMCConfig(config.Metadata().ID())
3736
},
38-
TransformExtraOutputFunc: func(ctx context.Context, r controller.ReaderWriter, _ *zap.Logger, userConfig *omni.InfraMachineBMCConfig, config *infra.BMCConfig) error {
39-
link, err := helpers.HandleInput[*siderolink.Link](ctx, r, name, userConfig)
37+
TransformFunc: func(ctx context.Context, r controller.Reader, _ *zap.Logger, userConfig *omni.InfraMachineBMCConfig, config *infra.BMCConfig) error {
38+
link, err := safe.ReaderGetByID[*siderolink.Link](ctx, r, userConfig.Metadata().ID())
4039
if err != nil {
41-
return err
42-
}
40+
if state.IsNotFoundError(err) {
41+
return xerrors.NewTaggedf[qtransform.SkipReconcileTag]("no matching link found")
42+
}
4343

44-
if link == nil {
45-
return xerrors.NewTaggedf[qtransform.SkipReconcileTag]("no matching link found")
44+
return err
4645
}
4746

4847
providerID, ok := link.Metadata().Annotations().Get(omni.LabelInfraProviderID)
@@ -54,13 +53,6 @@ func NewBMCConfigController() *BMCConfigController {
5453

5554
config.TypedSpec().Value.Config = userConfig.TypedSpec().Value
5655

57-
return nil
58-
},
59-
FinalizerRemovalExtraOutputFunc: func(ctx context.Context, writer controller.ReaderWriter, _ *zap.Logger, userConfig *omni.InfraMachineBMCConfig) error {
60-
if _, err := helpers.HandleInput[*siderolink.Link](ctx, writer, name, userConfig); err != nil {
61-
return err
62-
}
63-
6456
return nil
6557
},
6658
},

internal/backend/runtime/omni/controllers/omni/cluster_destroy_status.go

Lines changed: 6 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@ package omni
88
import (
99
"context"
1010
"fmt"
11-
"log"
1211

1312
"github.com/cosi-project/runtime/pkg/controller"
1413
"github.com/cosi-project/runtime/pkg/controller/generic/qtransform"
@@ -44,8 +43,10 @@ func NewClusterDestroyStatusController() *ClusterDestroyStatusController {
4443
UnmapMetadataFunc: func(clusterDestroyStatus *omni.ClusterDestroyStatus) *omni.Cluster {
4544
return omni.NewCluster(resources.DefaultNamespace, clusterDestroyStatus.Metadata().ID())
4645
},
47-
TransformExtraOutputFunc: func(ctx context.Context, r controller.ReaderWriter, _ *zap.Logger, cluster *omni.Cluster, clusterDestroyStatus *omni.ClusterDestroyStatus) error {
48-
remainingMachines := 0
46+
TransformFunc: func(ctx context.Context, r controller.Reader, _ *zap.Logger, cluster *omni.Cluster, clusterDestroyStatus *omni.ClusterDestroyStatus) error {
47+
if cluster.Metadata().Phase() != resource.PhaseTearingDown {
48+
return xerrors.NewTaggedf[qtransform.SkipReconcileTag]("not tearing down")
49+
}
4950

5051
msStatuses, err := r.List(ctx, omni.NewMachineSetStatus(resources.DefaultNamespace, "").Metadata(), state.WithLabelQuery(
5152
resource.LabelEqual(omni.LabelCluster, cluster.Metadata().ID()),
@@ -54,31 +55,6 @@ func NewClusterDestroyStatusController() *ClusterDestroyStatusController {
5455
return fmt.Errorf("failed to list control planes %w", err)
5556
}
5657

57-
remainingMachineSetIDs := make(map[resource.ID]struct{}, len(msStatuses.Items))
58-
for _, status := range msStatuses.Items {
59-
switch status.Metadata().Phase() {
60-
case resource.PhaseRunning:
61-
if !status.Metadata().Finalizers().Has(ClusterDestroyStatusControllerName) {
62-
if err = r.AddFinalizer(ctx, status.Metadata(), ClusterDestroyStatusControllerName); err != nil {
63-
return err
64-
}
65-
}
66-
remainingMachineSetIDs[status.Metadata().ID()] = struct{}{}
67-
case resource.PhaseTearingDown:
68-
if status.Metadata().Finalizers().Has(ClusterDestroyStatusControllerName) {
69-
if len(*status.Metadata().Finalizers()) == 1 {
70-
log.Printf("Removing finalizer for cluster %s", status.Metadata().ID())
71-
if err = r.RemoveFinalizer(ctx, status.Metadata(), ClusterDestroyStatusControllerName); err != nil {
72-
return err
73-
}
74-
75-
continue
76-
}
77-
remainingMachineSetIDs[status.Metadata().ID()] = struct{}{}
78-
}
79-
}
80-
}
81-
8258
cmStatuses, err := r.List(ctx, omni.NewClusterMachineStatus(resources.DefaultNamespace, "").Metadata(),
8359
state.WithLabelQuery(resource.LabelEqual(
8460
omni.LabelCluster, cluster.Metadata().ID()),
@@ -88,44 +64,9 @@ func NewClusterDestroyStatusController() *ClusterDestroyStatusController {
8864
return err
8965
}
9066

91-
incrementRemainingMachines := func(cmStatus resource.Resource) {
92-
if msId, ok := cmStatus.Metadata().Labels().Get(omni.LabelMachineSet); ok {
93-
if _, ok = remainingMachineSetIDs[msId]; ok {
94-
remainingMachines++
95-
}
96-
}
97-
}
98-
99-
for _, cmStatus := range cmStatuses.Items {
100-
switch cmStatus.Metadata().Phase() {
101-
case resource.PhaseRunning:
102-
if !cmStatus.Metadata().Finalizers().Has(ClusterDestroyStatusControllerName) {
103-
if err = r.AddFinalizer(ctx, cmStatus.Metadata(), ClusterDestroyStatusControllerName); err != nil {
104-
return err
105-
}
106-
}
107-
incrementRemainingMachines(cmStatus)
108-
case resource.PhaseTearingDown:
109-
if cmStatus.Metadata().Finalizers().Has(ClusterDestroyStatusControllerName) {
110-
if hasOnlyDestroyStatusFinalizers(cmStatus.Metadata()) {
111-
if err = r.RemoveFinalizer(ctx, cmStatus.Metadata(), ClusterDestroyStatusControllerName); err != nil {
112-
return err
113-
}
114-
115-
continue
116-
}
117-
incrementRemainingMachines(cmStatus)
118-
}
119-
}
120-
}
121-
122-
if cluster.Metadata().Phase() != resource.PhaseTearingDown {
123-
return xerrors.NewTaggedf[qtransform.SkipReconcileTag]("not tearing down")
124-
}
125-
12667
clusterDestroyStatus.TypedSpec().Value.Phase = fmt.Sprintf("Destroying: %s, %s",
127-
pluralize.NewClient().Pluralize("machine set", len(remainingMachineSetIDs), true),
128-
pluralize.NewClient().Pluralize("machine", remainingMachines, true),
68+
pluralize.NewClient().Pluralize("machine set", len(msStatuses.Items), true),
69+
pluralize.NewClient().Pluralize("machine", len(cmStatuses.Items), true),
12970
)
13071

13172
return nil

internal/backend/runtime/omni/controllers/omni/cluster_destroy_status_test.go

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -69,6 +69,12 @@ func TestClusterDestroyStatusController(t *testing.T) {
6969
asrt.Equal("Destroying: 1 machine set, 2 machines", res.TypedSpec().Value.Phase)
7070
})
7171

72+
rtestutils.Destroy[*omni.ClusterMachineStatus](ctx, t, st, []string{"cm0", "cm1"})
73+
74+
rtestutils.AssertResource(ctx, t, st, c.Metadata().ID(), func(res *omni.ClusterDestroyStatus, asrt *assert.Assertions) {
75+
asrt.Equal("Destroying: 1 machine set, 0 machines", res.TypedSpec().Value.Phase)
76+
})
77+
7278
rtestutils.Destroy[*omni.MachineSetStatus](ctx, t, st, []string{machineSet.Metadata().ID()})
7379

7480
rtestutils.AssertResource(ctx, t, st, c.Metadata().ID(), func(res *omni.ClusterDestroyStatus, asrt *assert.Assertions) {

internal/backend/runtime/omni/controllers/omni/cluster_machine_status.go

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -41,14 +41,14 @@ func NewClusterMachineStatusController() *ClusterMachineStatusController {
4141
UnmapMetadataFunc: func(clusterMachineStatus *omni.ClusterMachineStatus) *omni.ClusterMachine {
4242
return omni.NewClusterMachine(resources.DefaultNamespace, clusterMachineStatus.Metadata().ID())
4343
},
44-
TransformExtraOutputFunc: func(ctx context.Context, r controller.ReaderWriter, _ *zap.Logger, clusterMachine *omni.ClusterMachine, clusterMachineStatus *omni.ClusterMachineStatus) error {
44+
TransformFunc: func(ctx context.Context, r controller.Reader, _ *zap.Logger, clusterMachine *omni.ClusterMachine, clusterMachineStatus *omni.ClusterMachineStatus) error {
4545
machine, err := safe.ReaderGet[*omni.Machine](ctx, r, resource.NewMetadata(resources.DefaultNamespace, omni.MachineType, clusterMachine.Metadata().ID(), resource.VersionUndefined))
4646
if err != nil && !state.IsNotFoundError(err) {
4747
return err
4848
}
4949

50-
machineSetNode, err := helpers.HandleInput[*omni.MachineSetNode](ctx, r, clusterMachineStatusControllerName, clusterMachine)
51-
if err != nil {
50+
machineSetNode, err := safe.ReaderGetByID[*omni.MachineSetNode](ctx, r, clusterMachine.Metadata().ID())
51+
if err != nil && !state.IsNotFoundError(err) {
5252
return err
5353
}
5454

@@ -224,14 +224,6 @@ func NewClusterMachineStatusController() *ClusterMachineStatusController {
224224
clusterMachineStatus.Metadata().Labels().Set(omni.ClusterMachineStatusLabelNodeName, clusterMachineIdentity.TypedSpec().Value.Nodename)
225225
}
226226

227-
return nil
228-
},
229-
FinalizerRemovalExtraOutputFunc: func(ctx context.Context, rw controller.ReaderWriter, _ *zap.Logger, clusterMachine *omni.ClusterMachine) error {
230-
_, err := helpers.HandleInput[*omni.MachineSetNode](ctx, rw, clusterMachineStatusControllerName, clusterMachine)
231-
if err != nil {
232-
return err
233-
}
234-
235227
return nil
236228
},
237229
},

internal/backend/runtime/omni/controllers/omni/infra_machine.go

Lines changed: 12 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -142,27 +142,6 @@ func (ctrl *InfraMachineController) Reconcile(ctx context.Context, _ *zap.Logger
142142
}
143143

144144
func (ctrl *InfraMachineController) reconcileTearingDown(ctx context.Context, r controller.QRuntime, link *siderolink.Link) error {
145-
if _, err := helpers.HandleInput[*omni.InfraMachineConfig](ctx, r, ctrl.Name(), link); err != nil {
146-
return err
147-
}
148-
149-
if _, err := helpers.HandleInput[*omni.MachineExtensions](ctx, r, ctrl.Name(), link); err != nil {
150-
return err
151-
}
152-
153-
if _, err := helpers.HandleInput[*omni.ClusterMachine](ctx, r, ctrl.Name(), link); err != nil {
154-
return err
155-
}
156-
157-
if _, err := helpers.HandleInput[*omni.MachineStatus](ctx, r, ctrl.Name(), link); err != nil {
158-
return err
159-
}
160-
161-
_, err := helpers.HandleInput[*siderolink.NodeUniqueToken](ctx, r, ctrl.Name(), link)
162-
if err != nil {
163-
return err
164-
}
165-
166145
md := infra.NewMachine(link.Metadata().ID()).Metadata()
167146

168147
ready, err := helpers.TeardownAndDestroy(ctx, r, md)
@@ -178,23 +157,23 @@ func (ctrl *InfraMachineController) reconcileTearingDown(ctx context.Context, r
178157
}
179158

180159
func (ctrl *InfraMachineController) reconcileRunning(ctx context.Context, r controller.QRuntime, link *siderolink.Link) error {
181-
config, err := helpers.HandleInput[*omni.InfraMachineConfig](ctx, r, ctrl.Name(), link)
182-
if err != nil {
160+
config, err := safe.ReaderGetByID[*omni.InfraMachineConfig](ctx, r, link.Metadata().ID())
161+
if err != nil && !state.IsNotFoundError(err) {
183162
return err
184163
}
185164

186-
machineExts, err := helpers.HandleInput[*omni.MachineExtensions](ctx, r, ctrl.Name(), link)
187-
if err != nil {
165+
machineExts, err := safe.ReaderGetByID[*omni.MachineExtensions](ctx, r, link.Metadata().ID())
166+
if err != nil && !state.IsNotFoundError(err) {
188167
return err
189168
}
190169

191-
machineStatus, err := helpers.HandleInput[*omni.MachineStatus](ctx, r, ctrl.Name(), link)
192-
if err != nil {
170+
machineStatus, err := safe.ReaderGetByID[*omni.MachineStatus](ctx, r, link.Metadata().ID())
171+
if err != nil && !state.IsNotFoundError(err) {
193172
return err
194173
}
195174

196-
nodeUniqueToken, err := helpers.HandleInput[*siderolink.NodeUniqueToken](ctx, r, ctrl.Name(), link)
197-
if err != nil {
175+
nodeUniqueToken, err := safe.ReaderGetByID[*siderolink.NodeUniqueToken](ctx, r, link.Metadata().ID())
176+
if err != nil && !state.IsNotFoundError(err) {
198177
return err
199178
}
200179

@@ -269,17 +248,18 @@ func (helper *infraMachineControllerHelper) modify(ctx context.Context, infraMac
269248
}
270249

271250
// the machine is deallocated, clear the cluster information and mark it for wipe by assigning it a new wipe ID
272-
273251
if infraMachine.TypedSpec().Value.ClusterTalosVersion != "" {
274252
infraMachine.TypedSpec().Value.WipeId = uuid.NewString()
275253
}
276254

277255
infraMachine.TypedSpec().Value.ClusterTalosVersion = ""
278256
infraMachine.TypedSpec().Value.Extensions = nil
279257

280-
_, err = helpers.HandleInput[*omni.ClusterMachine](ctx, helper.runtime, helper.controllerName, helper.link)
258+
if err = helper.runtime.RemoveFinalizer(ctx, clusterMachine.Metadata(), helper.controllerName); err != nil {
259+
return err
260+
}
281261

282-
return err
262+
return nil
283263
}
284264

285265
if err = helper.runtime.AddFinalizer(ctx, clusterMachine.Metadata(), helper.controllerName); err != nil {

0 commit comments

Comments (0)