From 92915bc1c7d402d89d4fc6558b4048773df4b6ef Mon Sep 17 00:00:00 2001 From: Omar Date: Thu, 21 Sep 2023 11:21:50 -0700 Subject: [PATCH] chore(upstream): Port upstream changes from Netflix (#2067) * fix(auth): instantiate filter correctly (cherry picked from commit 7465b7b82602b0efb4607c509b670d8653b96b65) * Merge pull request #571 in SPKR/keel-nflx from fix-java-toolchain to master Squashed commit of the following: commit 12b0c032bdbf9dd4e8158a729d74d376c82f5851 Author: Rob Fletcher Date: Thu Sep 16 17:07:42 2021 -0700 fix(build): make oss build also use the same toolchain commit d87f0d89a9a81c8799e9fd7212d90007da48ed5e Author: Danny Thomas Date: Fri Sep 17 09:40:58 2021 +1000 Nebula 8.1.0 commit 95f97331db80ac456586685694cf115f95f841f8 Author: Rob Fletcher Date: Wed Sep 15 15:42:21 2021 -0700 fix(build): make sure we build with Java 11 (cherry picked from commit fd26a11b42ff67d9a666f1eb06db5642f9bba7bd) * feat(scheduler): tuneable batch size for checking Squashed commit of the following: commit b0232ff14b74f324a3564d917514e8df36a6c7f0 Author: Emily Burns Date: Fri Sep 17 10:03:42 2021 -0700 feat(scheduler): tuneable batch size for checking (cherry picked from commit e7c767f16458fd9d679263afc416ba087a120f85) * fix(notifications): use the correct branch for notifications Squashed commit of the following: commit 9ff29f3c0f414eb9c6ed440de88990850abbf0f0 Author: Rani Date: Fri Sep 17 12:42:47 2021 -0700 fix(pr): moved branch selection logic commit de34f34a44e8815d3a421ef7018454e014721bea Author: Rani Date: Thu Sep 16 15:04:09 2021 -0700 fix(pr): added tests commit 46c9ed6a71a13d415413a4eb3cb8643f2f0e2858 Author: Rani Date: Thu Sep 16 12:56:24 2021 -0700 fix(notifications): use the correct branch for notifications (cherry picked from commit cb16c974b41f573cddf24b3d82f1b5b5624233cf) * feat(dgs): added a new boolean to the schema that indicates if preview environments are configured Squashed commit of the following: commit 15dbc86497c7f2a3981da5f347e941bb9faaff66 Author: Rani Date: Tue Sep 21 08:42:37 2021 -0700 feat(preview-envs): added a new boolean to the schema that indicates if preview environments are configured commit 5be000c28eb7274fb833d6fb6de083a5a690af0c Author: Rani Date: Tue Sep 21 08:41:51 2021 -0700 fix(dgs): moved processedConfig to a separate fetcher as it might be heavy for big configs (cherry picked from commit d1aeefc4c4d53f15fd42af59b98a3dfd0f089236) * feat(imdsv2): controlled and instumented rollout of IMDSv2 (cherry picked from commit cf796a4461767badabfc6e75cc0776a8144d2e57) * fix(imdsv2): bad column reference in query (cherry picked from commit 251aa614a28d95c428001eab91d5a5537196647d) * fix(imdsv2): actually translate IMDSv2 setting in spec to desired low-level state (cherry picked from commit 9f898ba1fd040a2ef7cbccfe13fe76c866bcc4c1) * fix(slack): Fix Slack callback handling Squashed commit of the following: commit 43834b9b0afc437eff15024c99333321bfb81f02 Author: Luis Pollo Date: Tue Sep 21 09:29:06 2021 -0700 test(slack): Enable running Slack callback tests with Netflix classpath commit 892669bac3d49757880a362f967e167cb2152d7f Author: Luis Pollo Date: Mon Sep 20 23:09:15 2021 -0700 refactor(slack): Rename SlackAppController to SlackAppServlet commit 13f271a7377f28a7015fab416ceca4a7ffd9511c Author: Luis Pollo Date: Mon Sep 20 17:44:46 2021 -0700 fix(slack): Fix Slack callback handling (cherry picked from commit a5f435a6daa18f63c4daf30f450ad7bc8f6a9a49) * chore(cluster): abstract construction of upsert stages Squashed commit of the following: commit 
132bc6c7e9d57db28c2cb76deddf22ea7053bcd0 Author: Emily Burns Date: Tue Sep 21 14:52:42 2021 -0700 fix(test): rewrite tests using different framework commit 97fd19ce6f77544cdf5bac5c3bcb4769d857b0a4 Author: Emily Burns Date: Tue Sep 21 10:12:33 2021 -0700 fix(test): add more combined tests commit 198ab954849820414e53ab0fa86302dd7139dd82 Author: Emily Burns Date: Fri Sep 17 15:46:55 2021 -0700 fix(PR): fix refs for titus commit 01dfa7c5e05b56af287d6289250c9c57e9899153 Author: Emily Burns Date: Wed Sep 15 12:08:19 2021 -0700 chore(cluster): abstract construction of upsert stages (cherry picked from commit cfea585213160b1a0bb07514af66ab7fc1ee9bf3) * fix(imdsv2): get current state of IMDSv2 flag (cherry picked from commit 469f8c60e0aa189034c3c8b839cfb40b85ca2b65) * feat(preview-envs): Allow users to override FP email via notifications (cherry picked from commit 2cd00f332a847bcd4e1cb61616de5259267a2b7d) * fix(dgs): improve error message when git repo details are incorrect Squashed commit of the following: commit 611c3ed7218aed38d333bb8820e28287ac00b40f Author: Rani Date: Tue Sep 21 08:30:30 2021 -0700 fix(dgs): improve error message when git repo details are incorrect (cherry picked from commit d56dc01d391fc8a22bcb85189dd02dc572ccb83f) * fix(database): standardize table name (cherry picked from commit 9efbfe376a3a1a46f15c4e9caa73664129e1f760) * fix(delete): Fix pipeline behavior for multi-stage delete tasks (cherry picked from commit 00ebd9df5ebf6dc42652e3e980718fb2282e18be) * chore(imdsv2): added a missing test case (to reassure myself it worked) (cherry picked from commit c02af67f1baf28d819f29ada53af81dad6cdeee5) * fix(preview-envs): Fix moniker in destroyServerGroup (cherry picked from commit c49f18b4c45aad05c62cf098a99e0567f98a4e1a) * feat(resolvers): extracted a base class for rollout-capable resolvers (cherry picked from commit 4225e253b48ff61c2425a3407d7844ffbb36e86a) * feat(resolvers): add column to capture rollout state (cherry picked from commit 76153101685bfc3261a2a170c72a740fcafc5412) * fix(resolvers): fixed typo in migration (cherry picked from commit 98cbc9a90ee0aefc7c48e29fa3dfc350e45ec8b2) * feat(reports): Allow loading app cache from disk for local testing (cherry picked from commit a67491d070c8b0ee561e39bda211433ad004f06f) * feat(imdsv2): IMDSv2 resolver for Titus clusters (cherry picked from commit 5055ce2aae4f5f6e3f5dc94a0c3464ac70289b9c) * fix(import): invalidate the cache before fetching the app by name Squashed commit of the following: commit 1d3bbd296b5bca6eccecb57d7bcf1f06500643ce Author: Rani Date: Wed Sep 29 20:10:11 2021 -0700 fix(pr): renaming commit 09812c9aeb5f56124d3ebbf26f8024ce6da1817e Author: Rani Date: Wed Sep 29 10:06:58 2021 -0700 fix(import): invalidate the cache before fetching the app by name (cherry picked from commit 8533bc724aca404acccfd65524fa392dd145b7fb) * refactor(resolvers): Moved some of the Arrow Optics declarations into a common place for reuse (cherry picked from commit 40c4aff2006224493111667b425d5612745868f7) * fix(imdsv2): Wire up the Titus IMDSv2 resolver (cherry picked from commit 724d29d2bd2c6d666c2d1406f1f81053086962df) * fix(imdsv2): Fix bean name collision in IMDSv2 resolvers (cherry picked from commit 0a65d0ff7e5be41c40ae3140201a80482d86a6e1) * fix(notifications): explicit specify the notification branch Squashed commit of the following: commit b28ca2ed8b31f149adb35b17a8b670d954704383 Author: Rani Date: Wed Sep 29 20:56:30 2021 -0700 fix(pr): updated the tests to include prMerged event commit a73bea12f59bd5c42269c83e1a2f5ac1c092bcad 
Author: Rani Date: Wed Sep 29 20:50:30 2021 -0700 fix(notifications): explicit specify the notification branch (cherry picked from commit 10ca32f0ec7addc3bac74d53443abde3cb4f5971) * Merge pull request #598 in SPKR/keel-nflx from unpinMessage to master Squashed commit of the following: commit d609eb75fdefeb9499f837d3acdd17e8ab52e793 Author: Gal Yardeni Date: Thu Sep 30 14:38:28 2021 -0700 fix(unpinning): change message and rename commit 0eb7dfe2c457a4e30c233fc5d3f755f9a7e89dc6 Author: Gal Yardeni Date: Wed Sep 29 15:27:17 2021 -0700 fix(unpinning): fix unpinning message if pinned version is the same as current (cherry picked from commit c1d5f14c2f4a1825ef1c8670dbefffee7aaf1bdd) * fix(actuation): Properly trigger recheck of resources when a version is pinned or marked as bad Squashed commit of the following: commit 973291edc1604574858ef3db49a027de19c0f04a Author: Rani Horev Date: Thu Sep 30 22:47:56 2021 +0000 fix(pr): update test title commit 404f0721e70a1238456a74be372ac7b5907d8851 Author: Rani Date: Thu Sep 30 15:32:22 2021 -0700 fix(pr): fixed tests and add some more commit bd1fe2672b3fe0b90ed8debe62464641da323e42 Author: Rani Date: Wed Sep 29 16:52:10 2021 -0700 fix(actuation): trigger recheck of resources when a version is pinned or marked as bad (cherry picked from commit d4c694acbb894833f3c8b2773da45907633c2892) * feat(imdsv2): Removed the fast property that controls retries as it's redudant now we have status (cherry picked from commit 2c3c6c432ba66645c18492da3003ac45f660f43e) * fix(imdsv2): If user fixes IMDSv2 rollout after it failed, we should record success (cherry picked from commit 25119a5fe7101c5807e5a00066b8f344f9cf2c92) * fix(dgs): move resources to be under the artifact directly Squashed commit of the following: commit 00e726dd7d5731bf304958ca5764e679d41902ee Author: Rani Date: Fri Oct 1 12:15:22 2021 -0700 fix(dgs): move resources to be under the artifact directly (cherry picked from commit d029fb8abd0c015d40bd89e0391c4a04c7b45587) * fix(preview-envs): Revisit resource renaming Squashed commit of the following: commit 06ee91a3d19b2303299209ed0f003fb9bc1a37c7 Author: Luis Pollo Date: Thu Sep 30 16:45:05 2021 -0700 fix(preview-envs): Revisit resource renaming (cherry picked from commit 851b42c044464a8233dd222715ba5774c9bc910a) * Revert "fix(preview-envs): Revisit resource renaming" This reverts commit 851b42c044464a8233dd222715ba5774c9bc910a. 
(cherry picked from commit 1b71bff344db2bc10262f4b64bf964ed739ae0dd) * fix(build): make build compatible with Java 8 tooling (cherry picked from commit b2946dba9c9d1e52301e6cc3b1c3a157cc2b506d) * chore(logs): Fix logging of authenticated user (cherry picked from commit f2831239516bfa62a8bcfa166f12c1ccb74c62dc) * fix(build): put io.spinnaker.package plugin back in to see if that's what's breaking auth (cherry picked from commit 1a8718ff514530faffbb90af91267cc5318efa85) * fix(auth): desperately hoping this fixes auth (cherry picked from commit 32b4ba00ee664c7c7795c15081a0994ad444eee7) * chore(build): Align kork/fiat coordinates on com.spinnaker.netflix.{}.internal Squashed commit of the following: commit b59e5329982e47c151004efc0f492774d7db5bfd Author: Adam Jordens Date: Wed Oct 6 17:28:04 2021 -0700 chore(build): Align kork/fiat coordinates on com.spinnaker.netflix.{}.internal commit 000d61d09c3d57b08468712cb48326393cafc3f9 Author: Adam Jordens Date: Wed Oct 6 17:27:58 2021 -0700 Update dependency lock (cherry picked from commit 524b762e51596694a77edfa338af8caa87d9d8d8) * chore(build): see if we can get the build buildable on Java 8 (cherry picked from commit 5ba3bf9848d2464cf0417c0a45144cf9e922496d) * fix(preview-envs): Revisit resource renaming Squashed commit of the following: commit 99c72734c58f436b3c2e808304549cb2866f10a5 Author: Luis Pollo Date: Thu Oct 7 10:09:06 2021 -0700 fix(preview-envs): Fix loss of updated metadata in preview resources commit 879fd912ec7288c4b2ec87d939cb9fc515c0cb5e Author: Luis Pollo Date: Wed Oct 6 09:27:50 2021 -0700 fix(preview-envs): Fix moniker suffix logic commit 83bc075bda24c016ef535c3b2e7fe340f704bd3a Author: Luis Pollo Date: Fri Oct 1 22:33:23 2021 +0000 fix(preview-envs): Revisit resource renaming Squashed commit of the following: commit 06ee91a3d19b2303299209ed0f003fb9bc1a37c7 Author: Luis Pollo Date: Thu Sep 30 16:45:05 2021 -0700 fix(preview-envs): Revisit resource renaming (cherry picked from commit a8d17ed21cdfca9a8cf52fbc980c0a53750b5395) * chore(logs): Add more debug logs around delivery config import (cherry picked from commit 7d505253c28996ffdc0dccf7788142e37629d782) * fix(resourceTasks): load tasks from task tracking rep Squashed commit of the following: commit ca07cf691411b2470ee9a3eae5e9f559153c7c97 Author: Emily Burns Date: Thu Oct 7 16:20:54 2021 -0700 chore(pr): move resource stuff to separate fetcher commit eed03eaa40c09e0e8d7c2c49de06337d484e0141 Author: Emily Burns Date: Thu Oct 7 16:14:18 2021 -0700 fix(pr): return running and last batch of completed commit 68bead2822b3d74feb1a58ea2a6487889abca873 Author: Emily Burns Date: Thu Oct 7 12:56:49 2021 -0700 chor(PR): fetch latest batch of tasks commit 02d16c6b553eae474e786929e5c7fac5f25b40ad Author: Emily Burns Date: Thu Oct 7 11:29:17 2021 -0700 fix(resourceTasks): load tasks from task tracking rep (cherry picked from commit 2b906f754059e9f055649076583acdef83cd246c) * feat(constraint): restart constraint evaluation Squashed commit of the following: commit f13378aadd5a020cd89c698b883a96e1521154e1 Author: Rani Date: Fri Oct 8 12:28:37 2021 -0700 fix(pr): fixed tests commit 03ebc275a81d7eafdb3177501def46dff727eaa2 Author: Rani Date: Fri Oct 8 11:59:26 2021 -0700 fix(pr): added reference and simplify delete query commit cda6c956199ebf4e6484312010b7d31a87575582 Author: Rani Date: Thu Oct 7 20:40:39 2021 -0700 feat(constraint): restart constraint evaluation (cherry picked from commit 4feeca513d33cd4f3e4acfa8b5fdaa8c71e5751d) * Merge pull request #617 in SPKR/keel-nflx from currentVersion to 
master Squashed commit of the following: commit 09f4c783ead79715799654ae800e76d92086d7eb Merge: d9bda5c41 729313f24 Author: Gal Yardeni Date: Fri Oct 8 13:37:40 2021 -0700 Merge branch 'currentVersion' of ssh://stash.corp.netflix.com:7999/spkr/keel-nflx into currentVersion commit d9bda5c41c2b526ab742177506f2510092cabfc2 Author: Gal Yardeni Date: Fri Oct 8 13:36:57 2021 -0700 feat(latestVersion): adding latest version + bug fixes commit be24e4c1a55dba90863c42b572586b1e8b36bd95 Author: Gal Yardeni Date: Thu Oct 7 13:58:56 2021 -0700 feat(latestVersion): adding latest version + bug fixes commit a0a14f31072eb5afed2897d3c6d9bf19873e3468 Author: Gal Yardeni Date: Wed Oct 6 21:19:34 2021 -0700 feat(latestVersion): add latest approved version of an artifact commit 729313f24f87ef46b01b3395ecc714ddb0f492f3 Author: Gal Yardeni Date: Fri Oct 8 13:36:57 2021 -0700 feat(latestVersion): adding latest version + bug fixes commit b3140d16f0eed00aca669fab84895177e178feac Author: Gal Yardeni Date: Thu Oct 7 14:04:37 2021 -0700 feat(latestVersion): adding latest version + bug fixes commit e707108ab9f34e99e6cad187b7bd52e1a44d6185 Merge: 1121e4cf0 afffa4f4c Author: Gal Yardeni Date: Thu Oct 7 13:59:47 2021 -0700 feat(latestVersion): adding latest version + bug fixes commit 1121e4cf0ff542dde60236f38b6f3b95978cbe17 Author: Gal Yardeni Date: Thu Oct 7 13:58:56 2021 -0700 feat(latestVersion): adding latest version + bug fixes commit be603f5d50505246776a4cb51bb45a7d68f163a2 Author: Gal Yardeni Date: Wed Oct 6 21:19:34 2021 -0700 feat(latestVersion): add latest approved version of an artifact commit afffa4f4cd35070ffb0d4dfcd70a8d10ce085757 Author: Gal Yardeni Date: Wed Oct 6 21:19:34 2021 -0700 feat(latestVersion): add latest approved version of an artifact (cherry picked from commit 12b26f1388d2931901bba3137d579b5a2b8bcfb2) * fix(tasks): order sql query before fetching Squashed commit of the following: commit 7d6e559d77de00019f321fe324e3889ed60cb57c Author: Emily Burns Date: Fri Oct 8 14:08:52 2021 -0700 fix(tasks): order sql query before fetching (cherry picked from commit 67ed2626670bad03c40f2244bde1c8484f6e668c) * feat(artifacts): specify which artifact is it on MJ notification Squashed commit of the following: commit f5da4b94a00d2cb88f97d5a45a89d67b7eb12cc8 Author: Gal Yardeni Date: Mon Oct 11 13:45:37 2021 -0700 feat(artifacts): change the message location commit 6b3683ef05ca9189667bf16334dee0d8bbe0a083 Author: Gal Yardeni Date: Mon Oct 11 10:22:23 2021 -0700 feat(artifacts): specify which artifact is it on MJ notification (cherry picked from commit 71cbd7ed12a6c7105e96b9fc39fa49fdec1e2300) * feat(ops): cancel relevant in flight tasks pin/veto Squashed commit of the following: commit 2beabdca39bcd1880509a521e092cf2a1d6408e9 Author: Emily Burns Date: Mon Oct 11 14:02:52 2021 -0700 fix(pr): add user to cancel task commit eb8fa9813bcc67c5829ecfa081fd6cdaa01164a8 Author: Emily Burns Date: Fri Oct 8 10:55:09 2021 -0700 feat(ops): cancel relevant in flight tasks pin/veto (cherry picked from commit 6df3fde2709b896ffd47a4d1d5b6ddd7cf9224cd) * fix(ALBs): ensure listener actions are sorted consistently to avoid false deltas (cherry picked from commit 69226341309f2a27373e0288a064037f657f113b) * chore(verification): add logs and make batch size fast prop-able Squashed commit of the following: commit 461d7571f1b6cfd44d8ae7101dff6fd360832898 Author: Emily Burns Date: Tue Oct 12 13:35:07 2021 -0700 chore(verification): add logs and make batch size fast prop-able (cherry picked from commit 
81b433a2c3d87896d2c6783ce6b3ee0a181b460d) * chore(batch-size): increase default batch size to 5, fast prop post deploy Squashed commit of the following: commit 6a03e98d8e9d5828a9c790f1abb490f1b06b22f6 Author: Emily Burns Date: Tue Oct 12 16:12:29 2021 -0700 chore(batch-size): increase default batch size to 5, fast prop post deploy (cherry picked from commit d29c8f2a8cd8bc7eade66e46257db43c7ae78aaa) * feat(compareLink): show which changeset were deployed as a part of the MJ message Squashed commit of the following: commit 96b5fe54e3839e8cc1ff7ba0ab170a1e8c8d801d Author: Gal Yardeni Date: Tue Oct 12 16:28:41 2021 -0700 feat(compareLink): fix condition commit bbee2d6d34bf3e2e92d36a5522b234dfed031e4d Author: Gal Yardeni Date: Tue Oct 12 16:27:21 2021 -0700 feat(compareLink): fix condition commit d33cab79dd6260147843e6987b022ce5b5e00e25 Author: Gal Yardeni Date: Tue Oct 12 16:01:23 2021 -0700 feat(compareLink): update message commit c57ac986243e301c8ce7de99ef07258eae77bd3b Author: Gal Yardeni Date: Tue Oct 12 11:47:42 2021 -0700 feat(compareLink): calculate link only if approved commit 2bdb23140c0d335fc6f2c8abf74797630502bd25 Author: Gal Yardeni Date: Tue Oct 12 11:26:35 2021 -0700 feat(compareLink): show which changeset were deployed as a part of the MJ message (cherry picked from commit cc4ff9705deb6e6c9ccc3d0b64dc09f790e129da) * feat(rollout): add managed rollout option for clusters Squashed commit of the following: commit 9b63140e6e9ebee331e4a64b328bc686cdfff7ac Author: Emily Burns Date: Wed Oct 13 10:36:52 2021 -0700 fix(PR): feedback updates commit 2a9ee44ac6d2a03e7008d60cf8369614ed1628e7 Author: Emily Burns Date: Mon Oct 11 11:30:45 2021 -0700 fix(PR): fix tests commit b45540927cd6186b3404419b9ec1148a601d1e28 Author: Emily Burns Date: Mon Oct 11 10:37:48 2021 -0700 feat(PR): move managedRollout to spec level, refators commit 0a00606c00d28587d1f7b0ab932960c50db3393e Author: Emily Burns Date: Thu Oct 7 09:25:58 2021 -0700 feat(rollout): add managed rollout option for clusters (cherry picked from commit 72cde8ec7b4acc13869c6552bb14eae0ce174a09) * chore(test): fix test for managed rollout Squashed commit of the following: commit ec6c0eb5860dc53d34bad163b17c434cab74b8c2 Author: Emily Burns Date: Wed Oct 13 13:32:21 2021 -0700 chore(test): fix test for managed rollout (cherry picked from commit 77bce15e4acc5c1fdf66426a7c0e92a327402b3f) * feat(export): First stab at exporting pipelines Squashed commit of the following: commit 349be9edb3a75ec48f8ccca0d2f98dc0b60a14f8 Author: Luis Pollo Date: Thu Oct 14 09:58:58 2021 -0700 feat(export): Improve error handling, add test script commit 8026169d9ff31967929bf10c24af08c18be132df Author: Luis Pollo Date: Wed Oct 13 18:06:08 2021 -0700 refactor(export): Dedupe environment and artifacts commit 88480fb0565ca0addeff4832bbc62b061e14165b Author: Luis Pollo Date: Tue Oct 12 18:41:38 2021 -0700 feat(export): Address feedback on processing of triggers commit a640407d3240863926406d0b28792d9a9ba33d25 Author: Luis Pollo Date: Tue Oct 12 17:50:07 2021 -0700 refactor(export): Refactor export from pipelines to leverage existing export support commit e818b22040d99518d63b1b967953b4939885b13b Author: Luis Pollo Date: Mon Oct 11 18:09:02 2021 -0700 chore(pr): Address review feedback commit 6d58c4e648088811c6c630a51cf65f0d46e5c67c Author: Luis Pollo Date: Mon Oct 11 14:00:38 2021 -0700 feat(export): First stab at exporting pipelines (cherry picked from commit 9802d705f0a61a9aeead6c1e31ee02c65aa404c2) * feat(tasks): get only one batch of tasks Squashed commit of the 
following: commit 6a557f2a351aafcb29d8c3008e297cbd3ed40198 Author: Emily Burns Date: Wed Oct 13 15:39:19 2021 -0700 feat(tasks): get only one batch of tasks (cherry picked from commit 4447d393bb532e3c1f164a5da9d5a6ebf4be26d3) * fix(ALBs): nested rule actions need to be serialized the same as default actions (cherry picked from commit 250bdfd7a73c60ad782a5b594db11d770876c316) * feat(preview-envs): Create preview environment artifacts automatically Squashed commit of the following: commit 202b50efd0ef59dde4e81f0ed91191ab529d435b Author: Luis Pollo Date: Thu Oct 14 17:04:33 2021 -0700 feat(preview-envs): Create preview environment artifacts automatically (cherry picked from commit 48dbf906e80aca6995a7936866c895190e6acf2c) * fix(task): load only 30s batch for all task Squashed commit of the following: commit 906af21f8421f2eb584e65774182b514cc105641 Author: Emily Burns Date: Mon Oct 18 10:04:31 2021 -0700 fix(task): load only 30s batch for all task (cherry picked from commit 0cb19d58cf8393e0e53f34795fba8741bf423d11) * chore(dgs): Migrate to match the reggie schema format Squashed commit of the following: commit cacd4334d8cec18b8a97286bc51d7b1126ab5ae9 Author: Rani Date: Mon Oct 18 15:09:06 2021 -0700 fix(pr): fix tests commit f0211aa15fd05140add2f7651154e600db73b24d Author: Rani Date: Mon Oct 18 14:25:11 2021 -0700 chore(pr): added some basic testing to verify both queries are working properly commit 5d1a800f1e2b7507ce0b8386fadbb274ecaae309 Author: Rani Date: Mon Oct 18 14:24:38 2021 -0700 chore(dgs): removed unused vars commit 8b4ef5961b7b76c2655d08b987987b2d31cbecf1 Author: Rani Date: Fri Oct 15 11:27:38 2021 -0700 fix(pr): missing prefix commit f119b5f06a2c0e2e06113d4eff3c1167e46bd70b Author: Rani Date: Fri Oct 15 11:12:08 2021 -0700 fix(pr): missing underscore commit ba2f7a88f85804aa861011743765b6dc193844e3 Author: Rani Date: Thu Oct 14 21:27:22 2021 -0700 chore(dgs): duplicate schema and all queries and mutations commit d7de84dabf53badd78f3e6beee5a9f9f09bf0210 Author: Rani Date: Thu Oct 14 21:26:57 2021 -0700 chore(web): update dgs version and locks (cherry picked from commit 890203f593259cc7e25642e5482b47b4f7c03569) * feat(rollback): rollback to a server group if one exists with no diff Squashed commit of the following: commit 749c21b78dda3a36650507c00033efbf15cb03f9 Author: Emily Burns Date: Mon Oct 18 16:11:36 2021 -0700 fix(PR): handle ec2 suspend processes commit d85e5f71063ae343a085511ebf0e991c31be9bcf Author: Emily Burns Date: Mon Oct 18 09:24:43 2021 -0700 fix(PR): address feedback commit a6514f9470725d625dbd5f4ba03b7326a5d4f379 Author: Emily Burns Date: Fri Oct 15 15:09:04 2021 -0700 fix(PR): ignore capacity when looking at diff for rollback commit cc3d3e7dfe5cf3b9e43d821bd32fb0c522199180 Author: Emily Burns Date: Fri Oct 15 14:29:13 2021 -0700 feat(rollback): rollback to a server group if one exists with no diff (cherry picked from commit 30cc9b91cf87414455d92efdd470f87ced6130bb) * fix(build) Remove buoy dependency The buoy dependency was introduced in: 09156c9b0 feat(rollout): add managed rollout option for clusters Remove the dependency in order to be compatible with oss * Revert "chore(build): Align kork/fiat coordinates on com.spinnaker.netflix.{}.internal" This reverts commit 6f7a545e2a1b0ff3e00b249f4b60a18ebded8912. * Revert "Merge pull request #571 in SPKR/keel-nflx from fix-java-toolchain to master" This reverts commit 9adc94d653a80eec04875ec564880260e49c1374. * Revert "fix(auth): instantiate filter correctly" This reverts commit fc6865247f9be61416d8d2d88dbb478edca55346. 
* fix(dependencies) Restore spinnakerGradleVersion Restore spinnakerGradleVersion. Otherwise, spring properties fail to load. Likely related to spring boot version. See: https://github.com/spinnaker/spinnaker-gradle-project/pull/184 * Revert "chore(build): see if we can get the build buildable on Java 8" This reverts commit ba831499d59c7c88b9c93a17bc2d30c0e020b430. * fix(exception): Move from UnsupportedOperationException to TODO * Revert "feat(preview-envs): Allow users to override FP email via notifications" This reverts commit dec48512a8d30624c1b698952365ae7836e7ffb9. * Revert "fix(slack): Fix Slack callback handling" This reverts commit db018a94108553a7fb44225a97059ae968b31bca. --------- Co-authored-by: Rob Fletcher Co-authored-by: Emily Burns Co-authored-by: Rani Horev Co-authored-by: Luis Pollo Co-authored-by: Gal Yardeni --- gradle.properties | 2 +- .../keel/api/ClusterDeployStrategy.kt | 12 +- .../keel/api/ManagedRolloutConfig.kt | 17 + .../com/netflix/spinnaker/keel/api/Moniker.kt | 15 + .../netflix/spinnaker/keel/api/Resource.kt | 15 + .../spinnaker/keel/api/ResourceSpec.kt | 13 +- .../keel/api/actuation/TaskLauncher.kt | 23 +- .../keel/api/artifacts/DeliveryArtifact.kt | 12 + .../keel/api/artifacts/PublishedArtifact.kt | 3 +- .../api/constraints/ConstraintRepository.kt | 2 +- .../keel/artifacts/ArtifactListener.kt | 2 +- .../clouddriver/model/ActiveServerGroup.kt | 3 +- .../api/plugins/BaseClusterHandlerTests.kt | 354 +- .../persistence/ApproveOldVersionTests.kt | 3 +- .../DeliveryConfigRepositoryTests.kt | 22 +- .../TaskTrackingRepositoryTests.kt | 93 +- .../keel/rollout/RolloutAwareResolverTests.kt | 272 ++ .../netflix/spinnaker/time/MutableClock.kt | 2 + .../spinnaker/config/ArtifactCheckConfig.kt | 10 + .../spinnaker/config/BaseSchedulerConfig.kt | 2 +- .../config/EnvironmentCheckConfig.kt | 10 + .../spinnaker/keel/BaseActionRunner.kt | 11 +- .../keel/actuation/CheckScheduler.kt | 37 +- .../actuation/EnvironmentPromotionChecker.kt | 42 +- .../keel/actuation/EnvironmentTaskCanceler.kt | 86 + .../actuation/ExecutionSummaryService.kt | 2 +- .../keel/api/plugins/BaseClusterHandler.kt | 444 ++- .../api/support/ConstraintRepositoryBridge.kt | 4 +- .../api/support/ExecutionConstructionUtils.kt | 0 .../keel/auth/AuthorizationSupport.kt | 58 +- .../keel/core/api/SubmittedDeliveryConfig.kt | 11 +- .../DependentEnvironmentFinder.kt | 54 + .../keel/persistence/ArtifactRepository.kt | 10 + .../keel/persistence/CombinedRepository.kt | 10 +- .../persistence/DeliveryConfigRepository.kt | 10 +- .../persistence/FeatureRolloutRepository.kt | 16 + .../keel/persistence/KeelRepository.kt | 14 +- .../persistence/TaskTrackingRepository.kt | 21 +- .../keel/rollout/FeatureRolloutAttempted.kt | 10 + .../keel/rollout/FeatureRolloutFailed.kt | 10 + .../keel/rollout/RolloutAwareResolver.kt | 152 + .../spinnaker/keel/rollout/RolloutStatus.kt | 5 + .../keel/services/ApplicationService.kt | 5 + .../keel/telemetry/TelemetryListener.kt | 33 +- .../keel/verification/VerificationRunner.kt | 1 + .../keel/actuation/CheckSchedulerTests.kt | 18 +- .../EnvironmentPromotionCheckerTests.kt | 374 +- .../actuation/EnvironmentTaskCancelerTests.kt | 101 + .../DependentEnvironmentFinderTests.kt | 172 + .../keel/services/ApplicationServiceTests.kt | 5 +- .../keel/services/ComparableLinksTests.kt | 6 +- .../api/ec2/ApplicationLoadBalancerSpec.kt | 19 +- .../keel/api/ec2/ClassicLoadBalancerSpec.kt | 3 + .../spinnaker/keel/api/ec2/ClusterSpec.kt | 84 +- .../api/ec2/InstanceMetadataServiceVersion.kt | 5 + 
.../keel/api/ec2/LaunchConfigurationSpec.kt | 3 +- .../keel/api/ec2/SecurityGroupSpec.kt | 13 + .../old/ApplicationLoadBalancerV1_1Spec.kt | 4 +- keel-ec2-plugin/keel-ec2-plugin.gradle | 1 + .../com/netflix/spinnaker/config/EC2Config.kt | 31 +- .../spinnaker/keel/ec2/_modelConversions.kt | 2 +- .../spinnaker/keel/ec2/optics/ec2Optics.kt | 60 + ...ApplicationLoadBalancerDefaultsResolver.kt | 5 +- .../InstanceMetadataServiceResolver.kt | 61 + .../ApplicationLoadBalancerHandler.kt | 16 +- .../keel/ec2/resource/ClusterHandler.kt | 512 +-- .../InstanceMetadataServiceResolverTests.kt | 112 + .../keel/ec2/resource/ClusterHandlerTests.kt | 12 +- .../resource/Ec2BaseClusterHandlerTests.kt | 175 +- .../InstanceMetadataResolutionTests.kt | 188 + .../keel/ec2/resource/LaunchConfigTests.kt | 76 +- .../spinnaker/keel/ec2/resource/TestUtils.kt | 19 +- .../spinnaker/config/EchoConfiguration.kt | 1 - .../keel/echo/ManualJudgementNotifier.kt | 4 +- .../spinnaker/keel/front50/Front50Cache.kt | 14 +- .../spinnaker/keel/front50/Front50Service.kt | 8 - .../keel/front50/model/Application.kt | 2 +- .../spinnaker/keel/front50/model/Cluster.kt | 44 + .../spinnaker/keel/front50/model/Delivery.kt | 21 - .../spinnaker/keel/front50/model/Pipeline.kt | 52 + .../spinnaker/keel/front50/model/Stage.kt | 20 - .../spinnaker/keel/front50/model/Stages.kt | 129 + .../keel/front50/model/DeliveryTest.kt | 44 - .../NotificationEventListener.kt | 6 +- .../slack/SlackNotificationEvent.kt | 8 +- .../slack/handlers/GitDataGenerator.kt | 18 +- .../handlers/ManualJudgementUpdateHandler.kt | 35 +- .../ManualJudgmentNotificationHandler.kt | 19 +- .../handlers/UnpinnedNotificationHandler.kt | 12 +- keel-optics/keel-optics.gradle | 4 + .../spinnaker/keel/optics/mapOptics.kt | 14 + .../spinnaker/keel/optics/resourceOptics.kt | 40 + .../keel/orca/OrcaExecutionSummaryService.kt | 98 +- .../spinnaker/keel/orca/OrcaService.kt | 6 + .../spinnaker/keel/orca/OrcaTaskLauncher.kt | 20 +- .../orca/OrcaExecutionSummaryServiceTests.kt | 44 +- .../keel/orca/OrcaTaskMonitorAgentTests.kt | 6 +- .../resources/failed-managed-rollout.json | 1750 ++++++++++ .../resources/managed-rollout-execution.json | 3053 ++++++----------- .../resources/running-managed-rollout.json | 1803 ++++++++++ .../PreviewEnvironmentCodeEventListener.kt | 205 +- .../netflix/spinnaker/keel/preview/utils.kt | 36 +- .../keel/scm/DeliveryConfigImportListener.kt | 6 +- .../com/netflix/spinnaker/keel/scm/utils.kt | 3 +- ...reviewEnvironmentCodeEventListenerTests.kt | 168 +- .../keel/preview/ResourceRenamingTests.kt | 121 + .../scm/DeliveryConfigImportListenerTests.kt | 46 +- keel-sql/keel-sql.gradle | 7 +- .../spinnaker/config/SqlConfiguration.kt | 7 + .../spinnaker/keel/sql/SqlActionRepository.kt | 1 + .../keel/sql/SqlArtifactRepository.kt | 15 + .../keel/sql/SqlDeliveryConfigRepository.kt | 61 +- .../keel/sql/SqlFeatureRolloutRepository.kt | 61 + .../keel/sql/SqlResourceRepository.kt | 1 + .../keel/sql/SqlTaskTrackingRepository.kt | 99 +- .../20210917-feature-rollout-table.yml | 35 + .../20210923-rename-feature-rollout-table.yml | 8 + ...20210928-add-status-to-feature-rollout.yml | 23 + .../20211007-task-tracking-indecies.yml | 11 + .../20211008-version-task-tracking.yml | 14 + .../20211014-artifact-is-preview-column.yml | 15 + .../main/resources/db/databaseChangeLog.yml | 18 + .../sql/SqlFeatureRolloutRepositoryTests.kt | 97 + .../sql/SqlTaskTrackingRepositoryTests.kt | 2 +- .../netflix/spinnaker/keel/test/resources.kt | 17 + .../keel/api/titus/TitusClusterSpec.kt | 11 +- 
keel-titus-plugin/keel-titus-plugin.gradle | 1 + .../netflix/spinnaker/config/TitusConfig.kt | 26 + .../spinnaker/keel/titus/ContainerRunner.kt | 1 + .../titus/InstanceMetadataServiceResolver.kt | 57 + .../keel/titus/TitusClusterHandler.kt | 300 +- .../spinnaker/keel/titus/_titusClusters.kt | 87 +- .../keel/titus/optics/titusOptics.kt | 58 + .../TestContainerVerificationEvaluator.kt | 5 +- .../InstanceMetadataServiceResolverTests.kt | 97 + .../resource/TitusBaseClusterHandlerTests.kt | 176 +- .../titus/resource/TitusClusterExportTests.kt | 27 +- .../resource/TitusClusterHandlerTests.kt | 48 +- .../TitusClusterScalingPolicyTests.kt | 20 +- keel-web/keel-web.gradle | 6 +- .../config/KeelConfigurationFinalizer.kt | 1 - .../keel/{services => admin}/AdminService.kt | 10 +- .../spinnaker/keel/dgs/ApplicationContext.kt | 4 +- .../spinnaker/keel/dgs/ApplicationFetcher.kt | 148 +- .../spinnaker/keel/dgs/ConfigFetcher.kt | 21 +- .../spinnaker/keel/dgs/GitIntegration.kt | 40 +- .../netflix/spinnaker/keel/dgs/Mutations.kt | 69 +- .../keel/dgs/ResourceDetailsFetcher.kt | 5 +- .../spinnaker/keel/dgs/ResourceFetcher.kt | 88 + .../netflix/spinnaker/keel/dgs/conversions.kt | 52 +- .../spinnaker/keel/export/ExportService.kt | 413 +++ .../spinnaker/keel/rest/AdminController.kt | 6 +- .../spinnaker/keel/rest/ExportController.kt | 104 +- .../src/main/resources/schema/schema.graphql | 465 ++- .../{services => admin}/AdminServiceTests.kt | 31 +- .../spinnaker/keel/dgs/BasicQueryTests.kt | 199 ++ .../spinnaker/keel/dgs/DgsTestConfig.kt | 18 +- .../keel/rest/AdminControllerTests.kt | 2 +- .../keel/rest/ExportControllerTests.kt | 3 +- .../src/test/resources/dgs/basicQuery.graphql | 18 + .../dgs/deprecatedBasicQuery.graphql | 18 + settings.gradle | 1 + 162 files changed, 11550 insertions(+), 3448 deletions(-) create mode 100644 keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ManagedRolloutConfig.kt create mode 100644 keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutAwareResolverTests.kt create mode 100644 keel-core/src/main/kotlin/com/netflix/spinnaker/config/ArtifactCheckConfig.kt create mode 100644 keel-core/src/main/kotlin/com/netflix/spinnaker/config/EnvironmentCheckConfig.kt create mode 100644 keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentTaskCanceler.kt rename {keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api => keel-core/src/main/kotlin/com/netflix/spinnaker/keel}/actuation/ExecutionSummaryService.kt (96%) rename keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionUtils.kt => keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/support/ExecutionConstructionUtils.kt (100%) create mode 100644 keel-core/src/main/kotlin/com/netflix/spinnaker/keel/environments/DependentEnvironmentFinder.kt create mode 100644 keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/FeatureRolloutRepository.kt create mode 100644 keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/FeatureRolloutAttempted.kt create mode 100644 keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/FeatureRolloutFailed.kt create mode 100644 keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutAwareResolver.kt create mode 100644 keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutStatus.kt create mode 100644 keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentTaskCancelerTests.kt create mode 100644 
keel-core/src/test/kotlin/com/netflix/spinnaker/keel/environments/DependentEnvironmentFinderTests.kt create mode 100644 keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/InstanceMetadataServiceVersion.kt create mode 100644 keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/optics/ec2Optics.kt create mode 100644 keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/InstanceMetadataServiceResolver.kt create mode 100644 keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/InstanceMetadataServiceResolverTests.kt create mode 100644 keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/InstanceMetadataResolutionTests.kt create mode 100644 keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Cluster.kt delete mode 100644 keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Delivery.kt delete mode 100644 keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Stage.kt create mode 100644 keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Stages.kt delete mode 100644 keel-front50/src/test/kotlin/com/netflix/spinnaker/keel/front50/model/DeliveryTest.kt create mode 100644 keel-optics/keel-optics.gradle create mode 100644 keel-optics/src/main/kotlin/com/netflix/spinnaker/keel/optics/mapOptics.kt create mode 100644 keel-optics/src/main/kotlin/com/netflix/spinnaker/keel/optics/resourceOptics.kt create mode 100644 keel-orca/src/test/resources/failed-managed-rollout.json create mode 100644 keel-orca/src/test/resources/running-managed-rollout.json create mode 100644 keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/preview/ResourceRenamingTests.kt create mode 100644 keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlFeatureRolloutRepository.kt create mode 100644 keel-sql/src/main/resources/db/changelog/20210917-feature-rollout-table.yml create mode 100644 keel-sql/src/main/resources/db/changelog/20210923-rename-feature-rollout-table.yml create mode 100644 keel-sql/src/main/resources/db/changelog/20210928-add-status-to-feature-rollout.yml create mode 100644 keel-sql/src/main/resources/db/changelog/20211007-task-tracking-indecies.yml create mode 100644 keel-sql/src/main/resources/db/changelog/20211008-version-task-tracking.yml create mode 100644 keel-sql/src/main/resources/db/changelog/20211014-artifact-is-preview-column.yml create mode 100644 keel-sql/src/test/kotlin/com/netflix/spinnaker/keel/sql/SqlFeatureRolloutRepositoryTests.kt create mode 100644 keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/InstanceMetadataServiceResolver.kt create mode 100644 keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/optics/titusOptics.kt create mode 100644 keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/InstanceMetadataServiceResolverTests.kt rename keel-web/src/main/kotlin/com/netflix/spinnaker/keel/{services => admin}/AdminService.kt (97%) create mode 100644 keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ResourceFetcher.kt create mode 100644 keel-web/src/main/kotlin/com/netflix/spinnaker/keel/export/ExportService.kt rename keel-web/src/test/kotlin/com/netflix/spinnaker/keel/{services => admin}/AdminServiceTests.kt (89%) create mode 100644 keel-web/src/test/kotlin/com/netflix/spinnaker/keel/dgs/BasicQueryTests.kt create mode 100644 keel-web/src/test/resources/dgs/basicQuery.graphql create mode 100644 keel-web/src/test/resources/dgs/deprecatedBasicQuery.graphql diff --git a/gradle.properties 
b/gradle.properties index 178c68428d..242da8a1af 100644 --- a/gradle.properties +++ b/gradle.properties @@ -7,7 +7,7 @@ org.gradle.parallel=true testContainersVersion=1.15.3 okHttpVersion=4.5.0 resilience4jVersion=1.5.0 -spinnakerGradleVersion=8.26.0 +spinnakerGradleVersion=8.23.0 # Used to control whether to spin up docker to run liquibase before jooq buildingInDocker=false diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ClusterDeployStrategy.kt b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ClusterDeployStrategy.kt index 9d3cb701d3..a98310e7bb 100644 --- a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ClusterDeployStrategy.kt +++ b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ClusterDeployStrategy.kt @@ -13,6 +13,10 @@ abstract class ClusterDeployStrategy { companion object { val DEFAULT_WAIT_FOR_INSTANCES_UP: Duration = Duration.ofMinutes(30) + const val RED_BLACK_STRATEGY = "red-black" + const val HIGHLANDER_STRATEGY = "highlander" + const val NONE_STRATEGY = "none" + const val ROLLING_PUSH_STRATEGY = "rolling-push" } } @@ -28,7 +32,7 @@ data class RedBlack( // The order of this list is important for pauseTime based staggers override val stagger: List = emptyList() ) : ClusterDeployStrategy() { - override val strategy = "red-black" + override val strategy = RED_BLACK_STRATEGY override val isStaggered: Boolean get() = stagger.isNotEmpty() @@ -37,13 +41,13 @@ data class RedBlack( data class Highlander( override val health: DeployHealth = AUTO ) : ClusterDeployStrategy() { - override val strategy = "highlander" + override val strategy = HIGHLANDER_STRATEGY } data class NoStrategy( override val health: DeployHealth = AUTO ): ClusterDeployStrategy() { - override val strategy = "none" + override val strategy = NONE_STRATEGY } data class RollingPush( @@ -54,7 +58,7 @@ data class RollingPush( val totalRelaunches: Int? = null, val terminationOrder: TerminationOrder? = null ): ClusterDeployStrategy() { - override val strategy = "rolling-push" + override val strategy = ROLLING_PUSH_STRATEGY } enum class TerminationOrder { diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ManagedRolloutConfig.kt b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ManagedRolloutConfig.kt new file mode 100644 index 0000000000..356bcfbb70 --- /dev/null +++ b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ManagedRolloutConfig.kt @@ -0,0 +1,17 @@ +package com.netflix.spinnaker.keel.api + + +/** + * When managed rollout is enabled, we will deploy with a ManagedRollout stage instead of + * the normal deploy stage. + */ +data class ManagedRolloutConfig( + val enabled: Boolean = false, + val selectionStrategy: SelectionStrategy? 
= null +) + +// duplication of com.netflix.buoy.sdk.model.SelectionStrategy +// so that we don't add another dependency into this module +enum class SelectionStrategy { + ALPHABETICAL, OFF_PEAK +} diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/Moniker.kt b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/Moniker.kt index 5845294836..c4e41e0c6e 100644 --- a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/Moniker.kt +++ b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/Moniker.kt @@ -15,4 +15,19 @@ data class Moniker( detail == null -> "$app-$stack" else -> "$app-${stack.orEmpty()}-$detail" } + + /** + * @return The [Moniker] with an updated [Moniker.detail] field containing as much of the specified + * [suffix] as possible while respecting max length constraints on resource names. + */ + fun withSuffix(suffix: String, maxNameLength: Int = 32): Moniker { + // calculates the truncation point in the detail field based on how many characters are left of the + // max name length after removing the current detail and accounting for empty stack and detail (which + // cause extra dashes to be added to the name) + var truncateAt = (maxNameLength - toName().length - suffix.length - 1) + if (stack == null) --truncateAt + if (detail == null) --truncateAt else truncateAt += detail!!.length + val updatedDetail = listOfNotNull(detail?.take(truncateAt), suffix).joinToString("-") + return copy(detail = updatedDetail) + } } diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/Resource.kt b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/Resource.kt index 3eb4dfab78..df396cf9cf 100644 --- a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/Resource.kt +++ b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/Resource.kt @@ -49,6 +49,21 @@ data class Resource( else -> null } + /** + * Adds the specified [suffix] to the resource [id] and all properties of the [spec] derived from it. + */ + fun deepRename(suffix: String): Resource { + val updatedSpec = spec.deepRename(suffix) + return copy( + spec = updatedSpec as T, + metadata = metadata + mapOf( + // this is so the resource ID is updated with the new name (which is in the spec) + "id" to updatedSpec.id, + "application" to application + ) + ) + } + // TODO: this is kinda dirty, but because we add uid to the metadata when persisting we don't really want to consider it in equality checks override fun equals(other: Any?): Boolean { if (this === other) return true diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ResourceSpec.kt b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ResourceSpec.kt index f55f20fad8..51dc6fb290 100644 --- a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ResourceSpec.kt +++ b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ResourceSpec.kt @@ -10,8 +10,7 @@ interface ResourceSpec { * form the fully-qualified resource id. * * This can be a property that is part of the spec, or derived from other properties. If the - * latter remember to annotate the overridden property with - * [com.fasterxml.jackson.annotation.JsonIgnore]. + * latter, remember to annotate the overridden property with [com.fasterxml.jackson.annotation.JsonIgnore]. */ val id: String @@ -30,4 +29,14 @@ interface ResourceSpec { * other fields. */ val displayName: String + + /** + * Applies the given [suffix] to the resource [id], and to all aggregate properties of the spec + * whose names are derived from the [id]. 
+ * + * @return a copy of the original [ResourceSpec] with the modified identifiers. + */ + @JvmDefault + fun deepRename(suffix: String): ResourceSpec = + throw TODO("Not implemented") } diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/actuation/TaskLauncher.kt b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/actuation/TaskLauncher.kt index d1091688a6..0a2e8a76d3 100644 --- a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/actuation/TaskLauncher.kt +++ b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/actuation/TaskLauncher.kt @@ -16,21 +16,24 @@ interface TaskLauncher { resource = resource, description = description, correlationId = correlationId, - stages = listOf(job) + stages = listOf(job), + artifactVersion = null ) suspend fun submitJob( resource: Resource<*>, description: String, correlationId: String, - stages: List + stages: List, + artifactVersion: String? = null ): Task fun submitJobAsync( resource: Resource<*>, description: String, correlationId: String, - stages: List> + stages: List>, + artifactVersion: String? = null ): CompletableFuture suspend fun submitJob( @@ -43,7 +46,8 @@ interface TaskLauncher { correlationId: String? = null, stages: List, artifacts: List> = emptyList(), - parameters: Map = emptyMap() + parameters: Map = emptyMap(), + artifactVersion: String? = null ): Task = submitJob( user = user, @@ -56,7 +60,8 @@ interface TaskLauncher { stages = stages, type = SubjectType.CONSTRAINT, artifacts = artifacts, - parameters = parameters + parameters = parameters, + artifactVersion = artifactVersion ) /** @@ -76,7 +81,8 @@ interface TaskLauncher { stages: List, type: SubjectType, artifacts: List> = emptyList(), - parameters: Map = emptyMap() + parameters: Map = emptyMap(), + artifactVersion: String? = null ): Task suspend fun correlatedTasksRunning(correlationId: String): Boolean @@ -85,4 +91,9 @@ interface TaskLauncher { * @return The [TaskExecution] matching the [taskId]. */ suspend fun getTaskExecution(taskId: String): TaskExecution + + /** + * Cancels the given tasks as the provided user identity + */ + suspend fun cancelTasks(taskIds: List, user: String) } diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/artifacts/DeliveryArtifact.kt b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/artifacts/DeliveryArtifact.kt index 27e36d34c8..e7333e6ad0 100644 --- a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/artifacts/DeliveryArtifact.kt +++ b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/artifacts/DeliveryArtifact.kt @@ -104,6 +104,9 @@ abstract class DeliveryArtifact { /** Filters for the artifact origin in source control. */ open val from: ArtifactOriginFilter? = null + /** Whether this artifact was created for a preview environment. */ + open val isPreview: Boolean = false + @get:ExcludedFromDiff val filteredByBranch: Boolean get() = from?.branch != null @@ -155,5 +158,14 @@ abstract class DeliveryArtifact { .map { (it.spec as? ArtifactReferenceProvider)?.artifactReference } .contains(reference) + /** + * returns the resource ids using the artifact in the environment + */ + fun resourcesUsing(environment: Environment) = + environment + .resources + .filter { reference == (it.spec as? 
ArtifactReferenceProvider)?.artifactReference } + .map { it.id } + override fun toString() = "${type.toUpperCase()} artifact $name (ref: $reference)" } diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/artifacts/PublishedArtifact.kt b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/artifacts/PublishedArtifact.kt index 547d4e7998..2834d8691d 100644 --- a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/artifacts/PublishedArtifact.kt +++ b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/artifacts/PublishedArtifact.kt @@ -35,6 +35,7 @@ data class PublishedArtifact( name: String, type: String, version: String, + reference: String? = null, status: ArtifactStatus? = null, createdAt: Instant? = null, gitMetadata: GitMetadata? = null, @@ -43,7 +44,7 @@ data class PublishedArtifact( ) : this( name = name, type = type.toLowerCase(), - reference = name, + reference = reference?: name, version = version, metadata = (metadata ?: emptyMap()) + mapOf( "releaseStatus" to status?.name, diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/constraints/ConstraintRepository.kt b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/constraints/ConstraintRepository.kt index e75a6507ae..be1e04ff72 100644 --- a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/constraints/ConstraintRepository.kt +++ b/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/constraints/ConstraintRepository.kt @@ -25,7 +25,7 @@ interface ConstraintRepository { fun getConstraintStateById(uid: UID): ConstraintState? - fun deleteConstraintState(deliveryConfigName: String, environmentName: String, type: String) + fun deleteConstraintState(deliveryConfigName: String, environmentName: String, reference: String, version: String, type: String): Int fun constraintStateFor(application: String): List diff --git a/keel-artifact/src/main/kotlin/com/netflix/spinnaker/keel/artifacts/ArtifactListener.kt b/keel-artifact/src/main/kotlin/com/netflix/spinnaker/keel/artifacts/ArtifactListener.kt index 09e66be90f..0c25c8bb65 100644 --- a/keel-artifact/src/main/kotlin/com/netflix/spinnaker/keel/artifacts/ArtifactListener.kt +++ b/keel-artifact/src/main/kotlin/com/netflix/spinnaker/keel/artifacts/ArtifactListener.kt @@ -90,7 +90,7 @@ class ArtifactListener( launch { val lastStoredVersions = repository.artifactVersions(artifact, artifactRefreshConfig.limit) val currentVersions = lastStoredVersions.map { it.version } - log.debug("Last recorded versions of $artifact: $currentVersions") + log.debug("Last recorded versions of $artifact: $currentVersions") val artifactSupplier = artifactSuppliers.supporting(artifact.type) val latestAvailableVersions = artifactSupplier.getLatestArtifacts(artifact.deliveryConfig, artifact, artifactRefreshConfig.limit) diff --git a/keel-clouddriver/src/main/kotlin/com/netflix/spinnaker/keel/clouddriver/model/ActiveServerGroup.kt b/keel-clouddriver/src/main/kotlin/com/netflix/spinnaker/keel/clouddriver/model/ActiveServerGroup.kt index a894a02eda..a9bacc5507 100644 --- a/keel-clouddriver/src/main/kotlin/com/netflix/spinnaker/keel/clouddriver/model/ActiveServerGroup.kt +++ b/keel-clouddriver/src/main/kotlin/com/netflix/spinnaker/keel/clouddriver/model/ActiveServerGroup.kt @@ -245,7 +245,8 @@ data class LaunchTemplateData( val keyName: String, val iamInstanceProfile: IamInstanceProfile, val monitoring: InstanceMonitoring, - val ramDiskId: String? 
+ val ramDiskId: String?, + val metadataOptions: Map = emptyMap() ) data class IamInstanceProfile( diff --git a/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/api/plugins/BaseClusterHandlerTests.kt b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/api/plugins/BaseClusterHandlerTests.kt index fc2d7ad81b..a42f2287fb 100644 --- a/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/api/plugins/BaseClusterHandlerTests.kt +++ b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/api/plugins/BaseClusterHandlerTests.kt @@ -1,18 +1,28 @@ package com.netflix.spinnaker.keel.api.plugins import com.netflix.spinnaker.keel.api.ComputeResourceSpec +import com.netflix.spinnaker.keel.api.Moniker import com.netflix.spinnaker.keel.api.Resource import com.netflix.spinnaker.keel.api.ResourceDiff +import com.netflix.spinnaker.keel.api.actuation.Job +import com.netflix.spinnaker.keel.api.actuation.Task +import com.netflix.spinnaker.keel.api.actuation.TaskLauncher import com.netflix.spinnaker.keel.api.support.EventPublisher +import com.netflix.spinnaker.keel.core.serverGroup import com.netflix.spinnaker.time.MutableClock -import dev.minutest.junit.JUnit5Minutests -import dev.minutest.rootContext -import io.mockk.clearAllMocks +import io.mockk.coEvery import io.mockk.every import io.mockk.mockk import kotlinx.coroutines.runBlocking +import org.junit.jupiter.api.Test +import strikt.api.expect import strikt.api.expectThat +import strikt.assertions.hasSize +import strikt.assertions.isA +import strikt.assertions.isEqualTo import strikt.assertions.isFalse +import strikt.assertions.isNotEmpty +import strikt.assertions.isNull import strikt.assertions.isTrue import java.time.Clock @@ -20,77 +30,315 @@ abstract class BaseClusterHandlerTests< SPEC: ComputeResourceSpec<*>, // spec type RESOLVED: Any, // resolved type HANDLER : BaseClusterHandler - > : JUnit5Minutests { + > { abstract fun createSpyHandler( resolvers: List>, clock: Clock, - eventPublisher: EventPublisher): HANDLER + eventPublisher: EventPublisher, + taskLauncher: TaskLauncher, + ): HANDLER + abstract fun getSingleRegionCluster(): Resource - abstract fun getRegions(resource: Resource): List abstract fun getMultiRegionCluster(): Resource - abstract fun getDiffInMoreThanEnabled(): ResourceDiff> - abstract fun getDiffOnlyInEnabled(): ResourceDiff> + abstract fun getMultiRegionStaggeredDeployCluster(): Resource + abstract fun getMultiRegionManagedRolloutCluster(): Resource + + abstract fun getRegions(resource: Resource): List + abstract fun getDiffInMoreThanEnabled(resource: Resource): ResourceDiff> + + abstract fun getDiffOnlyInEnabled(resource: Resource): ResourceDiff> + abstract fun getDiffInCapacity(resource: Resource): ResourceDiff> + abstract fun getDiffInImage(resource: Resource, version: String? 
= null): ResourceDiff> + abstract fun getCreateAndModifyDiff(resource: Resource): ResourceDiff> + abstract fun getDiffForRollback(resource: Resource, version: String, currentMoniker: Moniker): ResourceDiff> + abstract fun getDiffForRollbackPlusCapacity(resource: Resource, version: String, currentMoniker: Moniker): ResourceDiff> + + abstract fun getRollbackServerGroupsByRegion(resource: Resource, version: String, rollbackMoniker: Moniker): Map> + abstract fun getRollbackServerGroupsByRegionZeroCapacity(resource: Resource, version: String, rollbackMoniker: Moniker): Map> val clock: Clock = MutableClock() - val eventPublisher: EventPublisher = mockk() + val eventPublisher: EventPublisher = mockk(relaxUnitFun = true) val resolvers: List> = emptyList() + val taskLauncher: TaskLauncher = mockk() data class Fixture, RESOLVED: Any, HANDLER : BaseClusterHandler>( val handler: HANDLER ) - fun test() = rootContext> { - fixture{ - Fixture( - // create spy handler here so we can test only base cluster logic, not handler - // specific logic - createSpyHandler(resolvers = resolvers, clock = clock, eventPublisher = eventPublisher), - ) + val handler by lazy { + // we create a spy handler so that we can override the results of functions + // without having to set up every little bit of cloud specific data + createSpyHandler( + resolvers = resolvers, + clock = clock, + eventPublisher = eventPublisher, + taskLauncher = taskLauncher, + ) + } + + @Test + fun `handler will take action if diff is in more than enabled`() { + val resource = getSingleRegionCluster() + val diff = getDiffInMoreThanEnabled(resource) + val response = runBlocking { handler.willTakeAction(resource, diff) } + expectThat(response.willAct).isTrue() + } + + @Test + fun `handler will take action if enabled diff and all regions are healthy`() { + every { handler.getUnhealthyRegionsForActiveServerGroup(any()) } returns emptyList() + val resource = getSingleRegionCluster() + val diff = getDiffOnlyInEnabled(resource) + val response = runBlocking { handler.willTakeAction(resource, diff) } + expectThat(response.willAct).isTrue() + } + + @Test + fun `handler will NOT take action if enabled diff and all regions are NOT healthy`() { + every { handler.getUnhealthyRegionsForActiveServerGroup(any()) } returns getRegions(getSingleRegionCluster()) + + val resource = getSingleRegionCluster() + val diff = getDiffOnlyInEnabled(resource) + val response = runBlocking { handler.willTakeAction(resource, diff) } + expectThat(response.willAct).isFalse() + } + + @Test + fun `staggered deploy, multi region, image diff`() { + coEvery { handler.getServerGroupsByRegion(any()) } returns emptyMap() + + val slots = mutableListOf>() // done this way so we can capture the stages for multiple requests + coEvery { taskLauncher.submitJob(any(), any(), any(), capture(slots), any()) } returns Task("id", "name") + + val resource = getMultiRegionStaggeredDeployCluster() + runBlocking { handler.upsert(resource, getDiffInImage(resource)) } + + val firstRegionStages = slots[0] + val secondRegionStages = slots[1] + expect { + // first region + that(firstRegionStages).isNotEmpty().hasSize(2) + val deployStage1 = firstRegionStages[0] + that(deployStage1["type"]).isEqualTo("createServerGroup") + that(deployStage1["refId"]).isEqualTo("1") + that(deployStage1["requisiteRefIds"]).isNull() + val waitStage = firstRegionStages[1] + that(waitStage["type"]).isEqualTo("wait") + that(waitStage["refId"]).isEqualTo("2") + that(waitStage["requisiteStageRefIds"] as? 
List<*>).isEqualTo(listOf("1")) + + //second region + that(secondRegionStages).isNotEmpty().hasSize(2) + val dependsOnExecutionStage = secondRegionStages[0] + that(dependsOnExecutionStage["type"]).isEqualTo("dependsOnExecution") + that(dependsOnExecutionStage["refId"]).isEqualTo("1") + that(dependsOnExecutionStage["requisiteRefIds"]).isNull() + val deployStage2 = secondRegionStages[1] + that(deployStage2["type"]).isEqualTo("createServerGroup") + that(deployStage2["refId"]).isEqualTo("2") + that(deployStage2["requisiteStageRefIds"] as? List<*>).isEqualTo(listOf("1")) + } + } + + @Test + fun `staggered deploy, multi region, capacity diff (no stagger resize stages)`() { + coEvery { handler.getServerGroupsByRegion(any()) } returns emptyMap() + + val slots = mutableListOf>() // done this way so we can capture the stages for multiple requests + coEvery { taskLauncher.submitJob(any(), any(), any(), capture(slots), any()) } returns Task("id", "name") + + val resource = getMultiRegionStaggeredDeployCluster() + runBlocking { handler.upsert(resource, getDiffInCapacity(resource)) } + + val region1Stages = slots[0] + val region2Stages = slots[1] + val stages = slots.associate { + it[0]["region"] to it[0] + } + + expect { + that(region1Stages).isNotEmpty().hasSize(1) + val resizeEast = stages["east"] as Map + that(resizeEast["type"]).isEqualTo("resizeServerGroup") + that(resizeEast["refId"]).isEqualTo("1") + that(resizeEast["requisiteRefIds"]).isNull() + that(resizeEast["region"]).isEqualTo("east") + + that(region2Stages).isNotEmpty().hasSize(1) + val resizeWest = stages["west"] as Map + that(resizeWest["type"]).isEqualTo("resizeServerGroup") + that(resizeWest["refId"]).isEqualTo("1") + that(resizeWest["requisiteRefIds"]).isNull() + that(resizeWest["region"]).isEqualTo("west") + } + } + + @Test + fun `non staggered deploy, multi region, image diff`() { + coEvery { handler.getServerGroupsByRegion(any()) } returns emptyMap() + + val slots = mutableListOf>() // done this way so we can capture the stages for multiple requests + coEvery { taskLauncher.submitJob(any(), any(), any(), capture(slots), any()) } returns Task("id", "name") + + val resource = getMultiRegionCluster() + runBlocking { handler.upsert(resource, getDiffInImage(resource)) } + + val firstRegionStages = slots[0] + val secondRegionStages = slots[1] + expect { + // first region + that(firstRegionStages).isNotEmpty().hasSize(1) + val deployStage1 = firstRegionStages[0] + that(deployStage1["type"]).isEqualTo("createServerGroup") + that(deployStage1["refId"]).isEqualTo("1") + + //second region + that(secondRegionStages).isNotEmpty().hasSize(1) + val deployStage2 = secondRegionStages[0] + that(deployStage2["type"]).isEqualTo("createServerGroup") + that(deployStage2["refId"]).isEqualTo("1") + } + } + + @Test + fun `non staggered deploy, one region, capacity diff`() { + coEvery { handler.getServerGroupsByRegion(any()) } returns emptyMap() + + val slots = mutableListOf>() + coEvery { taskLauncher.submitJob(any(), any(), any(), capture(slots), any()) } returns Task("id", "name") + + val resource = getSingleRegionCluster() + runBlocking { handler.upsert(resource, getDiffInCapacity(resource)) } + expect { + that(slots.size).isEqualTo(1) + val stages = slots[0] + that(stages.size).isEqualTo(1) + that(stages.first()["type"]).isEqualTo("resizeServerGroup") + that(stages.first()["refId"]).isEqualTo("1") + } + } + + @Test + fun `non staggered deploy, one region, image diff`() { + coEvery { handler.getServerGroupsByRegion(any()) } returns emptyMap() + + val 
slots = mutableListOf>() + coEvery { taskLauncher.submitJob(any(), any(), any(), capture(slots), any()) } returns Task("id", "name") + + val resource = getSingleRegionCluster() + runBlocking { handler.upsert(resource, getDiffInImage(resource)) } + expect { + that(slots.size).isEqualTo(1) + val stages = slots[0] + that(stages.size).isEqualTo(1) + that(stages.first()["type"]).isEqualTo("createServerGroup") + that(stages.first()["refId"]).isEqualTo("1") + } + } + + @Test + fun `managed rollout image diff`() { + coEvery { handler.getServerGroupsByRegion(any()) } returns emptyMap() + + val slots = mutableListOf>() + coEvery { taskLauncher.submitJob(any(), any(), any(), capture(slots)) } returns Task("id", "name") + + val resource = getMultiRegionManagedRolloutCluster() + runBlocking { handler.upsert(resource, getDiffInImage(resource)) } + val stages = slots[0] + expect { + that(slots.size).isEqualTo(1) + that(stages.size).isEqualTo(1) + val managedRolloutStage = stages.first() + that(managedRolloutStage["type"]).isEqualTo("managedRollout") + that(managedRolloutStage["refId"]).isEqualTo("1") + that(managedRolloutStage["input"]).isA>() + } + } + + @Test + fun `managed rollout image diff plus capacity change`() { + coEvery { handler.getServerGroupsByRegion(any()) } returns emptyMap() + + val slots = mutableListOf>() + coEvery { taskLauncher.submitJob(any(), any(), any(), capture(slots), any()) } returns Task("id", "name") + + val resource = getMultiRegionManagedRolloutCluster() + runBlocking { handler.upsert(resource, getCreateAndModifyDiff(resource)) } + val firstTask = slots[0] + val secondTask = slots[1] + expect { + that(slots.size).isEqualTo(2) + that(firstTask).isNotEmpty().hasSize(1) + that(secondTask).isNotEmpty().hasSize(1) + val modifyStage = firstTask.first() + that(modifyStage["type"]).isEqualTo("resizeServerGroup") + that(modifyStage["refId"]).isEqualTo("1") + val managedRolloutStage = secondTask.first() + that(managedRolloutStage["type"]).isEqualTo("managedRollout") + that(managedRolloutStage["refId"]).isEqualTo("1") + val targets = (managedRolloutStage["input"] as Map)["targets"] as List> + that(targets).hasSize(1) } + } - after { - clearAllMocks() + @Test + fun `will rollback to a given server group`() { + val resource = getSingleRegionCluster() + val version = "sha:222" + val currentMoniker = resource.spec.moniker.copy(sequence = 2) + val rollbackMoniker = resource.spec.moniker.copy(sequence = 1) + coEvery { handler.getServerGroupsByRegion(resource) } returns + getRollbackServerGroupsByRegion(resource, version, rollbackMoniker) + + val slots = mutableListOf>() + coEvery { taskLauncher.submitJob(any(), any(), any(), capture(slots), any()) } returns Task("id", "name") + + runBlocking { handler.upsert(resource, getDiffForRollback(resource, version, currentMoniker)) } + + val stages = slots[0] + expect { + that(slots.size).isEqualTo(1) + that(stages.size).isEqualTo(1) + val rollbackStage = stages.first() + that(rollbackStage["type"]).isEqualTo("rollbackServerGroup") + that(rollbackStage["rollbackContext"]).isA>() + val rollbackContext = rollbackStage["rollbackContext"] as Map + that(rollbackContext["rollbackServerGroupName"]).isEqualTo(currentMoniker.serverGroup) + that(rollbackContext["restoreServerGroupName"]).isEqualTo(rollbackMoniker.serverGroup) } + } - context("testing whether handler will take action") { - context("diff in more then just too many server groups enabled") { - test("handler will take action") { - val resource = getSingleRegionCluster() - val diff = 
getDiffInMoreThanEnabled() - val response = runBlocking { handler.willTakeAction(resource, diff) } - expectThat(response.willAct).isTrue() - } - } - - context("diff only in number of server groups enabled") { - context("all regions healthy") { - before { - every { handler.getUnhealthyRegionsForActiveServerGroup(any()) } returns listOf() - } - test("handler will take action") { - val resource = getSingleRegionCluster() - val diff = getDiffOnlyInEnabled() - val response = runBlocking { handler.willTakeAction(resource, diff) } - expectThat(response.willAct).isTrue() - } - } - - context("one region unhealthy") { - before { - every { handler.getUnhealthyRegionsForActiveServerGroup(any()) } returns getRegions(getSingleRegionCluster()) - } - test("handler will not take action") { - val resource = getSingleRegionCluster() - val diff = getDiffOnlyInEnabled() - val response = runBlocking { handler.willTakeAction(resource, diff) } - expectThat(response.willAct).isFalse() - } - } - } + @Test + fun `rollback to disabled server group with wrong capacity`() { + // the rollback tasks fixes the capacity + val resource = getSingleRegionCluster() + val version = "sha:222" + val currentMoniker = resource.spec.moniker.copy(sequence = 2) + val rollbackMoniker = resource.spec.moniker.copy(sequence = 1) + coEvery { handler.getServerGroupsByRegion(resource) } returns + getRollbackServerGroupsByRegionZeroCapacity(resource, version, rollbackMoniker) + + val slots = mutableListOf>() + coEvery { taskLauncher.submitJob(any(), any(), any(), capture(slots), any()) } returns Task("id", "name") + + runBlocking { handler.upsert(resource, getDiffForRollbackPlusCapacity(resource, version, currentMoniker)) } + + val stages = slots[0] + expect { + that(slots.size).isEqualTo(1) + that(stages.size).isEqualTo(1) + val rollbackStage = stages.first() + that(rollbackStage["type"]).isEqualTo("rollbackServerGroup") + that(rollbackStage["rollbackContext"]).isA>() + val rollbackContext = rollbackStage["rollbackContext"] as Map + that(rollbackContext["rollbackServerGroupName"]).isEqualTo(currentMoniker.serverGroup) + that(rollbackContext["restoreServerGroupName"]).isEqualTo(rollbackMoniker.serverGroup) } } } + diff --git a/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/persistence/ApproveOldVersionTests.kt b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/persistence/ApproveOldVersionTests.kt index 295f09c714..22037991e4 100644 --- a/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/persistence/ApproveOldVersionTests.kt +++ b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/persistence/ApproveOldVersionTests.kt @@ -105,7 +105,8 @@ abstract class ApproveOldVersionTests : JUnit5Minutests { environmentConstraintRunner, publisher, ArtifactConfig(), - springEnv + springEnv, + MutableClock() ) val artifact = DebianArtifact( diff --git a/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/persistence/DeliveryConfigRepositoryTests.kt b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/persistence/DeliveryConfigRepositoryTests.kt index 66c9bdf6a6..a0bccadcc7 100644 --- a/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/persistence/DeliveryConfigRepositoryTests.kt +++ b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/persistence/DeliveryConfigRepositoryTests.kt @@ -48,6 +48,7 @@ import strikt.api.expectThrows import strikt.assertions.all import strikt.assertions.contains import strikt.assertions.containsExactlyInAnyOrder +import strikt.assertions.filterNot import 
strikt.assertions.first import strikt.assertions.flatMap import strikt.assertions.hasSize @@ -161,6 +162,8 @@ abstract class DeliveryConfigRepositoryTests { @@ -21,8 +26,9 @@ abstract class TaskTrackingRepositoryTests { val subject by lazy { factory(clock) } - val taskRecord1 = TaskRecord("123", "Upsert server group", RESOURCE, randomString(), randomString(), randomString()) - val taskRecord2 = TaskRecord("456", "Bake", RESOURCE, randomString(), null, null) + val taskRecord1 = TaskRecord("1", "Upsert server group", RESOURCE, randomString(), randomString(), randomString(), "v1") + val taskRecord2 = TaskRecord("2", "Bake", RESOURCE, randomString(), null, null, "v2") + val taskRecord3 = TaskRecord("3", "Upsert server group", RESOURCE, "app", "env", "resource", "v3") @AfterEach fun cleanup() { @@ -55,4 +61,87 @@ abstract class TaskTrackingRepositoryTests { subject.updateStatus(taskRecord1.id, SUCCEEDED) expectThat(subject.getIncompleteTasks()).isEmpty() } + + @Test + fun `fetching by batch works`() { + // we fetch a singe batch of tasks + + subject.store(taskRecord3.copy(id = "1", name = "upsert1", artifactVersion = "v1")) + clock.tickMinutes(2) + subject.updateStatus("1", SUCCEEDED) + clock.tickMinutes(2) + + subject.store(taskRecord3.copy(id = "4", name = "upsert2", artifactVersion = "v2")) + clock.tickSeconds(1) + subject.store(taskRecord3.copy(id = "5", name = "upsert3", artifactVersion = "v2")) + clock.tickSeconds(1) + subject.store(taskRecord3.copy(id = "6", name = "upsert4", artifactVersion = "v2")) + + clock.tickMinutes(2) + subject.updateStatus("4", TERMINAL) + + //since the second 'wave' of tasks has one failed task, we fetch that whole wave + val tasks = subject.getLatestBatchOfTasks("resource") + expectThat(tasks).hasSize(3) + expectThat(tasks.map { it.id }).containsExactlyInAnyOrder("4", "5", "6") + } + + @Test + fun `fetching empty batch works`() { + expectCatching { subject.getLatestBatchOfTasks("resource") } + .isSuccess() + .isEmpty() + } + + @Test + fun `fetching by batch includes only tasks started within 30s for the same version`() { + for (i in 1..5) { + val id = "$i" + subject.store( + TaskRecord( + id = id, + name = "($i)Upsert server group", + subjectType = RESOURCE, + application = "app", + environmentName = "env", + resourceId = "resource", + artifactVersion = "v1" + ) + ) + clock.tickMinutes(5) // task runs for 5 minutes + subject.updateStatus(id, SUCCEEDED) + clock.tickMinutes(6) + } + + val tasks = subject.getLatestBatchOfTasks("resource") + expectThat(tasks).hasSize(1) + expectThat(tasks.map { it.artifactVersion }).containsExactlyInAnyOrder("v1") + expectThat(tasks.map { it.id }).containsExactlyInAnyOrder("5") + } + + @Test + fun `given 40 sequentially completed tasks, we only return the latest task because it doesn't have any others in its batch`() { + for (i in 1..40) { + val id = "$i" + subject.store( + TaskRecord( + id = id, + name = "($i)Upsert server group", + subjectType = RESOURCE, + application = "app", + environmentName = "env", + resourceId = "resource", + artifactVersion = id + ) + ) + clock.tickMinutes(5) // task runs for 5 minutes + subject.updateStatus(id, SUCCEEDED) + clock.tickDays(1) + clock.tickMinutes((1L..1000L).random()) + } + + val tasks = subject.getLatestBatchOfTasks("resource") + expectThat(tasks).hasSize(1) + expectThat(tasks.map { it.id }).containsExactlyInAnyOrder("40") + } } diff --git a/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutAwareResolverTests.kt 
b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutAwareResolverTests.kt new file mode 100644 index 0000000000..399e8eec4d --- /dev/null +++ b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutAwareResolverTests.kt @@ -0,0 +1,272 @@ +package com.netflix.spinnaker.keel.rollout + +import com.netflix.spinnaker.keel.api.Resource +import com.netflix.spinnaker.keel.api.ResourceKind +import com.netflix.spinnaker.keel.api.ResourceSpec +import com.netflix.spinnaker.keel.api.support.EventPublisher +import com.netflix.spinnaker.keel.environments.DependentEnvironmentFinder +import com.netflix.spinnaker.keel.events.ResourceState.Diff +import com.netflix.spinnaker.keel.events.ResourceState.Ok +import com.netflix.spinnaker.keel.persistence.FeatureRolloutRepository +import com.netflix.spinnaker.keel.rollout.RolloutStatus.FAILED +import com.netflix.spinnaker.keel.rollout.RolloutStatus.IN_PROGRESS +import com.netflix.spinnaker.keel.rollout.RolloutStatus.NOT_STARTED +import com.netflix.spinnaker.keel.rollout.RolloutStatus.SKIPPED +import com.netflix.spinnaker.keel.rollout.RolloutStatus.SUCCESSFUL +import com.netflix.spinnaker.keel.test.resource +import io.mockk.mockk +import io.mockk.verify +import org.junit.jupiter.api.Test +import strikt.api.Assertion +import strikt.api.expectThat +import io.mockk.coEvery as every + +abstract class RolloutAwareResolverTests> { + abstract val kind: ResourceKind + abstract val spec: SPEC + abstract val previousEnvironmentSpec: SPEC + abstract val nonExistentResolvedResource: RESOLVED + + abstract fun createResolver( + dependentEnvironmentFinder: DependentEnvironmentFinder, + resourceToCurrentState: suspend (Resource) -> RESOLVED, + featureRolloutRepository: FeatureRolloutRepository, + eventPublisher: EventPublisher + ): RESOLVER + + abstract fun SPEC.withFeatureApplied(): SPEC + abstract fun SPEC.withFeatureNotApplied(): SPEC + abstract fun SPEC.toResolvedType(featureActive: Boolean): RESOLVED + abstract fun Assertion.Builder>.featureIsApplied(): Assertion.Builder> + abstract fun Assertion.Builder>.featureIsNotApplied(): Assertion.Builder> + + private val dependentEnvironmentFinder: DependentEnvironmentFinder = mockk() + private val resourceToCurrentState: suspend (Resource) -> RESOLVED = mockk() + private val featureRolloutRepository: FeatureRolloutRepository = mockk(relaxUnitFun = true) { + every { rolloutStatus(any(), any()) } returns (NOT_STARTED to 0) + } + private val eventPublisher: EventPublisher = mockk(relaxUnitFun = true) + + private fun SPEC.toResource() = resource( + kind = kind, + spec = this + ) + + private val resolver by lazy { + createResolver( + dependentEnvironmentFinder, + resourceToCurrentState, + featureRolloutRepository, + eventPublisher + ) + } + + @Test + fun `activates the feature if not specified and there are no previous environments`() { + val resource = spec.toResource() + + // the cluster currently uses v1 + every { resourceToCurrentState(resource) } returns spec.toResolvedType(false) + + // there are no previous environments to consider + every { dependentEnvironmentFinder.resourceStatusesInDependentEnvironments(any()) } returns emptyMap() + every { dependentEnvironmentFinder.resourcesOfSameKindInDependentEnvironments(any>()) } returns emptyList() + + expectThat(resolver(resource)).featureIsApplied() + + verify { featureRolloutRepository.markRolloutStarted(resolver.featureName, resource.id) } + } + + @Test + fun `leaves setting alone if it is explicitly specified`() { + // there are no previous 
environments to consider + every { dependentEnvironmentFinder.resourceStatusesInDependentEnvironments(any()) } returns emptyMap() + every { dependentEnvironmentFinder.resourcesOfSameKindInDependentEnvironments(any>()) } returns emptyList() + + val resource = spec.withFeatureNotApplied().toResource() + + expectThat(resolver(resource)).featureIsNotApplied() + + verify(exactly = 0) { featureRolloutRepository.markRolloutStarted(any(), any()) } + verify { featureRolloutRepository.updateStatus(resolver.featureName, resource.id, SKIPPED) } + } + + @Test + fun `activates the feature if the resource is already using it`() { + val resource = spec.toResource() + + // the cluster currently uses v2 + every { resourceToCurrentState(resource) } returns spec.toResolvedType(true) + + expectThat(resolver(resource)).featureIsApplied() + + // this is not considered starting a rollout + verify(exactly = 0) { featureRolloutRepository.markRolloutStarted(any(), any()) } + + // we take this as confirmation the rollout worked + verify { featureRolloutRepository.updateStatus(resolver.featureName, resource.id, SUCCESSFUL) } + } + + @Test + fun `does not activate the feature if a previous environment is unstable`() { + val resource = spec.toResource() + + // the cluster currently uses v1 + every { resourceToCurrentState(resource) } returns spec.toResolvedType(false) + + // resources in the previous environment are not in a stable state + every { + dependentEnvironmentFinder.resourceStatusesInDependentEnvironments(any()) + } returns listOf(previousEnvironmentSpec.toResource()).associate { it.id to Diff } + + expectThat(resolver(resource)).featureIsNotApplied() + + verify(exactly = 0) { featureRolloutRepository.markRolloutStarted(any(), any()) } + verify { featureRolloutRepository.updateStatus(resolver.featureName, resource.id, NOT_STARTED) } + } + + @Test + fun `uses v2 if this is a new cluster regardless of the state of any preceding ones`() { + val resource = spec.toResource() + + // this cluster doesn't even exist yet + every { resourceToCurrentState(resource) } returns nonExistentResolvedResource + + // resources in the previous environment are not in a stable state (e.g.
whole app is being created) + every { + dependentEnvironmentFinder.resourceStatusesInDependentEnvironments(any()) + } returns listOf(previousEnvironmentSpec.toResource()).associate { it.id to Diff } + + expectThat(resolver(resource)).featureIsApplied() + + // this isn't really a rollout, but we still want to track success in case we have to roll it back + verify { featureRolloutRepository.markRolloutStarted(any(), any()) } + verify { eventPublisher.publishEvent(ofType()) } + } + + @Test + fun `does not apply v2 if v2 has not been rolled out to a previous environment`() { + val resource = spec.toResource() + val previousEnvironmentResource = previousEnvironmentSpec.toResource() + + // the cluster currently uses v1 + every { resourceToCurrentState(resource) } returns spec.toResolvedType(false) + + // the previous environment is in a stable state… + every { + dependentEnvironmentFinder.resourceStatusesInDependentEnvironments((any())) + } returns listOf(previousEnvironmentResource).associate { it.id to Ok } + + // … but its clusters are also still using v1 + every { + dependentEnvironmentFinder.resourcesOfSameKindInDependentEnvironments(any>()) + } returns listOf(previousEnvironmentResource) + every { resourceToCurrentState(previousEnvironmentResource) } returns previousEnvironmentSpec.toResolvedType(false) + + expectThat(resolver(resource)).featureIsNotApplied() + + verify(exactly = 0) { featureRolloutRepository.markRolloutStarted(any(), any()) } + verify { featureRolloutRepository.updateStatus(resolver.featureName, resource.id, NOT_STARTED) } + } + + @Test + fun `applies v2 if v2 has successfully been rolled out to a previous environment`() { + val resource = spec.toResource() + val previousEnvironmentResource = previousEnvironmentSpec.toResource() + + // the cluster currently uses v1 + every { resourceToCurrentState(spec.toResource()) } returns spec.toResolvedType(false) + + // the previous environment is in a stable state… + every { + dependentEnvironmentFinder.resourceStatusesInDependentEnvironments((any())) + } returns listOf(previousEnvironmentResource).associate { it.id to Ok } + every { + dependentEnvironmentFinder.resourcesOfSameKindInDependentEnvironments(any>()) + } returns listOf(previousEnvironmentResource) + + // … and its clusters are already upgraded to v2 + every { resourceToCurrentState(previousEnvironmentResource) } returns previousEnvironmentSpec.toResolvedType(true) + + expectThat(resolver(resource)).featureIsApplied() + + verify { featureRolloutRepository.markRolloutStarted(resolver.featureName, resource.id) } + } + + @Test + fun `stops rollout if it has been attempted before and seemingly not worked`() { + val resource = spec.toResource() + + // a rollout was attempted before, but the cluster is still using v1 (e.g. 
failed to start with v2) + every { featureRolloutRepository.rolloutStatus(resolver.featureName, resource.id) } returns (IN_PROGRESS to 1) + every { resourceToCurrentState(spec.toResource()) } returns spec.toResolvedType(false) + + // there are no previous environments to consider + every { dependentEnvironmentFinder.resourceStatusesInDependentEnvironments(any()) } returns emptyMap() + every { dependentEnvironmentFinder.resourcesOfSameKindInDependentEnvironments(any>()) } returns emptyList() + + // the rollout is NOT attempted again + expectThat(resolver(resource)).featureIsNotApplied() + verify(exactly = 0) { featureRolloutRepository.markRolloutStarted(any(), any()) } + verify(exactly = 0) { eventPublisher.publishEvent(ofType()) } + verify { featureRolloutRepository.updateStatus(resolver.featureName, resource.id, FAILED) } + + // … and we emit an event to indicate it may not be working + verify { eventPublisher.publishEvent(FeatureRolloutFailed(resolver.featureName, resource.id)) } + } + + @Test + fun `applies v2 if it has been successfully applied before, but the current state has gone out of sync`() { + val resource = spec.toResource() + + // a rollout was attempted before, but the cluster is still using v1 (e.g. failed to start with v2) + every { featureRolloutRepository.rolloutStatus(resolver.featureName, resource.id) } returns (SUCCESSFUL to 1) + every { resourceToCurrentState(spec.toResource()) } returns spec.toResolvedType(false) + + // we know it's safe to use V2 + expectThat(resolver(resource)).featureIsApplied() + + // this is not a new rollout so we don't update the database or trigger events + verify(exactly = 0) { featureRolloutRepository.markRolloutStarted(any(), any()) } + verify(exactly = 0) { eventPublisher.publishEvent(any()) } + verify(exactly = 0) { featureRolloutRepository.updateStatus(any(), any(), any()) } + } + + @Test + fun `does not apply v2 if it has been unsuccessfully applied before`() { + val resource = spec.toResource() + + // a rollout was attempted before, but the cluster is still using v1 (e.g. 
failed to start with v2) + every { featureRolloutRepository.rolloutStatus(resolver.featureName, resource.id) } returns (FAILED to 1) + every { resourceToCurrentState(spec.toResource()) } returns spec.toResolvedType(false) + + // we know it's not safe to use V2 + expectThat(resolver(resource)).featureIsNotApplied() + + // this is not a new rollout so we don't update the database or trigger events + verify(exactly = 0) { featureRolloutRepository.markRolloutStarted(any(), any()) } + verify(exactly = 0) { eventPublisher.publishEvent(any()) } + verify(exactly = 0) { featureRolloutRepository.updateStatus(any(), any(), any()) } + } + + @Test + fun `records success if a rollout fails initially but the user fixes it`() { + val resource = spec.toResource() + + // a rollout was attempted before and failed + every { featureRolloutRepository.rolloutStatus(resolver.featureName, resource.id) } returns (FAILED to 1) + + // but the user has fixed things so the feature has been applied + every { resourceToCurrentState(spec.toResource()) } returns spec.toResolvedType(true) + + // we know it's safe to use V2 + expectThat(resolver(resource)).featureIsApplied() + + // this is not a new rollout + verify(exactly = 0) { featureRolloutRepository.markRolloutStarted(any(), any()) } + verify(exactly = 0) { eventPublisher.publishEvent(any()) } + + // but we should now record that it's successful + verify { featureRolloutRepository.updateStatus(resolver.featureName, resource.id, SUCCESSFUL) } + } +} diff --git a/keel-core-test/src/main/kotlin/com/netflix/spinnaker/time/MutableClock.kt b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/time/MutableClock.kt index 108a69de12..a918179427 100644 --- a/keel-core-test/src/main/kotlin/com/netflix/spinnaker/time/MutableClock.kt +++ b/keel-core-test/src/main/kotlin/com/netflix/spinnaker/time/MutableClock.kt @@ -77,6 +77,8 @@ class MutableClock( fun tickMinutes(minutes: Long) = incrementBy(Duration.ofMinutes(minutes)).let { instant } + fun tickDays(days: Long) = incrementBy(Duration.ofDays(days)).let { instant } + fun tickHours(hours: Long) = incrementBy(Duration.ofHours(hours)).let { instant } fun instant(newInstant: Instant) { diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/config/ArtifactCheckConfig.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/config/ArtifactCheckConfig.kt new file mode 100644 index 0000000000..da8f41311b --- /dev/null +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/config/ArtifactCheckConfig.kt @@ -0,0 +1,10 @@ +package com.netflix.spinnaker.config + +import org.springframework.boot.context.properties.ConfigurationProperties + +@ConfigurationProperties(prefix = "keel.artifact-check") +class ArtifactCheckConfig : BaseSchedulerConfig() { + // only uses properties from the BaseSchedulerConfig, + // but this is here to give a separate prefix for overriding the values + // via fast property or in the config file. 
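As a rough sketch of what these prefix-only subclasses buy (illustrative, not part of the patch; real overrides come from Spring Boot property binding or fast properties), each check type now has an independently tunable batch size that falls back to the shared BaseSchedulerConfig defaults:

    import com.netflix.spinnaker.config.ArtifactCheckConfig
    import com.netflix.spinnaker.config.EnvironmentCheckConfig
    import java.time.Duration

    fun main() {
        // Defaults are inherited from BaseSchedulerConfig (batchSize = 5, minAgeDuration = 1 minute,
        // timeoutDuration = 2 minutes) until a prefixed property overrides them, e.g.
        //   keel.artifact-check.batch-size=10
        //   keel.environment-check.batch-size=2
        val artifactCheck = ArtifactCheckConfig()
        val environmentCheck = EnvironmentCheckConfig()
        check(artifactCheck.batchSize == 5)
        check(environmentCheck.minAgeDuration == Duration.ofMinutes(1))
    }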
+} diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/config/BaseSchedulerConfig.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/config/BaseSchedulerConfig.kt index d7b029f753..214ce2867e 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/config/BaseSchedulerConfig.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/config/BaseSchedulerConfig.kt @@ -7,6 +7,6 @@ import java.time.Duration */ open class BaseSchedulerConfig { var minAgeDuration: Duration = Duration.ofMinutes(1) - var batchSize: Int = 1 + var batchSize: Int = 5 var timeoutDuration: Duration = Duration.ofMinutes(2) } diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/config/EnvironmentCheckConfig.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/config/EnvironmentCheckConfig.kt new file mode 100644 index 0000000000..be44f15d5a --- /dev/null +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/config/EnvironmentCheckConfig.kt @@ -0,0 +1,10 @@ +package com.netflix.spinnaker.config + +import org.springframework.boot.context.properties.ConfigurationProperties + +@ConfigurationProperties(prefix = "keel.environment-check") +class EnvironmentCheckConfig : BaseSchedulerConfig() { + // only uses properties from the BaseSchedulerConfig, + // but this is here to give a separate prefix for overriding the values + // via fast property or in the config file. +} diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/BaseActionRunner.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/BaseActionRunner.kt index 51aa832a43..ffbf5d3a1f 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/BaseActionRunner.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/BaseActionRunner.kt @@ -75,31 +75,36 @@ abstract class BaseActionRunner { suspend fun runFor(context: ArtifactInEnvironmentContext) { with(context) { val statuses = getActions() + .also { log.debug("Checking status for ${context.shortName()}: $it") } .map { action -> action to latestStatus(context, action) } + log.debug("Status for ${context.shortName()}: $statuses") + if (actionBlocked(context)) { - log.debug("${logSubject()} is blocked, skipping.") + log.debug("${logSubject()} is blocked for ${shortName()}, skipping.") incrementBlockedCounter(context) return } if (runInSeries() && statuses.anyStillRunning) { - log.debug("${logSubject()} already running for environment {} of application {}", environment.name, deliveryConfig.application) + log.debug("${logSubject()} already running for ${context.shortName()}") return } statuses.firstOutstanding?.let { action -> + log.debug("Starting action ${action.type} ${action.id} for $context") start(context, action) publishStartEvent(context, action) - } ?: log.debug("${logSubject()} complete for environment {} of application {}", environment.name, deliveryConfig.application) + } ?: log.debug("${logSubject()} complete for ${context.shortName()}") } } private suspend fun latestStatus(context: ArtifactInEnvironmentContext, action: T): ConstraintStatus? 
{ val oldState = getPreviousState(context, action) + log.debug("Old state for ${context.shortName()}: $oldState") val newState = if (oldState?.status == PENDING) { evaluate(context, action, oldState) .also { newState -> diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/CheckScheduler.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/CheckScheduler.kt index 099036cc12..79e27cac54 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/CheckScheduler.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/CheckScheduler.kt @@ -2,6 +2,8 @@ package com.netflix.spinnaker.keel.actuation import com.netflix.spectator.api.BasicTag import com.netflix.spectator.api.Registry +import com.netflix.spinnaker.config.ArtifactCheckConfig +import com.netflix.spinnaker.config.EnvironmentCheckConfig import com.netflix.spinnaker.config.EnvironmentDeletionConfig import com.netflix.spinnaker.config.EnvironmentVerificationConfig import com.netflix.spinnaker.config.PostDeployActionsConfig @@ -53,6 +55,8 @@ import kotlin.math.max EnvironmentDeletionConfig::class, EnvironmentVerificationConfig::class, PostDeployActionsConfig::class, + EnvironmentCheckConfig::class, + ArtifactCheckConfig::class ) @Component class CheckScheduler( @@ -64,6 +68,8 @@ class CheckScheduler( private val artifactHandlers: Collection, private val postDeployActionRunner: PostDeployActionRunner, private val resourceCheckConfig: ResourceCheckConfig, + private val environmentCheckConfig: EnvironmentCheckConfig, + private val artifactCheckConfig: ArtifactCheckConfig, private val verificationConfig: EnvironmentVerificationConfig, private val postDeployConfig: PostDeployActionsConfig, private val environmentDeletionConfig: EnvironmentDeletionConfig, @@ -94,6 +100,21 @@ class CheckScheduler( private val checkMinAge: Duration get() = springEnv.getProperty("keel.check.min-age-duration", Duration::class.java, resourceCheckConfig.minAgeDuration) + private val resourceBatchSize: Int + get() = springEnv.getProperty("keel.resource-check.batch-size", Int::class.java, resourceCheckConfig.batchSize) + + private val environmentBatchSize: Int + get() = springEnv.getProperty("keel.environment-check.batch-size", Int::class.java, environmentCheckConfig.batchSize) + + private val artifactBatchSize: Int + get() = springEnv.getProperty("keel.artifact-check.batch-size", Int::class.java, artifactCheckConfig.batchSize) + + private val verificationBatchSize: Int + get() = springEnv.getProperty("keel.verification.batch-size", Int::class.java, verificationConfig.batchSize) + + private val postDeployBatchSize: Int + get() = springEnv.getProperty("keel.post-deploy.batch-size", Int::class.java, postDeployConfig.batchSize) + @Scheduled(fixedDelayString = "\${keel.resource-check.frequency:PT1S}") fun checkResources() { if (enabled.get()) { @@ -102,7 +123,7 @@ class CheckScheduler( supervisorScope { runCatching { repository - .resourcesDueForCheck(checkMinAge, resourceCheckConfig.batchSize) + .resourcesDueForCheck(checkMinAge, resourceBatchSize) } .onFailure { publisher.publishEvent(ResourceLoadFailed(it)) @@ -142,7 +163,7 @@ class CheckScheduler( val job = launch(blankMDC) { supervisorScope { repository - .deliveryConfigsDueForCheck(checkMinAge, resourceCheckConfig.batchSize) + .deliveryConfigsDueForCheck(checkMinAge, environmentBatchSize) .forEach { try { /** @@ -152,7 +173,7 @@ class CheckScheduler( * TODO: consider refactoring environmentPromotionChecker so that it can be called for * individual 
environments, allowing fairer timeouts. */ - withTimeout(resourceCheckConfig.timeoutDuration.toMillis() * max(it.environments.size, 1)) { + withTimeout(environmentCheckConfig.timeoutDuration.toMillis() * max(it.environments.size, 1)) { launch { environmentPromotionChecker.checkEnvironments(it) } } } catch (e: TimeoutCancellationException) { @@ -178,7 +199,7 @@ class CheckScheduler( val job = launch(blankMDC) { supervisorScope { environmentDeletionRepository - .itemsDueForCheck(checkMinAge, resourceCheckConfig.batchSize) + .itemsDueForCheck(checkMinAge, environmentBatchSize) .forEach { try { withTimeout(environmentDeletionConfig.check.timeoutDuration.toMillis()) { @@ -203,10 +224,10 @@ class CheckScheduler( publisher.publishEvent(ScheduledArtifactCheckStarting) val job = launch(blankMDC) { supervisorScope { - repository.artifactsDueForCheck(checkMinAge, resourceCheckConfig.batchSize) + repository.artifactsDueForCheck(checkMinAge, artifactBatchSize) .forEach { artifact -> try { - withTimeout(resourceCheckConfig.timeoutDuration.toMillis()) { + withTimeout(artifactCheckConfig.timeoutDuration.toMillis()) { launch { artifactHandlers.forEach { handler -> handler.handle(artifact) @@ -235,7 +256,7 @@ class CheckScheduler( val job = launch(blankMDC) { supervisorScope { repository - .nextEnvironmentsForVerification(verificationConfig.minAgeDuration, verificationConfig.batchSize) + .nextEnvironmentsForVerification(verificationConfig.minAgeDuration, verificationBatchSize) .forEach { try { withTimeout(verificationConfig.timeoutDuration.toMillis()) { @@ -270,7 +291,7 @@ class CheckScheduler( val job = launch(blankMDC) { supervisorScope { repository - .nextEnvironmentsForPostDeployAction(postDeployConfig.minAgeDuration, postDeployConfig.batchSize) + .nextEnvironmentsForPostDeployAction(postDeployConfig.minAgeDuration, postDeployBatchSize) .forEach { try { withTimeout(postDeployConfig.timeoutDuration.toMillis()) { diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentPromotionChecker.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentPromotionChecker.kt index 6593b19eb2..5d00d9a614 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentPromotionChecker.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentPromotionChecker.kt @@ -8,6 +8,8 @@ import com.netflix.spinnaker.keel.api.artifacts.DeliveryArtifact import com.netflix.spinnaker.keel.api.constraints.ConstraintStatus.PASS import com.netflix.spinnaker.keel.core.api.EnvironmentArtifactVetoes import com.netflix.spinnaker.keel.core.api.PinnedEnvironment +import com.netflix.spinnaker.keel.core.api.PromotionStatus.CURRENT +import com.netflix.spinnaker.keel.core.api.PromotionStatus.DEPLOYING import com.netflix.spinnaker.keel.persistence.KeelRepository import com.netflix.spinnaker.keel.telemetry.ArtifactVersionApproved import com.netflix.spinnaker.keel.telemetry.EnvironmentCheckComplete @@ -31,7 +33,7 @@ class EnvironmentPromotionChecker( private val publisher: ApplicationEventPublisher, private val artifactConfig: ArtifactConfig, private val springEnv: SpringEnvironment, - private val clock: Clock = Clock.systemDefaultZone() + private val clock: Clock ) { private val log by lazy { LoggerFactory.getLogger(javaClass) } @@ -82,6 +84,7 @@ class EnvironmentPromotionChecker( val pinnedVersion = pinnedEnvs.versionFor(environment.name, artifact) // approve version first to fast track deployment approveVersion(deliveryConfig, artifact, 
pinnedVersion!!, environment) + triggerResourceRecheckForPinnedVersion(deliveryConfig, artifact, pinnedVersion!!, environment) // then evaluate constraints constraintRunner.checkEnvironment(envContext) } else { @@ -121,6 +124,12 @@ class EnvironmentPromotionChecker( if (versionSelected == null) { log.warn("No version of {} passes constraints for environment {}", artifact, environment.name) } + triggerResourceRecheckForVetoedVersion( + deliveryConfig, + artifact, + environment, + vetoedArtifacts[envPinKey(environment.name, artifact)] + ) } } else { log.debug("Skipping checks for {} as it is not used in environment {}", artifact, environment.name) @@ -139,13 +148,42 @@ class EnvironmentPromotionChecker( } } + private fun triggerResourceRecheckForVetoedVersion( + deliveryConfig: DeliveryConfig, + artifact: DeliveryArtifact, + targetEnvironment: Environment, + vetoedArtifacts: EnvironmentArtifactVetoes? + ) { + if (vetoedArtifacts == null) return + val currentVersion = repository.getCurrentlyDeployedArtifactVersion(deliveryConfig, artifact, targetEnvironment.name)?.version + if (vetoedArtifacts.versions.map { it.version }.contains(currentVersion)) { + log.info("Triggering recheck for environment ${targetEnvironment.name} of application ${deliveryConfig.application} that is currently on a vetoed version of ${artifact.reference}") + // trigger a recheck of the resources if the current version is vetoed + repository.triggerResourceRecheck(targetEnvironment.name, deliveryConfig.application) + } + } + + private fun triggerResourceRecheckForPinnedVersion( + deliveryConfig: DeliveryConfig, + artifact: DeliveryArtifact, + version: String, + targetEnvironment: Environment + ) { + val status = repository.getArtifactPromotionStatus(deliveryConfig, artifact, version, targetEnvironment.name) + if (status !in listOf(CURRENT, DEPLOYING)) { + log.info("Triggering recheck for pinned environment ${targetEnvironment.name} of application ${deliveryConfig.application} that are on the wrong version. 
Pinned version $version of ${artifact.reference}") + // trigger a recheck of the resources if the version isn't already on its way to the environment + repository.triggerResourceRecheck(targetEnvironment.name, deliveryConfig.application) + } + } + private fun approveVersion( deliveryConfig: DeliveryConfig, artifact: DeliveryArtifact, version: String, targetEnvironment: Environment ) { - log.debug("Approving version $version of ${artifact.type} artifact ${artifact.name} in environment ${targetEnvironment.name}") + log.debug("Approving application ${deliveryConfig.application} version $version of ${artifact.reference} in environment ${targetEnvironment.name}") val isNewVersion = repository .approveVersionFor(deliveryConfig, artifact, version, targetEnvironment.name) if (isNewVersion) { diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentTaskCanceler.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentTaskCanceler.kt new file mode 100644 index 0000000000..5ccafb3465 --- /dev/null +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentTaskCanceler.kt @@ -0,0 +1,86 @@ +package com.netflix.spinnaker.keel.actuation + +import com.netflix.spinnaker.keel.api.actuation.TaskLauncher +import com.netflix.spinnaker.keel.core.api.EnvironmentArtifactPin +import com.netflix.spinnaker.keel.core.api.EnvironmentArtifactVeto +import com.netflix.spinnaker.keel.persistence.KeelRepository +import com.netflix.spinnaker.keel.persistence.TaskTrackingRepository +import kotlinx.coroutines.runBlocking +import org.slf4j.LoggerFactory +import org.springframework.stereotype.Component + +/** + * Handles finding and canceling relevant in-flight tasks when a user takes a pin or veto action. + */ +@Component +class EnvironmentTaskCanceler( + val taskTrackingRepository: TaskTrackingRepository, + val keelRepository: KeelRepository, + val taskLauncher: TaskLauncher +) { + + private val log by lazy { LoggerFactory.getLogger(javaClass) } + + /** + * When a user vetoes a version, they want that version gone from the environment as quickly as possible. + * + * This function finds any in-flight tasks that are deploying the vetoed version to the relevant + * resources, and cancels them. + */ + fun cancelTasksForVeto( + application: String, + veto: EnvironmentArtifactVeto, + user: String + ) { + val inFlightTasks = taskTrackingRepository.getInFlightTasks(application, veto.targetEnvironment) + val relevantResources: List = getRelevantResourceIds(application, veto.targetEnvironment, veto.reference) + // for a veto, we want to cancel tasks that are deploying the vetoed version + val tasksToCancel = inFlightTasks + .filter { it.resourceId in relevantResources && it.artifactVersion == veto.version } + .map { it.id } + + log.info("Canceling tasks $tasksToCancel in application $application because of a veto: {}", veto) + + cancelTasks(tasksToCancel, user) + } + + /** + * When a user pins a version, they want that version deployed as quickly as possible. + * + * This function finds any in-flight tasks that are deploying _different_ versions to the relevant + * resources, and cancels them.
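The selection rule for both actions can be sketched on its own (simplified task type; the id, resourceId and artifactVersion fields mirror the properties used above, everything else is an assumption):

    data class InFlightTask(val id: String, val resourceId: String, val artifactVersion: String?)

    // Veto: cancel in-flight tasks on relevant resources that are deploying exactly the vetoed version.
    fun tasksToCancelForVeto(inFlight: List<InFlightTask>, relevantResources: List<String>, vetoedVersion: String) =
        inFlight.filter { it.resourceId in relevantResources && it.artifactVersion == vetoedVersion }.map { it.id }

    // Pin: cancel in-flight tasks on relevant resources that are deploying anything other than the pinned version.
    fun tasksToCancelForPin(inFlight: List<InFlightTask>, relevantResources: List<String>, pinnedVersion: String) =
        inFlight.filter { it.resourceId in relevantResources && it.artifactVersion != pinnedVersion }.map { it.id }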
+ */ + fun cancelTasksForPin( + application: String, + pin: EnvironmentArtifactPin, + user: String + ) { + val inFlightTasks = taskTrackingRepository.getInFlightTasks(application, pin.targetEnvironment) + val relevantResources: List = getRelevantResourceIds(application, pin.targetEnvironment, pin.reference) + + // for a pin, we want to cancel tasks that are NOT deploying the pinned version + val tasksToCancel = inFlightTasks + .filter { it.resourceId in relevantResources && it.artifactVersion != pin.version } + .map { it.id } + + log.info("Canceling tasks $tasksToCancel in application $application because of a pin: {}", pin) + + cancelTasks(tasksToCancel, user) + } + + fun getRelevantResourceIds( + application: String, + environmentName: String, + artifactReference: String + ): List { + val config = keelRepository.getDeliveryConfigForApplication(application) + val env = config.environmentNamed(environmentName) + return config.matchingArtifactByReference(artifactReference)?.resourcesUsing(env) ?: emptyList() + } + + private fun cancelTasks(taskIds: List, user: String) { + if (taskIds.isNotEmpty()) { + runBlocking { taskLauncher.cancelTasks(taskIds, user) } + } + } +} diff --git a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/actuation/ExecutionSummaryService.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/ExecutionSummaryService.kt similarity index 96% rename from keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/actuation/ExecutionSummaryService.kt rename to keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/ExecutionSummaryService.kt index 4374fa2941..05aec25d03 100644 --- a/keel-api/src/main/kotlin/com/netflix/spinnaker/keel/api/actuation/ExecutionSummaryService.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/actuation/ExecutionSummaryService.kt @@ -1,4 +1,4 @@ -package com.netflix.spinnaker.keel.api.actuation +package com.netflix.spinnaker.keel.actuation import com.netflix.spinnaker.keel.api.TaskStatus import java.time.Instant diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/plugins/BaseClusterHandler.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/plugins/BaseClusterHandler.kt index c0fe27da9d..c39c66fbf4 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/plugins/BaseClusterHandler.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/plugins/BaseClusterHandler.kt @@ -1,12 +1,24 @@ package com.netflix.spinnaker.keel.api.plugins +import com.netflix.spinnaker.keel.api.ClusterDeployStrategy import com.netflix.spinnaker.keel.api.ComputeResourceSpec +import com.netflix.spinnaker.keel.api.Moniker import com.netflix.spinnaker.keel.api.Resource import com.netflix.spinnaker.keel.api.ResourceDiff -import com.netflix.spinnaker.keel.api.ResourceSpec +import com.netflix.spinnaker.keel.api.actuation.Job import com.netflix.spinnaker.keel.api.actuation.Task import com.netflix.spinnaker.keel.api.actuation.TaskLauncher +import com.netflix.spinnaker.keel.core.orcaClusterMoniker +import com.netflix.spinnaker.keel.core.serverGroup +import com.netflix.spinnaker.keel.diff.DefaultResourceDiff import com.netflix.spinnaker.keel.diff.toIndividualDiffs +import com.netflix.spinnaker.keel.orca.dependsOn +import com.netflix.spinnaker.keel.orca.restrictedExecutionWindow +import com.netflix.spinnaker.keel.orca.waitStage +import kotlinx.coroutines.Deferred +import kotlinx.coroutines.async +import kotlinx.coroutines.coroutineScope +import kotlinx.coroutines.runBlocking /** * Common cluster 
functionality. @@ -24,16 +36,52 @@ abstract class BaseClusterHandler, RESOLVED: Any>( */ abstract fun getDesiredRegion(diff: ResourceDiff): String + /** + * parses the desired account from a resource diff + */ + abstract fun getDesiredAccount(diff: ResourceDiff): String + + + abstract fun ResourceDiff.moniker(): Moniker + + abstract fun RESOLVED.moniker(): Moniker + /** * returns true if the diff is only in whether there are too many clusters enabled */ abstract fun ResourceDiff.isEnabledOnly(): Boolean + /** + * return true if diff is only in capacity + */ + abstract fun ResourceDiff.isCapacityOnly(): Boolean + + /** + * return true if diff is only that the server group is disabled + * + * This is only relevant for ec2, because disabled server groups have + * the scaling processes []Launch, AddToLoadBalancer, Terminate] suspended. + * So by default this method returns false to indicate that the diff is + * not ignorable. + */ + open fun ResourceDiff.isSuspendPropertiesAndCapacityOnly() = false + + /** + * return true if diff is only in autoscaling + */ + abstract fun ResourceDiff.isAutoScalingOnly(): Boolean + /** * returns a list of regions where the active server group is unhealthy */ abstract fun getUnhealthyRegionsForActiveServerGroup(resource: Resource): List + /** + * Generates a list of possible correlation ids used to check for running executions + */ + fun generateCorrelationIds(resource: Resource): List = + resource.regions().map { "${resource.id}:$it" } + "${resource.id}:managed-rollout" + override suspend fun willTakeAction( resource: Resource, resourceDiff: ResourceDiff> @@ -72,14 +120,14 @@ abstract class BaseClusterHandler, RESOLVED: Any>( mapOf( "type" to "destroyServerGroup", "asgName" to serverGroup.name, - "moniker" to serverGroup.moniker, + "moniker" to serverGroup.moniker.orcaClusterMoniker, "serverGroupName" to serverGroup.name, "region" to region, "credentials" to resource.spec.locations.account, "cloudProvider" to cloudProvider, "user" to resource.serviceAccount, - // the following 3 properties just say "halt this branch of the pipeline" - "completeOtherBranchesThenFail" to false, + // the following 3 properties just say "try all parallel branches but fail the pipeline if any branch fails" + "completeOtherBranchesThenFail" to true, "continuePipeline" to false, "failPipeline" to false, ) @@ -94,7 +142,8 @@ abstract class BaseClusterHandler, RESOLVED: Any>( description = "Delete cluster ${resource.name} in account ${resource.spec.locations.account}" + " (${regions.joinToString()})", correlationId = "${resource.id}:delete", - stages = stages + stages = stages, + artifactVersion = null ) ) } @@ -108,4 +157,389 @@ abstract class BaseClusterHandler, RESOLVED: Any>( * gets current state of the resource and returns the current image, by region. */ abstract suspend fun getImage(resource: Resource): CurrentImages + + /** + * return a list of stages that resize the server group based on the diff + */ + abstract fun ResourceDiff.resizeServerGroupJob(): Job + + /** + * return a list of stages that modify the scaling policy based on the diff + */ + abstract fun ResourceDiff.modifyScalingPolicyJob(startingRefId: Int = 0): List + + /** + * return the jobs needed to upsert a server group w/o using managed rollout + */ + abstract fun ResourceDiff.upsertServerGroupJob(resource: Resource, startingRefId: Int = 0, version: String? 
= null): Job + + abstract suspend fun getServerGroupsByRegion(resource: Resource): Map> + + abstract fun ResourceDiff.rollbackServerGroupJob(resource: Resource, rollbackServerGroup: RESOLVED): Job + + /** + * return the job needed to upsert a server group using managed rollout + */ + abstract fun Resource.upsertServerGroupManagedRolloutJob(diffs: List>, version: String? = null): Job + + /** + * return the version that will be deployed, represented as an appversion or a tag or a sha + */ + abstract fun ResourceDiff.version(resource: Resource): String + + /** + * return a list of jobs that disable the oldest server group + */ + abstract fun ResourceDiff.disableOtherServerGroupJob( + resource: Resource, + desiredVersion: String + ): Job + + fun accountRegionString(diff: ResourceDiff): String = + "${getDesiredAccount(diff)}/${getDesiredRegion(diff)}" + + fun accountRegionString(resource: Resource, diffs: List>): String = + "${resource.account()}/${diffs.map { getDesiredRegion(it) }.joinToString(",")}" + + fun ResourceDiff.capacityOnlyMessage(): String = + "Resize server group ${moniker()} in " + + accountRegionString(this) + + fun ResourceDiff.autoScalingOnlyMessage(): String = + "Modify auto-scaling of server group ${moniker()} in " + + accountRegionString(this) + + fun ResourceDiff.enabledOnlyMessage(job: Job): String = + "Disable extra active server group ${job["asgName"]} in " + + accountRegionString(this) + + fun ResourceDiff.capacityAndAutoscalingMessage(): String= + "Modify capacity and auto-scaling of server group ${moniker()} in " + + accountRegionString(this) + + fun ResourceDiff.upsertMessage(version: String): String = + "Deploy $version to server group ${moniker()} in " + + accountRegionString(this) + + fun Resource.upsertManagedRolloutMessage(version: String, diffs: List>): String = + "Deploy $version to cluster ${moniker()} in " + + accountRegionString(this, diffs) + " using a managed rollout" + + fun ResourceDiff.rollbackMessage(version: String, rollbackServerGroup: RESOLVED) = + "Rolling back cluster ${moniker()} to $version in ${accountRegionString(this)} (disabling ${current?.moniker()?.serverGroup}, enabling ${rollbackServerGroup.moniker().serverGroup})" + + abstract fun correlationId(resource: Resource, diff: ResourceDiff): String + abstract fun Resource.isStaggeredDeploy(): Boolean + abstract fun Resource.isManagedRollout(): Boolean + abstract fun Resource.regions(): List + abstract fun Resource.moniker(): Moniker + abstract fun Resource.account(): String + abstract fun ResourceDiff.hasScalingPolicies(): Boolean + abstract fun ResourceDiff.isCapacityOrAutoScalingOnly(): Boolean + + /** + * Checks each region to see if there is a valid server group to roll back to, returns it if so. 
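The rule implemented below can be illustrated with a deliberately simplified model (not the real ResourceDiff API): an existing server group qualifies as a rollback target when the only differences from the desired state are ones the rollback task itself repairs, namely capacity and, on EC2, the suspended scaling processes of a disabled group:

    data class SimpleServerGroup(val version: String, val capacity: Int, val enabled: Boolean)

    // In this toy model only the version has to match; in the real check every other field of the
    // resolved server group must also match, since only capacity/suspended-process diffs are ignorable.
    fun isRollbackTarget(desired: SimpleServerGroup, candidate: SimpleServerGroup) =
        candidate.version == desired.version

    fun main() {
        val desired = SimpleServerGroup(version = "sha:111", capacity = 3, enabled = true)
        println(isRollbackTarget(desired, SimpleServerGroup("sha:111", 0, false))) // true: rollback re-enables and resizes
        println(isRollbackTarget(desired, SimpleServerGroup("sha:222", 3, false))) // false: wrong version
    }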
+ */ + @Suppress("UNCHECKED_CAST") + suspend fun List>.getRollbackServerGroupsByRegion(resource: Resource): Map { + val serverGroupsByRegion = getServerGroupsByRegion(resource) + + return serverGroupsByRegion.mapValues { regionalList -> + val region = regionalList.key + val regionalServerGroups = regionalList.value + val result = find { getDesiredRegion(it) == region }?.let { regionalDiff -> + regionalServerGroups.firstOrNull { + val diff = DefaultResourceDiff(regionalDiff.desired, it) + !diff.hasChanges() || diff.isIgnorableForRollback() + } + } + result + }.filterValues { it != null } as Map + } + + // rollback task fixes capacity + private fun ResourceDiff.isIgnorableForRollback() = + isCapacityOnly() || isSuspendPropertiesAndCapacityOnly() + + /** + * @return `true` if [current] exists and the diff includes a scaling policy change. + */ + abstract fun ResourceDiff.hasScalingPolicyDiff(): Boolean + + /** + * @return `true` if [current] doesn't exist and desired includes a scaling policy. + */ + fun ResourceDiff.shouldDeployAndModifyScalingPolicies(): Boolean = + (current == null && hasScalingPolicies()) || + (current != null && !isCapacityOrAutoScalingOnly() && hasScalingPolicyDiff()) + + abstract fun Resource.getDeployWith(): ClusterDeployStrategy + + /** + * For titus, a deploying sha can be associated with more than one tag. + * For ec2, this is irrelevant. + * + * override this function if more than one 'version' needs to be marked as deploying + * when a cluster deploy happens. + */ + open fun ResourceDiff.getDeployingVersions(resource: Resource): Set = + setOf(version(resource)) + + /** + * consolidates the general orchestration logic to the top level, and delegates the cloud-specific bits + * to the individual cluster handlers. + */ + override suspend fun upsert(resource: Resource, resourceDiff: ResourceDiff>): List = + coroutineScope { + val diffs = resourceDiff + .toIndividualDiffs() + .filter { diff -> diff.hasChanges() } + + val deferred: MutableList> = mutableListOf() + + val rollbackServerGroups = diffs.getRollbackServerGroupsByRegion(resource) + + val modifyDiffs = diffs + .filter { + it.isCapacityOrAutoScalingOnly() || it.isEnabledOnly() || it.isCapacityOnly() || rollbackServerGroups[getDesiredRegion(it)] != null + } + val createDiffs = diffs - modifyDiffs + + if (modifyDiffs.isNotEmpty()) { + deferred.addAll( + modifyInPlace(resource, modifyDiffs, rollbackServerGroups) + ) + } + + val version = diffs.first().version(resource) + + if (resource.isStaggeredDeploy() && createDiffs.isNotEmpty()) { + val tasks = upsertStaggered(resource, createDiffs, version) + return@coroutineScope tasks + deferred.map { it.await() } + } + + // if managed rollout, do an upsert managed rollout stage. + if (resource.isManagedRollout() && createDiffs.isNotEmpty()) { + val task = upsertManagedRollout(resource, createDiffs, version) + return@coroutineScope listOf(task) + deferred.map { it.await() } + } + + deferred.addAll( + upsertUnstaggered(resource, createDiffs, version) + ) + + if (createDiffs.isNotEmpty()) { + val versions = createDiffs.map { it.getDeployingVersions(resource) }.flatten().toSet() + notifyArtifactDeploying(resource, versions) + } + + return@coroutineScope deferred.map { it.await() } + } + + /** + * Modifies an existing server group instead of launching a new server group. + * This either modifies the enabled server group, or enables a disabled server group. 
+ */ + suspend fun modifyInPlace( + resource: Resource, + diffs: List>, + rollbackServerGroups: Map + ): List> = + coroutineScope { + diffs.mapNotNull { diff -> + val rollbackServerGroup = rollbackServerGroups[getDesiredRegion(diff)] + val (job, description) = when { + diff.isCapacityOnly() -> listOf(diff.resizeServerGroupJob()) to diff.capacityOnlyMessage() + diff.isAutoScalingOnly() -> diff.modifyScalingPolicyJob() to diff.autoScalingOnlyMessage() + diff.isEnabledOnly() -> { + val appVersion = diff.version(resource) + val job = diff.disableOtherServerGroupJob(resource, appVersion) + listOf(job) to diff.enabledOnlyMessage(job) + } + rollbackServerGroup != null -> { + listOf(diff.rollbackServerGroupJob(resource, rollbackServerGroup)) to diff.rollbackMessage(diff.version(resource), rollbackServerGroup) + } + else -> listOf(diff.resizeServerGroupJob()) + diff.modifyScalingPolicyJob(1) to diff.capacityAndAutoscalingMessage() + } + + if (job.isEmpty()) { + null + } else { + log.info("Modifying server group in-place using task: {}", job) + + async { + taskLauncher.submitJob( + resource = resource, + description = description, + correlationId = correlationId(resource, diff), + stages = job, + artifactVersion = diff.version(resource) + ) + } + } + } + } + + /** + * Deploys a new server group + */ + suspend fun upsertUnstaggered( + resource: Resource, + diffs: List>, + version: String, + dependsOn: String? = null + ): List> = + coroutineScope { + diffs.mapNotNull { diff -> + val stages: MutableList> = mutableListOf() + var refId = 0 + + if (dependsOn != null) { + stages.add(dependsOn(dependsOn)) + refId++ + } + + stages.add(diff.upsertServerGroupJob(resource, refId, version)) + refId++ + + if (diff.shouldDeployAndModifyScalingPolicies()) { + stages.addAll(diff.modifyScalingPolicyJob(refId)) + } + + if (stages.isEmpty()) { + null + } else { + log.info("Upsert server group using task: {}", stages) + + async { + taskLauncher.submitJob( + resource = resource, + description = diff.upsertMessage(version), + correlationId = correlationId(resource, diff), + stages = stages, + artifactVersion = diff.version(resource) + ) + } + } + } + } + + suspend fun upsertStaggered( + resource: Resource, + diffs: List>, + version: String + ): List = + coroutineScope { + val regionalDiffs = diffs.associateBy { getDesiredRegion(it) } + val tasks: MutableList = mutableListOf() + var priorExecutionId: String? = null + val staggeredRegions = resource.getDeployWith().stagger.map { + it.region + } + .toSet() + + // If any, these are deployed in-parallel after all regions with a defined stagger + val unstaggeredRegions = regionalDiffs.keys - staggeredRegions + + for (stagger in resource.getDeployWith().stagger) { + if (!regionalDiffs.containsKey(stagger.region)) { + continue + } + + val diff = regionalDiffs[stagger.region] as ResourceDiff + val stages: MutableList> = mutableListOf() + var refId = 0 + + /** + * Given regions staggered as [A, B, C], this makes the execution of the B + * `createServerGroup` task dependent on the A task, and C dependent on B, + * while preserving the unstaggered behavior of an orca task per region. + */ + if (priorExecutionId != null) { + stages.add(dependsOn(priorExecutionId)) + refId++ + } + + val stage = diff.upsertServerGroupJob(resource, refId).toMutableMap() + + refId++ + + /** + * If regions are staggered by time windows, add a `restrictedExecutionWindow` + * to the `createServerGroup` stage. 
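To make the chaining concrete, the stages submitted per region in the staggered case look roughly like this (shapes taken from the expectations in BaseClusterHandlerTests earlier in this patch; a pauseTime on the first region is what produces the trailing wait stage):

    // First staggered region: its own orca task.
    val firstRegionStages = listOf(
        mapOf("type" to "createServerGroup", "refId" to "1"),
        mapOf("type" to "wait", "refId" to "2", "requisiteStageRefIds" to listOf("1")),
    )

    // Second staggered region: a separate orca task gated on the first region's execution.
    val secondRegionStages = listOf(
        mapOf("type" to "dependsOnExecution", "refId" to "1"),
        mapOf("type" to "createServerGroup", "refId" to "2", "requisiteStageRefIds" to listOf("1")),
    )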
+ */ + if (stagger.hours != null) { + val hours = stagger.hours!!.split("-").map { it.toInt() } + stage.putAll(restrictedExecutionWindow(hours[0], hours[1])) + } + + stages.add(stage) + + if (diff.shouldDeployAndModifyScalingPolicies()) { + stages.addAll(diff.modifyScalingPolicyJob(refId)) + } + + if (stagger.pauseTime != null) { + stages.add( + waitStage(stagger.pauseTime!!, stages.size) + ) + } + + val deferred = async { + taskLauncher.submitJob( + resource = resource, + description = diff.upsertMessage(version), + correlationId = correlationId(resource, diff), + stages = stages, + artifactVersion = diff.version(resource) + ) + } + + notifyArtifactDeploying(resource, diff.getDeployingVersions(resource)) + + val task = deferred.await() + priorExecutionId = task.id + tasks.add(task) + } + + /** + * `ClusterSpec.stagger` doesn't have to define a stagger for all of the regions a cluster is deployed to. + * If a cluster deploys into 4 regions [A, B, C, D] but only defines a stagger for [A, B], + * [C, D] will deploy in parallel after the completion of B and any pauseTime it defines. + */ + if (unstaggeredRegions.isNotEmpty()) { + val unstaggeredDiffs = regionalDiffs + .filter { unstaggeredRegions.contains(it.key) } + .map { it.value } + + tasks.addAll( + upsertUnstaggered(resource, unstaggeredDiffs, version, priorExecutionId) + .map { it.await() } + ) + } + + return@coroutineScope tasks + } + + suspend fun upsertManagedRollout( + resource: Resource, + diffs: List>, + version: String + ): Task { + val stages = listOf(resource.upsertServerGroupManagedRolloutJob(diffs, version).toMutableMap()) + log.info("Upsert server group using managed rollout and task: {}", stages) + + return taskLauncher.submitJob( + resource = resource, + description = resource.upsertManagedRolloutMessage(version, diffs), + correlationId = "${resource.id}:managed-rollout", + stages = stages + ) + } + + fun notifyArtifactDeploying(resource: Resource, versions: Set) { + versions.forEach { version -> + notifyArtifactDeploying(resource, version) + } + } } diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/support/ConstraintRepositoryBridge.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/support/ConstraintRepositoryBridge.kt index 99c822e880..6b41f45e70 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/support/ConstraintRepositoryBridge.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/support/ConstraintRepositoryBridge.kt @@ -43,8 +43,8 @@ class ConstraintRepositoryBridge( return keelRepository.getConstraintStateById(uid) } - override fun deleteConstraintState(deliveryConfigName: String, environmentName: String, type: String) { - return keelRepository.deleteConstraintState(deliveryConfigName, environmentName, type) + override fun deleteConstraintState(deliveryConfigName: String, environmentName: String, reference: String, version: String, type: String): Int { + return keelRepository.deleteConstraintState(deliveryConfigName, environmentName, reference, version, type) } override fun constraintStateFor(application: String): List { diff --git a/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionUtils.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/support/ExecutionConstructionUtils.kt similarity index 100% rename from keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionUtils.kt rename to keel-core/src/main/kotlin/com/netflix/spinnaker/keel/api/support/ExecutionConstructionUtils.kt diff --git
a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/auth/AuthorizationSupport.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/auth/AuthorizationSupport.kt index ac45d84c2b..34b919847c 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/auth/AuthorizationSupport.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/auth/AuthorizationSupport.kt @@ -108,18 +108,7 @@ class AuthorizationSupport( DELIVERY_CONFIG -> repository.getDeliveryConfig(identifier).application else -> throw InvalidRequestException("Invalid target type ${target.name} for application permission check") } - AuthenticatedRequest.allowAnonymous { - permissionEvaluator.hasPermission(auth, application, "APPLICATION", action.name) - }.also { allowed -> - log.debug( - "[ACCESS {}] User {}: {} access to application {}.", - allowed.toAuthorization(), auth.principal, action.name, application - ) - - if (!allowed) { - throw AccessDeniedException("User ${auth.principal} does not have access to application $application") - } - } + checkPermission(auth, application, "APPLICATION", action.name) } } @@ -138,18 +127,7 @@ class AuthorizationSupport( APPLICATION -> repository.getDeliveryConfigForApplication(identifier).serviceAccount DELIVERY_CONFIG -> repository.getDeliveryConfig(identifier).serviceAccount } - AuthenticatedRequest.allowAnonymous { - permissionEvaluator.hasPermission(auth, serviceAccount, "SERVICE_ACCOUNT", "ignored") - }.also { allowed -> - log.debug( - "[ACCESS {}] User {}: access to service account {}.", - allowed.toAuthorization(), auth.principal, serviceAccount - ) - - if (!allowed) { - throw AccessDeniedException("User ${auth.principal} does not have access to service account $serviceAccount") - } - } + checkPermission(auth, serviceAccount, "SERVICE_ACCOUNT", "ACCESS") } } @@ -176,18 +154,7 @@ class AuthorizationSupport( locatableResources.forEach { val locations = (it.spec as Locatable<*>).locations val account = (locations as AccountAwareLocations<*>).account - AuthenticatedRequest.allowAnonymous { - permissionEvaluator.hasPermission(auth, account, "ACCOUNT", action.name) - }.also { allowed -> - log.debug( - "[ACCESS {}] User {}: {} access to cloud account {}.", - allowed.toAuthorization(), auth.principal, action.name, account - ) - - if (!allowed) { - throw AccessDeniedException("User ${auth.principal} does not have access to cloud account $account") - } - } + checkPermission(auth, account, "ACCOUNT", action.name) } } } @@ -202,6 +169,23 @@ class AuthorizationSupport( } } + /** + * Ensures the user (as determined by the passed in [Authentication]) has the specified permission to the + * specified resource. 
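+ * Logs the outcome of the check and throws an [AccessDeniedException] if the permission is denied.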
+ */ + private fun checkPermission(authentication: Authentication, resourceName: String, resourceType: String, permission: String) { + val user = AuthenticatedRequest.getSpinnakerUser().orElse("unknown") + val allowed = AuthenticatedRequest.allowAnonymous { + permissionEvaluator.hasPermission(authentication, resourceName, resourceType, permission) + } + + log.debug("[ACCESS ${allowed.toAuthorization()}] User $user: $permission permission to $resourceType $resourceName.") + if (!allowed) { + throw AccessDeniedException( + "User $user does not have ${permission.humanFriendly()} permission to ${resourceType.humanFriendly()} $resourceName") + } + } + private fun passes(authorizationCheck: () -> Unit) = try { authorizationCheck() @@ -211,4 +195,6 @@ class AuthorizationSupport( } private fun Boolean.toAuthorization() = if (this) "ALLOWED" else "DENIED" + + private fun String.humanFriendly() = this.toLowerCase().replace('_', ' ') } diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/core/api/SubmittedDeliveryConfig.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/core/api/SubmittedDeliveryConfig.kt index 4589ad493a..62ddeb1cde 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/core/api/SubmittedDeliveryConfig.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/core/api/SubmittedDeliveryConfig.kt @@ -60,6 +60,15 @@ data class SubmittedEnvironment( @Description("Optional locations that are propagated to any [resources] where they are not specified.") val locations: SubnetAwareLocations? = null ) { + // We declare the metadata field here such that it's not used in equals() and hashCode(), since we don't + // care about the metadata when comparing environments. + val metadata: MutableMap = mutableMapOf() + + fun addMetadata(vararg metadata: Pair) = + apply { + this.metadata.putAll(metadata) + } + fun toEnvironment(serviceAccount: String? = null) = Environment( name = name, resources = resources.mapTo(mutableSetOf()) { resource -> @@ -71,5 +80,5 @@ data class SubmittedEnvironment( verifyWith = verifyWith, notifications = notifications, postDeploy = postDeploy - ) + ).addMetadata(metadata) } diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/environments/DependentEnvironmentFinder.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/environments/DependentEnvironmentFinder.kt new file mode 100644 index 0000000000..98dd1771f4 --- /dev/null +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/environments/DependentEnvironmentFinder.kt @@ -0,0 +1,54 @@ +package com.netflix.spinnaker.keel.environments + +import com.netflix.spinnaker.keel.api.Resource +import com.netflix.spinnaker.keel.api.ResourceSpec +import com.netflix.spinnaker.keel.core.api.DependsOnConstraint +import com.netflix.spinnaker.keel.events.ResourceState +import com.netflix.spinnaker.keel.persistence.DeliveryConfigRepository +import org.springframework.stereotype.Component + +/** + * Used to find resources in a previous environment based on the chain of depends-on constraints. + */ +@Component +class DependentEnvironmentFinder(private val deliveryConfigRepository: DeliveryConfigRepository) { + /** + * Finds resources of the same kind as [resource] in any previous environment (via depends-on constraints). If no + * previous environment exists (i.e. 
[resource]'s environment has no depends-on constraint) or it contains no resources of the same kind, an empty collection is returned. + */ + fun resourcesOfSameKindInDependentEnvironments(resource: Resource): Collection> { + val dependentEnvironmentNames = dependentEnvironmentNames(resource) + return if (dependentEnvironmentNames.isEmpty()) { + emptyList() + } else { + deliveryConfigRepository.deliveryConfigFor(resource.id) + .environments + .filter { it.name in dependentEnvironmentNames } + .flatMap { it.resources } + .filter { it.kind == resource.kind } as Collection> + } + } + + fun resourceStatusesInDependentEnvironments(resource: Resource<*>): Map { + val dependentEnvironmentNames = dependentEnvironmentNames(resource) + return if (dependentEnvironmentNames.isEmpty()) { + emptyMap() + } else { + val deliveryConfig = deliveryConfigRepository.deliveryConfigFor(resource.id) + return deliveryConfig + .environments + .map { it.name } + .filter { it in dependentEnvironmentNames } + .map { deliveryConfigRepository.resourceStatusesInEnvironment(deliveryConfig.name, it) } + .reduce(Map::plus) + } + } + + private fun dependentEnvironmentNames(resource: Resource): List { + val environment = deliveryConfigRepository.environmentFor(resource.id) + return environment + .constraints + .filterIsInstance() + .map(DependsOnConstraint::environment) + } +} diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/ArtifactRepository.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/ArtifactRepository.kt index 237127ca6b..2404ec5d1c 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/ArtifactRepository.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/ArtifactRepository.kt @@ -398,6 +398,16 @@ interface ArtifactRepository : PeriodicallyCheckedRepository { startTime: Instant, endTime: Instant ): Int + + /** + * @return the latest artifact version of [artifact] approved for use in [environmentName] + * + */ + fun getLatestApprovedInEnvArtifactVersion( + config: DeliveryConfig, + artifact: DeliveryArtifact, + environmentName: String + ): PublishedArtifact? } class NoSuchArtifactException(name: String, type: ArtifactType) : diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/CombinedRepository.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/CombinedRepository.kt index 284e6450a5..33d3e9d88f 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/CombinedRepository.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/CombinedRepository.kt @@ -249,9 +249,6 @@ class CombinedRepository( override fun deleteEnvironment(deliveryConfigName: String, environmentName: String) = deliveryConfigRepository.deleteEnvironment(deliveryConfigName, environmentName) - override fun storeEnvironment(deliveryConfigName: String, environment: Environment) = - deliveryConfigRepository.storeEnvironment(deliveryConfigName, environment) - override fun storeConstraintState(state: ConstraintState) { val previousState = getConstraintState( deliveryConfigName = state.deliveryConfigName, @@ -303,8 +300,8 @@ class CombinedRepository( override fun getConstraintStateById(uid: UID): ConstraintState?
= deliveryConfigRepository.getConstraintStateById(uid) - override fun deleteConstraintState(deliveryConfigName: String, environmentName: String, type: String) = - deliveryConfigRepository.deleteConstraintState(deliveryConfigName, environmentName, type) + override fun deleteConstraintState(deliveryConfigName: String, environmentName: String, reference: String, version: String, type: String): Int = + deliveryConfigRepository.deleteConstraintState(deliveryConfigName, environmentName, reference, version, type) override fun constraintStateFor(application: String): List = deliveryConfigRepository.constraintStateFor(application) @@ -421,6 +418,9 @@ class CombinedRepository( override fun getArtifactVersion(artifact: DeliveryArtifact, version: String, status: ArtifactStatus?): PublishedArtifact? = artifactRepository.getArtifactVersion(artifact, version, status) + override fun getLatestApprovedInEnvArtifactVersion(config: DeliveryConfig, artifact: DeliveryArtifact, environmentName: String): PublishedArtifact? = + artifactRepository.getLatestApprovedInEnvArtifactVersion(config, artifact, environmentName) + override fun updateArtifactMetadata(artifact: PublishedArtifact, artifactMetadata: ArtifactMetadata) = artifactRepository.updateArtifactMetadata(artifact, artifactMetadata) diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/DeliveryConfigRepository.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/DeliveryConfigRepository.kt index 4e974d9f13..07afdc41f0 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/DeliveryConfigRepository.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/DeliveryConfigRepository.kt @@ -9,6 +9,7 @@ import com.netflix.spinnaker.keel.api.artifacts.PublishedArtifact import com.netflix.spinnaker.keel.api.constraints.ConstraintState import com.netflix.spinnaker.keel.core.api.ApplicationSummary import com.netflix.spinnaker.keel.core.api.UID +import com.netflix.spinnaker.keel.events.ResourceState import com.netflix.spinnaker.keel.persistence.DependentAttachFilter.ATTACH_ALL import com.netflix.spinnaker.kork.exceptions.ConfigurationException import com.netflix.spinnaker.kork.exceptions.SystemException @@ -51,6 +52,11 @@ interface DeliveryConfigRepository : PeriodicallyCheckedRepository + /** + * Retrieve the current status of all resources in [environmentName]. + */ + fun resourceStatusesInEnvironment(deliveryConfigName: String, environmentName: String) : Map + /** * Retrieve the [DeliveryConfig] a resource belongs to (the parent of its environment). 
*/ @@ -143,8 +149,10 @@ interface DeliveryConfigRepository : PeriodicallyCheckedRepository + fun updateStatus(feature: String, resourceId: String, status: RolloutStatus) +} + +fun FeatureRolloutRepository.markRolloutStarted(feature: String, resource: Resource<*>) = + markRolloutStarted(feature, resource.id) + +fun FeatureRolloutRepository.rolloutStatus(feature: String, resource: Resource<*>) = + rolloutStatus(feature, resource.id) diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/KeelRepository.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/KeelRepository.kt index a1e631bb21..65678494aa 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/KeelRepository.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/KeelRepository.kt @@ -109,16 +109,6 @@ interface KeelRepository : KeelReadOnlyRepository { fun deleteEnvironment(deliveryConfigName: String, environmentName: String) - /** - * Stores/updates an [Environment] associated with a [DeliveryConfig]. - * - * Generally, updating environments should be done via [store]. This method is primarily - * intended to support the creation of preview environments, where none of the other - * properties of the delivery config have changed, which allows us to use a more efficient - * storage algorithm. - */ - fun storeEnvironment(deliveryConfigName: String, environment: Environment) - /** * If the constraint state changed, publishes a [ConstraintStateChanged] event. */ @@ -126,7 +116,7 @@ interface KeelRepository : KeelReadOnlyRepository { fun getConstraintStateById(uid: UID): ConstraintState? - fun deleteConstraintState(deliveryConfigName: String, environmentName: String, type: String) + fun deleteConstraintState(deliveryConfigName: String, environmentName: String, reference: String, version: String, type: String): Int fun queueArtifactVersionForApproval( deliveryConfigName: String, @@ -188,6 +178,8 @@ interface KeelRepository : KeelReadOnlyRepository { fun getArtifactVersion(artifact: DeliveryArtifact, version: String, status: ArtifactStatus? = null): PublishedArtifact? + fun getLatestApprovedInEnvArtifactVersion(config: DeliveryConfig, artifact: DeliveryArtifact, environmentName: String): PublishedArtifact? 
+ fun updateArtifactMetadata(artifact: PublishedArtifact, artifactMetadata: ArtifactMetadata) fun deleteArtifact(artifact: DeliveryArtifact) diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/TaskTrackingRepository.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/TaskTrackingRepository.kt index 95ee2833ab..050480c904 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/TaskTrackingRepository.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/persistence/TaskTrackingRepository.kt @@ -2,12 +2,21 @@ package com.netflix.spinnaker.keel.persistence import com.netflix.spinnaker.keel.api.TaskStatus import com.netflix.spinnaker.keel.api.actuation.SubjectType +import java.time.Instant interface TaskTrackingRepository { fun store(task: TaskRecord) fun getIncompleteTasks(): Set fun updateStatus(taskId: String, status: TaskStatus) + fun getTasks(resourceId: String, limit: Int = 5): Set + fun getInFlightTasks(application: String, environmentName: String): Set fun delete(taskId: String) + + /** + * @return all running tasks, plus any completed tasks that were + * launched in that "batch" (within 30 seconds of them) + */ + fun getLatestBatchOfTasks(resourceId: String): Set } data class TaskRecord( @@ -16,5 +25,15 @@ data class TaskRecord( val subjectType: SubjectType, val application: String, val environmentName: String?, - val resourceId: String? + val resourceId: String?, + val artifactVersion: String? +) + +data class TaskForResource( + val id: String, + val name: String, + val resourceId: String, + val startedAt: Instant, + val endedAt: Instant?, + val artifactVersion: String? ) diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/FeatureRolloutAttempted.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/FeatureRolloutAttempted.kt new file mode 100644 index 0000000000..91c0b3a200 --- /dev/null +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/FeatureRolloutAttempted.kt @@ -0,0 +1,10 @@ +package com.netflix.spinnaker.keel.rollout + +import com.netflix.spinnaker.keel.api.Resource + +/** + * Event triggered to indicate an attempt to roll out [feature] to [resourceId] was started. + */ +class FeatureRolloutAttempted(val feature: String, val resourceId: String) { + constructor(feature: String, resource: Resource<*>) : this(feature, resource.id) +} diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/FeatureRolloutFailed.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/FeatureRolloutFailed.kt new file mode 100644 index 0000000000..68fdadf0f3 --- /dev/null +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/FeatureRolloutFailed.kt @@ -0,0 +1,10 @@ +package com.netflix.spinnaker.keel.rollout + +import com.netflix.spinnaker.keel.api.Resource + +/** + * Published to indicate an attempt to roll out [feature] to [resourceId] seems not to have worked.
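+ * [TelemetryListener] counts these events under the `keel.feature-rollout.failed` metric.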
+ */ +data class FeatureRolloutFailed(val feature: String, val resourceId: String) { + constructor(feature: String, resource: Resource<*>) : this(feature, resource.id) +} diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutAwareResolver.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutAwareResolver.kt new file mode 100644 index 0000000000..2a2aa2ff16 --- /dev/null +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutAwareResolver.kt @@ -0,0 +1,152 @@ +package com.netflix.spinnaker.keel.rollout + +import com.netflix.spinnaker.keel.api.Resource +import com.netflix.spinnaker.keel.api.ResourceSpec +import com.netflix.spinnaker.keel.api.plugins.Resolver +import com.netflix.spinnaker.keel.api.support.EventPublisher +import com.netflix.spinnaker.keel.environments.DependentEnvironmentFinder +import com.netflix.spinnaker.keel.events.ResourceState.Ok +import com.netflix.spinnaker.keel.persistence.FeatureRolloutRepository +import com.netflix.spinnaker.keel.persistence.markRolloutStarted +import com.netflix.spinnaker.keel.rollout.RolloutStatus.FAILED +import com.netflix.spinnaker.keel.rollout.RolloutStatus.IN_PROGRESS +import com.netflix.spinnaker.keel.rollout.RolloutStatus.NOT_STARTED +import com.netflix.spinnaker.keel.rollout.RolloutStatus.SKIPPED +import com.netflix.spinnaker.keel.rollout.RolloutStatus.SUCCESSFUL +import kotlinx.coroutines.Dispatchers.IO +import kotlinx.coroutines.async +import kotlinx.coroutines.awaitAll +import kotlinx.coroutines.runBlocking +import org.slf4j.LoggerFactory + +/** + * Base class for [Resolver] implementations that are used to safely roll out features to each environment in an + * application in turn. + * + * If a feature is explicitly set (in any state) in the spec, the resolver will be a no-op. Otherwise it will activate + * the feature for a given resource if: + * - it is currently active, or all of the following apply: + * - all resources of the same type in previous environments have the feature activated. + * - all resources in previous environments are "healthy". + * - the rollout of the feature was not attempted before for the same resource. + */ +abstract class RolloutAwareResolver( + private val dependentEnvironmentFinder: DependentEnvironmentFinder, + private val resourceToCurrentState: suspend (Resource) -> RESOLVED, + private val featureRolloutRepository: FeatureRolloutRepository, + private val eventPublisher: EventPublisher +) : Resolver { + + /** + * The name of the feature this resolver deals with. + */ + abstract val featureName: String + + /** + * @return `true` if the feature is explicitly set (whether enabled or disabled) in [resource], `false` if not (and + * therefore this resolver needs to make a decision about it). + */ + abstract fun isExplicitlySpecified(resource: Resource): Boolean + + /** + * @return `true` if the feature is enabled on [actualResource], `false` if not. + */ + abstract fun isAppliedTo(actualResource: RESOLVED): Boolean // TODO: do we need a "partially applied" state for things like multi-region resources? + + /** + * @return a copy of [resource] with the feature activated. + */ + abstract fun activate(resource: Resource): Resource + + /** + * @return a copy of [resource] with the feature deactivated. + */ + abstract fun deactivate(resource: Resource): Resource + + /** + * `true` if the state of this resolved resource indicates that it exists, `false` if it's a new resource that has not + * been created yet. 
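+ * For example, a cluster resolver might consider a cluster with no active server groups to not exist yet.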
+ */ + abstract val RESOLVED.exists: Boolean + + override fun invoke(resource: Resource): Resource { + val currentState by lazy { + runBlocking(IO) { + resourceToCurrentState(resource) + } + } + + val (status, attemptCount) = featureRolloutRepository.rolloutStatus(featureName, resource.id) + + return when { + isExplicitlySpecified(resource) -> { + log.debug("rollout {}->{}: feature is explicitly specified", featureName, resource.id) + featureRolloutRepository.updateStatus(featureName, resource.id, SKIPPED) + resource + } + isNewResource(currentState) -> { + log.debug("rollout {}->{}: new resource, so applying right away", featureName, resource.id) + featureRolloutRepository.markRolloutStarted(featureName, resource.id) + eventPublisher.publishEvent(FeatureRolloutAttempted(featureName, resource)) + activate(resource) + } + isAlreadyRolledOutToThisResource(currentState) -> { + log.debug("rollout {}->{}: feature is active", featureName, resource.id) + featureRolloutRepository.updateStatus(featureName, resource.id, SUCCESSFUL) + activate(resource) + } + status == SUCCESSFUL -> { + log.debug("rollout {}->{}: already successfully rolled out", featureName, resource.id) + activate(resource) + } + status == FAILED -> { + log.debug("rollout {}->{}: rollout was unsuccessful previously", featureName, resource.id) + deactivate(resource) + } + status == IN_PROGRESS -> { + log.warn("rollout {}->{}: attempted before and appears to have failed", featureName, resource.id) + eventPublisher.publishEvent(FeatureRolloutFailed(featureName, resource)) + featureRolloutRepository.updateStatus(featureName, resource.id, FAILED) + deactivate(resource) + } + !previousEnvironmentsStable(resource) -> { + log.debug("rollout {}->{}: dependent environments are not currently stable", featureName, resource.id) + featureRolloutRepository.updateStatus(featureName, resource.id, NOT_STARTED) + deactivate(resource) + } + !isRolledOutToPreviousEnvironments(resource) -> { + log.debug("rollout {}->{}: not yet rolled out to dependent environments", featureName, resource.id) + featureRolloutRepository.updateStatus(featureName, resource.id, NOT_STARTED) + deactivate(resource) + } + else -> { + log.debug("rollout {}->{}: rolling out feature", featureName, resource.id) + featureRolloutRepository.markRolloutStarted(featureName, resource) + eventPublisher.publishEvent(FeatureRolloutAttempted(featureName, resource)) + activate(resource) + } + } + } + + private fun isAlreadyRolledOutToThisResource(currentState: RESOLVED): Boolean = + isAppliedTo(currentState) + + private fun isNewResource(currentState: RESOLVED): Boolean = + !currentState.exists + + private fun isRolledOutToPreviousEnvironments(resource: Resource): Boolean = + runBlocking(IO) { + dependentEnvironmentFinder.resourcesOfSameKindInDependentEnvironments(resource) + .map { async { resourceToCurrentState(it) } } + .awaitAll() + .all { isAppliedTo(it) } + } + + private fun previousEnvironmentsStable(resource: Resource): Boolean { + val dependentEnvironmentResourceStatuses = + dependentEnvironmentFinder.resourceStatusesInDependentEnvironments(resource) + return dependentEnvironmentResourceStatuses.values.all { it == Ok } + } + + private val log by lazy { LoggerFactory.getLogger(javaClass) } +} diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutStatus.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutStatus.kt new file mode 100644 index 0000000000..fe5b317292 --- /dev/null +++ 
b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/rollout/RolloutStatus.kt @@ -0,0 +1,5 @@ +package com.netflix.spinnaker.keel.rollout + +enum class RolloutStatus { + NOT_STARTED, IN_PROGRESS, FAILED, SUCCESSFUL, SKIPPED +} diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/services/ApplicationService.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/services/ApplicationService.kt index a837566131..e2e65537be 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/services/ApplicationService.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/services/ApplicationService.kt @@ -3,6 +3,7 @@ package com.netflix.spinnaker.keel.services import com.netflix.spectator.api.BasicTag import com.netflix.spectator.api.Registry import com.netflix.spinnaker.config.ArtifactConfig +import com.netflix.spinnaker.keel.actuation.EnvironmentTaskCanceler import com.netflix.spinnaker.keel.api.ArtifactInEnvironmentContext import com.netflix.spinnaker.keel.api.DeliveryConfig import com.netflix.spinnaker.keel.api.Environment @@ -89,6 +90,7 @@ class ApplicationService( private val spectator: Registry, private val artifactConfig: ArtifactConfig, private val artifactVersionLinks: ArtifactVersionLinks, + private val environmentTaskCanceler: EnvironmentTaskCanceler ) : CoroutineScope { override val coroutineContext: CoroutineContext = Dispatchers.Default @@ -174,8 +176,10 @@ class ApplicationService( } fun pin(user: String, application: String, pin: EnvironmentArtifactPin) { + log.info("Pinning application $application by user $user: {}", pin) val config = repository.getDeliveryConfigForApplication(application) repository.pinEnvironment(config, pin.copy(pinnedBy = user)) + environmentTaskCanceler.cancelTasksForPin(application, pin, user) repository.triggerDeliveryConfigRecheck(application) // recheck environments to reflect pin immediately publisher.publishEvent(PinnedNotification(config, pin.copy(pinnedBy = user))) } @@ -202,6 +206,7 @@ class ApplicationService( if (!succeeded) { throw InvalidVetoException(application, veto.targetEnvironment, veto.reference, veto.version) } + environmentTaskCanceler.cancelTasksForVeto(application, veto, user) repository.triggerDeliveryConfigRecheck(application) // recheck environments to reflect veto immediately publisher.publishEvent(MarkAsBadNotification( config = config, diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/telemetry/TelemetryListener.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/telemetry/TelemetryListener.kt index 1b38a60ce1..5f607fa144 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/telemetry/TelemetryListener.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/telemetry/TelemetryListener.kt @@ -13,6 +13,8 @@ import com.netflix.spinnaker.keel.actuation.ScheduledPostDeployActionRunStarting import com.netflix.spinnaker.keel.events.ResourceActuationLaunched import com.netflix.spinnaker.keel.events.ResourceCheckResult import com.netflix.spinnaker.keel.events.VerificationBlockedActuation +import com.netflix.spinnaker.keel.rollout.FeatureRolloutAttempted +import com.netflix.spinnaker.keel.rollout.FeatureRolloutFailed import org.slf4j.LoggerFactory import org.springframework.context.event.EventListener import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor @@ -277,16 +279,39 @@ class TelemetryListener( @EventListener(VerificationBlockedActuation::class) fun onBlockedActuation(event: VerificationBlockedActuation) { - spectator.counter(BLOCKED_ACTUATION_ID, 
+ spectator.counter( + BLOCKED_ACTUATION_ID, listOf( BasicTag("resourceId", event.id), BasicTag("resourceKind", event.kind.toString()), BasicTag("resourceApplication", event.application) ) - ) + ).safeIncrement() + } + + @EventListener(FeatureRolloutAttempted::class) + fun onFeatureRolloutAttempted(event: FeatureRolloutAttempted) { + spectator.counter( + FEATURE_ROLLOUT_ATTEMPTED_ID, + listOf( + BasicTag("feature", event.feature), + BasicTag("resourceId", event.resourceId) + ) + ).safeIncrement() + } + + @EventListener(FeatureRolloutFailed::class) + fun onFeatureRolloutFailed(event: FeatureRolloutFailed) { + spectator.counter( + FEATURE_ROLLOUT_FAILED_ID, + listOf( + BasicTag("feature", event.feature), + BasicTag("resourceId", event.resourceId) + ) + ).safeIncrement() } - private fun secondsSince(start: AtomicReference) : Double = + private fun secondsSince(start: AtomicReference): Double = Duration .between(start.get(), clock.instant()) .toMillis() @@ -331,5 +356,7 @@ class TelemetryListener( private const val AGENT_DURATION_ID = "keel.agent.duration" private const val POST_DEPLOY_CHECK_DRIFT_GAUGE = "keel.post-deploy.check.drift" private const val POST_DEPLOY_CHECK_DURATION_ID = "keel.post-deploy.check.duration" + private const val FEATURE_ROLLOUT_ATTEMPTED_ID = "keel.feature-rollout.attempted" + private const val FEATURE_ROLLOUT_FAILED_ID = "keel.feature-rollout.failed" } } diff --git a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/verification/VerificationRunner.kt b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/verification/VerificationRunner.kt index 98e60cc716..6992f1757c 100644 --- a/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/verification/VerificationRunner.kt +++ b/keel-core/src/main/kotlin/com/netflix/spinnaker/keel/verification/VerificationRunner.kt @@ -45,6 +45,7 @@ class VerificationRunner( override suspend fun start(context: ArtifactInEnvironmentContext, action: Verification) { enforcer.withVerificationLease(context) { + log.debug("Starting verification for ${context.shortName()}") val images = imageFinder.getImages(context.deliveryConfig, context.environmentName) val metadata = evaluators.start(context, action) + mapOf("images" to images) actionRepository.updateState(context, action, PENDING, metadata) diff --git a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/CheckSchedulerTests.kt b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/CheckSchedulerTests.kt index 0775ea5aea..802dce2884 100644 --- a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/CheckSchedulerTests.kt +++ b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/CheckSchedulerTests.kt @@ -1,6 +1,8 @@ package com.netflix.spinnaker.keel.actuation import com.netflix.spectator.api.NoopRegistry +import com.netflix.spinnaker.config.ArtifactCheckConfig +import com.netflix.spinnaker.config.EnvironmentCheckConfig import com.netflix.spinnaker.config.EnvironmentDeletionConfig import com.netflix.spinnaker.config.EnvironmentVerificationConfig import com.netflix.spinnaker.config.PostDeployActionsConfig @@ -33,7 +35,7 @@ import org.springframework.context.ApplicationEventPublisher import org.springframework.core.env.Environment as SpringEnvironment import java.time.Duration -internal object CheckSchedulerTests : JUnit5Minutests { +internal class CheckSchedulerTests : JUnit5Minutests { private val repository: KeelRepository = mockk() private val postDeployActionRunner: PostDeployActionRunner = mockk() @@ -47,6 +49,14 @@ internal object 
CheckSchedulerTests : JUnit5Minutests { it.minAgeDuration = checkMinAge it.batchSize = 2 } + private val artifactCheckConfig = ArtifactCheckConfig().also { + it.minAgeDuration = checkMinAge + it.batchSize = 2 + } + private val environmentCheckConfig = EnvironmentCheckConfig().also { + it.minAgeDuration = checkMinAge + it.batchSize = 2 + } private val verificationConfig = EnvironmentVerificationConfig().also { it.minAgeDuration = checkMinAge it.batchSize = 2 @@ -61,6 +71,10 @@ internal object CheckSchedulerTests : JUnit5Minutests { every { getProperty("keel.check.min-age-duration", Duration::class.java, any()) } returns checkMinAge + + every { getProperty("keel.resource-check.batch-size", Int::class.java, any()) } returns resourceCheckConfig.batchSize + every { getProperty("keel.environment-check.batch-size", Int::class.java, any()) } returns environmentCheckConfig.batchSize + every { getProperty("keel.artifact-check.batch-size", Int::class.java, any()) } returns artifactCheckConfig.batchSize } @@ -127,6 +141,8 @@ internal object CheckSchedulerTests : JUnit5Minutests { postDeployActionRunner = postDeployActionRunner, artifactHandlers = listOf(artifactHandler), resourceCheckConfig = resourceCheckConfig, + environmentCheckConfig = environmentCheckConfig, + artifactCheckConfig = artifactCheckConfig, verificationConfig = verificationConfig, postDeployConfig = postDeployConfig, environmentDeletionConfig = EnvironmentDeletionConfig(), diff --git a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentPromotionCheckerTests.kt b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentPromotionCheckerTests.kt index abf8a3f9c5..411329be1a 100644 --- a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentPromotionCheckerTests.kt +++ b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentPromotionCheckerTests.kt @@ -12,8 +12,11 @@ import com.netflix.spinnaker.keel.artifacts.DebianArtifact import com.netflix.spinnaker.keel.artifacts.DockerArtifact import com.netflix.spinnaker.keel.constraints.AllowedTimesConstraintAttributes import com.netflix.spinnaker.keel.constraints.AllowedTimesConstraintEvaluator +import com.netflix.spinnaker.keel.core.api.ArtifactVersionVetoData import com.netflix.spinnaker.keel.core.api.DependsOnConstraint +import com.netflix.spinnaker.keel.core.api.EnvironmentArtifactVetoes import com.netflix.spinnaker.keel.core.api.PinnedEnvironment +import com.netflix.spinnaker.keel.core.api.PromotionStatus import com.netflix.spinnaker.keel.core.api.TimeWindow import com.netflix.spinnaker.keel.core.api.TimeWindowConstraint import com.netflix.spinnaker.keel.core.api.windowsNumeric @@ -21,6 +24,7 @@ import com.netflix.spinnaker.keel.persistence.KeelRepository import com.netflix.spinnaker.keel.telemetry.ArtifactVersionApproved import com.netflix.spinnaker.keel.test.DummyArtifactReferenceResourceSpec import com.netflix.spinnaker.keel.test.resource +import com.netflix.spinnaker.time.MutableClock import dev.minutest.junit.JUnit5Minutests import dev.minutest.rootContext import io.mockk.every @@ -64,7 +68,8 @@ internal class NewEnvironmentPromotionCheckerTests : JUnit5Minutests { environmentConstraintRunner, publisher, ArtifactConfig(), - springEnv + springEnv, + MutableClock() ) val dockerArtifact = DockerArtifact( @@ -168,199 +173,304 @@ internal class NewEnvironmentPromotionCheckerTests : JUnit5Minutests { repository.pinnedEnvironments(any()) } returns emptyList() - every { - repository.vetoedEnvironmentVersions(any()) 
- } returns emptyList() } - context("a single new version is queued for approval") { + context("No vetoed versions") { before { every { - repository.getArtifactVersionsQueuedForApproval(deliveryConfig.name, environment.name, any()) - } returns setOf("2.0").toArtifactVersions() + repository.vetoedEnvironmentVersions(any()) + } returns emptyList() } - context("the version is not already approved for the environment") { + context("a single new version is queued for approval") { before { every { - environmentConstraintRunner.checkStatelessConstraints(dockerArtifact, deliveryConfig, "2.0", environment) - } returns true + repository.getArtifactVersionsQueuedForApproval(deliveryConfig.name, environment.name, any()) + } returns setOf("2.0").toArtifactVersions() + } - every { - repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) - } returns true + context("the version is not already approved for the environment") { + before { + every { + environmentConstraintRunner.checkStatelessConstraints(dockerArtifact, deliveryConfig, "2.0", environment) + } returns true - runBlocking { - subject.checkEnvironments(deliveryConfig) + every { + repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) + } returns true + + runBlocking { + subject.checkEnvironments(deliveryConfig) + } } - } - test("the environment is assigned the latest version of an artifact") { - verify { - repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) + test("the environment is assigned the latest version of an artifact") { + verify { + repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) + } } - } - test("final status of stateless constraints is saved") { - val state = slot() - verify { - repository.storeConstraintState(capture(state)) + test("final status of stateless constraints is saved") { + val state = slot() + verify { + repository.storeConstraintState(capture(state)) + } + expectThat(state.captured.type).isEqualTo("allowed-times") } - expectThat(state.captured.type).isEqualTo("allowed-times") - } - test("a recheck is triggered for the environment") { - verify { - repository.triggerResourceRecheck(environment.name, deliveryConfig.application) + test("a recheck is triggered for the environment") { + verify { + repository.triggerResourceRecheck(environment.name, deliveryConfig.application) + } } - } - test("a telemetry event is fired") { - verify { - publisher.publishEvent( - ArtifactVersionApproved( - deliveryConfig.application, - deliveryConfig.name, - environment.name, - dockerArtifact.name, - dockerArtifact.type, - "2.0" + test("a telemetry event is fired") { + verify { + publisher.publishEvent( + ArtifactVersionApproved( + deliveryConfig.application, + deliveryConfig.name, + environment.name, + dockerArtifact.name, + dockerArtifact.type, + "2.0" + ) ) - ) + } } } - } - context("the version is already approved for the environment") { - before { - every { - environmentConstraintRunner.checkStatelessConstraints(dockerArtifact, deliveryConfig, "2.0", environment) - } returns true + context("the version is already approved for the environment") { + before { + every { + environmentConstraintRunner.checkStatelessConstraints(dockerArtifact, deliveryConfig, "2.0", environment) + } returns true - every { - repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) - } returns false + every { + repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) + } 
returns false - runBlocking { - subject.checkEnvironments(deliveryConfig) + runBlocking { + subject.checkEnvironments(deliveryConfig) + } } - } - test("an event is not sent") { - verify(exactly = 0) { - publisher.publishEvent(ofType()) + test("an event is not sent") { + verify(exactly = 0) { + publisher.publishEvent(ofType()) + } } - } - test("a recheck is not triggered for the environment") { - verify(exactly = 0) { - repository.triggerResourceRecheck(environment.name, deliveryConfig.application) + test("a recheck is not triggered for the environment") { + verify(exactly = 0) { + repository.triggerResourceRecheck(environment.name, deliveryConfig.application) + } } } - } - context("the stateless constraints no longer pass") { - before { - every { - environmentConstraintRunner.checkStatelessConstraints(dockerArtifact, deliveryConfig, "2.0", environment) - } returns false + context("the stateless constraints no longer pass") { + before { + every { + environmentConstraintRunner.checkStatelessConstraints(dockerArtifact, deliveryConfig, "2.0", environment) + } returns false - runBlocking { - subject.checkEnvironments(deliveryConfig) + runBlocking { + subject.checkEnvironments(deliveryConfig) + } + } + + test("nothing is approved") { + verify(exactly = 0) { + repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) + publisher.publishEvent(ofType()) + } } } - test("nothing is approved") { - verify(exactly = 0) { - repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) - publisher.publishEvent(ofType()) + context("the environment is pinned") { + before { + every { + repository.pinnedEnvironments(any()) + } returns listOf( + PinnedEnvironment( + deliveryConfigName = deliveryConfig.name, + targetEnvironment = environment.name, + artifact = dockerArtifact, + version = "1.0", + pinnedBy = null, + pinnedAt = null, + comment = null + ) + ) + } + + context("the pinned version is not deployed yet") { + before { + every { + repository.approveVersionFor(deliveryConfig, dockerArtifact, any(), environment.name) + } returns true + + every { + repository.getArtifactPromotionStatus(deliveryConfig, dockerArtifact, any(), environment.name) + } returns PromotionStatus.PREVIOUS + + runBlocking { + subject.checkEnvironments(deliveryConfig) + } + } + + test("constraint evaluation happens") { + verify(exactly = 1) { + environmentConstraintRunner.checkEnvironment(any()) + } + } + + test("resource recheck happens") { + verify(exactly = 2) { + // Once in the approve function and once in the check trigger. 
+ // In practice, it's unlikely that users will pin a version that hasn't been approved already + repository.triggerResourceRecheck(environment.name, deliveryConfig.application) + } + } + + test("the pinned artifact is approved") { + verify(exactly = 1) { + repository.approveVersionFor(deliveryConfig, dockerArtifact, "1.0", environment.name) + } + } + + test("queued constraints aren't looked at") { + verify(exactly = 0) { + environmentConstraintRunner.checkStatelessConstraints(any(), any(), any(), any()) + } + } + + test("stateless constraints for queued versions aren't rechecked") { + verify(exactly = 0) { + repository.getArtifactVersionsQueuedForApproval(any(), any(), any()) + } + } + } + + context("the pinned version was already approved is now deploying") { + before { + every { + repository.getArtifactPromotionStatus(deliveryConfig, dockerArtifact, any(), environment.name) + } returns PromotionStatus.DEPLOYING + + every { + repository.approveVersionFor(deliveryConfig, dockerArtifact, any(), environment.name) + } returns false + + runBlocking { + subject.checkEnvironments(deliveryConfig) + } + } + + test("resource recheck should not happen") { + verify(exactly = 0) { + repository.triggerResourceRecheck(environment.name, deliveryConfig.application) + } + } } } } - context("the environment is pinned") { + context("there are several versions queued for approval") { before { every { - repository.pinnedEnvironments(any()) - } returns listOf( - PinnedEnvironment( - deliveryConfigName = deliveryConfig.name, - targetEnvironment = environment.name, - artifact = dockerArtifact, - version = "1.0", - pinnedBy = null, - pinnedAt = null, - comment = null - ) - ) + repository.getArtifactVersionsQueuedForApproval(deliveryConfig.name, environment.name, dockerArtifact) + } returns setOf("2.0", "1.2", "1.1").toArtifactVersions() every { - repository.approveVersionFor(deliveryConfig, dockerArtifact, any(), environment.name) - } returns true - - runBlocking { - subject.checkEnvironments(deliveryConfig) - } + repository.getPendingVersionsInEnvironment(any(), dockerArtifact.reference, any()) + } returns listOf("2.0", "1.2", "1.1", "1.0").toArtifactVersions() } - test("constraint evaluation happens") { - verify(exactly = 1) { - environmentConstraintRunner.checkEnvironment(any()) - } - } + context("all versions still pass stateless constraints") { + before { + every { + environmentConstraintRunner.checkStatelessConstraints(dockerArtifact, deliveryConfig, any(), environment) + } returns true - test("the pinned artifact is approved") { - verify(exactly = 1) { - repository.approveVersionFor(deliveryConfig, dockerArtifact, "1.0", environment.name) - } - } + every { + repository.approveVersionFor(deliveryConfig, dockerArtifact, any(), environment.name) + } returns true - test("queued constraints aren't looked at") { - verify(exactly = 0) { - environmentConstraintRunner.checkStatelessConstraints(any(), any(), any(), any()) + runBlocking { + subject.checkEnvironments(deliveryConfig) + } } - } - test("stateless constraints for queued versions aren't rechecked") { - verify(exactly = 0) { - repository.getArtifactVersionsQueuedForApproval(any(), any(), any()) + test("all versions get approved") { + verify { + repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) + repository.approveVersionFor(deliveryConfig, dockerArtifact, "1.2", environment.name) + repository.approveVersionFor(deliveryConfig, dockerArtifact, "1.1", environment.name) + } } } } } - context("there are several versions queued 
for approval") { + context("the current version is marked as bad") { before { every { - repository.getArtifactVersionsQueuedForApproval(deliveryConfig.name, environment.name, dockerArtifact) - } returns setOf("2.0", "1.2", "1.1").toArtifactVersions() + repository.vetoedEnvironmentVersions(any()) + } returns listOf(EnvironmentArtifactVetoes( + deliveryConfigName = deliveryConfig.name, + targetEnvironment = environment.name, + artifact = dockerArtifact, + versions = mutableSetOf(ArtifactVersionVetoData("2.0", vetoedBy = "keel", comment="Oh no" , vetoedAt = null)) + )) every { - repository.getPendingVersionsInEnvironment(any(), dockerArtifact.reference, any()) - } returns listOf("2.0", "1.2", "1.1", "1.0").toArtifactVersions() + repository.getCurrentlyDeployedArtifactVersion(deliveryConfig, dockerArtifact, environment.name) + } returns listOf("2.0").toArtifactVersions().firstOrNull() + + every { + repository.getArtifactVersionsQueuedForApproval(deliveryConfig.name, environment.name, any()) + } returns emptyList() + + runBlocking { + subject.checkEnvironments(deliveryConfig) + } } - context("all versions still pass stateless constraints") { - before { - every { - environmentConstraintRunner.checkStatelessConstraints(dockerArtifact, deliveryConfig, any(), environment) - } returns true + test("trigger recheck if the vetoed version is the CURRENT version") { + verify(exactly = 1) { + repository.triggerResourceRecheck(environment.name, deliveryConfig.application) + } + } + } - every { - repository.approveVersionFor(deliveryConfig, dockerArtifact, any(), environment.name) - } returns true + context("another version is marked as bad. We don't care then") { + before { + every { + repository.vetoedEnvironmentVersions(any()) + } returns listOf(EnvironmentArtifactVetoes( + deliveryConfigName = deliveryConfig.name, + targetEnvironment = environment.name, + artifact = dockerArtifact, + versions = mutableSetOf(ArtifactVersionVetoData("2.0", vetoedBy = "keel", comment="Oh no" , vetoedAt = null)) + )) - runBlocking { - subject.checkEnvironments(deliveryConfig) - } + every { + repository.getCurrentlyDeployedArtifactVersion(deliveryConfig, dockerArtifact, environment.name) + } returns listOf("1.1").toArtifactVersions().firstOrNull() + + every { + repository.getArtifactVersionsQueuedForApproval(deliveryConfig.name, environment.name, any()) + } returns emptyList() + + runBlocking { + subject.checkEnvironments(deliveryConfig) } + } - test("all versions get approved") { - verify { - repository.approveVersionFor(deliveryConfig, dockerArtifact, "2.0", environment.name) - repository.approveVersionFor(deliveryConfig, dockerArtifact, "1.2", environment.name) - repository.approveVersionFor(deliveryConfig, dockerArtifact, "1.1", environment.name) - } + test("we should not trigger a recheck") { + verify(exactly = 0) { + repository.triggerResourceRecheck(any(), any()) } } } @@ -427,6 +537,10 @@ internal class NewEnvironmentPromotionCheckerTests : JUnit5Minutests { ) ) + every { + repository.getArtifactPromotionStatus(multiEnvConfig, dockerArtifact, any(), env1.name) + } returns PromotionStatus.PREVIOUS + runBlocking { subject.checkEnvironments(multiEnvConfig) } diff --git a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentTaskCancelerTests.kt b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentTaskCancelerTests.kt new file mode 100644 index 0000000000..cb641638b5 --- /dev/null +++ 
b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/actuation/EnvironmentTaskCancelerTests.kt @@ -0,0 +1,101 @@ +package com.netflix.spinnaker.keel.actuation + +import com.netflix.spinnaker.keel.api.actuation.TaskLauncher +import com.netflix.spinnaker.keel.core.api.EnvironmentArtifactPin +import com.netflix.spinnaker.keel.core.api.EnvironmentArtifactVeto +import com.netflix.spinnaker.keel.persistence.KeelRepository +import com.netflix.spinnaker.keel.persistence.TaskForResource +import com.netflix.spinnaker.keel.persistence.TaskTrackingRepository +import com.netflix.spinnaker.time.MutableClock +import io.mockk.coVerify +import io.mockk.every +import io.mockk.mockk +import io.mockk.spyk +import org.junit.jupiter.api.Test + +internal class EnvironmentTaskCancelerTests { + + private val clock = MutableClock() + private val application = "waffles" + private val environment = "butter" + private val artifactReference = "fork" + private val user = "emily@emily" + private val pin = EnvironmentArtifactPin( + targetEnvironment = environment, + reference = artifactReference, + version = "v1", + pinnedBy = user, + comment = "this is a good version" + ) + + private val veto = EnvironmentArtifactVeto( + targetEnvironment = environment, + reference = artifactReference, + version = "v0", + vetoedBy = user, + comment = "this is a very bad version" + ) + + private val taskTrackingRepository: TaskTrackingRepository = mockk() { + every { getInFlightTasks(application, environment) } returns setOf( + TaskForResource( + id = "task1", + name = "upsert vetoed version", + resourceId = "1", + startedAt = clock.instant(), + endedAt = null, + artifactVersion = "v0", + ), + TaskForResource( + id = "task2", + name = "upsert pinned version", + resourceId = "1", + startedAt = clock.instant(), + endedAt = null, + artifactVersion = "v1", + ) + ) + } + private val keelRepository: KeelRepository = mockk() + private val taskLauncher: TaskLauncher = mockk(relaxUnitFun = true) + + //this is a spyk so that i can mock the relevant resource ids response instead of constructing a delivery config + private val subject = spyk(EnvironmentTaskCanceler(taskTrackingRepository, keelRepository, taskLauncher)) + + @Test + fun `pin - cancels no tasks when there are no relevant resources`() { + every { subject.getRelevantResourceIds(application, environment, artifactReference) } returns emptyList() + + subject.cancelTasksForPin(application, pin, user) + + coVerify(exactly = 0) { taskLauncher.cancelTasks(any(), user) } + } + + @Test + fun `veto - cancels no tasks when there are no relevant resources`() { + every { subject.getRelevantResourceIds(application, environment, artifactReference) } returns emptyList() + + subject.cancelTasksForVeto(application, veto, user) + + coVerify(exactly = 0) { taskLauncher.cancelTasks(any(), user) } + } + + @Test + fun `pin - cancels running task for other version`() { + every { subject.getRelevantResourceIds(application, environment, artifactReference) } returns listOf("1") + + subject.cancelTasksForPin(application, pin, user) + + coVerify(exactly = 1) { taskLauncher.cancelTasks(listOf("task1"), user) } + } + + @Test + fun `veto - cancels running task for vetoed version`() { + every { subject.getRelevantResourceIds(application, environment, artifactReference) } returns listOf("1") + + subject.cancelTasksForVeto(application, veto, user) + + coVerify(exactly = 1) { taskLauncher.cancelTasks(listOf("task1"), user) } + } + +} diff --git 
a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/environments/DependentEnvironmentFinderTests.kt b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/environments/DependentEnvironmentFinderTests.kt new file mode 100644 index 0000000000..2636c4a004 --- /dev/null +++ b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/environments/DependentEnvironmentFinderTests.kt @@ -0,0 +1,172 @@ +package com.netflix.spinnaker.keel.environments + +import com.netflix.spinnaker.keel.api.DeliveryConfig +import com.netflix.spinnaker.keel.api.Environment +import com.netflix.spinnaker.keel.api.Moniker +import com.netflix.spinnaker.keel.api.SubnetAwareLocations +import com.netflix.spinnaker.keel.api.SubnetAwareRegionSpec +import com.netflix.spinnaker.keel.api.ec2.ClusterSpec +import com.netflix.spinnaker.keel.api.ec2.EC2_CLUSTER_V1_1 +import com.netflix.spinnaker.keel.api.ec2.EC2_SECURITY_GROUP_V1 +import com.netflix.spinnaker.keel.api.ec2.SecurityGroupSpec +import com.netflix.spinnaker.keel.api.toSimpleLocations +import com.netflix.spinnaker.keel.core.api.DependsOnConstraint +import com.netflix.spinnaker.keel.events.ResourceState +import com.netflix.spinnaker.keel.persistence.DeliveryConfigRepository +import com.netflix.spinnaker.keel.test.resource +import io.mockk.every +import io.mockk.mockk +import org.junit.jupiter.api.BeforeEach +import org.junit.jupiter.api.DynamicTest +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.TestFactory +import strikt.api.expectThat +import strikt.assertions.containsKeys +import strikt.assertions.isEmpty +import strikt.assertions.isEqualTo +import strikt.assertions.single + +internal class DependentEnvironmentFinderTests { + + val locations = SubnetAwareLocations( + account = "test", + subnet = "internal", + regions = setOf( + SubnetAwareRegionSpec(name = "us-west-2") + ) + ) + + val moniker = Moniker( + app = "fnord", + stack = "test" + ) + + val testEnvironment = Environment( + name = "test", + resources = setOf( + resource( + kind = EC2_CLUSTER_V1_1.kind, + spec = ClusterSpec( + moniker = moniker, + locations = locations + ) + ), + resource( + kind = EC2_SECURITY_GROUP_V1.kind, + spec = SecurityGroupSpec( + moniker = moniker, + locations = locations.toSimpleLocations(), + description = "a security group" + ) + ) + ) + ) + + val mainEnvironment = Environment( + name = "main", + constraints = setOf( + DependsOnConstraint( + environment = "test" + ) + ), + resources = setOf( + resource( + kind = EC2_CLUSTER_V1_1.kind, + spec = ClusterSpec( + moniker = moniker.copy(stack = "main"), + locations = locations.copy(account = "prod") + ) + ), + resource( + kind = EC2_SECURITY_GROUP_V1.kind, + spec = SecurityGroupSpec( + moniker = moniker.copy(stack = "main"), + locations = locations.copy(account = "prod").toSimpleLocations(), + description = "a security group" + ) + ) + ) + ) + + val disconnectedEnvironment = Environment( + name = "disconnected", + resources = setOf( + resource( + kind = EC2_CLUSTER_V1_1.kind, + spec = ClusterSpec( + moniker = moniker.copy(stack = "disconnected"), + locations = locations + ) + ), + resource( + kind = EC2_SECURITY_GROUP_V1.kind, + spec = SecurityGroupSpec( + moniker = moniker.copy(stack = "disconnected"), + locations = locations.toSimpleLocations(), + description = "a security group" + ) + ) + ) + ) + + val deliveryConfig = DeliveryConfig( + application = "fnord", + name = "fnord-manifest", + serviceAccount = "fnord@spinnaker", + environments = setOf(testEnvironment, mainEnvironment, disconnectedEnvironment) + ) + + val 
testCluster = testEnvironment.resources.single { it.spec is ClusterSpec } + val testSecurityGroup = testEnvironment.resources.single { it.spec is SecurityGroupSpec } + val mainCluster = mainEnvironment.resources.single { it.spec is ClusterSpec } + val mainSecurityGroup = mainEnvironment.resources.single { it.spec is SecurityGroupSpec } + + val deliveryConfigRepository: DeliveryConfigRepository = mockk() + val subject = DependentEnvironmentFinder(deliveryConfigRepository) + + @BeforeEach + fun stubEnvironmentFor() { + every { deliveryConfigRepository.environmentFor(testCluster.id) } returns testEnvironment + every { deliveryConfigRepository.environmentFor(testSecurityGroup.id) } returns testEnvironment + every { deliveryConfigRepository.environmentFor(mainCluster.id) } returns mainEnvironment + every { deliveryConfigRepository.environmentFor(mainSecurityGroup.id) } returns mainEnvironment + every { deliveryConfigRepository.deliveryConfigFor(any()) } returns deliveryConfig + } + + @Test + fun `resources of same kind is empty for a resource from an environment with no dependencies`() { + expectThat(subject.resourcesOfSameKindInDependentEnvironments(testCluster)) + .isEmpty() + } + + @TestFactory + fun `resources of same kind includes only the correct types of resources`() = + listOf( + mainCluster to testCluster, + mainSecurityGroup to testSecurityGroup + ).map { (resource, expected) -> + DynamicTest.dynamicTest("returns only ${resource.spec.javaClass.simpleName} resources") { + expectThat(subject.resourcesOfSameKindInDependentEnvironments(resource)) + .single() isEqualTo expected + } + } + + @Test + fun `resource statuses is empty for an environment with no dependencies`() { + expectThat(subject.resourceStatusesInDependentEnvironments(testCluster)) + .isEmpty() + } + + @Test + fun `resource statuses includes all resources from a dependent environment`() { + every { + deliveryConfigRepository.resourceStatusesInEnvironment( + deliveryConfig.name, + testEnvironment.name + ) + } returns testEnvironment.resources.associate { it.id to ResourceState.Ok } + + expectThat(subject.resourceStatusesInDependentEnvironments(mainCluster)) + .containsKeys(testCluster.id, testSecurityGroup.id) + } +} diff --git a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/services/ApplicationServiceTests.kt b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/services/ApplicationServiceTests.kt index eb35ae8dee..1a9b47fc5b 100644 --- a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/services/ApplicationServiceTests.kt +++ b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/services/ApplicationServiceTests.kt @@ -2,6 +2,7 @@ package com.netflix.spinnaker.keel.services import com.netflix.spectator.api.NoopRegistry import com.netflix.spinnaker.config.ArtifactConfig +import com.netflix.spinnaker.keel.actuation.EnvironmentTaskCanceler import com.netflix.spinnaker.keel.api.ArtifactInEnvironmentContext import com.netflix.spinnaker.keel.api.DeliveryConfig import com.netflix.spinnaker.keel.api.Environment @@ -215,6 +216,7 @@ class ApplicationServiceTests : JUnit5Minutests { val spectator = NoopRegistry() val artifactVersionLinks = ArtifactVersionLinks(mockScmInfo(), mockCacheFactory()) + val environmentTaskCanceler: EnvironmentTaskCanceler = mockk(relaxUnitFun = true) // subject val applicationService = ApplicationService( @@ -228,7 +230,8 @@ class ApplicationServiceTests : JUnit5Minutests { clock, spectator, ArtifactConfig(), - artifactVersionLinks + artifactVersionLinks, + environmentTaskCanceler ) val 
buildMetadata = BuildMetadata( diff --git a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/services/ComparableLinksTests.kt b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/services/ComparableLinksTests.kt index 1ea6ebee38..b1444b0a4b 100644 --- a/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/services/ComparableLinksTests.kt +++ b/keel-core/src/test/kotlin/com/netflix/spinnaker/keel/services/ComparableLinksTests.kt @@ -2,6 +2,7 @@ package com.netflix.spinnaker.keel.services import com.netflix.spectator.api.NoopRegistry import com.netflix.spinnaker.config.ArtifactConfig +import com.netflix.spinnaker.keel.actuation.EnvironmentTaskCanceler import com.netflix.spinnaker.keel.api.DeliveryConfig import com.netflix.spinnaker.keel.api.Environment import com.netflix.spinnaker.keel.api.ScmInfo @@ -131,6 +132,8 @@ class ComparableLinksTests : JUnit5Minutests { } val registry = NoopRegistry() + val environmentTaskCanceler: EnvironmentTaskCanceler = mockk(relaxUnitFun = true) + // subject val applicationService = ApplicationService( repository, @@ -143,7 +146,8 @@ class ComparableLinksTests : JUnit5Minutests { clock, registry, ArtifactConfig(), - artifactVersionLinks + artifactVersionLinks, + environmentTaskCanceler ) val buildMetadata = BuildMetadata( diff --git a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ApplicationLoadBalancerSpec.kt b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ApplicationLoadBalancerSpec.kt index c385f87923..edd6593d51 100644 --- a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ApplicationLoadBalancerSpec.kt +++ b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ApplicationLoadBalancerSpec.kt @@ -12,6 +12,8 @@ import com.netflix.spinnaker.keel.api.ec2.LoadBalancerType.APPLICATION import com.netflix.spinnaker.keel.api.schema.Discriminator import com.netflix.spinnaker.keel.api.schema.Optional import java.time.Duration +import java.util.Collections.emptySortedSet +import java.util.SortedSet data class ApplicationLoadBalancerSpec( override val moniker: Moniker, @@ -42,12 +44,21 @@ data class ApplicationLoadBalancerSpec( override.dependencies?.securityGroupNames?.map { Dependency(SECURITY_GROUP, region, it) } ?: emptySet() } + override fun deepRename(suffix: String): ApplicationLoadBalancerSpec { + return copy( + moniker = moniker.withSuffix(suffix), + targetGroups = targetGroups.map { targetGroup -> + targetGroup.copy(name = "${targetGroup.name}-$suffix") + }.toSet() + ) + } + data class Listener( val port: Int, val protocol: String, val certificate: String? = null, val rules: Set = emptySet(), - val defaultActions: Set = emptySet() + val defaultActions: SortedSet = emptySortedSet() ) { init { if (protocol == "HTTPS") { @@ -100,11 +111,13 @@ data class ApplicationLoadBalancerSpec( val targetGroups: Set? 
= null ) - abstract class Action { + abstract class Action : Comparable { @Discriminator abstract val type: String abstract val order: Int + override fun compareTo(other: Action) = order.compareTo(other.order) + data class ForwardAction( override val order: Int, val targetGroupName: String @@ -130,7 +143,7 @@ data class ApplicationLoadBalancerSpec( data class Rule( val priority: String, val conditions: List = emptyList(), - val actions: List, + val actions: SortedSet, val default: Boolean ) diff --git a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ClassicLoadBalancerSpec.kt b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ClassicLoadBalancerSpec.kt index e6e58ddcd5..e6994acd75 100644 --- a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ClassicLoadBalancerSpec.kt +++ b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ClassicLoadBalancerSpec.kt @@ -37,6 +37,9 @@ data class ClassicLoadBalancerSpec( overrides.flatMap { (region, override) -> override.dependencies?.securityGroupNames?.map { Dependency(SECURITY_GROUP, region, it) } ?: emptySet() } + + override fun deepRename(suffix: String): ClassicLoadBalancerSpec = + copy(moniker = moniker.withSuffix(suffix)) } data class ClassicLoadBalancerOverride( diff --git a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ClusterSpec.kt b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ClusterSpec.kt index 80a1295ad2..c439b1cdeb 100644 --- a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ClusterSpec.kt +++ b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/ClusterSpec.kt @@ -9,6 +9,7 @@ import com.netflix.spinnaker.keel.api.DependencyType.TARGET_GROUP import com.netflix.spinnaker.keel.api.Dependent import com.netflix.spinnaker.keel.api.ExcludedFromDiff import com.netflix.spinnaker.keel.api.Locations +import com.netflix.spinnaker.keel.api.ManagedRolloutConfig import com.netflix.spinnaker.keel.api.Moniker import com.netflix.spinnaker.keel.api.RedBlack import com.netflix.spinnaker.keel.api.SubnetAwareLocations @@ -41,13 +42,16 @@ fun ClusterSpec.resolve(): Set = dependencies = resolveDependencies(it.name), health = resolveHealth(it.name), scaling = resolveScaling(it.name), - tags = defaults.tags + overrides[it.name]?.tags, + tags = resolveTags(it.name), artifactName = artifactName, artifactVersion = artifactVersion ) } .toSet() +fun ClusterSpec.resolveTags(region: String? = null) = + defaults.tags + (region?.let { overrides[it] }?.tags ?: emptyMap()) + private fun ClusterSpec.resolveLaunchConfiguration(region: SubnetAwareRegionSpec): LaunchConfiguration { val image = checkNotNull( overrides[region.name]?.launchConfiguration?.image @@ -83,47 +87,63 @@ private fun ClusterSpec.resolveLaunchConfiguration(region: SubnetAwareRegionSpec ?: defaults.launchConfiguration?.instanceMonitoring ?: LaunchConfiguration.DEFAULT_INSTANCE_MONITORING, ramdiskId = overrides[region.name]?.launchConfiguration?.ramdiskId - ?: defaults.launchConfiguration?.ramdiskId + ?: defaults.launchConfiguration?.ramdiskId, + requireIMDSv2 = (overrides[region.name]?.launchConfiguration?.instanceMetadataServiceVersion + ?: defaults.launchConfiguration?.instanceMetadataServiceVersion) == InstanceMetadataServiceVersion.V2 ) } -fun ClusterSpec.resolveCapacity(region: String): Capacity = - overrides[region]?.resolveCapacity() ?: defaults.resolveCapacity() ?: Capacity.DefaultCapacity(1, 1, 1) +fun ClusterSpec.resolveCapacity(region: String? 
= null): Capacity = + when (region) { + null -> defaults.resolveCapacity() ?: Capacity.DefaultCapacity(1, 1, 1) + else -> overrides[region]?.resolveCapacity() ?: defaults.resolveCapacity() ?: Capacity.DefaultCapacity(1, 1, 1) + } fun ServerGroupSpec.resolveCapacity(): Capacity? = - if (capacity == null) { - null - } else if (scaling.hasScalingPolicies()) { - Capacity.AutoScalingCapacity(capacity) - } else - Capacity.DefaultCapacity(capacity) + when { + capacity == null -> null + scaling.hasScalingPolicies() -> Capacity.AutoScalingCapacity(capacity) + else -> Capacity.DefaultCapacity(capacity) + } -private fun ClusterSpec.resolveScaling(region: String): Scaling = +fun ClusterSpec.resolveScaling(region: String? = null): Scaling = Scaling( - suspendedProcesses = defaults.scaling?.suspendedProcesses + overrides[region]?.scaling?.suspendedProcesses, + suspendedProcesses = defaults.scaling?.suspendedProcesses + + (region?.let { overrides[it] }?.scaling?.suspendedProcesses ?: emptySet()), targetTrackingPolicies = defaults.scaling?.targetTrackingPolicies + - overrides[region]?.scaling?.targetTrackingPolicies, - stepScalingPolicies = defaults.scaling?.stepScalingPolicies + overrides[region]?.scaling?.stepScalingPolicies + (region?.let { overrides[it] }?.scaling?.targetTrackingPolicies ?: emptySet()), + stepScalingPolicies = defaults.scaling?.stepScalingPolicies + + (region?.let { overrides[it] }?.scaling?.stepScalingPolicies ?: emptySet()) ) -private fun ClusterSpec.resolveDependencies(region: String): ClusterDependencies = +fun ClusterSpec.resolveDependencies(region: String? = null): ClusterDependencies = ClusterDependencies( - loadBalancerNames = defaults.dependencies?.loadBalancerNames + overrides[region]?.dependencies?.loadBalancerNames, - securityGroupNames = defaults.dependencies?.securityGroupNames + overrides[region]?.dependencies?.securityGroupNames, - targetGroups = defaults.dependencies?.targetGroups + overrides[region]?.dependencies?.targetGroups + loadBalancerNames = defaults.dependencies?.loadBalancerNames + + (region?.let { overrides[it] }?.dependencies?.loadBalancerNames ?: emptySet()), + securityGroupNames = defaults.dependencies?.securityGroupNames + + (region?.let { overrides[it] }?.dependencies?.securityGroupNames ?: emptySet()), + targetGroups = defaults.dependencies?.targetGroups + + (region?.let { overrides[it] }?.dependencies?.targetGroups ?: emptySet()) ) -private fun ClusterSpec.resolveHealth(region: String): Health { +fun ClusterSpec.resolveHealth(region: String? 
= null): Health { val default by lazy { Health() } return Health( - cooldown = overrides[region]?.health?.cooldown ?: defaults.health?.cooldown ?: default.cooldown, - warmup = overrides[region]?.health?.warmup ?: defaults.health?.warmup ?: default.warmup, - healthCheckType = overrides[region]?.health?.healthCheckType ?: defaults.health?.healthCheckType - ?: default.healthCheckType, - enabledMetrics = overrides[region]?.health?.enabledMetrics ?: defaults.health?.enabledMetrics - ?: default.enabledMetrics, - terminationPolicies = overrides[region]?.health?.terminationPolicies - ?: defaults.health?.terminationPolicies ?: default.terminationPolicies + cooldown = region?.let { overrides[it] }?.health?.cooldown + ?: defaults.health?.cooldown + ?: default.cooldown, + warmup = region?.let { overrides[it] }?.health?.warmup + ?: defaults.health?.warmup + ?: default.warmup, + healthCheckType = region?.let { overrides[it] }?.health?.healthCheckType + ?: defaults.health?.healthCheckType + ?: default.healthCheckType, + enabledMetrics = region?.let { overrides[it] }?.health?.enabledMetrics + ?: defaults.health?.enabledMetrics + ?: default.enabledMetrics, + terminationPolicies = region?.let { overrides[it] }?.health?.terminationPolicies + ?: defaults.health?.terminationPolicies + ?: default.terminationPolicies ) } @@ -131,6 +151,7 @@ data class ClusterSpec( override val moniker: Moniker, override val artifactReference: String? = null, val deployWith: ClusterDeployStrategy = RedBlack(), + val managedRollout: ManagedRolloutConfig = ManagedRolloutConfig(), override val locations: SubnetAwareLocations, private val _defaults: ServerGroupSpec, override val overrides: Map = emptyMap(), @@ -149,15 +170,17 @@ data class ClusterSpec( health: HealthSpec? = null, scaling: Scaling? = null, tags: Map? = null, - overrides: Map = emptyMap() + overrides: Map = emptyMap(), + managedRollout: ManagedRolloutConfig = ManagedRolloutConfig() ) : this( moniker, artifactReference, deployWith, + managedRollout, locations, ServerGroupSpec( launchConfiguration, - capacity , + capacity, dependencies, health, scaling, @@ -205,6 +228,9 @@ data class ClusterSpec( deps }.toSet() + override fun deepRename(suffix: String) = + copy(moniker = moniker.withSuffix(suffix)) + data class ServerGroupSpec( val launchConfiguration: LaunchConfigurationSpec? = null, val capacity: CapacitySpec? = null, diff --git a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/InstanceMetadataServiceVersion.kt b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/InstanceMetadataServiceVersion.kt new file mode 100644 index 0000000000..cbec06f99a --- /dev/null +++ b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/InstanceMetadataServiceVersion.kt @@ -0,0 +1,5 @@ +package com.netflix.spinnaker.keel.api.ec2 + +enum class InstanceMetadataServiceVersion { + V1, V2 +} diff --git a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/LaunchConfigurationSpec.kt b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/LaunchConfigurationSpec.kt index af8961c684..e23ad415d1 100644 --- a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/LaunchConfigurationSpec.kt +++ b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/LaunchConfigurationSpec.kt @@ -9,5 +9,6 @@ data class LaunchConfigurationSpec( val iamRole: String? = null, val keyPair: String? = null, val instanceMonitoring: Boolean? = null, - val ramdiskId: String? = null + val ramdiskId: String? 
= null, + val instanceMetadataServiceVersion: InstanceMetadataServiceVersion? = null ) diff --git a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/SecurityGroupSpec.kt b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/SecurityGroupSpec.kt index 95364b917f..e8f89480d2 100644 --- a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/SecurityGroupSpec.kt +++ b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/SecurityGroupSpec.kt @@ -29,6 +29,19 @@ data class SecurityGroupSpec( val overrides: Map = emptyMap() ) : Monikered, Locatable { override val id = "${locations.account}:$moniker" + + override fun deepRename(suffix: String): SecurityGroupSpec { + return copy( + moniker = moniker.withSuffix(suffix), + inboundRules = inboundRules.map { rule -> + if (rule is ReferenceRule && rule.name == moniker.toName()) { + rule.copy(name = "${rule.name}-$suffix") + } else { + rule + } + }.toSet() + ) + } } data class SecurityGroupOverride( diff --git a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/old/ApplicationLoadBalancerV1_1Spec.kt b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/old/ApplicationLoadBalancerV1_1Spec.kt index 2b8144f6b4..6b7d5f2cf7 100644 --- a/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/old/ApplicationLoadBalancerV1_1Spec.kt +++ b/keel-ec2-api/src/main/kotlin/com/netflix/spinnaker/keel/api/ec2/old/ApplicationLoadBalancerV1_1Spec.kt @@ -11,6 +11,8 @@ import com.netflix.spinnaker.keel.api.ec2.LoadBalancerType import com.netflix.spinnaker.keel.api.ec2.LoadBalancerType.APPLICATION import com.netflix.spinnaker.keel.api.schema.Optional import java.time.Duration +import java.util.Collections.emptySortedSet +import java.util.SortedSet data class ApplicationLoadBalancerV1_1Spec( override val moniker: Moniker, @@ -38,7 +40,7 @@ data class ApplicationLoadBalancerV1_1Spec( val protocol: String, val certificateArn: String?, val rules: Set = emptySet(), - val defaultActions: Set = emptySet() + val defaultActions: SortedSet = emptySortedSet() ) data class ApplicationLoadBalancerOverrideV1_1( diff --git a/keel-ec2-plugin/keel-ec2-plugin.gradle b/keel-ec2-plugin/keel-ec2-plugin.gradle index 34ca88131c..a468ba6fa6 100644 --- a/keel-ec2-plugin/keel-ec2-plugin.gradle +++ b/keel-ec2-plugin/keel-ec2-plugin.gradle @@ -2,6 +2,7 @@ dependencies { api(project(":keel-api")) api(project(":keel-ec2-api")) implementation(project(":keel-core")) // TODO: ideally not + implementation(project(":keel-optics")) implementation(project(":keel-clouddriver")) implementation(project(":keel-orca")) implementation(project(":keel-retrofit")) diff --git a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/config/EC2Config.kt b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/config/EC2Config.kt index 361ca9c4eb..5eedd7b253 100644 --- a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/config/EC2Config.kt +++ b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/config/EC2Config.kt @@ -21,19 +21,21 @@ import com.netflix.spinnaker.keel.api.plugins.Resolver import com.netflix.spinnaker.keel.api.support.EventPublisher import com.netflix.spinnaker.keel.clouddriver.CloudDriverCache import com.netflix.spinnaker.keel.clouddriver.CloudDriverService -import com.netflix.spinnaker.keel.clouddriver.ImageService -import com.netflix.spinnaker.keel.ec2.resolvers.ImageResolver +import com.netflix.spinnaker.keel.ec2.resolvers.InstanceMetadataServiceResolver import 
com.netflix.spinnaker.keel.ec2.resource.ApplicationLoadBalancerHandler import com.netflix.spinnaker.keel.ec2.resource.BlockDeviceConfig import com.netflix.spinnaker.keel.ec2.resource.ClassicLoadBalancerHandler import com.netflix.spinnaker.keel.ec2.resource.ClusterHandler import com.netflix.spinnaker.keel.ec2.resource.SecurityGroupHandler +import com.netflix.spinnaker.keel.environments.DependentEnvironmentFinder import com.netflix.spinnaker.keel.igor.artifact.ArtifactService import com.netflix.spinnaker.keel.orca.ClusterExportHelper import com.netflix.spinnaker.keel.orca.OrcaService +import com.netflix.spinnaker.keel.persistence.FeatureRolloutRepository +import org.springframework.beans.factory.getBean import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty -import org.springframework.boot.context.properties.EnableConfigurationProperties +import org.springframework.context.ApplicationContext import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration import java.time.Clock @@ -51,7 +53,7 @@ class EC2Config { normalizers: List>, eventPublisher: EventPublisher, clusterExportHelper: ClusterExportHelper, - blockDeviceConfig : BlockDeviceConfig, + blockDeviceConfig: BlockDeviceConfig, artifactService: ArtifactService ): ClusterHandler = ClusterHandler( @@ -115,4 +117,25 @@ class EC2Config { taskLauncher, normalizers ) + + @Bean + fun ec2InstanceMetadataServiceResolver( + dependentEnvironmentFinder: DependentEnvironmentFinder, + applicationContext: ApplicationContext, + featureRolloutRepository: FeatureRolloutRepository, + eventPublisher: EventPublisher + ): InstanceMetadataServiceResolver { + // This is necessary to avoid a circular bean dependency as Resolver instances (like we're creating here) + // get wired into ResourceHandlers, but here the Resolver needs a capability provided by the ResourceHandler. 
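The two comments in this bean factory compress a subtle point, so here is a rough, self-contained illustration (hypothetical names, not part of the patch) of why the code that follows looks up the ClusterHandler through a lazy delegate and wraps the call in a lambda rather than passing a bound method reference:

class Handler {
    fun current(id: String): String = "state of $id"
}

fun main() {
    var created = false
    val handler by lazy { created = true; Handler() }

    // A bound method reference (handler::current) would have to evaluate `handler` right here,
    // forcing the lazy delegate immediately -- the analogue of resolving the ClusterHandler bean
    // during construction and hitting the circular dependency again.
    // val eager: (String) -> String = handler::current

    // Wrapping the call in a lambda defers the delegate access until the first invocation.
    val deferred: (String) -> String = { id -> handler.current(id) }
    println(created)          // false: nothing has been created yet
    println(deferred("r1"))   // first call runs the lazy initializer
    println(created)          // true
}

The same reasoning motivates the `{ clusterHandler.current(it) }` lambda passed to the resolver in the lines that follow.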
+ val clusterHandler by lazy { applicationContext.getBean() } + + return InstanceMetadataServiceResolver( + dependentEnvironmentFinder, + // although it looks like this could be optimized to clusterHandler::current that will cause the bean to get + // created right away, which will blow up with a circular dependency + { clusterHandler.current(it) }, + featureRolloutRepository, + eventPublisher + ) + } } diff --git a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/_modelConversions.kt b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/_modelConversions.kt index 4b7a66c6dc..0c53f68419 100644 --- a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/_modelConversions.kt +++ b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/_modelConversions.kt @@ -20,7 +20,7 @@ internal fun ApplicationLoadBalancerModel.Rule.toEc2Api(): Rule = Rule( priority = priority, conditions = conditions?.map { it.toEc2Api() } ?: emptyList(), - actions = actions.map { it.toEc2Api() }, + actions = actions.map { it.toEc2Api() }.toSortedSet(), default = default ) diff --git a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/optics/ec2Optics.kt b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/optics/ec2Optics.kt new file mode 100644 index 0000000000..edccab406f --- /dev/null +++ b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/optics/ec2Optics.kt @@ -0,0 +1,60 @@ +package com.netflix.spinnaker.keel.ec2.optics + +import arrow.optics.Lens +import com.netflix.spinnaker.keel.api.Moniker +import com.netflix.spinnaker.keel.api.SubnetAwareLocations +import com.netflix.spinnaker.keel.api.ec2.ClusterSpec +import com.netflix.spinnaker.keel.api.ec2.ClusterSpec.ServerGroupSpec +import com.netflix.spinnaker.keel.api.ec2.InstanceMetadataServiceVersion +import com.netflix.spinnaker.keel.api.ec2.LaunchConfigurationSpec +import com.netflix.spinnaker.keel.optics.monikerStackLens +import com.netflix.spinnaker.keel.optics.subnetAwareLocationsAccountLens + +/** + * Lens for getting/setting [ClusterSpec.moniker]. + */ +val clusterSpecMonikerLens: Lens = Lens( + get = ClusterSpec::moniker, + set = { spec, moniker -> spec.copy(moniker = moniker) } +) + +/** + * Composed lens for getting/setting the [Moniker.stack] of a [ClusterSpec]. + */ +val clusterSpecStackLens = clusterSpecMonikerLens + monikerStackLens + +/** + * Lens for getting/setting [ClusterSpec.locations]. + */ +val clusterSpecLocationsLens: Lens = Lens( + get = ClusterSpec::locations, + set = { spec, locations -> spec.copy(locations = locations) } +) + +/** + * Composed lens for getting/setting the [SubnetAwareLocations.account] of a [ClusterSpec]. 
+ */ +val clusterSpecAccountLens = + clusterSpecLocationsLens + subnetAwareLocationsAccountLens + +val clusterSpecDefaultsLens: Lens = Lens( + get = ClusterSpec::defaults, + set = { spec, defaults -> spec.copy(_defaults = defaults) } +) + +val serverGroupSpecLaunchConfigurationSpecLens: Lens = Lens( + get = ServerGroupSpec::launchConfiguration, + set = { serverGroupSpec, launchConfigurationSpec -> serverGroupSpec.copy(launchConfiguration = launchConfigurationSpec) } +) + +val launchConfigurationSpecInstanceMetadataServiceVersionLens: Lens = + Lens( + get = { it?.instanceMetadataServiceVersion }, + set = { launchConfigurationSpec, instanceMetadataServiceVersion -> + launchConfigurationSpec?.copy( + instanceMetadataServiceVersion = instanceMetadataServiceVersion + ) ?: LaunchConfigurationSpec( + instanceMetadataServiceVersion = instanceMetadataServiceVersion + ) + } + ) diff --git a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/ApplicationLoadBalancerDefaultsResolver.kt b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/ApplicationLoadBalancerDefaultsResolver.kt index c72f9897b2..d8e8a6239a 100644 --- a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/ApplicationLoadBalancerDefaultsResolver.kt +++ b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/ApplicationLoadBalancerDefaultsResolver.kt @@ -7,6 +7,7 @@ import com.netflix.spinnaker.keel.api.ec2.ApplicationLoadBalancerSpec.Action.For import com.netflix.spinnaker.keel.api.ec2.EC2_APPLICATION_LOAD_BALANCER_V1_2 import com.netflix.spinnaker.keel.api.plugins.Resolver import org.springframework.stereotype.Component +import java.util.SortedSet @Component class ApplicationLoadBalancerDefaultsResolver : Resolver { @@ -16,8 +17,8 @@ class ApplicationLoadBalancerDefaultsResolver : Resolver = if (it.defaultActions.isEmpty()) { + sortedSetOf( ForwardAction( order = 1, targetGroupName = resource.spec.targetGroups.first().name diff --git a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/InstanceMetadataServiceResolver.kt b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/InstanceMetadataServiceResolver.kt new file mode 100644 index 0000000000..3780eb6324 --- /dev/null +++ b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/InstanceMetadataServiceResolver.kt @@ -0,0 +1,61 @@ +package com.netflix.spinnaker.keel.ec2.resolvers + +import com.netflix.spinnaker.keel.api.Resource +import com.netflix.spinnaker.keel.api.ec2.ClusterSpec +import com.netflix.spinnaker.keel.api.ec2.EC2_CLUSTER_V1_1 +import com.netflix.spinnaker.keel.api.ec2.InstanceMetadataServiceVersion +import com.netflix.spinnaker.keel.api.ec2.InstanceMetadataServiceVersion.V1 +import com.netflix.spinnaker.keel.api.ec2.InstanceMetadataServiceVersion.V2 +import com.netflix.spinnaker.keel.api.ec2.LaunchConfigurationSpec +import com.netflix.spinnaker.keel.api.ec2.ServerGroup +import com.netflix.spinnaker.keel.api.support.EventPublisher +import com.netflix.spinnaker.keel.ec2.optics.clusterSpecDefaultsLens +import com.netflix.spinnaker.keel.ec2.optics.launchConfigurationSpecInstanceMetadataServiceVersionLens +import com.netflix.spinnaker.keel.ec2.optics.serverGroupSpecLaunchConfigurationSpecLens +import com.netflix.spinnaker.keel.environments.DependentEnvironmentFinder +import com.netflix.spinnaker.keel.optics.resourceSpecLens +import com.netflix.spinnaker.keel.persistence.FeatureRolloutRepository +import 
com.netflix.spinnaker.keel.rollout.RolloutAwareResolver + +/** + * Resolves the [LaunchConfigurationSpec.instanceMetadataServiceVersion] value if it is not explicitly specified. + * + * If the cluster already uses [InstanceMetadataServiceVersion.V2], or the setting has been applied to all clusters in + * dependent environments, and those environments are stable, this resolver will select v2. Otherwise it will select v1. + */ +class InstanceMetadataServiceResolver( + dependentEnvironmentFinder: DependentEnvironmentFinder, + resourceToCurrentState: suspend (Resource) -> Map, + featureRolloutRepository: FeatureRolloutRepository, + eventPublisher: EventPublisher +) : RolloutAwareResolver>( + dependentEnvironmentFinder, + resourceToCurrentState, + featureRolloutRepository, + eventPublisher +) { + override val supportedKind = EC2_CLUSTER_V1_1 + override val featureName = "imdsv2" + + override fun isAppliedTo(actualResource: Map) = + actualResource.values.all { it.launchConfiguration.requireIMDSv2 } + + override fun isExplicitlySpecified(resource: Resource) = + clusterResourceInstanceMetadataServiceVersionLens.get(resource) != null + + override fun activate(resource: Resource) = + clusterResourceInstanceMetadataServiceVersionLens.set(resource, V2) + + override fun deactivate(resource: Resource) = + clusterResourceInstanceMetadataServiceVersionLens.set(resource, V1) + + override val Map.exists: Boolean + get() = isNotEmpty() + + /** + * Composed lens that lets us set the deeply nested [LaunchConfigurationSpec.instanceMetadataServiceVersion] property + * directly on the [Resource]. + */ + private val clusterResourceInstanceMetadataServiceVersionLens = + resourceSpecLens() compose clusterSpecDefaultsLens compose serverGroupSpecLaunchConfigurationSpecLens compose launchConfigurationSpecInstanceMetadataServiceVersionLens +} diff --git a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resource/ApplicationLoadBalancerHandler.kt b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resource/ApplicationLoadBalancerHandler.kt index d0d46b9e0d..10ccfa77b5 100644 --- a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resource/ApplicationLoadBalancerHandler.kt +++ b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resource/ApplicationLoadBalancerHandler.kt @@ -13,8 +13,8 @@ import com.netflix.spinnaker.keel.api.ec2.ApplicationLoadBalancer import com.netflix.spinnaker.keel.api.ec2.ApplicationLoadBalancerSpec import com.netflix.spinnaker.keel.api.ec2.ApplicationLoadBalancerSpec.Action import com.netflix.spinnaker.keel.api.ec2.ApplicationLoadBalancerSpec.ApplicationLoadBalancerOverride -import com.netflix.spinnaker.keel.api.ec2.EC2_CLOUD_PROVIDER import com.netflix.spinnaker.keel.api.ec2.EC2_APPLICATION_LOAD_BALANCER_V1_2 +import com.netflix.spinnaker.keel.api.ec2.EC2_CLOUD_PROVIDER import com.netflix.spinnaker.keel.api.ec2.LoadBalancerDependencies import com.netflix.spinnaker.keel.api.ec2.Location import com.netflix.spinnaker.keel.api.plugins.Resolver @@ -243,7 +243,7 @@ class ApplicationLoadBalancerHandler( ?.let { cloudDriverCache.certificateByArn(it.certificateArn).serverCertificateName }, // TODO: filtering out default rules seems wrong, see TODO in ApplicationLoadBalancerNormalizer rules = l.rules.filter { !it.default }.map { it.toEc2Api() }.toSet(), - defaultActions = l.defaultActions.map { it.toEc2Api() }.toSet() + defaultActions = l.defaultActions.map { it.toEc2Api() }.toSortedSet() ) }.toSet(), dependencies = LoadBalancerDependencies( @@ -306,13 
+306,15 @@ class ApplicationLoadBalancerHandler( mapOf( "port" to it.port, "protocol" to it.protocol, - "rules" to it.rules, - "defaultActions" to it.defaultActions.sortedBy(Action::order).map { action -> + "rules" to it.rules.map { rule -> mapOf( - "type" to action.type, - "order" to action.order, - ) + action.toOrcaRequest() + "priority" to rule.priority, + "conditions" to rule.conditions, + "default" to rule.default, + "actions" to rule.actions.map(Action::toOrcaRequest) + ) }, + "defaultActions" to it.defaultActions.sortedBy(Action::order).map(Action::toOrcaRequest), ).run { it.certificate?.let { certificateName -> this + mapOf( diff --git a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resource/ClusterHandler.kt b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resource/ClusterHandler.kt index 1fd10919f8..6fd9ef95e4 100644 --- a/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resource/ClusterHandler.kt +++ b/keel-ec2-plugin/src/main/kotlin/com/netflix/spinnaker/keel/ec2/resource/ClusterHandler.kt @@ -2,6 +2,9 @@ package com.netflix.spinnaker.keel.ec2.resource import com.fasterxml.jackson.module.kotlin.convertValue import com.netflix.rocket.api.artifact.internal.debian.DebianArtifactParser +import com.netflix.spinnaker.keel.actuation.RolloutLocation +import com.netflix.spinnaker.keel.actuation.RolloutTarget +import com.netflix.spinnaker.keel.api.ClusterDeployStrategy import com.netflix.spinnaker.keel.api.Exportable import com.netflix.spinnaker.keel.api.Moniker import com.netflix.spinnaker.keel.api.NoStrategy @@ -11,7 +14,6 @@ import com.netflix.spinnaker.keel.api.ResourceDiff import com.netflix.spinnaker.keel.api.SubnetAwareLocations import com.netflix.spinnaker.keel.api.SubnetAwareRegionSpec import com.netflix.spinnaker.keel.api.actuation.Job -import com.netflix.spinnaker.keel.api.actuation.Task import com.netflix.spinnaker.keel.api.actuation.TaskLauncher import com.netflix.spinnaker.keel.api.artifacts.ArtifactStatus.UNKNOWN import com.netflix.spinnaker.keel.api.artifacts.DEBIAN @@ -37,6 +39,9 @@ import com.netflix.spinnaker.keel.api.ec2.MetricDimension import com.netflix.spinnaker.keel.api.ec2.PredefinedMetricSpecification import com.netflix.spinnaker.keel.api.ec2.Scaling import com.netflix.spinnaker.keel.api.ec2.ScalingProcess +import com.netflix.spinnaker.keel.api.ec2.ScalingProcess.AddToLoadBalancer +import com.netflix.spinnaker.keel.api.ec2.ScalingProcess.Launch +import com.netflix.spinnaker.keel.api.ec2.ScalingProcess.Terminate import com.netflix.spinnaker.keel.api.ec2.ServerGroup import com.netflix.spinnaker.keel.api.ec2.ServerGroup.ActiveServerGroupImage import com.netflix.spinnaker.keel.api.ec2.ServerGroup.Health @@ -49,6 +54,10 @@ import com.netflix.spinnaker.keel.api.ec2.byRegion import com.netflix.spinnaker.keel.api.ec2.hasScalingPolicies import com.netflix.spinnaker.keel.api.ec2.resolve import com.netflix.spinnaker.keel.api.ec2.resolveCapacity +import com.netflix.spinnaker.keel.api.ec2.resolveDependencies +import com.netflix.spinnaker.keel.api.ec2.resolveHealth +import com.netflix.spinnaker.keel.api.ec2.resolveScaling +import com.netflix.spinnaker.keel.api.ec2.resolveTags import com.netflix.spinnaker.keel.api.plugins.BaseClusterHandler import com.netflix.spinnaker.keel.api.plugins.CurrentImages import com.netflix.spinnaker.keel.api.plugins.ImageInRegion @@ -66,29 +75,29 @@ import com.netflix.spinnaker.keel.clouddriver.model.MetricDimensionModel import 
com.netflix.spinnaker.keel.clouddriver.model.PredefinedMetricSpecificationModel import com.netflix.spinnaker.keel.clouddriver.model.ScalingPolicy import com.netflix.spinnaker.keel.clouddriver.model.StepAdjustmentModel +import com.netflix.spinnaker.keel.clouddriver.model.SuspendedProcess import com.netflix.spinnaker.keel.clouddriver.model.subnet +import com.netflix.spinnaker.keel.clouddriver.model.toActive +import com.netflix.spinnaker.keel.core.api.DEFAULT_SERVICE_ACCOUNT import com.netflix.spinnaker.keel.core.orcaClusterMoniker import com.netflix.spinnaker.keel.core.parseMoniker import com.netflix.spinnaker.keel.core.serverGroup -import com.netflix.spinnaker.keel.diff.toIndividualDiffs +import com.netflix.spinnaker.keel.diff.DefaultResourceDiff import com.netflix.spinnaker.keel.ec2.MissingAppVersionException import com.netflix.spinnaker.keel.ec2.toEc2Api import com.netflix.spinnaker.keel.events.ResourceHealthEvent import com.netflix.spinnaker.keel.exceptions.ActiveServerGroupsException import com.netflix.spinnaker.keel.exceptions.ExportError +import com.netflix.spinnaker.keel.filterNotNullValues import com.netflix.spinnaker.keel.igor.artifact.ArtifactService import com.netflix.spinnaker.keel.orca.ClusterExportHelper import com.netflix.spinnaker.keel.orca.OrcaService -import com.netflix.spinnaker.keel.orca.dependsOn -import com.netflix.spinnaker.keel.orca.restrictedExecutionWindow import com.netflix.spinnaker.keel.orca.toOrcaJobProperties -import com.netflix.spinnaker.keel.orca.waitStage import com.netflix.spinnaker.keel.parseAppVersion import com.netflix.spinnaker.keel.parseAppVersionOrNull import com.netflix.spinnaker.keel.plugin.buildSpecFromDiff import com.netflix.spinnaker.keel.retrofit.isNotFound import com.netflix.spinnaker.keel.serialization.configuredObjectMapper -import kotlinx.coroutines.Deferred import kotlinx.coroutines.async import kotlinx.coroutines.coroutineScope import kotlinx.coroutines.runBlocking @@ -173,220 +182,38 @@ class ClusterHandler( CurrentImages(supportedKind.kind, images, resource.id) } - override suspend fun upsert( - resource: Resource, - resourceDiff: ResourceDiff> - ): List = - coroutineScope { - val diffs = resourceDiff - .toIndividualDiffs() - .filter { diff -> diff.hasChanges() } - - val deferred: MutableList> = mutableListOf() - val modifyDiffs = diffs.filter { it.isCapacityOrAutoScalingOnly() || it.isEnabledOnly() } - val createDiffs = diffs - modifyDiffs - - if (modifyDiffs.isNotEmpty()) { - deferred.addAll( - modifyInPlace(resource, modifyDiffs) - ) - } + override fun ResourceDiff.moniker(): Moniker = + desired.moniker - val version = diffs.first().desired.launchConfiguration.appVersion - ?: throw MissingAppVersionException(resource.id) + override fun ServerGroup.moniker(): Moniker = + moniker - if (resource.spec.deployWith.isStaggered && createDiffs.isNotEmpty()) { - val tasks = upsertStaggered(resource, createDiffs, version) - return@coroutineScope tasks + deferred.map { it.await() } - } - - deferred.addAll( - upsertUnstaggered(resource, createDiffs, version) - ) - - if (createDiffs.isNotEmpty()) { - notifyArtifactDeploying(resource, version) - } + override fun Resource.isStaggeredDeploy(): Boolean = + spec.deployWith.isStaggered - return@coroutineScope deferred.map { it.await() } - } - - private suspend fun modifyInPlace( - resource: Resource, - diffs: List> - ): List> = - coroutineScope { - diffs.mapNotNull { diff -> - val (job, description) = when { - diff.isCapacityOnly() -> listOf(diff.resizeServerGroupJob()) to "Modify capacity of server 
group ${diff.desired.moniker} in " + - "${diff.desired.location.account}/${diff.desired.location.region}" - diff.isAutoScalingOnly() -> diff.modifyScalingPolicyJob() to "Modify auto-scaling of server group ${diff.desired.moniker} in " + - "${diff.desired.location.account}/${diff.desired.location.region}" - diff.isEnabledOnly() -> { - val appVersion = diff.desired.launchConfiguration.appVersion - ?: throw MissingAppVersionException(resource.id) - val job = diff.disableOtherServerGroupJob(resource, appVersion) - listOf(job) to "Disable extra active server group ${job["asgName"]} in " + - "${diff.desired.location.account}/${diff.desired.location.region}" - } - else -> listOf(diff.resizeServerGroupJob()) + diff.modifyScalingPolicyJob(1) to "Modify capacity and auto-scaling of server group ${diff.desired.moniker} in " + - "${diff.desired.location.account}/${diff.desired.location.region}" - } - - if (job.isEmpty()) { - null - } else { - log.info("Modifying server group in-place using task: {}", job) - - async { - taskLauncher.submitJob( - resource = resource, - description = description, - correlationId = "${resource.id}:${diff.desired.location.region}", - stages = job - ) - } - } - } - } + override fun Resource.isManagedRollout(): Boolean = + spec.managedRollout.enabled - private suspend fun upsertUnstaggered( - resource: Resource, - diffs: List>, - version: String, - dependsOn: String? = null - ): List> = - coroutineScope { - diffs.mapNotNull { diff -> - val stages: MutableList> = mutableListOf() - var refId = 0 + override fun Resource.regions(): List = + spec.locations.regions.map { it.name } - if (dependsOn != null) { - stages.add(dependsOn(dependsOn)) - refId++ - } + override fun Resource.moniker(): Moniker = + spec.moniker - stages.add(diff.createServerGroupJob(refId, resource)) - refId++ + override fun getDesiredAccount(diff: ResourceDiff): String = + diff.desired.location.account - if (diff.shouldDeployAndModifyScalingPolicies()) { - stages.addAll(diff.modifyScalingPolicyJob(refId)) - } + override fun Resource.account(): String = + spec.locations.account - if (stages.isEmpty()) { - null - } else { - log.info("Upsert server group using task: {}", stages) - - async { - taskLauncher.submitJob( - resource = resource, - description = "Deploy $version to server group ${diff.desired.moniker} in " + - "${diff.desired.location.account}/${diff.desired.location.region}", - correlationId = "${resource.id}:${diff.desired.location.region}", - stages = stages - ) - } - } - } - } + override fun correlationId(resource: Resource, diff: ResourceDiff): String = + "${resource.id}:${diff.desired.location.region}" - private suspend fun upsertStaggered( - resource: Resource, - diffs: List>, - version: String - ): List = - coroutineScope { - val regionalDiffs = diffs.associateBy { it.desired.location.region } - val tasks: MutableList = mutableListOf() - var priorExecutionId: String? 
= null - val staggeredRegions = resource.spec.deployWith.stagger.map { - it.region - } - .toSet() + override fun ResourceDiff.version(resource: Resource): String = + desired.launchConfiguration.appVersion ?: throw MissingAppVersionException(resource.id) - // If any, these are deployed in-parallel after all regions with a defined stagger - val unstaggeredRegions = regionalDiffs.keys - staggeredRegions - - for (stagger in resource.spec.deployWith.stagger) { - if (!regionalDiffs.containsKey(stagger.region)) { - continue - } - - val diff = regionalDiffs[stagger.region] as ResourceDiff - val stages: MutableList> = mutableListOf() - var refId = 0 - - /** - * Given regions staggered as [A, B, C], this makes the execution of the B - * `createServerGroup` task dependent on the A task, and C dependent on B, - * while preserving the unstaggered behavior of an orca task per region. - */ - if (priorExecutionId != null) { - stages.add(dependsOn(priorExecutionId)) - refId++ - } - - val stage = diff.createServerGroupJob(refId, resource).toMutableMap() - - refId++ - - /** - * If regions are staggered by time windows, add a `restrictedExecutionWindow` - * to the `createServerGroup` stage. - */ - if (stagger.hours != null) { - val hours = stagger.hours!!.split("-").map { it.toInt() } - stage.putAll(restrictedExecutionWindow(hours[0], hours[1])) - } - - stages.add(stage) - - if (diff.shouldDeployAndModifyScalingPolicies()) { - stages.addAll(diff.modifyScalingPolicyJob(refId)) - } - - if (stagger.pauseTime != null) { - stages.add( - waitStage(stagger.pauseTime!!, stages.size) - ) - } - - val deferred = async { - taskLauncher.submitJob( - resource = resource, - description = "Deploy $version to server group ${diff.desired.moniker} in " + - "${diff.desired.location.account}/${diff.desired.location.region}", - correlationId = "${resource.id}:${diff.desired.location.region}", - stages = stages - ) - } - - notifyArtifactDeploying(resource, version) - - val task = deferred.await() - priorExecutionId = task.id - tasks.add(task) - } - - /** - * `ClusterSpec.stagger` doesn't have to define a stagger for all of the regions clusters. - * If a cluster deploys into 4 regions [A, B, C, D] but only defines a stagger for [A, B], - * [C, D] will deploy in parallel after the completion of B and any pauseTime it defines. - */ - if (unstaggeredRegions.isNotEmpty()) { - val unstaggeredDiffs = regionalDiffs - .filter { unstaggeredRegions.contains(it.key) } - .map { it.value } - - tasks.addAll( - upsertUnstaggered(resource, unstaggeredDiffs, version, priorExecutionId) - .map { it.await() } - ) - } - - return@coroutineScope tasks - } + override fun Resource.getDeployWith(): ClusterDeployStrategy = + spec.deployWith override suspend fun export(exportable: Exportable): ClusterSpec { // Get existing infrastructure @@ -558,14 +385,9 @@ class ClusterHandler( } override suspend fun actuationInProgress(resource: Resource) = - resource - .spec - .locations - .regions - .map { it.name } - .any { region -> + generateCorrelationIds(resource).any { correlationId -> orcaService - .getCorrelatedExecutions("${resource.id}:$region") + .getCorrelatedExecutions(correlationId) .isNotEmpty() } @@ -585,13 +407,39 @@ class ClusterHandler( /** * @return `true` if the only changes in the diff are to capacity. 
*/ - private fun ResourceDiff.isCapacityOnly(): Boolean = - current != null && affectedRootPropertyTypes.all { it == Capacity::class.java } + override fun ResourceDiff.isCapacityOnly(): Boolean = + current != null && affectedRootPropertyTypes.all { + it == Capacity::class.java || it == DefaultCapacity::class.java + } + + override fun ResourceDiff.isSuspendPropertiesAndCapacityOnly() = + affectedRootPropertyTypes.all { + it == Scaling::class.java || it == Capacity::class.java || it == DefaultCapacity::class.java + } + && sameSuspendedProcesses(current?.scaling, desired.scaling) + + /** + * Clusters have different scaling config when disabled. + * This function checks if the scaling is the same minus that expected difference. + */ + private fun sameSuspendedProcesses( + current: Scaling?, + desired: Scaling + ): Boolean { + if (current == null){ + return false + } + + val disabledProcesses = setOf(Launch, AddToLoadBalancer, Terminate) + val currentMinusDisabled = current.copy(suspendedProcesses = current.suspendedProcesses - disabledProcesses) + + return !DefaultResourceDiff(desired, currentMinusDisabled).hasChanges() + } /** * @return `true` if the only changes in the diff are to scaling. */ - private fun ResourceDiff.isAutoScalingOnly(): Boolean = + override fun ResourceDiff.isAutoScalingOnly(): Boolean = current != null && affectedRootPropertyTypes.any { it == Scaling::class.java } && affectedRootPropertyTypes.all { it == Capacity::class.java || it == Scaling::class.java } && @@ -607,17 +455,14 @@ class ClusterHandler( affectedRootPropertyNames.all { it == "onlyEnabledServerGroup" } && current!!.onlyEnabledServerGroup != desired.onlyEnabledServerGroup - /** - * @return `true` if [current] doesn't exist and desired includes a scaling policy. - */ - private fun ResourceDiff.shouldDeployAndModifyScalingPolicies(): Boolean = - (current == null && desired.scaling.hasScalingPolicies()) || - (current != null && !isCapacityOrAutoScalingOnly() && hasScalingPolicyDiff()) + override fun ResourceDiff.hasScalingPolicies(): Boolean = + desired.scaling.hasScalingPolicies() + /** * @return `true` if [current] exists and the diff impacts scaling policies or capacity only. */ - private fun ResourceDiff.isCapacityOrAutoScalingOnly(): Boolean = + override fun ResourceDiff.isCapacityOrAutoScalingOnly(): Boolean = current != null && affectedRootPropertyTypes.all { it == Capacity::class.java || it == Scaling::class.java } && current!!.scaling.suspendedProcesses == desired.scaling.suspendedProcesses @@ -625,14 +470,22 @@ class ClusterHandler( /** * @return `true` if [current] exists and the diff includes a scaling policy change. 
*/ - private fun ResourceDiff.hasScalingPolicyDiff(): Boolean = + override fun ResourceDiff.hasScalingPolicyDiff(): Boolean = current != null && affectedRootPropertyTypes.contains(Scaling::class.java) && ( current!!.scaling.targetTrackingPolicies != desired.scaling.targetTrackingPolicies || current!!.scaling.stepScalingPolicies != desired.scaling.stepScalingPolicies ) - private fun ResourceDiff.disableOtherServerGroupJob(resource: Resource, desiredVersion: String): Job { + override suspend fun getServerGroupsByRegion(resource: Resource): Map> = + getExistingServerGroupsByRegion(resource) + .mapValues { regionalList -> + regionalList.value.map { serverGroup: ClouddriverServerGroup -> + serverGroup.toActive(resource.spec.locations.account).toServerGroup() + } + } + + override fun ResourceDiff.disableOtherServerGroupJob(resource: Resource, desiredVersion: String): Job { val current = requireNotNull(current) { "Current server group must not be null when generating a disable job" } @@ -677,8 +530,8 @@ class ClusterHandler( ) } - private fun ResourceDiff.createServerGroupJob(refId: Int, resource: Resource): Job = - createServerGroupJobBase(refId) + resource.spec.deployWith.toOrcaJobProperties("Amazon") + + override fun ResourceDiff.upsertServerGroupJob(resource: Resource, startingRefId: Int, version: String?): Job = + createServerGroupJobBase(startingRefId) + resource.spec.deployWith.toOrcaJobProperties("Amazon") + mapOf("metadata" to mapOf("resource" to resource.id)) private fun ResourceDiff.createServerGroupJobBase(startingRefId: Int = 0): Job = @@ -757,26 +610,159 @@ class ClusterHandler( } } - private fun ResourceDiff.resizeServerGroupJob(): Job { - val current = requireNotNull(current) { - "Current server group must not be null when generating a resize job" - } - return mapOf( + override fun Resource.upsertServerGroupManagedRolloutJob( + diffs: List>, + version: String? + ): Job = + mapOf( "refId" to "1", - "type" to "resizeServerGroup", - "capacity" to mapOf( - "min" to desired.capacity.min, - "max" to desired.capacity.max, - "desired" to desired.capacity.desired + "type" to "managedRollout", + "input" to mapOf( + "selectionStrategy" to spec.managedRollout?.selectionStrategy, + "targets" to spec.generateRolloutTargets(diffs), + "clusterDefinitions" to listOf(toManagedRolloutClusterDefinition(diffs)) ), - "cloudProvider" to EC2_CLOUD_PROVIDER, + "reason" to "Diff detected at ${clock.instant().iso()}", + ) + + // todo eb: individual server group deploy strategy? + // todo eb: scaling policies? + private fun Resource.toManagedRolloutClusterDefinition(diffs: List>) = + with(spec) { + val dependencies = resolveDependencies() + + mutableMapOf( + "application" to moniker.app, + "stack" to moniker.stack, + "freeFormDetails" to moniker.detail, + "loadBalancers" to dependencies.loadBalancerNames, + "targetGroups" to dependencies.targetGroups, + "securityGroups" to dependencies.securityGroupNames, + "targetHealthyDeployPercentage" to 100, // TODO: any reason to do otherwise? + "tags" to resolveTags(), + "useAmiBlockDeviceMappings" to false, // TODO: any reason to do otherwise? + "copySourceCustomBlockDeviceMappings" to false, // TODO: any reason to do otherwise? + "virtualizationType" to "hvm", // TODO: any reason to do otherwise? 
+ "moniker" to moniker.orcaClusterMoniker, + "suspendedProcesses" to resolveScaling().suspendedProcesses, + "reason" to "Diff detected at ${clock.instant().iso()}", + "cloudProvider" to EC2_CLOUD_PROVIDER, + "account" to locations.account, + "subnetType" to diffs.map { it.desired.location.subnet }.first(), //todo eb: why? yes? + "capacity" to resolveCapacity(), + ) + resolveHealth().toMapForOrca() + + mapOf("overrides" to buildOverrides(diffs)) + + spec.deployWith.toOrcaJobProperties("Amazon") + } + + override fun ResourceDiff.rollbackServerGroupJob( + resource: Resource, + rollbackServerGroup: ServerGroup + ): Job = + mutableMapOf( + "rollbackType" to "EXPLICIT", + "rollbackContext" to mapOf( + "rollbackServerGroupName" to current?.moniker?.serverGroup, + "restoreServerGroupName" to rollbackServerGroup.moniker.serverGroup, + "targetHealthyRollbackPercentage" to 100, + "delayBeforeDisableSeconds" to 0 + ), + "targetGroups" to desired.dependencies.targetGroups, + "securityGroups" to desired.dependencies.securityGroupNames, + "platformHealthOnlyShowOverride" to false, + "reason" to "rollin' back", + "type" to "rollbackServerGroup", + "moniker" to current?.moniker?.orcaClusterMoniker, + "region" to desired.location.region, "credentials" to desired.location.account, - "moniker" to current.moniker.orcaClusterMoniker, - "region" to current.location.region, - "serverGroupName" to current.name + "cloudProvider" to EC2_CLOUD_PROVIDER, + "user" to DEFAULT_SERVICE_ACCOUNT ) + + fun Resource.buildOverrides(diffs: List>): Map { + val overrides: MutableMap = spec.overrides.toMutableMap() + val launchConfigOverrides = diffs.generateLaunchConfigOverrides() + val capacityForScalingOverrides = generateCapacityOverridesIfAutoscaling(diffs) + diffs.forEach { diff -> + val region = getDesiredRegion(diff) + val existingOverride: MutableMap = mapper.convertValue(overrides[region] ?: mutableMapOf()) + val availabilityZones = mutableMapOf("availabilityZones" to mutableMapOf(region to diff.desired.location.availabilityZones)) + val regionalLaunchConfig: MutableMap = mapper.convertValue(launchConfigOverrides[region] ?: mutableMapOf()) + val capacity: MutableMap = mapper.convertValue(capacityForScalingOverrides[region] ?: mutableMapOf()) + overrides[region] = existingOverride + availabilityZones + regionalLaunchConfig + capacity + } + return overrides } + private fun Health.toMapForOrca() = + mutableMapOf( + "cooldown" to cooldown.seconds, + "enabledMetrics" to enabledMetrics, + "healthCheckType" to healthCheckType.name, + "healthCheckGracePeriod" to warmup.seconds, + "terminationPolicies" to terminationPolicies.map(TerminationPolicy::name), + ) + + private fun LaunchConfiguration.toMapForOrca(): Map = + mutableMapOf( + "instanceMonitoring" to instanceMonitoring, + "ebsOptimized" to ebsOptimized, + "iamRole" to iamRole, + "amiName" to imageId, + "keyPair" to keyPair, + "instanceType" to instanceType, + "requireIMDSv2" to requireIMDSv2, + ) + + /** + * Creates a map of region to launch config settings so that the ami name is resolved by region. + */ + private fun List>.generateLaunchConfigOverrides(): Map> = + associate { getDesiredRegion(it) to it.desired.launchConfiguration.toMapForOrca() } + + // generates targets only for diff clusters + private fun ClusterSpec.generateRolloutTargets(diffs: List>): List> = + diffs + .map { + mapper.convertValue( + RolloutTarget( + EC2_CLOUD_PROVIDER, + RolloutLocation( + locations.account, + getDesiredRegion(it), + emptyList() // todo eb: should this be availability zones? 
is this only if you want to stagger deployment by availability? + ) + ) + ) + } + + /** + * If the cluster has autoscaling, we need to generate the right desired capacity + * so that we don't drastically downsize it. + * Here we generate the capacity in an override block to hand to the managed rollout stage. + * + * @return an override block keyed by region + */ + private fun Resource.generateCapacityOverridesIfAutoscaling( + diffs: List> + ): Map { + val defaultCapacity = spec.resolveCapacity() + return diffs.associate { diff -> + val capacity: Map? = when(diff.desired.capacity) { + is DefaultCapacity -> null + is AutoScalingCapacity -> mapOf ( + "capacity" to DefaultCapacity( + min = defaultCapacity.min, + max = defaultCapacity.max, + desired = diff.resolveDesiredCapacity() + ) + ) + } + getDesiredRegion(diff) to capacity + }.filterNotNullValues() + } + + /** * For server groups with scaling policies, the [ClusterSpec] will not include a desired value, so we use the higher * of the desired value the server group we're replacing uses, or the min. This means we won't catastrophically down- @@ -790,6 +776,26 @@ class ClusterHandler( is AutoScalingCapacity -> maxOf(current?.capacity?.desired ?: 0, desired.capacity.min) } + override fun ResourceDiff.resizeServerGroupJob(): Job { + val current = requireNotNull(current) { + "Current server group must not be null when generating a resize job" + } + return mapOf( + "refId" to "1", + "type" to "resizeServerGroup", + "capacity" to mapOf( + "min" to desired.capacity.min, + "max" to desired.capacity.max, + "desired" to desired.capacity.desired + ), + "cloudProvider" to EC2_CLOUD_PROVIDER, + "credentials" to desired.location.account, + "moniker" to current.moniker.orcaClusterMoniker, + "region" to current.location.region, + "serverGroupName" to current.name + ) + } + /** * @return list of stages to remove or create scaling policies in-place on the * current serverGroup. * * Scaling policies are treated as immutable by keel once applied. If an existing * policy is modified, it will be deleted and reapplied via a single task. */ - private fun ResourceDiff.modifyScalingPolicyJob(startingRefId: Int = 0): List { + override fun ResourceDiff.modifyScalingPolicyJob(startingRefId: Int): List { var (refId, stages) = toDeletePolicyJob(startingRefId) val newTargetPolicies = when (current) { null -> desired.scaling.targetTrackingPolicies @@ -845,11 +851,12 @@ class ClusterHandler( stages.addAll( policyNamesToRemove .map { + refId++ mapOf( - "refId" to "${refId + 1}", + "refId" to refId.toString(), "requisiteStageRefIds" to when (refId) { - 0 -> emptyList() - else -> listOf("$refId") + 0, 1 -> listOf() + else -> listOf((refId - 1).toString()) }, "type" to "deleteScalingPolicy", "policyName" to it, @@ -858,9 +865,7 @@ class ClusterHandler( "moniker" to current.moniker.orcaClusterMoniker, "region" to current.location.region, "serverGroupName" to current.moniker.serverGroup - ).also { - refId++ - } + ) } ) @@ -1089,7 +1094,8 @@ class ClusterHandler( ?: launchTemplateData!!.monitoring.enabled, // Because launchConfig.ramdiskId can be null, need to do launchTemplateData?. instead of launchTemplateData!!
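The desired-capacity rule described in the KDoc above (keep the larger of the current server group's desired count and the spec's minimum, so an autoscaled, scaled-up cluster is not shrunk on redeploy) can be shown with a small standalone example; the types here are simplified stand-ins, not the Keel classes:

// Standalone sketch of the "don't downsize an autoscaled group" rule.
data class Capacity(val min: Int, val max: Int, val desired: Int)

fun resolveDesired(current: Capacity?, specMin: Int): Int =
    maxOf(current?.desired ?: 0, specMin)

fun main() {
    // A scaled-up group (desired = 7) with a spec minimum of 2 keeps 7 instead of dropping to 2.
    println(resolveDesired(Capacity(min = 2, max = 10, desired = 7), specMin = 2)) // 7
    // A brand new group (no current server group) simply starts at the spec minimum.
    println(resolveDesired(null, specMin = 2)) // 2
}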
- ramdiskId = (launchConfig?.ramdiskId ?: launchTemplateData?.ramDiskId).orNull() + ramdiskId = (launchConfig?.ramdiskId ?: launchTemplateData?.ramDiskId).orNull(), + requireIMDSv2 = launchTemplateData?.metadataOptions?.get("httpTokens") == "required" ), buildInfo = buildInfo?.toEc2Api(), capacity = capacity.let { diff --git a/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/InstanceMetadataServiceResolverTests.kt b/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/InstanceMetadataServiceResolverTests.kt new file mode 100644 index 0000000000..0f4aac8ff9 --- /dev/null +++ b/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resolvers/InstanceMetadataServiceResolverTests.kt @@ -0,0 +1,112 @@ +package com.netflix.spinnaker.keel.ec2.resolvers + +import com.netflix.spinnaker.keel.api.Moniker +import com.netflix.spinnaker.keel.api.Resource +import com.netflix.spinnaker.keel.api.SubnetAwareLocations +import com.netflix.spinnaker.keel.api.SubnetAwareRegionSpec +import com.netflix.spinnaker.keel.api.ec2.ClusterSpec +import com.netflix.spinnaker.keel.api.ec2.EC2_CLUSTER_V1_1 +import com.netflix.spinnaker.keel.api.ec2.InstanceMetadataServiceVersion +import com.netflix.spinnaker.keel.api.ec2.InstanceMetadataServiceVersion.V1 +import com.netflix.spinnaker.keel.api.ec2.InstanceMetadataServiceVersion.V2 +import com.netflix.spinnaker.keel.api.ec2.LaunchConfigurationSpec +import com.netflix.spinnaker.keel.api.ec2.Location +import com.netflix.spinnaker.keel.api.ec2.ServerGroup +import com.netflix.spinnaker.keel.api.ec2.ServerGroup.LaunchConfiguration +import com.netflix.spinnaker.keel.api.support.EventPublisher +import com.netflix.spinnaker.keel.ec2.optics.clusterSpecAccountLens +import com.netflix.spinnaker.keel.ec2.optics.clusterSpecStackLens +import com.netflix.spinnaker.keel.environments.DependentEnvironmentFinder +import com.netflix.spinnaker.keel.persistence.FeatureRolloutRepository +import com.netflix.spinnaker.keel.rollout.RolloutAwareResolverTests +import strikt.api.* +import strikt.assertions.* + +internal class InstanceMetadataServiceResolverTests : + RolloutAwareResolverTests, InstanceMetadataServiceResolver>() { + + override fun createResolver( + dependentEnvironmentFinder: DependentEnvironmentFinder, + resourceToCurrentState: suspend (Resource) -> Map, + featureRolloutRepository: FeatureRolloutRepository, + eventPublisher: EventPublisher + ) = InstanceMetadataServiceResolver( + dependentEnvironmentFinder, + resourceToCurrentState, + featureRolloutRepository, + eventPublisher + ) + + override val kind = EC2_CLUSTER_V1_1.kind + + override val spec = ClusterSpec( + moniker = Moniker( + app = "fnord" + ), + locations = SubnetAwareLocations( + account = "prod", + subnet = "internal", + regions = setOf( + SubnetAwareRegionSpec(name = "us-west-2") + ) + ) + ) + + override val previousEnvironmentSpec = + clusterSpecAccountLens.set(clusterSpecStackLens.set(spec, "test"), "test") + + override val nonExistentResolvedResource = emptyMap() + + override fun ClusterSpec.withFeatureApplied() = withInstanceMetadataServiceVersion(V2) + + override fun ClusterSpec.withFeatureNotApplied() = withInstanceMetadataServiceVersion(V1) + + override fun Assertion.Builder>.featureIsApplied() = + apply { + instanceMetadataServiceVersion isEqualTo V2 + } + + override fun Assertion.Builder>.featureIsNotApplied() = + apply { + instanceMetadataServiceVersion isEqualTo V1 + } + + private val Assertion.Builder>.instanceMetadataServiceVersion: Assertion.Builder + get() = 
get(Resource::spec) + .get(ClusterSpec::defaults) + .get(ClusterSpec.ServerGroupSpec::launchConfiguration) + .isNotNull() + .get(LaunchConfigurationSpec::instanceMetadataServiceVersion) + + private fun ClusterSpec.withInstanceMetadataServiceVersion(version: InstanceMetadataServiceVersion?) = + copy( + _defaults = defaults.copy( + launchConfiguration = LaunchConfigurationSpec( + instanceMetadataServiceVersion = version + ) + ) + ) + + override fun ClusterSpec.toResolvedType(featureActive: Boolean) = + locations.regions.map(SubnetAwareRegionSpec::name).associateWith { region -> + ServerGroup( + name = "${moniker}-v001", + location = Location( + locations.account, + region, + locations.vpc!!, + locations.subnet!!, + "abc".map { "${region}$it" }.toSet() + ), + launchConfiguration = LaunchConfiguration( + imageId = "ami-001", + appVersion = "$application-v001", + baseImageName = "bionic-v001", + instanceType = "m5.xl", + iamRole = "${application}Role", + keyPair = "${application}KeyPair", + requireIMDSv2 = featureActive + ) + ) + } +} diff --git a/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/ClusterHandlerTests.kt b/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/ClusterHandlerTests.kt index 9dea5cd95d..bbb1f6a03b 100644 --- a/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/ClusterHandlerTests.kt +++ b/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/ClusterHandlerTests.kt @@ -40,6 +40,7 @@ import com.netflix.spinnaker.keel.clouddriver.model.Network import com.netflix.spinnaker.keel.clouddriver.model.SecurityGroupSummary import com.netflix.spinnaker.keel.clouddriver.model.ServerGroupCollection import com.netflix.spinnaker.keel.clouddriver.model.Subnet +import com.netflix.spinnaker.keel.core.orcaClusterMoniker import com.netflix.spinnaker.keel.diff.DefaultResourceDiff import com.netflix.spinnaker.keel.igor.artifact.ArtifactService import com.netflix.spinnaker.keel.model.OrchestrationRequest @@ -71,7 +72,9 @@ import io.mockk.coVerify as verify @Suppress("MemberVisibilityCanBePrivate") internal class ClusterHandlerTests : JUnit5Minutests { - val cloudDriverService = mockk() + val cloudDriverService = mockk() { + every { listServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection("test", emptySet()) + } val cloudDriverCache = mockk() val orcaService = mockk() val normalizers = emptyList>() @@ -263,6 +266,8 @@ internal class ClusterHandlerTests : JUnit5Minutests { every { springEnv.getProperty("keel.plugins.ec2.volumes.account-overrides.test.volume-type", String::class.java) } returns null + + every { cloudDriverService.listServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection("test", emptySet()) } after { @@ -341,6 +346,7 @@ internal class ClusterHandlerTests : JUnit5Minutests { } test("annealing a diff creates staggered server groups with scaling policies upserted in the same orchestration") { + every { cloudDriverService.listServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection("test", emptySet()) val slot = slot() every { orcaService.orchestrate(resource.serviceAccount, capture(slot)) } answers { TaskRefResponse(ULID().nextULID()) } @@ -1139,13 +1145,13 @@ internal class ClusterHandlerTests : JUnit5Minutests { mapOf( "type" to "destroyServerGroup", "asgName" to it.name, - "moniker" to it.moniker, + "moniker" to it.moniker.orcaClusterMoniker, "serverGroupName" to it.name, "region" to it.region, "credentials" to 
allServerGroups.accountName, "cloudProvider" to "aws", "user" to resource.serviceAccount, - "completeOtherBranchesThenFail" to false, + "completeOtherBranchesThenFail" to true, "continuePipeline" to false, "failPipeline" to false, ) diff --git a/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/Ec2BaseClusterHandlerTests.kt b/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/Ec2BaseClusterHandlerTests.kt index a07b204ec1..29df543843 100644 --- a/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/Ec2BaseClusterHandlerTests.kt +++ b/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/Ec2BaseClusterHandlerTests.kt @@ -1,8 +1,13 @@ package com.netflix.spinnaker.keel.ec2.resource +import com.netflix.spinnaker.keel.api.Highlander +import com.netflix.spinnaker.keel.api.ManagedRolloutConfig import com.netflix.spinnaker.keel.api.Moniker +import com.netflix.spinnaker.keel.api.RedBlack import com.netflix.spinnaker.keel.api.Resource import com.netflix.spinnaker.keel.api.ResourceDiff +import com.netflix.spinnaker.keel.api.SelectionStrategy +import com.netflix.spinnaker.keel.api.StaggeredRegion import com.netflix.spinnaker.keel.api.SubnetAwareLocations import com.netflix.spinnaker.keel.api.SubnetAwareRegionSpec import com.netflix.spinnaker.keel.api.actuation.TaskLauncher @@ -19,25 +24,37 @@ import com.netflix.spinnaker.keel.api.plugins.Resolver import com.netflix.spinnaker.keel.api.support.EventPublisher import com.netflix.spinnaker.keel.clouddriver.CloudDriverCache import com.netflix.spinnaker.keel.clouddriver.CloudDriverService +import com.netflix.spinnaker.keel.core.serverGroup import com.netflix.spinnaker.keel.diff.DefaultResourceDiff import com.netflix.spinnaker.keel.igor.artifact.ArtifactService import com.netflix.spinnaker.keel.orca.ClusterExportHelper import com.netflix.spinnaker.keel.orca.OrcaService import io.mockk.mockk import io.mockk.spyk +import org.springframework.core.env.Environment import java.time.Clock +import java.time.Duration class Ec2BaseClusterHandlerTests : BaseClusterHandlerTests() { private val cloudDriverService: CloudDriverService = mockk() private val cloudDriverCache: CloudDriverCache = mockk() private val orcaService: OrcaService = mockk() - private val taskLauncher: TaskLauncher = mockk() private val clusterExportHelper: ClusterExportHelper = mockk() - private val blockDeviceConfig : BlockDeviceConfig = mockk() + private val springEnv: Environment = mockk(relaxed = true) + private val blockDeviceConfig : BlockDeviceConfig = BlockDeviceConfig(springEnv, VolumeDefaultConfiguration()) val artifactService = mockk() val metadata = mapOf("id" to "1234", "application" to "waffles", "serviceAccount" to "me@you.com" ) + val launchConfigurationSpec = LaunchConfigurationSpec( + image = VirtualMachineImage("id-1", "my-app-1.2.3", "base-1"), + instanceType = "m3.xl", + keyPair = "keypair", + ebsOptimized = false, + instanceMonitoring = false, + ramdiskId = "1" + ) + val baseSpec = ClusterSpec( moniker = Moniker("waffles"), artifactReference = "my-artfact", @@ -47,18 +64,18 @@ class Ec2BaseClusterHandlerTests : BaseClusterHandlerTests>, clock: Clock, eventPublisher: EventPublisher): ClusterHandler = + override fun createSpyHandler( + resolvers: List>, + clock: Clock, + eventPublisher: EventPublisher, + taskLauncher: TaskLauncher, + ): ClusterHandler = spyk(ClusterHandler( cloudDriverService = cloudDriverService, cloudDriverCache = cloudDriverCache, @@ -98,22 +115,126 @@ class 
Ec2BaseClusterHandlerTests : BaseClusterHandlerTests> { - val currentServerGroups = getSingleRegionCluster().spec.resolve() + override fun getMultiRegionStaggeredDeployCluster(): Resource { + val spec = baseSpec.copy( + locations = SubnetAwareLocations( + account = "account", + regions = setOf(SubnetAwareRegionSpec("east"), SubnetAwareRegionSpec("west")), + subnet = "subnet-1" + ), + deployWith = RedBlack( + stagger = listOf( + StaggeredRegion( + region = "east", + pauseTime = Duration.ofMinutes(1) + ) + ) + ) + ) + return Resource( + kind = EC2_CLUSTER_V1_1.kind, + metadata = metadata, + spec = spec + ) + } + + override fun getMultiRegionManagedRolloutCluster(): Resource { + val spec = baseSpec.copy( + locations = SubnetAwareLocations( + account = "account", + regions = setOf(SubnetAwareRegionSpec("east"), SubnetAwareRegionSpec("west")), + subnet = "subnet-1" + ), + managedRollout = ManagedRolloutConfig(enabled = true, selectionStrategy = SelectionStrategy.ALPHABETICAL) + ) + return Resource( + kind = EC2_CLUSTER_V1_1.kind, + metadata = metadata, + spec = spec + ) + } + + override fun getDiffInMoreThanEnabled(resource: Resource): ResourceDiff> { + val currentServerGroups = resource.spec.resolve() .byRegion() - val desiredServerGroups = getSingleRegionCluster().spec.resolve() + val desiredServerGroups = resource.spec.resolve() .map { it.withDoubleCapacity().withManyEnabled() }.byRegion() return DefaultResourceDiff(desiredServerGroups, currentServerGroups) } - override fun getDiffOnlyInEnabled(): ResourceDiff> { - val currentServerGroups = getSingleRegionCluster().spec.resolve() + override fun getDiffOnlyInEnabled(resource: Resource): ResourceDiff> { + val currentServerGroups = resource.spec.resolve() .byRegion() - val desiredServerGroups = getSingleRegionCluster().spec.resolve() + val desiredServerGroups = resource.spec.resolve() .map { it.withManyEnabled() }.byRegion() return DefaultResourceDiff(desiredServerGroups, currentServerGroups) } + override fun getDiffInCapacity(resource: Resource): ResourceDiff> { + val current = resource.spec.resolve().byRegion() + val desired = resource.spec.resolve().map { it.withDoubleCapacity() }.byRegion() + return DefaultResourceDiff(desired, current) + } + + override fun getDiffInImage(resource: Resource, version: String?): ResourceDiff> { + val current = resource.spec.resolve().byRegion() + val desired = resource.spec.resolve().map { it.withADifferentImage(version ?: "112233") }.byRegion() + return DefaultResourceDiff(desired, current) + } + + override fun getCreateAndModifyDiff(resource: Resource): ResourceDiff> { + val current = resource.spec.resolve().byRegion() + val desired = resource.spec.resolve().map { + when(it.location.region) { + "east" -> it.withADifferentImage("1.2.3") + else -> it.withDoubleCapacity() + } + }.byRegion() + return DefaultResourceDiff(desired, current) + } + + override fun getDiffForRollback( + resource: Resource, + version: String, + currentMoniker: Moniker + ): ResourceDiff> { + val current = resource.spec.resolve().map { it.withMoniker(currentMoniker) }.byRegion() + val desired = resource.spec.resolve().map { it.withADifferentImage(version) }.byRegion() + return DefaultResourceDiff(desired, current) + } + + override fun getDiffForRollbackPlusCapacity( + resource: Resource, + version: String, + currentMoniker: Moniker + ): ResourceDiff> { + val current = resource.spec.resolve().map { it.withMoniker(currentMoniker) }.byRegion() + val desired = resource.spec.resolve().map { it.withADifferentImage(version).withZeroCapacity() 
}.byRegion() + return DefaultResourceDiff(desired, current) + } + + override fun getRollbackServerGroupsByRegion( + resource: Resource, + version: String, + rollbackMoniker: Moniker + ): Map> = + resource + .spec + .resolve() + .map { it.withADifferentImage(version).withMoniker(rollbackMoniker) } + .associate { it.location.region to listOf(it) } + + override fun getRollbackServerGroupsByRegionZeroCapacity( + resource: Resource, + version: String, + rollbackMoniker: Moniker + ): Map> = + resource + .spec + .resolve() + .map { it.withADifferentImage(version).withMoniker(rollbackMoniker).withZeroCapacity() } + .associate { it.location.region to listOf(it) } + private fun ServerGroup.withDoubleCapacity(): ServerGroup = copy( capacity = Capacity.DefaultCapacity( @@ -123,8 +244,26 @@ class Ec2BaseClusterHandlerTests : BaseClusterHandlerTests) : ServerGroup { + fun serverGroup(currentState: Map): ServerGroup { expectThat(currentState).hasSize(1) return currentState.values.iterator().next() } - fun clusterSpec(vpc: Network, subnet: Subnet, ramdiskId: String?) = + fun clusterSpec( + vpc: Network, + subnet: Subnet, + ramdiskId: String?, + instanceMetadataServiceVersion: InstanceMetadataServiceVersion? + ) = ClusterSpec( moniker = Moniker(app = "keel", stack = "test"), locations = SubnetAwareLocations( @@ -87,12 +122,15 @@ internal class LaunchConfigTests { StaggeredRegion(region = vpc.region, hours = "16-02") ) ), - _defaults = serverGroupSpec(ramdiskId) + _defaults = serverGroupSpec(ramdiskId, instanceMetadataServiceVersion) ) - fun serverGroupSpec(ramdiskId: String?) = + fun serverGroupSpec(ramdiskId: String?, instanceMetadataServiceVersion: InstanceMetadataServiceVersion?) = ClusterSpec.ServerGroupSpec( - launchConfiguration = launchConfigurationSpec(ramdiskId=ramdiskId), + launchConfiguration = launchConfigurationSpec( + ramdiskId = ramdiskId, + instanceMetadataServiceVersion = instanceMetadataServiceVersion + ), capacity = CapacitySpec(1, 6), scaling = Scaling( targetTrackingPolicies = setOf( @@ -114,7 +152,7 @@ internal class LaunchConfigTests { ) ) - fun launchConfigurationSpec(ramdiskId : String?) = + fun launchConfigurationSpec(ramdiskId: String?, instanceMetadataServiceVersion: InstanceMetadataServiceVersion?) = LaunchConfigurationSpec( image = VirtualMachineImage( id = "ami-123543254134", @@ -126,7 +164,8 @@ internal class LaunchConfigTests { iamRole = ServerGroup.LaunchConfiguration.defaultIamRoleFor("keel"), keyPair = "nf-keypair-test-fake", instanceMonitoring = false, - ramdiskId = ramdiskId + ramdiskId = ramdiskId, + instanceMetadataServiceVersion = instanceMetadataServiceVersion ) val sg1 = SecurityGroupSummary("keel", "sg-325234532", "vpc-1") @@ -157,8 +196,12 @@ internal class LaunchConfigTests { artifactService ) - fun setup(ramdiskId : String?, launchInfo: LaunchInfo) { - spec = clusterSpec(vpc, subnet, ramdiskId) + fun setup( + ramdiskId: String? = null, + launchInfo: LaunchInfo = LAUNCH_TEMPLATE, + instanceMetadataServiceVersion: InstanceMetadataServiceVersion? 
= null + ) { + spec = clusterSpec(vpc, subnet, ramdiskId, instanceMetadataServiceVersion) resource = resource( kind = EC2_CLUSTER_V1_1.kind, spec = spec @@ -166,10 +209,11 @@ internal class LaunchConfigTests { val serverGroup = spec.resolve().first() val activeServerGroupResponse = serverGroup.toCloudDriverResponse( - vpc=vpc, - subnets=listOf(subnet), - securityGroups=listOf(sg1, sg2), - launchInfo=launchInfo) + vpc = vpc, + subnets = listOf(subnet), + securityGroups = listOf(sg1, sg2), + launchInfo = launchInfo + ) with(cloudDriverCache) { every { networkBy(vpc.id) } returns vpc diff --git a/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/TestUtils.kt b/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/TestUtils.kt index c36a996d40..3903321f01 100644 --- a/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/TestUtils.kt +++ b/keel-ec2-plugin/src/test/kotlin/com/netflix/spinnaker/keel/ec2/resource/TestUtils.kt @@ -66,14 +66,17 @@ fun ServerGroup.toCloudDriverResponse( ramdiskId=launchConfiguration.ramdiskId) } LaunchInfo.LAUNCH_TEMPLATE -> { - launchTemplate = LaunchTemplate(LaunchTemplateData( - ebsOptimized=launchConfiguration.ebsOptimized, - iamInstanceProfile= IamInstanceProfile(launchConfiguration.iamRole), - imageId=launchConfiguration.imageId, - instanceType=launchConfiguration.instanceType, - keyName=launchConfiguration.keyPair, - monitoring=InstanceMonitoring(launchConfiguration.instanceMonitoring), - ramDiskId=launchConfiguration.ramdiskId)) + launchTemplate = LaunchTemplate( + LaunchTemplateData( + ebsOptimized = launchConfiguration.ebsOptimized, + iamInstanceProfile = IamInstanceProfile(launchConfiguration.iamRole), + imageId = launchConfiguration.imageId, + instanceType = launchConfiguration.instanceType, + keyName = launchConfiguration.keyPair, + monitoring = InstanceMonitoring(launchConfiguration.instanceMonitoring), + ramDiskId = launchConfiguration.ramdiskId + ) + ) } } ActiveServerGroup( diff --git a/keel-echo/src/main/kotlin/com/netflix/spinnaker/config/EchoConfiguration.kt b/keel-echo/src/main/kotlin/com/netflix/spinnaker/config/EchoConfiguration.kt index 26247fe3de..9161476c4b 100644 --- a/keel-echo/src/main/kotlin/com/netflix/spinnaker/config/EchoConfiguration.kt +++ b/keel-echo/src/main/kotlin/com/netflix/spinnaker/config/EchoConfiguration.kt @@ -11,7 +11,6 @@ import org.springframework.beans.factory.annotation.Value import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration import retrofit2.Retrofit -import retrofit2.converter.jackson.JacksonConverterFactory @Configuration class EchoConfiguration { diff --git a/keel-echo/src/main/kotlin/com/netflix/spinnaker/keel/echo/ManualJudgementNotifier.kt b/keel-echo/src/main/kotlin/com/netflix/spinnaker/keel/echo/ManualJudgementNotifier.kt index fe70607479..e70eab208d 100644 --- a/keel-echo/src/main/kotlin/com/netflix/spinnaker/keel/echo/ManualJudgementNotifier.kt +++ b/keel-echo/src/main/kotlin/com/netflix/spinnaker/keel/echo/ManualJudgementNotifier.kt @@ -12,13 +12,11 @@ import com.netflix.spinnaker.keel.persistence.KeelRepository import kotlinx.coroutines.runBlocking import org.slf4j.LoggerFactory import org.springframework.boot.context.properties.EnableConfigurationProperties -import org.springframework.context.annotation.Configuration import org.springframework.context.event.EventListener -import org.springframework.stereotype.Component import org.springframework.core.env.Environment +import 
org.springframework.stereotype.Component @Component -@Configuration @EnableConfigurationProperties(KeelNotificationConfig::class, BaseUrlConfig::class) /** * Listens for [ConstraintStateChanged] events where the constraint is a [ManualJudgementConstraint] and sends diff --git a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/Front50Cache.kt b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/Front50Cache.kt index b5da290b8a..b66303f6e3 100644 --- a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/Front50Cache.kt +++ b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/Front50Cache.kt @@ -58,9 +58,15 @@ class Front50Cache( /** * @return the [Application] with the given name from the cache. This cache is primed during app startup using * the bulk API in Front50, and later updated/refreshed on an app-by-app basis. + * if [invalidateCache] is true it will first invalidate the cache for the app name. + * This is useful when users update their git repo details. */ - suspend fun applicationByName(name: String): Application = - applicationsByNameCache.get(name.toLowerCase()).await() ?: throw ApplicationNotFound(name) + suspend fun applicationByName(name: String, invalidateCache: Boolean = false): Application { + if (invalidateCache) { + invalidateApplicationByNameCache(name) + } + return applicationsByNameCache.get(name.toLowerCase()).await() ?: throw ApplicationNotFound(name) + } /** * @return the [Application]s matching the given search parameters from the cache. @@ -95,6 +101,10 @@ class Front50Cache( applicationsByNameCache.put(app.name.toLowerCase(), CompletableFuture.supplyAsync { app }) } + private fun invalidateApplicationByNameCache(app: String) { + applicationsByNameCache.synchronous().invalidate(app.toLowerCase()) + } + private fun invalidateSearchParamsCache(app: Application) { with(app) { // We invalidate the cache, as it's easier than updating it diff --git a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/Front50Service.kt b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/Front50Service.kt index 66b106a16d..ebcab75877 100644 --- a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/Front50Service.kt +++ b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/Front50Service.kt @@ -2,7 +2,6 @@ package com.netflix.spinnaker.keel.front50 import com.netflix.spinnaker.keel.core.api.DEFAULT_SERVICE_ACCOUNT import com.netflix.spinnaker.keel.front50.model.Application -import com.netflix.spinnaker.keel.front50.model.Delivery import com.netflix.spinnaker.keel.front50.model.Pipeline import retrofit2.http.Body import retrofit2.http.GET @@ -14,13 +13,6 @@ import retrofit2.http.Query import retrofit2.http.QueryMap interface Front50Service { - - @GET("/deliveries/{id}") - suspend fun deliveryById( - @Path("id") id: String, - @Header("X-SPINNAKER-USER") user: String = DEFAULT_SERVICE_ACCOUNT - ): Delivery - @GET("/v2/applications/{name}") suspend fun applicationByName( @Path("name") name: String, diff --git a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Application.kt b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Application.kt index 2f622fd9fd..39270d82cf 100644 --- a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Application.kt +++ b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Application.kt @@ -11,7 +11,7 @@ import com.fasterxml.jackson.annotation.JsonInclude.Include.NON_NULL 
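// Illustrative call site (hypothetical application name): the invalidateCache flag added to
// Front50Cache.applicationByName above lets callers evict a possibly stale cache entry before the
// lookup, e.g. right after a user updates their git repo details:
//
//   val app = front50Cache.applicationByName("fnord", invalidateCache = true)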
@JsonInclude(NON_NULL) data class Application( val name: String, - val email: String, + val email: String? = null, val dataSources: DataSources? = null, val repoProjectKey: String? = null, val repoSlug: String? = null, diff --git a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Cluster.kt b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Cluster.kt new file mode 100644 index 0000000000..e8231ec042 --- /dev/null +++ b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Cluster.kt @@ -0,0 +1,44 @@ +package com.netflix.spinnaker.keel.front50.model + +import com.fasterxml.jackson.annotation.JsonAlias +import com.fasterxml.jackson.annotation.JsonAnyGetter +import com.fasterxml.jackson.annotation.JsonAnySetter +import com.netflix.spinnaker.keel.api.Moniker + +/** + * A cluster, as represented in a [DeployStage]. + */ +data class Cluster( + val account: String, + val application: String, + val provider: String, + val strategy: String, + val stack: String? = null, + val freeFormDetails: String? = null, + val detail: String? = freeFormDetails, + val availabilityZones: Map> = emptyMap(), + @JsonAlias("region") + private val _region: String? = null +) { + val moniker: Moniker + get() = Moniker( + app = application, + stack = if (stack.isNullOrEmpty()) null else stack, + detail = if (detail.isNullOrEmpty()) null else detail + ) + + val name: String + get() = moniker.toName() + + val region: String? + // region info may be available via the availabilityZones or region fields + get() = availabilityZones.keys.firstOrNull() ?: _region + + @get:JsonAnyGetter + val details: MutableMap = mutableMapOf() + + @JsonAnySetter + fun setAttribute(key: String, value: Any) { + details[key] = value + } +} diff --git a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Delivery.kt b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Delivery.kt deleted file mode 100644 index f431db8b5d..0000000000 --- a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Delivery.kt +++ /dev/null @@ -1,21 +0,0 @@ -package com.netflix.spinnaker.keel.front50.model - -import com.fasterxml.jackson.annotation.JsonAnyGetter -import com.fasterxml.jackson.annotation.JsonAnySetter - -data class Delivery( - val id: String, - val application: String, - val updateTs: Long? = null, - val createTs: Long? = null, - val lastModifiedBy: String? = null, - val deliveryArtifacts: List> = emptyList(), - val deliveryEnvironments: List> = emptyList(), - @get:JsonAnyGetter val details: MutableMap = mutableMapOf() -) { - - @JsonAnySetter - fun setAttribute(key: String, value: Any) { - details[key] = value - } -} diff --git a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Pipeline.kt b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Pipeline.kt index 1175782d18..b7c7162f89 100644 --- a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Pipeline.kt +++ b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Pipeline.kt @@ -19,15 +19,53 @@ data class Pipeline( private val _updateTs: Long? = null, val lastModifiedBy: String? = null ) { + /** + * The pipeline stages, in the right order of dependency between them. + */ val stages: List get() = _stages.sortedBy { if (it.requisiteStageRefIds.isEmpty()) "" else it.requisiteStageRefIds.first() } + /** + * List of the pipeline stage types ordered by stage dependencies. 
+ */ + val shape: List + get() = stages.map { stage -> stage.type } + val updateTs: Instant? get() = _updateTs?.let { Instant.ofEpochMilli(it) } val hasParallelStages: Boolean get() = stages.any { it.requisiteStageRefIds.size > 1 } + fun findUpstreamBake(deployStage: DeployStage): BakeStage? { + val i = stages.indexOf(deployStage) + return stages.subList(0, i).filterIsInstance().lastOrNull() + } + + fun findDownstreamDeploys(stage: Stage): List { + val i = stages.indexOf(stage) + return stages.slice(i until stages.size).filterIsInstance() + } + + fun findDeployForCluster(findImageStage: FindImageStage) = + stages + .filterIsInstance() + .find { deploy -> + deploy.clusters.any { cluster -> + findImageStage.cloudProvider == cluster.provider && + findImageStage.cluster == cluster.name && + findImageStage.credentials == cluster.account && + cluster.region in findImageStage.regions + } + } + + fun hasManualJudgment(deployStage: DeployStage) = + try { + stages[stages.indexOf(deployStage) - 1] is ManualJudgmentStage + } catch (e: IndexOutOfBoundsException) { + false + } + override fun equals(other: Any?) = if (other is Pipeline) { other.id == this.id } else { @@ -36,3 +74,17 @@ data class Pipeline( override fun hashCode() = id.hashCode() } + +/** + * Searches the list of pipelines for one that contains a deploy stage matching the cluster described in the given + * [FindImageStage]. Returns a pair of the pipeline and deploy stage, if found. + */ +fun List.findPipelineWithDeployForCluster(findImageStage: FindImageStage): Pair? { + forEach { pipeline -> + val deploy = pipeline.findDeployForCluster(findImageStage) + if (deploy != null) { + return pipeline to deploy + } + } + return null +} diff --git a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Stage.kt b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Stage.kt deleted file mode 100644 index c62e18b83f..0000000000 --- a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Stage.kt +++ /dev/null @@ -1,20 +0,0 @@ -package com.netflix.spinnaker.keel.front50.model - -import com.fasterxml.jackson.annotation.JsonAnyGetter -import com.fasterxml.jackson.annotation.JsonAnySetter - -/** - * A stage in a Spinnaker [Pipeline]. 
- */ -data class Stage( - val type: String, - val name: String, - val refId: String, - val requisiteStageRefIds: List = emptyList(), - @get:JsonAnyGetter val details: MutableMap = mutableMapOf() -) { - @JsonAnySetter - fun setAttribute(key: String, value: Any) { - details[key] = value - } -} diff --git a/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Stages.kt b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Stages.kt new file mode 100644 index 0000000000..5c8a024df3 --- /dev/null +++ b/keel-front50/src/main/kotlin/com/netflix/spinnaker/keel/front50/model/Stages.kt @@ -0,0 +1,129 @@ +package com.netflix.spinnaker.keel.front50.model + +import com.fasterxml.jackson.annotation.JsonAnyGetter +import com.fasterxml.jackson.annotation.JsonAnySetter +import com.fasterxml.jackson.annotation.JsonSubTypes +import com.fasterxml.jackson.annotation.JsonSubTypes.Type +import com.fasterxml.jackson.annotation.JsonTypeInfo +import com.fasterxml.jackson.annotation.JsonTypeInfo.As +import com.fasterxml.jackson.annotation.JsonTypeInfo.Id +import com.netflix.spinnaker.keel.api.artifacts.ArtifactStatus +import com.netflix.spinnaker.keel.api.artifacts.BaseLabel +import com.netflix.spinnaker.keel.api.artifacts.BaseLabel.RELEASE +import com.netflix.spinnaker.keel.api.artifacts.StoreType +import com.netflix.spinnaker.keel.api.artifacts.StoreType.EBS +import com.netflix.spinnaker.keel.api.artifacts.VirtualMachineOptions +import com.netflix.spinnaker.keel.artifacts.DebianArtifact + +/** + * A stage in a Spinnaker [Pipeline]. + */ +@JsonTypeInfo( + use = Id.NAME, + include = As.EXISTING_PROPERTY, + property = "type", + visible = true, // the default is false and hides the property from the deserializer! + defaultImpl = GenericStage::class +) +@JsonSubTypes( + Type(value = BakeStage::class, name = "bake"), + Type(value = DeployStage::class, name = "deploy"), + Type(value = FindImageStage::class, name = "findImage"), + Type(value = FindImageFromTagsStage::class, name = "findImageFromTags"), + Type(value = ManualJudgmentStage::class, name = "manualJudgment") +) +abstract class Stage { + abstract val type: String + abstract val name: String + abstract val refId: String + open val requisiteStageRefIds: List = emptyList() + + @get:JsonAnyGetter + val details: MutableMap = mutableMapOf() + + @JsonAnySetter + fun setAttribute(key: String, value: Any) { + details[key] = value + } +} + +data class GenericStage( + override val name: String, + override val type: String, + override val refId: String, + override val requisiteStageRefIds: List = emptyList() +) : Stage() + +data class BakeStage( + override val name: String, + override val type: String, + override val refId: String, + override val requisiteStageRefIds: List = emptyList(), + val `package`: String, + val baseLabel: BaseLabel = RELEASE, + val baseOs: String, + val regions: Set, + val storeType: StoreType = EBS, + val vmType: String = "hvm", + val cloudProviderType: String = "aws" +) : Stage() { + + val artifact: DebianArtifact + get() = DebianArtifact( + name = `package`, + vmOptions = VirtualMachineOptions( + baseLabel = baseLabel, + baseOs = baseOs, + regions = regions, + storeType = storeType + ), + statuses = try { + setOf(ArtifactStatus.valueOf(baseLabel.name)) + } catch (e: IllegalArgumentException) { + emptySet() + } + ) +} + +data class DeployStage( + override val name: String, + override val type: String, + override val refId: String, + override val requisiteStageRefIds: List = emptyList(), + val clusters: Set +) : 
Stage() + +enum class SelectionStrategy { + LARGEST, + NEWEST, + OLDEST, + FAIL +} + +data class FindImageStage( + override val name: String, + override val type: String, + override val refId: String, + override val requisiteStageRefIds: List = emptyList(), + val cluster: String, + val credentials: String, // account + val onlyEnabled: Boolean, + val selectionStrategy: SelectionStrategy, + val cloudProvider: String, + val regions: Set, + val cloudProviderType: String = cloudProvider +) : Stage() + +data class FindImageFromTagsStage( + override val name: String, + override val type: String, + override val refId: String, + override val requisiteStageRefIds: List = emptyList(), +) : Stage() + +data class ManualJudgmentStage( + override val name: String, + override val type: String, + override val refId: String, + override val requisiteStageRefIds: List = emptyList(), +) : Stage() diff --git a/keel-front50/src/test/kotlin/com/netflix/spinnaker/keel/front50/model/DeliveryTest.kt b/keel-front50/src/test/kotlin/com/netflix/spinnaker/keel/front50/model/DeliveryTest.kt deleted file mode 100644 index 3d7926a343..0000000000 --- a/keel-front50/src/test/kotlin/com/netflix/spinnaker/keel/front50/model/DeliveryTest.kt +++ /dev/null @@ -1,44 +0,0 @@ -package com.netflix.spinnaker.keel.front50.model - -import com.netflix.spinnaker.keel.front50.Front50Service -import com.netflix.spinnaker.keel.retrofit.model.ModelParsingTestSupport - -object DeliveryTest : ModelParsingTestSupport(Front50Service::class.java) { - - override val json = - """ - |{ - | "id": "foo", - | "application": "bar", - | "updateTs": 12345, - | "createTs": 12345, - | "lastModifiedBy": "anonymous", - | "deliveryArtifacts": [ - | { - | "id": "artifact1" - | } - | ], - | "deliveryEnvironments": [ - | { - | "id": "environment1" - | } - | ], - | "otherAttribute1": "bloop", - | "otherAttribute2": "blerp" - |} - """.trimMargin() - - override suspend fun Front50Service.call(): Delivery? 
= - deliveryById("foo") - - override val expected = Delivery( - id = "foo", - application = "bar", - createTs = 12345, - updateTs = 12345, - lastModifiedBy = "anonymous", - deliveryArtifacts = listOf(mapOf("id" to "artifact1")), - deliveryEnvironments = listOf(mapOf("id" to "environment1")), - details = mutableMapOf("otherAttribute1" to "bloop", "otherAttribute2" to "blerp") - ) -} diff --git a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/NotificationEventListener.kt b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/NotificationEventListener.kt index 337d4559f6..b1f53de4bd 100644 --- a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/NotificationEventListener.kt +++ b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/NotificationEventListener.kt @@ -400,7 +400,8 @@ class NotificationEventListener( currentArtifact = currentArtifact, deliveryArtifact = deliveryArtifact, pinnedArtifact = pinnedArtifact, - stateUid = currentState.uid + stateUid = currentState.uid, + config = config ), MANUAL_JUDGMENT_AWAIT, environment.name @@ -424,7 +425,8 @@ class NotificationEventListener( deliveryArtifact = deliveryArtifact, pinnedArtifact = pinnedArtifact, author = slackDetail.author, - display = slackDetail.display ?: NORMAL + display = slackDetail.display ?: NORMAL, + config = config ), MANUAL_JUDGMENT_UPDATE, environment.name diff --git a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/SlackNotificationEvent.kt b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/SlackNotificationEvent.kt index 7a239cc903..154a6ae92d 100644 --- a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/SlackNotificationEvent.kt +++ b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/SlackNotificationEvent.kt @@ -84,7 +84,8 @@ data class SlackManualJudgmentNotification( val targetEnvironment: String, val deliveryArtifact: DeliveryArtifact, val stateUid: UID?, - override val application: String + override val application: String, + val config: DeliveryConfig ) : SlackNotificationEvent(time, application) data class SlackManualJudgmentUpdateNotification( @@ -99,9 +100,10 @@ data class SlackManualJudgmentUpdateNotification( val deliveryArtifact: DeliveryArtifact, val author: String? 
= null, val display: NotificationDisplay = NORMAL, + val config: DeliveryConfig, override val application: String, - override val time: Instant, -) : SlackNotificationEvent(time, application) + override val time: Instant + ) : SlackNotificationEvent(time, application) data class SlackVerificationCompletedNotification( val artifact: PublishedArtifact, diff --git a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/GitDataGenerator.kt b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/GitDataGenerator.kt index 0d1eba47a5..ad83ea9794 100644 --- a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/GitDataGenerator.kt +++ b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/GitDataGenerator.kt @@ -63,8 +63,14 @@ class GitDataGenerator( fun toCode(env: String) = "`${env.toLowerCase()}`" - fun linkedTitleSnippet(artifact: PublishedArtifact, application: String): String { + fun linkedTitleSnippet(artifact: PublishedArtifact, + application: String, + moreThanOneArtifact: Boolean? = false): String { var text = "${linkedApp(application)} build <${generateArtifactUrl(application, artifact.reference, artifact.version)}|#${artifact.buildNumber ?: artifact.version}>" + + if (moreThanOneArtifact == true) { + text+= " [${artifact.reference} _(${artifact.type})_]" + } artifact.gitMetadata?.let { text += " " + getAuthor(it) } return text } @@ -75,9 +81,15 @@ class GitDataGenerator( return text } - fun notificationBodyWithEnv(layoutBlockDsl: LayoutBlockDsl, emoji: String, application: String, artifact: PublishedArtifact, descriptiveText: String, env: String, preposition: String = "to") { + fun notificationBodyWithEnv(layoutBlockDsl: LayoutBlockDsl, + emoji: String, application: String, + artifact: PublishedArtifact, + descriptiveText: String, + env: String, + preposition: String = "to", + moreThanOneArtifact: Boolean? 
= false) { layoutBlockDsl.section { - markdownText("$emoji *${linkedTitleSnippet(artifact, application)} $descriptiveText $preposition ${toCode(env)}*") + markdownText("$emoji *${linkedTitleSnippet(artifact, application, moreThanOneArtifact)} $descriptiveText $preposition ${toCode(env)}*") } buildCommitSectionWithButton(layoutBlockDsl, artifact.gitMetadata) } diff --git a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/ManualJudgementUpdateHandler.kt b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/ManualJudgementUpdateHandler.kt index 8f07ca387e..61abba2122 100644 --- a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/ManualJudgementUpdateHandler.kt +++ b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/ManualJudgementUpdateHandler.kt @@ -2,6 +2,7 @@ package com.netflix.spinnaker.keel.notifications.slack.handlers import com.netflix.spinnaker.keel.api.NotificationDisplay import com.netflix.spinnaker.keel.api.constraints.ConstraintStatus +import com.netflix.spinnaker.keel.artifacts.ArtifactVersionLinks import com.netflix.spinnaker.keel.notifications.NotificationType.MANUAL_JUDGMENT_UPDATE import com.netflix.spinnaker.keel.notifications.slack.SlackManualJudgmentUpdateNotification import com.netflix.spinnaker.keel.notifications.slack.SlackService @@ -18,8 +19,9 @@ import java.time.Clock class ManualJudgementUpdateHandler( private val slackService: SlackService, private val clock: Clock, - private val gitDataGenerator: GitDataGenerator -): SlackNotificationHandler { + private val gitDataGenerator: GitDataGenerator, + private val artifactVersionLinks: ArtifactVersionLinks +) : SlackNotificationHandler { override val supportedTypes = listOf(MANUAL_JUDGMENT_UPDATE) private val log by lazy { LoggerFactory.getLogger(javaClass) } @@ -31,13 +33,25 @@ class ManualJudgementUpdateHandler( log.debug("Updating manual judgment await notification for application ${notification.application} sent at ${notification.timestamp}") with(notification) { - require(status.complete) { "Manual judgment not in complete state (${status.name}) for application ${notification.application} sent at ${notification.timestamp}"} + require(status.complete) { "Manual judgment not in complete state (${status.name}) for application ${notification.application} sent at ${notification.timestamp}" } val verb = when { status.passes() -> "approved" else -> "rejected" } + //don't calculate compareLink if the user reject the version + val compareLink = if (verb == "approved") { + artifactVersionLinks.generateCompareLink(artifactCandidate, currentArtifact, deliveryArtifact) + } else null + + //flag if there's more than a single artifact (not including preview env), so we would notify the user which artifact it is + val moreThanOneArtifact = config.artifacts.size != 1 && + config.environments.map { + environment -> + deliveryArtifact.isUsedIn(environment) && !environment.isPreview + }.size > 1 + val baseBlocks = ManualJudgmentNotificationHandler.constructMessageWithoutButtons( targetEnvironment, application, @@ -45,13 +59,14 @@ class ManualJudgementUpdateHandler( pinnedArtifact, gitDataGenerator, 0, // clear the num ahead text on resend + moreThanOneArtifact, verb ) val newFooterBlock = withBlocks { context { elements { - markdownText(judgedContext(user, status)) + markdownText(judgedContext(user, status, compareLink)) } } } @@ -63,7 +78,7 @@ class 
ManualJudgementUpdateHandler( } } - fun judgedContext(user: String?, status: ConstraintStatus): String { + fun judgedContext(user: String?, status: ConstraintStatus, compareLink: String?): String { val handle = user?.let { slackService.getUsernameByEmail(user) } val emoji = if (status.passes()) { ":white_check_mark:" @@ -75,7 +90,13 @@ class ManualJudgementUpdateHandler( } else { "reject" } - return "$emoji $handle hit " + - "$action on " + + var text = "$emoji $handle hit $action on " + + if (compareLink != null) { + text += ". <$compareLink|_See deployed code changes_>" + } + + return text } } diff --git a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/ManualJudgmentNotificationHandler.kt b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/ManualJudgmentNotificationHandler.kt index 9db3da1adc..b592fd4ab1 100644 --- a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/ManualJudgmentNotificationHandler.kt +++ b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/ManualJudgmentNotificationHandler.kt @@ -45,7 +45,16 @@ class ManualJudgmentNotificationHandler( ) val compareLink = artifactVersionLinks.generateCompareLink(artifactCandidate, currentArtifact, deliveryArtifact) - val descriptiveBlocks = notification.toBlocks(numVersionsToBePromoted) + //flag if there's more than a single artifact (not including preview env), so we would notify the user which artifact it is + val moreThanOneArtifact = config.artifacts.size != 1 && + config.environments.map { + environment -> + deliveryArtifact.isUsedIn(environment) && !environment.isPreview + }.size > 1 + + + + val descriptiveBlocks = notification.toBlocks(numVersionsToBePromoted, moreThanOneArtifact) val actionBlocks = withBlocks { actions { @@ -102,14 +111,15 @@ class ManualJudgmentNotificationHandler( } } - private fun SlackManualJudgmentNotification.toBlocks(numToBePromoted: Int): List { + private fun SlackManualJudgmentNotification.toBlocks(numToBePromoted: Int, moreThanOneArtifact: Boolean): List { return constructMessageWithoutButtons( targetEnvironment, application, artifactCandidate, pinnedArtifact, gitDataGenerator, - numToBePromoted + numToBePromoted, + moreThanOneArtifact ) } @@ -126,10 +136,11 @@ class ManualJudgmentNotificationHandler( pinnedArtifact: PublishedArtifact?, gitDataGenerator: GitDataGenerator, numToBePromoted: Int, + moreThanOneArtifact: Boolean, action: String = "awaiting judgement" ): List { return withBlocks { - gitDataGenerator.notificationBodyWithEnv(this, ":gavel:", application, artifactCandidate, action, environment, "in") + gitDataGenerator.notificationBodyWithEnv(this, ":gavel:", application, artifactCandidate, action, environment, "in", moreThanOneArtifact) var text = "" if (numToBePromoted > 1) { diff --git a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/UnpinnedNotificationHandler.kt b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/UnpinnedNotificationHandler.kt index 62627b137f..fa8e52c438 100644 --- a/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/UnpinnedNotificationHandler.kt +++ b/keel-notifications/src/main/kotlin/com/netflix/spinnaker/keel/notifications/slack/handlers/UnpinnedNotificationHandler.kt @@ -6,7 +6,6 @@ import com.netflix.spinnaker.keel.notifications.slack.SlackService import 
com.netflix.spinnaker.keel.notifications.slack.SlackUnpinnedNotification import com.slack.api.model.block.LayoutBlock import com.slack.api.model.kotlin_extension.block.withBlocks -import org.apache.logging.log4j.util.Strings import org.slf4j.LoggerFactory import org.springframework.stereotype.Component @@ -37,17 +36,24 @@ class UnpinnedNotificationHandler( val header = ":wastebasket: :pin: *${gitDataGenerator.linkedApp(application)} pin of build $previouslyPinned removed from ${gitDataGenerator.toCode(targetEnvironment)}*" val unpinner = slackService.getUsernameByEmail(user) + val isPinnedVersionAlreadyDeployed = latestApprovedArtifactVersion?.version == originalPin.version var text = "$unpinner unpinned ${gitDataGenerator.toCode(targetEnvironment)}" + if (latestApprovedArtifactVersion != null) { val link = gitDataGenerator.generateArtifactUrl(application, originalPin.artifact.reference, latestApprovedArtifactVersion.version) - text += ", <$link|#${latestApprovedArtifactVersion.buildNumber ?: latestApprovedArtifactVersion.version}> will start deploying shortly" + //if latest version == pinned version, show a different message + if (isPinnedVersionAlreadyDeployed) { + text += " The latest version is already deployed, and new versions can be deployed now." + } else { + text += ", <$link|#${latestApprovedArtifactVersion.buildNumber ?: latestApprovedArtifactVersion.version}> will start deploying shortly" + } } section { markdownText(header + "\n\n" + text) } - if (latestApprovedArtifactVersion != null) { + if (latestApprovedArtifactVersion != null && !isPinnedVersionAlreadyDeployed) { gitDataGenerator.buildCommitSectionWithButton(this, latestApprovedArtifactVersion.gitMetadata) } diff --git a/keel-optics/keel-optics.gradle b/keel-optics/keel-optics.gradle new file mode 100644 index 0000000000..614393c71b --- /dev/null +++ b/keel-optics/keel-optics.gradle @@ -0,0 +1,4 @@ +dependencies { + api(project(":keel-api")) + api("io.arrow-kt:arrow-optics:1.0.0") +} diff --git a/keel-optics/src/main/kotlin/com/netflix/spinnaker/keel/optics/mapOptics.kt b/keel-optics/src/main/kotlin/com/netflix/spinnaker/keel/optics/mapOptics.kt new file mode 100644 index 0000000000..a33de145fa --- /dev/null +++ b/keel-optics/src/main/kotlin/com/netflix/spinnaker/keel/optics/mapOptics.kt @@ -0,0 +1,14 @@ +package com.netflix.spinnaker.keel.optics + +import arrow.optics.Lens + +/** + * Generic lens for getting/setting a keyed value in a map. Setting `null` removes the key from the map. + */ +fun mapValueLens(key: K): Lens, V?> = Lens( + get = { it[key] }, + set = { map, value -> + if (value == null) map - key + else map + (key to value) + } +) diff --git a/keel-optics/src/main/kotlin/com/netflix/spinnaker/keel/optics/resourceOptics.kt b/keel-optics/src/main/kotlin/com/netflix/spinnaker/keel/optics/resourceOptics.kt new file mode 100644 index 0000000000..21fcd09bde --- /dev/null +++ b/keel-optics/src/main/kotlin/com/netflix/spinnaker/keel/optics/resourceOptics.kt @@ -0,0 +1,40 @@ +package com.netflix.spinnaker.keel.optics + +import arrow.optics.Lens +import com.netflix.spinnaker.keel.api.Moniker +import com.netflix.spinnaker.keel.api.Resource +import com.netflix.spinnaker.keel.api.ResourceSpec +import com.netflix.spinnaker.keel.api.SimpleLocations +import com.netflix.spinnaker.keel.api.SubnetAwareLocations + +/** + * Lens for getting/setting [Resource.spec]. 
+ */ +fun resourceSpecLens(): Lens, SPEC> = Lens( + get = Resource::spec, + set = { resource, spec -> resource.copy(spec = spec) } +) + +/** + * Lens for getting/setting [Moniker.stack]. + */ +val monikerStackLens: Lens = Lens( + get = Moniker::stack, + set = { moniker, stack -> moniker.copy(stack = stack) } +) + +/** + * Lens for getting/setting [SimpleLocations.account]. + */ +val simpleLocationsAccountLens: Lens = Lens( + get = SimpleLocations::account, + set = { locations, account -> locations.copy(account = account) } +) + +/** + * Lens for getting/setting [SubnetAwareLocations.account]. + */ +val subnetAwareLocationsAccountLens: Lens = Lens( + get = SubnetAwareLocations::account, + set = { locations, account -> locations.copy(account = account) } +) diff --git a/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionSummaryService.kt b/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionSummaryService.kt index e596a8e441..f0901265e9 100644 --- a/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionSummaryService.kt +++ b/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionSummaryService.kt @@ -2,24 +2,20 @@ package com.netflix.spinnaker.keel.orca import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.module.kotlin.convertValue -import com.fasterxml.jackson.module.kotlin.readValue -import com.netflix.spinnaker.keel.api.TaskStatus +import com.netflix.spinnaker.keel.actuation.ExecutionSummary +import com.netflix.spinnaker.keel.actuation.ExecutionSummaryService +import com.netflix.spinnaker.keel.actuation.RolloutLocation +import com.netflix.spinnaker.keel.actuation.RolloutStatus +import com.netflix.spinnaker.keel.actuation.RolloutStep +import com.netflix.spinnaker.keel.actuation.RolloutTarget +import com.netflix.spinnaker.keel.actuation.RolloutTargetWithStatus +import com.netflix.spinnaker.keel.actuation.Stage import com.netflix.spinnaker.keel.api.TaskStatus.CANCELED import com.netflix.spinnaker.keel.api.TaskStatus.FAILED_CONTINUE -import com.netflix.spinnaker.keel.api.TaskStatus.NOT_STARTED import com.netflix.spinnaker.keel.api.TaskStatus.RUNNING import com.netflix.spinnaker.keel.api.TaskStatus.SKIPPED import com.netflix.spinnaker.keel.api.TaskStatus.STOPPED -import com.netflix.spinnaker.keel.api.TaskStatus.SUCCEEDED import com.netflix.spinnaker.keel.api.TaskStatus.TERMINAL -import com.netflix.spinnaker.keel.api.actuation.ExecutionSummary -import com.netflix.spinnaker.keel.api.actuation.ExecutionSummaryService -import com.netflix.spinnaker.keel.api.actuation.RolloutLocation -import com.netflix.spinnaker.keel.api.actuation.RolloutStatus -import com.netflix.spinnaker.keel.api.actuation.RolloutStep -import com.netflix.spinnaker.keel.api.actuation.RolloutTarget -import com.netflix.spinnaker.keel.api.actuation.RolloutTargetWithStatus -import com.netflix.spinnaker.keel.api.actuation.Stage import kotlinx.coroutines.runBlocking import org.slf4j.LoggerFactory import org.springframework.stereotype.Component @@ -36,6 +32,8 @@ class OrcaExecutionSummaryService( companion object { val COMPLETED_TARGETS_STAGE = "initManagedRolloutStep" + val KICKOFF_STAGE = "startManagedRollout" + val DEPLOY_STAGE = "deploy" } override fun getSummary(executionId: String): ExecutionSummary { @@ -44,7 +42,9 @@ class OrcaExecutionSummaryService( } val typedStages: List = taskDetails.execution?.stages?.map { mapper.convertValue(it) } ?: emptyList() - val currentStage = typedStages.firstOrNull { it.status == RUNNING } + val 
currentStage = typedStages + .filter { it.status == RUNNING } + .maxByOrNull { it.refId.length } //grab the longest ref id, which will be the most nested running stage val targets = getTargets(taskDetails, typedStages) return ExecutionSummary( @@ -64,12 +64,12 @@ class OrcaExecutionSummaryService( fun getTargets(execution: ExecutionDetailResponse, typedStages: List): List { val targetsWithStatus: MutableList = mutableListOf() val statusTargetMap = if (execution.isManagedRollout()) { - getTargetStatusManagedRollout(execution, typedStages) + getTargetStatusManagedRollout(typedStages) } else { getTargetStatusDeployStage(execution, typedStages) } - statusTargetMap.forEach { (status, targets) -> + statusTargetMap.forEach { (status, targets) -> targetsWithStatus.addAll( targets.map { RolloutTargetWithStatus( @@ -87,52 +87,74 @@ class OrcaExecutionSummaryService( variables?.find { it.key == "selectionStrategy" } != null fun getTargetStatusManagedRollout( - execution: ExecutionDetailResponse, typedStages: List ): Map> { val targets: MutableMap> = mutableMapOf() + + // completed targets will be listed in the outputs of this type of stage targets[RolloutStatus.SUCCEEDED] = typedStages - .filter { it.type == COMPLETED_TARGETS_STAGE} + .filter { it.type == COMPLETED_TARGETS_STAGE } .mapNotNull { it.outputs["completedRolloutStep"] } .map { mapper.convertValue(it) } .flatMap { it.targets } + // deploying targets will be listed in the context of the deploy stage, + // so we filter for running deploy stages targets[RolloutStatus.RUNNING] = typedStages - .filter { it.type == COMPLETED_TARGETS_STAGE && - !it.outputs.containsKey("completedRolloutStep") && - it.status == RUNNING - } - .map { stage -> - val input = stage.context["input"] as Map<*, *> - val runningTargets = input["targets"] as? Map<*, *> ?: emptyList() - mapper.convertValue(runningTargets) - } - - targets[RolloutStatus.NOT_STARTED] = typedStages .filter { - it.type == COMPLETED_TARGETS_STAGE && - it.status == NOT_STARTED + it.type == DEPLOY_STAGE && + it.status == RUNNING } .map { stage -> - val input = stage.context["input"] as Map<*, *> - val runningTargets = input["targets"] as? Map<*, *> ?: emptyList() - mapper.convertValue(runningTargets) + val runningTargets = stage.context["targets"] as? List> ?: emptyList() + mapper.convertValue>(runningTargets) } + .flatten() targets[RolloutStatus.FAILED] = typedStages .filter { - it.type == COMPLETED_TARGETS_STAGE && + it.type == DEPLOY_STAGE && listOf(FAILED_CONTINUE, TERMINAL, CANCELED, SKIPPED, STOPPED).contains(it.status) } .map { stage -> - val input = stage.context["input"] as Map<*, *> - val runningTargets = input["targets"] as? Map<*, *> ?: emptyList() + val runningTargets = stage.context["targets"] as? List> ?: emptyList() mapper.convertValue(runningTargets) } + targets[RolloutStatus.NOT_STARTED] = (typedStages + .firstOrNull { + it.type == KICKOFF_STAGE + } + ?.let { stage -> + val allTargets = stage.context["targets"] as? List> ?: emptyList() + mapper.convertValue>(allTargets) + } ?: emptyList()) + .filter { target -> + target.notIn(targets[RolloutStatus.SUCCEEDED] as List) && + target.notIn(targets[RolloutStatus.FAILED] as List) && + target.notIn(targets[RolloutStatus.RUNNING] as List) + } + return targets } + /** + * Normal equals/comparison doesn't work when we use the java objects, so I must + * write my own. 
+ */ + fun RolloutTarget.notIn(targets: List): Boolean { + targets.forEach { target -> + if (target.cloudProvider == cloudProvider && + target.location.region == location.region && + target.location.account == location.account && + target.location.sublocations == location.sublocations + ) { + return false + } + } + return true + } + fun getTargetStatusDeployStage( execution: ExecutionDetailResponse, typedStages: List @@ -171,8 +193,8 @@ class OrcaExecutionSummaryService( return regions.map { region -> RolloutTarget( - cloudProvider = cloudProvider, - location = RolloutLocation(account = account, region = region), + cloudProvider, + RolloutLocation(account, region, emptyList()), ) } } diff --git a/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaService.kt b/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaService.kt index bac19c6e8f..778d0ff6cc 100644 --- a/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaService.kt +++ b/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaService.kt @@ -61,6 +61,12 @@ interface OrcaService { @Header("X-SPINNAKER-USER") user: String = DEFAULT_SERVICE_ACCOUNT ) + @PUT("/tasks/cancel") + suspend fun cancelOrchestrations( + @Body taskIds: List, + @Header("X-SPINNAKER-USER") user: String = DEFAULT_SERVICE_ACCOUNT + ) + @GET("/executions/correlated/{correlationId}") suspend fun getCorrelatedExecutions( @Path("correlationId") correlationId: String, diff --git a/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaTaskLauncher.kt b/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaTaskLauncher.kt index 251cf185eb..455e0aacc3 100644 --- a/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaTaskLauncher.kt +++ b/keel-orca/src/main/kotlin/com/netflix/spinnaker/keel/orca/OrcaTaskLauncher.kt @@ -61,7 +61,8 @@ class OrcaTaskLauncher( resource: Resource<*>, description: String, correlationId: String, - stages: List + stages: List, + artifactVersion: String? ) = submitJob( user = resource.serviceAccount, @@ -72,16 +73,18 @@ class OrcaTaskLauncher( description = description, correlationId = correlationId, stages = stages, - type = SubjectType.RESOURCE + type = SubjectType.RESOURCE, + artifactVersion = artifactVersion ) override fun submitJobAsync( resource: Resource<*>, description: String, correlationId: String, - stages: List + stages: List, + artifactVersion: String? ): CompletableFuture = GlobalScope.future { - submitJob(resource, description, correlationId, stages) + submitJob(resource, description, correlationId, stages, artifactVersion) } override suspend fun submitJob( @@ -95,7 +98,8 @@ class OrcaTaskLauncher( stages: List, type: SubjectType, artifacts: List>, - parameters: Map + parameters: Map, + artifactVersion: String? 
) = orcaService .orchestrate( @@ -125,7 +129,8 @@ class OrcaTaskLauncher( subjectType = type, application = application, environmentName = environmentName, - resourceId = resourceId + resourceId = resourceId, + artifactVersion = artifactVersion ) ) ) @@ -150,6 +155,9 @@ class OrcaTaskLauncher( override suspend fun getTaskExecution(taskId: String): TaskExecution = orcaService.getOrchestrationExecution(taskId) + override suspend fun cancelTasks(taskIds: List, user: String) = + orcaService.cancelOrchestrations(taskIds, user) + private val Resource<*>.notifications: Set get() = repository .environmentFor(id) diff --git a/keel-orca/src/test/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionSummaryServiceTests.kt b/keel-orca/src/test/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionSummaryServiceTests.kt index b08a893dd7..0cb3d7f59a 100644 --- a/keel-orca/src/test/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionSummaryServiceTests.kt +++ b/keel-orca/src/test/kotlin/com/netflix/spinnaker/keel/orca/OrcaExecutionSummaryServiceTests.kt @@ -2,17 +2,18 @@ package com.netflix.spinnaker.keel.orca import com.fasterxml.jackson.module.kotlin.readValue import com.netflix.spinnaker.keel.api.TaskStatus -import com.netflix.spinnaker.keel.api.actuation.RolloutStatus +import com.netflix.spinnaker.keel.actuation.RolloutStatus.NOT_STARTED +import com.netflix.spinnaker.keel.actuation.RolloutStatus.RUNNING +import com.netflix.spinnaker.keel.actuation.RolloutStatus.SUCCEEDED import com.netflix.spinnaker.keel.test.configuredTestObjectMapper import io.mockk.coEvery import io.mockk.mockk import kotlinx.coroutines.runBlocking import org.junit.jupiter.api.Test import strikt.api.expectThat -import strikt.assertions.contains import strikt.assertions.containsExactly +import strikt.assertions.containsExactlyInAnyOrder import strikt.assertions.hasSize -import strikt.assertions.isEmpty import strikt.assertions.isEqualTo import strikt.assertions.isNotEmpty import strikt.assertions.isNotNull @@ -37,11 +38,42 @@ class OrcaExecutionSummaryServiceTests { } expectThat(summary.deployTargets).isNotEmpty().hasSize(2) - expectThat(summary.deployTargets.map { it.status }.toSet()).containsExactly(RolloutStatus.SUCCEEDED) + expectThat(summary.deployTargets.map { it.status }.toSet()).containsExactly(SUCCEEDED) expectThat(summary.currentStage).isNull() expectThat(summary.status).isEqualTo(TaskStatus.SUCCEEDED) } + @Test + fun `can read running managed rollout stage`() { + val response = javaClass.getResource("/running-managed-rollout.json").readText() + coEvery { orcaService.getOrchestrationExecution(any()) } returns mapper.readValue(response) + + val summary = runBlocking { + subject.getSummary("1") + } + + expectThat(summary.deployTargets).isNotEmpty().hasSize(2) + expectThat(summary.deployTargets.map { it.status }.toSet()).containsExactlyInAnyOrder(SUCCEEDED, NOT_STARTED) + expectThat(summary.currentStage).isNotNull().get { type }.isEqualTo("waitForNextRolloutStep") + expectThat(summary.status).isEqualTo(TaskStatus.RUNNING) + } + + @Test + fun `can read failed managed rollout stage`() { + val response = javaClass.getResource("/failed-managed-rollout.json").readText() + coEvery { orcaService.getOrchestrationExecution(any()) } returns mapper.readValue(response) + + val summary = runBlocking { + subject.getSummary("1") + } + + expectThat(summary.deployTargets).isNotEmpty().hasSize(2) + expectThat(summary.deployTargets.map { it.status }.toSet()).containsExactlyInAnyOrder(SUCCEEDED, NOT_STARTED) + 
expectThat(summary.currentStage).isNull() + expectThat(summary.status).isEqualTo(TaskStatus.TERMINAL) + } + + @Test fun `can read a single region deploy stage`() { val response = javaClass.getResource("/single-region-deploy.json").readText() @@ -52,7 +84,7 @@ class OrcaExecutionSummaryServiceTests { } expectThat(summary.deployTargets).isNotEmpty().hasSize(1) - expectThat(summary.deployTargets.map { it.status }.toSet()).containsExactly(RolloutStatus.SUCCEEDED) + expectThat(summary.deployTargets.map { it.status }.toSet()).containsExactly(SUCCEEDED) expectThat(summary.currentStage).isNull() expectThat(summary.stages).isNotEmpty().hasSize(5) expectThat(summary.status).isEqualTo(TaskStatus.SUCCEEDED) @@ -68,7 +100,7 @@ class OrcaExecutionSummaryServiceTests { } expectThat(summary.deployTargets).isNotEmpty().hasSize(1) - expectThat(summary.deployTargets.map { it.status }.toSet()).containsExactly(RolloutStatus.RUNNING) + expectThat(summary.deployTargets.map { it.status }.toSet()).containsExactly(RUNNING) expectThat(summary.currentStage).isNotNull().get { type }.isEqualTo("createServerGroup") expectThat(summary.stages).isNotEmpty().hasSize(1) expectThat(summary.status).isEqualTo(TaskStatus.RUNNING) diff --git a/keel-orca/src/test/kotlin/com/netflix/spinnaker/keel/orca/OrcaTaskMonitorAgentTests.kt b/keel-orca/src/test/kotlin/com/netflix/spinnaker/keel/orca/OrcaTaskMonitorAgentTests.kt index 2f5c093c6f..80a5d69216 100644 --- a/keel-orca/src/test/kotlin/com/netflix/spinnaker/keel/orca/OrcaTaskMonitorAgentTests.kt +++ b/keel-orca/src/test/kotlin/com/netflix/spinnaker/keel/orca/OrcaTaskMonitorAgentTests.kt @@ -50,7 +50,8 @@ internal class OrcaTaskMonitorAgentTests : JUnit5Minutests { application = "fnord", environmentName = "prod", resourceId = resource.id, - name = "upsert server group" + name = "upsert server group", + artifactVersion = "v1" ) val taskConstraintRecord = TaskRecord( @@ -59,7 +60,8 @@ internal class OrcaTaskMonitorAgentTests : JUnit5Minutests { application = "fnord", environmentName = "prod", resourceId = null, - name = "canary constraint" + name = "canary constraint", + artifactVersion = "v1" ) val task = Task( diff --git a/keel-orca/src/test/resources/failed-managed-rollout.json b/keel-orca/src/test/resources/failed-managed-rollout.json new file mode 100644 index 0000000000..43023225f9 --- /dev/null +++ b/keel-orca/src/test/resources/failed-managed-rollout.json @@ -0,0 +1,1750 @@ +{ + "id": "01FH8TGGA62Q8GMWV2149MGW2C", + "name": "Deploy master-h97.cd22e76 to cluster emburnstest-eggo in titustestvpc/us-east-1,us-west-2 using a managed rollout", + "application": "emburnstest", + "status": "TERMINAL", + "variables": [ + { + "key": "input", + "value": { + "selectionStrategy": "ALPHABETICAL", + "clusterDefinitions": [ + { + "registry": "testregistry", + "stack": "eggo", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "targetHealthyDeployPercentage": 100, + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "env": { + + }, + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + }, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "tags": { + + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "cloudProvider": "titus", + "containerAttributes": { + + }, + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest" + ], + "migrationPolicy": { + "type": 
"systemDefault" + }, + "entryPoint": "", + "tag": "master-h97.cd22e76", + "inService": true, + "capacityGroup": "emburnstest" + } + ], + "targets": [ + { + "cloudProvider": "titus", + "location": { + "subLocations": [ + + ], + "region": "us-east-1", + "account": "titustestvpc" + } + }, + { + "cloudProvider": "titus", + "location": { + "subLocations": [ + + ], + "region": "us-west-2", + "account": "titustestvpc" + } + } + ] + } + }, + { + "key": "reason", + "value": "Diff detected at 2021-10-05T18:12:33.494765Z[UTC]" + }, + { + "key": "selectionStrategy", + "value": "ALPHABETICAL" + }, + { + "key": "rolloutWorkflowId", + "value": "rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS" + }, + { + "key": "targets", + "value": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ] + }, + { + "key": "rolloutStep", + "value": { + "id": "b47e5371-4a9e-3413-969f-f4c1579fe352", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ] + } + }, + { + "key": "clusters", + "value": [ + { + "stack": "eggo", + "credentials": "titustestvpc", + "targetHealthyDeployPercentage": 100, + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + }, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "cloudProvider": "titus", + "tag": "master-h97.cd22e76", + "inService": true, + "registry": "testregistry", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "env": { + "EC2_REGION": "us-east-1", + "NETFLIX_HOME_REGION": "us-east-1", + "NETFLIX_REGION": "us-east-1", + "SPINNAKER_ACCOUNT": "titustestvpc" + }, + "tags": { + + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "containerAttributes": { + + }, + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest" + ], + "migrationPolicy": { + "type": "systemDefault" + }, + "entryPoint": "", + "region": "us-east-1", + "account": "titustestvpc", + "capacityGroup": "emburnstest" + } + ] + }, + { + "key": "outputs", + "value": { + + } + }, + { + "key": "notification.type", + "value": "upsertentitytags" + }, + { + "key": "deploy.account.name", + "value": "titustestvpc" + }, + { + "key": "zeroDesiredCapacityCount", + "value": 0 + }, + { + "key": "stack", + "value": "eggo" + }, + { + "key": "lastCapacityCheck", + "value": { + "outOfService": 0, + "up": 0, + "failed": 0, + "starting": 0, + "down": 0, + "succeeded": 0, + "unknown": 1 + } + }, + { + "key": "credentials", + "value": "titustestvpc" + }, + { + "key": "targetHealthyDeployPercentage", + "value": 100 + }, + { + "key": "source", + "value": { + + } + }, + { + "key": "type", + "value": "createServerGroup" + }, + { + "key": "constraints", + "value": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + } + }, + { + "key": "currentInstanceCount", + "value": 1 + }, + { + "key": "network", + "value": "default" + }, + { + "key": "capacity", + "value": { + "min": 1, + "desired": 1, + "max": 1 + } + }, + { + "key": "targetDesiredSize", + "value": 1 + }, + { + "key": "cloudProvider", + "value": "titus" + }, + { + "key": "kato.result.expected", + "value": true + }, + { + "key": "deploy.server.groups", + "value": { + "us-east-1": [ + "emburnstest-eggo-v000" + ] + } + }, + { + "key": "tag", + "value": 
"master-h97.cd22e76" + }, + { + "key": "kato.last.task.id", + "value": { + "id": "01FH8TGR3EQRDCF6C01BJZWTAA" + } + }, + { + "key": "inService", + "value": true + }, + { + "key": "registry", + "value": "testregistry" + }, + { + "key": "scaling", + "value": { + + } + }, + { + "key": "imageId", + "value": "emburns/spin-titus-demo:master-h97.cd22e76" + }, + { + "key": "workflowExecution", + "value": { + "namespace": "spinnaker", + "workflowType": "WorkflowDispatcher", + "runId": "08e768f5-e7fd-4918-a8ea-b719ee6c95a6", + "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGHHJD9HQK0GWCJZVAK21/08e768f5-e7fd-4918-a8ea-b719ee6c95a6/summary", + "workflowId": "stage:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGHHJD9HQK0GWCJZVAK21", + "uiTarget": "https://temporal" + } + }, + { + "key": "loadBalancers", + "value": [ + + ] + }, + { + "key": "resources", + "value": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + } + }, + { + "key": "overrides", + "value": { + + } + }, + { + "key": "workflowResult", + "value": { + "childWorkflowExecutionInfos": { + "0cfd01a26983ac53e0f04e119594743049641618ae1f34ab3d7ffc3e198fe4e9": { + "namespace": "spinnaker", + "workflowType": "StageContextCloudOperationRunner", + "runId": "f41d300d-70f7-4f7a-b93f-563bd3067aaf", + "permalink": "https://temporal/namespaces/spinnaker/workflows/0cfd01a26983ac53e0f04e119594743049641618ae1f34ab3d7ffc3e198fe4e9/f41d300d-70f7-4f7a-b93f-563bd3067aaf/summary", + "workflowId": "0cfd01a26983ac53e0f04e119594743049641618ae1f34ab3d7ffc3e198fe4e9", + "uiTarget": "https://temporal" + } + }, + "childWorkflowExecutionResults": { + "0cfd01a26983ac53e0f04e119594743049641618ae1f34ab3d7ffc3e198fe4e9": { + "output": { + "jobUri": "734cab5f-3522-4fcc-a280-38cb3f16ce6d", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v000" + ], + "outputType": "titusServerGroupDeployOperation", + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v000" + } + }, + "childWorkflowExecutionInfos": { + + }, + "childWorkflowExecutionResults": { + + }, + "logs": [ + + ], + "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", + "isCompleted": true + } + }, + "logs": [ + "Retrieved application data from front50 for application: emburnstest", + "Resolved disruption budget.", + "Prepared job request for: emburnstest-eggo-v000 with imageId emburns/spin-titus-demo:master-h97.cd22e76", + "Submitting job with name: emburnstest-eggo-v000, attempt: 1", + "Successfully created job with name: emburnstest-eggo-v000, jobUri: 734cab5f-3522-4fcc-a280-38cb3f16ce6d", + "Invalid source asg null", + "Entity tags with titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-east-1 not found in front50", + "Deleting tags with titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-east-1 from ElasticSearch" + ], + "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", + "isCompleted": true + } + }, + { + "key": "env", + "value": { + "EC2_REGION": "us-east-1", + "NETFLIX_HOME_REGION": "us-east-1", + "NETFLIX_REGION": "us-east-1", + "SPINNAKER_ACCOUNT": "titustestvpc" + } + }, + { + "key": "tags", + "value": { + + } + }, + { + "key": "capacitySnapshot", + "value": { + "minSize": 1, + "maxSize": 1, + "desiredCapacity": 1 + } + }, + { + "key": "application", + "value": "emburnstest" + }, + { + "key": "targetGroups", + "value": [ + + ] + }, + { + "key": "containerAttributes", + "value": { + + } + }, + { + "key": "name", + "value": "Deploy in us-east-1" + }, + { + "key": "commits", + "value": [ + + ] + }, + { + "key": 
"securityGroups", + "value": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest", + "nf-infrastructure", + "nf-datacenter" + ] + }, + { + "key": "migrationPolicy", + "value": { + "type": "systemDefault" + } + }, + { + "key": "kato.tasks", + "value": [ + { + "resultObjects": [ + { + "jobUri": "734cab5f-3522-4fcc-a280-38cb3f16ce6d", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v000" + ], + "outputType": "titusServerGroupDeployOperation", + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v000" + } + } + ], + "history": [ + + ], + "id": "n/a", + "status": { + + } + } + ] + }, + { + "key": "entryPoint", + "value": "" + }, + { + "key": "region", + "value": "us-east-1" + }, + { + "key": "account", + "value": "titustestvpc" + }, + { + "key": "capacityGroup", + "value": "emburnstest" + }, + { + "key": "force.cache.refresh.errors", + "value": [ + + ] + }, + { + "key": "processed.server.groups", + "value": [ + + ] + }, + { + "key": "refreshed.server.groups", + "value": [ + + ] + }, + { + "key": "zones", + "value": [ + + ] + }, + { + "key": "exception", + "value": { + "exceptionType": "WorkflowServiceException", + "shouldRetry": false, + "details": { + "stackTrace": "io.temporal.client.WorkflowServiceException: workflowId='rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS', runId='', workflowType='RolloutWorkflow'}\n\tat io.temporal.internal.sync.WorkflowStubImpl.query(WorkflowStubImpl.java:351)\n\tat io.temporal.internal.sync.WorkflowInvocationHandler$SyncWorkflowInvocationHandler.queryWorkflow(WorkflowInvocationHandler.java:309)\n\tat io.temporal.internal.sync.WorkflowInvocationHandler$SyncWorkflowInvocationHandler.invoke(WorkflowInvocationHandler.java:272)\n\tat io.temporal.internal.sync.WorkflowInvocationHandler.invoke(WorkflowInvocationHandler.java:178)\n\tat com.sun.proxy.$Proxy465.getNextStep(Unknown Source)\n\tat com.netflix.spinnaker.orca.managedrollout.client.BuoyClient.getNextRolloutStep(BuoyClient.kt:82)\n\tat com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask.execute(AwaitReadyRolloutStepTask.kt:62)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1$1$1.invoke(RunTaskHandler.kt:160)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1$1$1.invoke(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.withLoggingContext(RunTaskHandler.kt:439)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.access$withLoggingContext(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1$1.invoke(RunTaskHandler.kt:107)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1$1.invoke(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.orca.q.handler.AuthenticationAware$sam$java_util_concurrent_Callable$0.call(AuthenticationAware.kt)\n\tat com.netflix.spinnaker.security.AuthenticatedRequest.lambda$wrapCallableForPrincipal$0(AuthenticatedRequest.java:274)\n\tat com.netflix.spinnaker.orca.q.handler.AuthenticationAware.withAuth(AuthenticationAware.kt:58)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1.invoke(RunTaskHandler.kt:106)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1.invoke(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$withTask$1.invoke(RunTaskHandler.kt:264)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$withTask$1.invoke(RunTaskHandler.kt:77)\n\tat 
com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler$withTask$1.invoke(OrcaMessageHandler.kt:68)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler$withTask$1.invoke(OrcaMessageHandler.kt:46)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler$withStage$1.invoke(OrcaMessageHandler.kt:85)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler$withStage$1.invoke(OrcaMessageHandler.kt:46)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler.withExecution(OrcaMessageHandler.kt:95)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler.withStage(OrcaMessageHandler.kt:74)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler.withTask(OrcaMessageHandler.kt:60)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.withTask(RunTaskHandler.kt:253)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.handle(RunTaskHandler.kt:103)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.handle(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.q.MessageHandler.invoke(MessageHandler.kt:36)\n\tat com.netflix.spinnaker.orca.q.audit.ExecutionTrackingMessageHandlerPostProcessor$ExecutionTrackingMessageHandlerProxy.invoke(ExecutionTrackingMessageHandlerPostProcessor.kt:72)\n\tat com.netflix.spinnaker.q.QueueProcessor$callback$1$1.run(QueueProcessor.kt:90)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:829)\nCaused by: io.grpc.StatusRuntimeException: DEADLINE_EXCEEDED: deadline exceeded after 9.999872685s. [remote_addr=temporal-prestaging.us-west-2.spinnaker.mgmt.netflix.net/100.72.26.48:8980]\n\tat io.grpc.stub.ClientCalls.toStatusRuntimeException(ClientCalls.java:262)\n\tat io.grpc.stub.ClientCalls.getUnchecked(ClientCalls.java:243)\n\tat io.grpc.stub.ClientCalls.blockingUnaryCall(ClientCalls.java:156)\n\tat io.temporal.api.workflowservice.v1.WorkflowServiceGrpc$WorkflowServiceBlockingStub.queryWorkflow(WorkflowServiceGrpc.java:2983)\n\tat io.temporal.internal.external.GenericWorkflowClientExternalImpl.lambda$query$5(GenericWorkflowClientExternalImpl.java:216)\n\tat io.temporal.internal.retryer.GrpcSyncRetryer.retry(GrpcSyncRetryer.java:59)\n\tat io.temporal.internal.retryer.GrpcRetryer.retryWithResult(GrpcRetryer.java:51)\n\tat io.temporal.internal.external.GenericWorkflowClientExternalImpl.query(GenericWorkflowClientExternalImpl.java:209)\n\tat io.temporal.internal.client.RootWorkflowClientInvoker.query(RootWorkflowClientInvoker.java:142)\n\tat io.temporal.internal.sync.WorkflowStubImpl.query(WorkflowStubImpl.java:342)\n\t... 
35 more\n", + "error": "Unexpected Task Failure", + "errors": [ + "workflowId='rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS', runId='', workflowType='RolloutWorkflow'}" + ] + }, + "operation": "awaitReadyRolloutStep", + "timestamp": 1633457685176 + } + } + ], + "steps": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutTask", + "name": "completeManagedRollout", + "status": "NOT_STARTED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.StartManagedRolloutTask", + "name": "startManagedRollout", + "startTime": 1633457554127, + "endTime": 1633457554254, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", + "name": "completeRolloutStep", + "startTime": 1633457674536, + "endTime": 1633457674695, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", + "name": "awaitReadyRolloutStep", + "startTime": 1633457554637, + "endTime": 1633457554816, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.ParallelDeployStage.CompleteParallelDeployTask", + "name": "completeParallelDeploy", + "startTime": 1633457674240, + "endTime": 1633457674307, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", + "name": "determineSourceServerGroup", + "startTime": 1633457555636, + "endTime": 1633457555715, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "2", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", + "name": "determineHealthProviders", + "startTime": 1633457555748, + "endTime": 1633457555919, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "3", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", + "name": "snapshotSourceServerGroup", + "startTime": 1633457555988, + "endTime": 1633457556084, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "4", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", + "name": "createServerGroup", + "startTime": 1633457556151, + "endTime": 1633457556373, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "5", + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "monitorDeploy", + "startTime": 1633457556407, + "endTime": 1633457561541, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "6", + 
"implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", + "name": "tagServerGroup", + "startTime": 1633457561572, + "endTime": 1633457561820, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "7", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", + "name": "waitForUpInstances", + "startTime": 1633457561882, + "endTime": 1633457672908, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "8", + "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", + "name": "jarDiffs", + "startTime": 1633457672941, + "endTime": 1633457673023, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "9", + "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", + "name": "getCommits", + "startTime": 1633457673094, + "endTime": 1633457673322, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.ApplySourceServerGroupCapacityTask", + "name": "restoreMinCapacity", + "startTime": 1633457673655, + "endTime": 1633457673719, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "2", + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "waitForCapacityMatch", + "startTime": 1633457673749, + "endTime": 1633457673826, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "3", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", + "name": "forceCacheRefresh", + "startTime": 1633457673895, + "endTime": 1633457673999, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", + "name": "completeRolloutStep", + "status": "NOT_STARTED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", + "name": "awaitReadyRolloutStep", + "startTime": 1633457675061, + "endTime": 1633457685244, + "status": "TERMINAL", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "buildTime": 1633457553734, + "startTime": 1633457553771, + "endTime": 1633457685496, + "execution": { + "type": "ORCHESTRATION", + "id": "01FH8TGGA62Q8GMWV2149MGW2C", + "application": "emburnstest", + "buildTime": 1633457553734, + "canceled": false, + "limitConcurrent": false, + "keepWaitingPipelines": false, + "stages": [ + { + "id": "01FH8TGGA6MCFQ1AK3G9F7MZBS", + "refId": "1", + "type": "managedRollout", + "name": "managedRollout", + "startTime": 1633457553816, + "endTime": 1633457685455, + "status": "TERMINAL", + "context": { + "input": { + "selectionStrategy": "ALPHABETICAL", + "clusterDefinitions": [ + { + "registry": "testregistry", + "stack": "eggo", + "scaling": { + + }, + "imageId": 
"emburns/spin-titus-demo:master-h97.cd22e76", + "targetHealthyDeployPercentage": 100, + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "env": { + + }, + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + }, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "tags": { + + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "cloudProvider": "titus", + "containerAttributes": { + + }, + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest" + ], + "migrationPolicy": { + "type": "systemDefault" + }, + "entryPoint": "", + "tag": "master-h97.cd22e76", + "inService": true, + "capacityGroup": "emburnstest" + } + ], + "targets": [ + { + "cloudProvider": "titus", + "location": { + "subLocations": [ + + ], + "region": "us-east-1", + "account": "titustestvpc" + } + }, + { + "cloudProvider": "titus", + "location": { + "subLocations": [ + + ], + "region": "us-west-2", + "account": "titustestvpc" + } + } + ] + }, + "reason": "Diff detected at 2021-10-05T18:12:33.494765Z[UTC]" + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutTask", + "name": "completeManagedRollout", + "status": "NOT_STARTED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8TGGFM1479J6XDHYE616FH", + "refId": "1<1", + "type": "startManagedRollout", + "name": "Start Managed Rollout", + "startTime": 1633457554017, + "endTime": 1633457554303, + "status": "SUCCEEDED", + "context": { + "selectionStrategy": "ALPHABETICAL", + "rolloutWorkflowId": "rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + }, + { + "cloudProvider": "titus", + "location": { + "region": "us-west-2", + "account": "titustestvpc" + } + } + ] + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.StartManagedRolloutTask", + "name": "startManagedRollout", + "startTime": 1633457554127, + "endTime": 1633457554254, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8TGGA6MCFQ1AK3G9F7MZBS", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8TGGFMNPKVGWNNXHH0A9AA", + "refId": "1<2", + "type": "initManagedRolloutStep", + "name": "Run next Rollout Step", + "startTime": 1633457554332, + "endTime": 1633457674763, + "status": "SUCCEEDED", + "context": { + "rolloutWorkflowId": "rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS" + }, + "outputs": { + "completedRolloutStep": { + "id": "b47e5371-4a9e-3413-969f-f4c1579fe352", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "account": "titustestvpc", + "region": "us-east-1" + } + } + ] + } + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", + "name": "completeRolloutStep", + "startTime": 1633457674536, + "endTime": 1633457674695, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + 
"parentStageId": "01FH8TGGA6MCFQ1AK3G9F7MZBS", + "requisiteStageRefIds": [ + "1<1" + ] + }, + { + "id": "01FH8TGGYVCCN4Q946MH0ZP01J", + "refId": "1<2<1", + "type": "waitForNextRolloutStep", + "name": "Wait For Next Rollout Step", + "startTime": 1633457554481, + "endTime": 1633457554848, + "status": "SUCCEEDED", + "context": { + "rolloutWorkflowId": "rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS", + "rolloutStep": { + "id": "b47e5371-4a9e-3413-969f-f4c1579fe352", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ] + } + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", + "name": "awaitReadyRolloutStep", + "startTime": 1633457554637, + "endTime": 1633457554816, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8TGGFMNPKVGWNNXHH0A9AA", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8TGGYV7KBASASHKJPAWR4H", + "refId": "1<2<2", + "type": "doManagedRollout", + "name": "Deploy Rollout Step", + "startTime": 1633457554880, + "endTime": 1633457674432, + "status": "SUCCEEDED", + "context": { + + }, + "outputs": { + + }, + "tasks": [ + + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8TGGFMNPKVGWNNXHH0A9AA", + "requisiteStageRefIds": [ + "1<2<1" + ] + }, + { + "id": "01FH8TGHEDH26RVMNYDPQ9PDQD", + "refId": "1<2<2<1", + "type": "deploy", + "name": "Deploy us-east-1", + "startTime": 1633457554933, + "endTime": 1633457674339, + "status": "SUCCEEDED", + "context": { + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ], + "clusters": [ + { + "stack": "eggo", + "credentials": "titustestvpc", + "targetHealthyDeployPercentage": 100, + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + }, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "cloudProvider": "titus", + "tag": "master-h97.cd22e76", + "inService": true, + "registry": "testregistry", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "env": { + "EC2_REGION": "us-east-1", + "NETFLIX_HOME_REGION": "us-east-1", + "NETFLIX_REGION": "us-east-1", + "SPINNAKER_ACCOUNT": "titustestvpc" + }, + "tags": { + + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "containerAttributes": { + + }, + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest" + ], + "migrationPolicy": { + "type": "systemDefault" + }, + "entryPoint": "", + "region": "us-east-1", + "account": "titustestvpc", + "capacityGroup": "emburnstest" + } + ] + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.ParallelDeployStage.CompleteParallelDeployTask", + "name": "completeParallelDeploy", + "startTime": 1633457674240, + "endTime": 1633457674307, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8TGGYV7KBASASHKJPAWR4H", + "requisiteStageRefIds": [ + + ] + }, + { + "id": 
"01FH8TGHHJD9HQK0GWCJZVAK21", + "refId": "1<2<2<1<1", + "type": "createServerGroup", + "name": "Deploy in us-east-1", + "startTime": 1633457555039, + "endTime": 1633457674144, + "status": "SUCCEEDED", + "context": { + "outputs": { + + }, + "notification.type": "upsertentitytags", + "deploy.account.name": "titustestvpc", + "zeroDesiredCapacityCount": 0, + "stack": "eggo", + "lastCapacityCheck": { + "outOfService": 0, + "up": 0, + "failed": 0, + "starting": 0, + "down": 0, + "succeeded": 0, + "unknown": 1 + }, + "credentials": "titustestvpc", + "targetHealthyDeployPercentage": 100, + "source": { + + }, + "type": "createServerGroup", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ], + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + }, + "currentInstanceCount": 1, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "targetDesiredSize": 1, + "cloudProvider": "titus", + "kato.result.expected": true, + "deploy.server.groups": { + "us-east-1": [ + "emburnstest-eggo-v000" + ] + }, + "tag": "master-h97.cd22e76", + "kato.last.task.id": { + "id": "01FH8TGR3EQRDCF6C01BJZWTAA" + }, + "inService": true, + "registry": "testregistry", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "workflowExecution": { + "namespace": "spinnaker", + "workflowType": "WorkflowDispatcher", + "runId": "08e768f5-e7fd-4918-a8ea-b719ee6c95a6", + "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGHHJD9HQK0GWCJZVAK21/08e768f5-e7fd-4918-a8ea-b719ee6c95a6/summary", + "workflowId": "stage:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGHHJD9HQK0GWCJZVAK21", + "uiTarget": "https://temporal" + }, + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "workflowResult": { + "childWorkflowExecutionInfos": { + "0cfd01a26983ac53e0f04e119594743049641618ae1f34ab3d7ffc3e198fe4e9": { + "namespace": "spinnaker", + "workflowType": "StageContextCloudOperationRunner", + "runId": "f41d300d-70f7-4f7a-b93f-563bd3067aaf", + "permalink": "https://temporal/namespaces/spinnaker/workflows/0cfd01a26983ac53e0f04e119594743049641618ae1f34ab3d7ffc3e198fe4e9/f41d300d-70f7-4f7a-b93f-563bd3067aaf/summary", + "workflowId": "0cfd01a26983ac53e0f04e119594743049641618ae1f34ab3d7ffc3e198fe4e9", + "uiTarget": "https://temporal" + } + }, + "childWorkflowExecutionResults": { + "0cfd01a26983ac53e0f04e119594743049641618ae1f34ab3d7ffc3e198fe4e9": { + "output": { + "jobUri": "734cab5f-3522-4fcc-a280-38cb3f16ce6d", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v000" + ], + "outputType": "titusServerGroupDeployOperation", + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v000" + } + }, + "childWorkflowExecutionInfos": { + + }, + "childWorkflowExecutionResults": { + + }, + "logs": [ + + ], + "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", + "isCompleted": true + } + }, + "logs": [ + "Retrieved application data from front50 for application: emburnstest", + "Resolved disruption budget.", + "Prepared job request for: emburnstest-eggo-v000 with imageId emburns/spin-titus-demo:master-h97.cd22e76", + "Submitting job with name: emburnstest-eggo-v000, attempt: 1", + "Successfully created job with name: emburnstest-eggo-v000, jobUri: 734cab5f-3522-4fcc-a280-38cb3f16ce6d", + "Invalid source asg null", + "Entity tags with 
titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-east-1 not found in front50", + "Deleting tags with titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-east-1 from ElasticSearch" + ], + "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", + "isCompleted": true + }, + "env": { + "EC2_REGION": "us-east-1", + "NETFLIX_HOME_REGION": "us-east-1", + "NETFLIX_REGION": "us-east-1", + "SPINNAKER_ACCOUNT": "titustestvpc" + }, + "tags": { + + }, + "capacitySnapshot": { + "minSize": 1, + "maxSize": 1, + "desiredCapacity": 1 + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "containerAttributes": { + + }, + "name": "Deploy in us-east-1", + "commits": [ + + ], + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest", + "nf-infrastructure", + "nf-datacenter" + ], + "migrationPolicy": { + "type": "systemDefault" + }, + "kato.tasks": [ + { + "resultObjects": [ + { + "jobUri": "734cab5f-3522-4fcc-a280-38cb3f16ce6d", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v000" + ], + "outputType": "titusServerGroupDeployOperation", + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v000" + } + } + ], + "history": [ + + ], + "id": "n/a", + "status": { + + } + } + ], + "entryPoint": "", + "region": "us-east-1", + "account": "titustestvpc", + "capacityGroup": "emburnstest" + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", + "name": "determineSourceServerGroup", + "startTime": 1633457555636, + "endTime": 1633457555715, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "2", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", + "name": "determineHealthProviders", + "startTime": 1633457555748, + "endTime": 1633457555919, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "3", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", + "name": "snapshotSourceServerGroup", + "startTime": 1633457555988, + "endTime": 1633457556084, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "4", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", + "name": "createServerGroup", + "startTime": 1633457556151, + "endTime": 1633457556373, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "5", + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "monitorDeploy", + "startTime": 1633457556407, + "endTime": 1633457561541, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "6", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", + "name": "tagServerGroup", + "startTime": 1633457561572, + "endTime": 1633457561820, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "7", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", + "name": "waitForUpInstances", + "startTime": 1633457561882, + "endTime": 
1633457672908, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "8", + "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", + "name": "jarDiffs", + "startTime": 1633457672941, + "endTime": 1633457673023, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "9", + "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", + "name": "getCommits", + "startTime": 1633457673094, + "endTime": 1633457673322, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8TGHEDH26RVMNYDPQ9PDQD", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8TM55EJSWTCAC111KS1NWM", + "refId": "1<2<2<1<1>1", + "type": "applySourceServerGroupCapacity", + "name": "restoreMinCapacityFromSnapshot", + "startTime": 1633457673453, + "endTime": 1633457674056, + "status": "SUCCEEDED", + "context": { + "force.cache.refresh.errors": [ + + ], + "credentials": "titustestvpc", + "processed.server.groups": [ + + ], + "cloudProvider": "titus", + "deploy.server.groups": { + + }, + "refreshed.server.groups": [ + + ], + "zones": [ + + ] + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.ApplySourceServerGroupCapacityTask", + "name": "restoreMinCapacity", + "startTime": 1633457673655, + "endTime": 1633457673719, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "2", + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "waitForCapacityMatch", + "startTime": 1633457673749, + "endTime": 1633457673826, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "3", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", + "name": "forceCacheRefresh", + "startTime": 1633457673895, + "endTime": 1633457673999, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_AFTER", + "parentStageId": "01FH8TGHHJD9HQK0GWCJZVAK21", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8TGGFMW6B8F97VG7DEBNE5", + "refId": "1<3", + "type": "initManagedRolloutStep", + "name": "Run next Rollout Step", + "startTime": 1633457674815, + "endTime": 1633457685361, + "status": "TERMINAL", + "context": { + "rolloutWorkflowId": "rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS" + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", + "name": "completeRolloutStep", + "status": "NOT_STARTED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8TGGA6MCFQ1AK3G9F7MZBS", + "requisiteStageRefIds": [ + "1<2" + ] + }, + { + "id": "01FH8TM6M19FW5ATTWG7J88J00", + "refId": "1<3<1", + "type": "waitForNextRolloutStep", + "name": "Wait For Next Rollout Step", + "startTime": 1633457674942, + "endTime": 1633457685295, + "status": "TERMINAL", + "context": { + "exception": { + "exceptionType": 
"WorkflowServiceException", + "shouldRetry": false, + "details": { + "stackTrace": "io.temporal.client.WorkflowServiceException: workflowId='rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS', runId='', workflowType='RolloutWorkflow'}\n\tat io.temporal.internal.sync.WorkflowStubImpl.query(WorkflowStubImpl.java:351)\n\tat io.temporal.internal.sync.WorkflowInvocationHandler$SyncWorkflowInvocationHandler.queryWorkflow(WorkflowInvocationHandler.java:309)\n\tat io.temporal.internal.sync.WorkflowInvocationHandler$SyncWorkflowInvocationHandler.invoke(WorkflowInvocationHandler.java:272)\n\tat io.temporal.internal.sync.WorkflowInvocationHandler.invoke(WorkflowInvocationHandler.java:178)\n\tat com.sun.proxy.$Proxy465.getNextStep(Unknown Source)\n\tat com.netflix.spinnaker.orca.managedrollout.client.BuoyClient.getNextRolloutStep(BuoyClient.kt:82)\n\tat com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask.execute(AwaitReadyRolloutStepTask.kt:62)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1$1$1.invoke(RunTaskHandler.kt:160)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1$1$1.invoke(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.withLoggingContext(RunTaskHandler.kt:439)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.access$withLoggingContext(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1$1.invoke(RunTaskHandler.kt:107)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1$1.invoke(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.orca.q.handler.AuthenticationAware$sam$java_util_concurrent_Callable$0.call(AuthenticationAware.kt)\n\tat com.netflix.spinnaker.security.AuthenticatedRequest.lambda$wrapCallableForPrincipal$0(AuthenticatedRequest.java:274)\n\tat com.netflix.spinnaker.orca.q.handler.AuthenticationAware.withAuth(AuthenticationAware.kt:58)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1.invoke(RunTaskHandler.kt:106)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$handle$1.invoke(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$withTask$1.invoke(RunTaskHandler.kt:264)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler$withTask$1.invoke(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler$withTask$1.invoke(OrcaMessageHandler.kt:68)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler$withTask$1.invoke(OrcaMessageHandler.kt:46)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler$withStage$1.invoke(OrcaMessageHandler.kt:85)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler$withStage$1.invoke(OrcaMessageHandler.kt:46)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler.withExecution(OrcaMessageHandler.kt:95)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler.withStage(OrcaMessageHandler.kt:74)\n\tat com.netflix.spinnaker.orca.q.handler.OrcaMessageHandler.withTask(OrcaMessageHandler.kt:60)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.withTask(RunTaskHandler.kt:253)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.handle(RunTaskHandler.kt:103)\n\tat com.netflix.spinnaker.orca.q.handler.RunTaskHandler.handle(RunTaskHandler.kt:77)\n\tat com.netflix.spinnaker.q.MessageHandler.invoke(MessageHandler.kt:36)\n\tat 
com.netflix.spinnaker.orca.q.audit.ExecutionTrackingMessageHandlerPostProcessor$ExecutionTrackingMessageHandlerProxy.invoke(ExecutionTrackingMessageHandlerPostProcessor.kt:72)\n\tat com.netflix.spinnaker.q.QueueProcessor$callback$1$1.run(QueueProcessor.kt:90)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:829)\nCaused by: io.grpc.StatusRuntimeException: DEADLINE_EXCEEDED: deadline exceeded after 9.999872685s. [remote_addr=temporal-prestaging.us-west-2.spinnaker.mgmt.netflix.net/100.72.26.48:8980]\n\tat io.grpc.stub.ClientCalls.toStatusRuntimeException(ClientCalls.java:262)\n\tat io.grpc.stub.ClientCalls.getUnchecked(ClientCalls.java:243)\n\tat io.grpc.stub.ClientCalls.blockingUnaryCall(ClientCalls.java:156)\n\tat io.temporal.api.workflowservice.v1.WorkflowServiceGrpc$WorkflowServiceBlockingStub.queryWorkflow(WorkflowServiceGrpc.java:2983)\n\tat io.temporal.internal.external.GenericWorkflowClientExternalImpl.lambda$query$5(GenericWorkflowClientExternalImpl.java:216)\n\tat io.temporal.internal.retryer.GrpcSyncRetryer.retry(GrpcSyncRetryer.java:59)\n\tat io.temporal.internal.retryer.GrpcRetryer.retryWithResult(GrpcRetryer.java:51)\n\tat io.temporal.internal.external.GenericWorkflowClientExternalImpl.query(GenericWorkflowClientExternalImpl.java:209)\n\tat io.temporal.internal.client.RootWorkflowClientInvoker.query(RootWorkflowClientInvoker.java:142)\n\tat io.temporal.internal.sync.WorkflowStubImpl.query(WorkflowStubImpl.java:342)\n\t... 35 more\n", + "error": "Unexpected Task Failure", + "errors": [ + "workflowId='rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS', runId='', workflowType='RolloutWorkflow'}" + ] + }, + "operation": "awaitReadyRolloutStep", + "timestamp": 1633457685176 + }, + "rolloutWorkflowId": "rollout:01FH8TGGA62Q8GMWV2149MGW2C:01FH8TGGA6MCFQ1AK3G9F7MZBS" + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", + "name": "awaitReadyRolloutStep", + "startTime": 1633457675061, + "endTime": 1633457685244, + "status": "TERMINAL", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8TGGFMW6B8F97VG7DEBNE5", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8TM6M1ZQ7XTEWAEFYSKTZZ", + "refId": "1<3<2", + "type": "doManagedRollout", + "name": "Deploy Rollout Step", + "status": "NOT_STARTED", + "context": { + + }, + "outputs": { + + }, + "tasks": [ + + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8TGGFMW6B8F97VG7DEBNE5", + "requisiteStageRefIds": [ + "1<3<1" + ] + } + ], + "startTime": 1633457553771, + "endTime": 1633457685496, + "status": "TERMINAL", + "authentication": { + "user": "emburns@netflix.com", + "allowedAccounts": [ + + ] + }, + "origin": "keel", + "trigger": { + "type": "keel", + "correlationId": "titus:cluster:titustestvpc:emburnstest-eggo:managed-rollout", + "user": "keel", + "parameters": { + + }, + "artifacts": [ + + ], + "notifications": [ + + ], + "rebake": false, + "dryRun": false, + "strategy": false, + "resolvedExpectedArtifacts": [ + + ], + "expectedArtifacts": [ + + ] + }, + "description": "Deploy master-h97.cd22e76 to cluster emburnstest-eggo in titustestvpc/us-east-1,us-west-2 using a managed rollout", + "notifications": [ + + 
], + "initialConfig": { + + }, + "systemNotifications": [ + + ], + "partition": "us-west-2" + } +} diff --git a/keel-orca/src/test/resources/managed-rollout-execution.json b/keel-orca/src/test/resources/managed-rollout-execution.json index f72cc5cbc7..bc0ec56cb5 100644 --- a/keel-orca/src/test/resources/managed-rollout-execution.json +++ b/keel-orca/src/test/resources/managed-rollout-execution.json @@ -1,6 +1,6 @@ { - "id": "01FE4V8BPR7B9K1FGD54T2K25Q", - "name": "Managing rollouts", + "id": "01FH8ZX2F1F3YWGQTH0R9BQKR3", + "name": "Deploy master-h97.cd22e76 to cluster emburnstest-eggo in titustestvpc/us-east-1,us-west-2 using a managed rollout", "application": "emburnstest", "status": "SUCCEEDED", "variables": [ @@ -10,99 +10,65 @@ "selectionStrategy": "ALPHABETICAL", "clusterDefinitions": [ { - "stack": "temporal", - "credentials": "titustestvpc", - "targetHealthyDeployPercentage": 100, - "repository": "spinnaker/basic", - "constraints": { - "hard": {}, - "soft": {} - }, - "capacity": { - "min": 0, - "desired": 0, - "max": 0 - }, - "freeFormDetails": "local", - "cloudProvider": "titus", - "tag": "latest", - "inService": true, - "iamProfile": "arn:aws:iam::1", "registry": "testregistry", - "disruptionBudget": { - "timeWindows": [ - { - "hourlyTimeWindows": [ - { - "endHour": 16, - "startHour": 10 - } - ], - "days": [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday" - ], - "timeZone": "PST" - } - ], - "containerHealthProviders": [ - { - "name": "eureka" - } - ], - "availabilityPercentageLimit": { - "percentageOfHealthyContainers": 95 - }, - "ratePercentagePerInterval": { - "intervalMs": 600000, - "percentageLimitPerInterval": 5 - }, - "rateUnlimited": false + "stack": "eggo", + "scaling": { + }, - "imageId": "spinnaker/basic:latest", + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "targetHealthyDeployPercentage": 100, + "loadBalancers": [ + + ], "resources": { "disk": 10000, - "memory": 512, + "memory": 5000, "networkMbps": 128, "cpu": 1, "gpu": 0 + }, + "overrides": { + }, "env": { - "EC2_REGION": "us-west-2", - "NETFLIX_HOME_REGION": "us-west-2", - "NETFLIX_REGION": "us-west-2", - "SPINNAKER_ACCOUNT": "titus" + }, - "labels": { - "interestingHealthProviderNames": "Titus" + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } }, - "application": "emburnstest", - "serviceJobProcesses": { - "disableDecreaseDesired": false, - "disableIncreaseDesired": false + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 }, - "targetGroups": [], - "containerAttributes": { - "titusParameter.agent.subnets": "subnet-1,subnet-2,subnet-3", - "titusParameter.agent.accountId": "1", - "titusParameter.agent.assignIPv6Address": "true" + "tags": { + }, - "organization": "spinnaker", - "migrationPolicy": { - "type": "systemDefault" + "application": "emburnstest", + "targetGroups": [ + + ], + "cloudProvider": "titus", + "containerAttributes": { + }, "securityGroups": [ "nf-datacenter", "nf-infrastructure", "emburnstest" ], + "migrationPolicy": { + "type": "systemDefault" + }, "entryPoint": "", - "strategy": "highlander", - "user": "e@e.com", - "account": "titustestvpc", + "tag": "master-h97.cd22e76", + "inService": true, "capacityGroup": "emburnstest" } ], @@ -110,7 +76,9 @@ { "cloudProvider": "titus", "location": { - "sublocations": [], + "sublocations": [ + + ], "region": "us-east-1", "account": "titustestvpc" } @@ -118,7 +86,9 @@ { "cloudProvider": "titus", "location": { - "sublocations": [], + "sublocations": [ + + ], "region": 
"us-west-2", "account": "titustestvpc" } @@ -126,13 +96,17 @@ ] } }, + { + "key": "reason", + "value": "Diff detected at 2021-10-05T19:46:48.154072Z[UTC]" + }, { "key": "selectionStrategy", "value": "ALPHABETICAL" }, { "key": "rolloutWorkflowId", - "value": "rollout:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8BPR9WCJX5MCQ9WN81BX" + "value": "rollout:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH8ZX2F181G1G04H09KMYNSR" }, { "key": "targets", @@ -140,15 +114,9 @@ { "cloudProvider": "titus", "location": { - "sublocations": [], - "region": "us-east-1", - "account": "titustestvpc" - } - }, - { - "cloudProvider": "titus", - "location": { - "sublocations": [], + "sublocations": [ + + ], "region": "us-west-2", "account": "titustestvpc" } @@ -158,12 +126,14 @@ { "key": "rolloutStep", "value": { - "id": "6873fdd8-c7da-3c69-b389-651662752192", + "id": "c7178e73-e67a-35ff-aedc-504c3b178973", "targets": [ { "cloudProvider": "titus", "location": { - "sublocations": [], + "sublocations": [ + + ], "region": "us-west-2", "account": "titustestvpc" } @@ -175,65 +145,43 @@ "key": "clusters", "value": [ { - "stack": "temporal", + "stack": "eggo", "credentials": "titustestvpc", "targetHealthyDeployPercentage": 100, - "repository": "spinnaker/basic", "constraints": { - "hard": {}, - "soft": {} + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } }, + "network": "default", "capacity": { - "min": 0, - "desired": 0, - "max": 0 + "min": 1, + "desired": 1, + "max": 1 }, - "freeFormDetails": "local", "cloudProvider": "titus", - "tag": "latest", + "tag": "master-h97.cd22e76", "inService": true, - "iamProfile": "arn:aws:iam::1", "registry": "testregistry", - "disruptionBudget": { - "timeWindows": [ - { - "hourlyTimeWindows": [ - { - "endHour": 16, - "startHour": 10 - } - ], - "days": [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday" - ], - "timeZone": "PST" - } - ], - "containerHealthProviders": [ - { - "name": "eureka" - } - ], - "availabilityPercentageLimit": { - "percentageOfHealthyContainers": 95 - }, - "ratePercentagePerInterval": { - "intervalMs": 600000, - "percentageLimitPerInterval": 5 - }, - "rateUnlimited": false + "scaling": { + }, - "imageId": "spinnaker/basic:latest", + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "loadBalancers": [ + + ], "resources": { "disk": 10000, - "memory": 512, + "memory": 5000, "networkMbps": 128, "cpu": 1, "gpu": 0 + }, + "overrides": { + }, "env": { "EC2_REGION": "us-west-2", @@ -241,33 +189,26 @@ "NETFLIX_REGION": "us-west-2", "SPINNAKER_ACCOUNT": "titustestvpc" }, - "labels": { - "interestingHealthProviderNames": "Titus" + "tags": { + }, "application": "emburnstest", - "serviceJobProcesses": { - "disableDecreaseDesired": false, - "disableIncreaseDesired": false - }, - "targetGroups": [], + "targetGroups": [ + + ], "containerAttributes": { - "titusParameter.agent.subnets": "subnet-1,subnet-2,subnet-3", - "titusParameter.agent.accountId": "1", - "titusParameter.agent.assignIPv6Address": "true" - }, - "organization": "spinnaker", - "migrationPolicy": { - "type": "systemDefault" + }, "securityGroups": [ "nf-datacenter", "nf-infrastructure", "emburnstest" ], + "migrationPolicy": { + "type": "systemDefault" + }, "entryPoint": "", - "strategy": "highlander", "region": "us-west-2", - "user": "emburns@netflix.com", "account": "titustestvpc", "capacityGroup": "emburnstest" } @@ -275,19 +216,37 @@ }, { "key": "outputs", - "value": {} + "value": { + + } }, { "key": "notification.type", - "value": "disableservergroup" + "value": "upsertentitytags" }, { "key": 
"deploy.account.name", "value": "titustestvpc" }, + { + "key": "zeroDesiredCapacityCount", + "value": 0 + }, { "key": "stack", - "value": "temporal" + "value": "eggo" + }, + { + "key": "lastCapacityCheck", + "value": { + "outOfService": 0, + "up": 0, + "failed": 0, + "starting": 0, + "down": 0, + "succeeded": 0, + "unknown": 1 + } }, { "key": "credentials", @@ -300,38 +259,43 @@ { "key": "source", "value": { - "serverGroupName": "emburnstest-temporal-local-v001", - "asgName": "emburnstest-temporal-local-v001", - "region": "us-west-2", - "account": "titustestvpc" + } }, { "key": "type", "value": "createServerGroup" }, - { - "key": "repository", - "value": "spinnaker/basic" - }, { "key": "constraints", "value": { - "hard": {}, - "soft": {} + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } } }, + { + "key": "currentInstanceCount", + "value": 1 + }, + { + "key": "network", + "value": "default" + }, { "key": "capacity", "value": { - "min": 0, - "desired": 0, - "max": 0 + "min": 1, + "desired": 1, + "max": 1 } }, { - "key": "freeFormDetails", - "value": "local" + "key": "targetDesiredSize", + "value": 1 }, { "key": "cloudProvider", @@ -345,122 +309,118 @@ "key": "deploy.server.groups", "value": { "us-west-2": [ - "emburnstest-temporal-local-v001" + "emburnstest-eggo-v000" ] } }, { "key": "tag", - "value": "latest" + "value": "master-h97.cd22e76" }, { "key": "kato.last.task.id", "value": { - "id": "01FE4VCK8FS7JF2P4K8K78FH4A" + "id": "01FH902203BKRMXMKXP9E6460E" } }, { "key": "inService", "value": true }, - { - "key": "iamProfile", - "value": "arn:aws:iam::1" - }, { "key": "registry", "value": "testregistry" }, { - "key": "disruptionBudget", + "key": "scaling", "value": { - "timeWindows": [ - { - "hourlyTimeWindows": [ - { - "endHour": 16, - "startHour": 10 - } - ], - "days": [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday" - ], - "timeZone": "PST" - } - ], - "containerHealthProviders": [ - { - "name": "eureka" - } - ], - "availabilityPercentageLimit": { - "percentageOfHealthyContainers": 95 - }, - "ratePercentagePerInterval": { - "intervalMs": 600000, - "percentageLimitPerInterval": 5 - }, - "rateUnlimited": false + } }, { "key": "imageId", - "value": "spinnaker/basic:latest" + "value": "emburns/spin-titus-demo:master-h97.cd22e76" }, { "key": "workflowExecution", "value": { "namespace": "spinnaker", "workflowType": "WorkflowDispatcher", - "runId": "308915a3-987d-407e-8abe-8d8fecc9322d", - "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4VD1XCYHVGXXTDA4S2KXG0/308915a3-987d-407e-8abe-8d8fecc9322d/summary", - "workflowId": "stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4VD1XCYHVGXXTDA4S2KXG0", + "runId": "049953b3-25be-434b-98f1-90b524408c3c", + "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH901NQ4W19BHYBKHXRHZ5K1/049953b3-25be-434b-98f1-90b524408c3c/summary", + "workflowId": "stage:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH901NQ4W19BHYBKHXRHZ5K1", "uiTarget": "https://temporal" } }, + { + "key": "loadBalancers", + "value": [ + + ] + }, { "key": "resources", "value": { "disk": 10000, - "memory": 512, + "memory": 5000, "networkMbps": 128, "cpu": 1, "gpu": 0 } }, + { + "key": "overrides", + "value": { + + } + }, { "key": "workflowResult", "value": { "childWorkflowExecutionInfos": { - "919711d592093f268e3b76c538614f9e5fb45535203904c50e2f67dc73955088": { + "b4b4e51d996ef88fac830813b80fc1daf735479ec58399bb7b2fa17827b84d4e": { "namespace": "spinnaker", "workflowType": 
"StageContextCloudOperationRunner", - "runId": "2b3e0486-8a5c-4b5a-a49a-e48d668dbe0a", - "permalink": "https://temporal/namespaces/spinnaker/workflows/919711d592093f268e3b76c538614f9e5fb45535203904c50e2f67dc73955088/2b3e0486-8a5c-4b5a-a49a-e48d668dbe0a/summary", - "workflowId": "919711d592093f268e3b76c538614f9e5fb45535203904c50e2f67dc73955088", + "runId": "e769c158-cb41-429d-8759-f52f70e2bb14", + "permalink": "https://temporal/namespaces/spinnaker/workflows/b4b4e51d996ef88fac830813b80fc1daf735479ec58399bb7b2fa17827b84d4e/e769c158-cb41-429d-8759-f52f70e2bb14/summary", + "workflowId": "b4b4e51d996ef88fac830813b80fc1daf735479ec58399bb7b2fa17827b84d4e", "uiTarget": "https://temporal" } }, "childWorkflowExecutionResults": { - "919711d592093f268e3b76c538614f9e5fb45535203904c50e2f67dc73955088": { + "b4b4e51d996ef88fac830813b80fc1daf735479ec58399bb7b2fa17827b84d4e": { "output": { - "outputType": "disableTitusServerGroupOperation", - "skippedInstances": [] + "jobUri": "52c7569b-0898-4302-a44d-90f440e94a76", + "serverGroupNames": [ + "us-west-2:emburnstest-eggo-v000" + ], + "outputType": "titusServerGroupDeployOperation", + "serverGroupNameByRegion": { + "us-west-2": "emburnstest-eggo-v000" + } + }, + "childWorkflowExecutionInfos": { + + }, + "childWorkflowExecutionResults": { + }, - "childWorkflowExecutionInfos": {}, - "childWorkflowExecutionResults": {}, - "logs": [], + "logs": [ + + ], "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", "isCompleted": true } }, "logs": [ - "Disabling server group us-west-2/emburnstest-temporal-local-v001", - "Finished Disabling server group emburnstest-temporal-local-v001" + "Retrieved application data from front50 for application: emburnstest", + "Resolved disruption budget.", + "Prepared job request for: emburnstest-eggo-v000 with imageId emburns/spin-titus-demo:master-h97.cd22e76", + "Submitting job with name: emburnstest-eggo-v000, attempt: 1", + "Successfully created job with name: emburnstest-eggo-v000, jobUri: 52c7569b-0898-4302-a44d-90f440e94a76", + "Invalid source asg null", + "Entity tags with titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-west-2 not found in front50", + "Deleting tags with titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-west-2 from ElasticSearch" ], "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", "isCompleted": true @@ -476,47 +436,44 @@ } }, { - "key": "labels", + "key": "tags", "value": { - "interestingHealthProviderNames": "Titus" + } }, { - "key": "application", - "value": "emburnstest" - }, - { - "key": "serviceJobProcesses", + "key": "capacitySnapshot", "value": { - "disableDecreaseDesired": false, - "disableIncreaseDesired": false + "minSize": 1, + "maxSize": 1, + "desiredCapacity": 1 } }, + { + "key": "application", + "value": "emburnstest" + }, { "key": "targetGroups", - "value": [] + "value": [ + + ] }, { "key": "containerAttributes", "value": { - "titusParameter.agent.subnets": "subnet-1,subnet-2,subnet-3", - "titusParameter.agent.accountId": "1", - "titusParameter.agent.assignIPv6Address": "true" + } }, - { - "key": "organization", - "value": "spinnaker" - }, { "key": "name", "value": "Deploy in us-west-2" }, { - "key": "migrationPolicy", - "value": { - "type": "systemDefault" - } + "key": "commits", + "value": [ + + ] }, { "key": "securityGroups", @@ -528,19 +485,35 @@ "nf-datacenter" ] }, + { + "key": "migrationPolicy", + "value": { + "type": "systemDefault" + } + }, { "key": "kato.tasks", "value": [ { "resultObjects": [ { - "outputType": "disableTitusServerGroupOperation", - "skippedInstances": [] + 
"jobUri": "52c7569b-0898-4302-a44d-90f440e94a76", + "serverGroupNames": [ + "us-west-2:emburnstest-eggo-v000" + ], + "outputType": "titusServerGroupDeployOperation", + "serverGroupNameByRegion": { + "us-west-2": "emburnstest-eggo-v000" + } } ], - "history": [], + "history": [ + + ], "id": "n/a", - "status": {} + "status": { + + } } ] }, @@ -548,18 +521,10 @@ "key": "entryPoint", "value": "" }, - { - "key": "strategy", - "value": "highlander" - }, { "key": "region", "value": "us-west-2" }, - { - "key": "user", - "value": "emburns@netflix.com" - }, { "key": "account", "value": "titustestvpc" @@ -568,49 +533,29 @@ "key": "capacityGroup", "value": "emburnstest" }, - { - "key": "cluster", - "value": "emburnstest-temporal-local" - }, { "key": "force.cache.refresh.errors", - "value": [] + "value": [ + + ] }, { "key": "processed.server.groups", - "value": [] - }, - { - "key": "retainLargerOverNewer", - "value": false + "value": [ + + ] }, { "key": "refreshed.server.groups", - "value": [] + "value": [ + + ] }, { "key": "zones", - "value": [] - }, - { - "key": "shrinkToSize", - "value": 1 - }, - { - "key": "allowDeleteActive", - "value": true - }, - { - "key": "preferLargerOverNewer", - "value": false - }, - { - "key": "remainingEnabledServerGroups", - "value": 1 - }, - { - "key": "continueIfClusterNotFound", - "value": false + "value": [ + + ] } ], "steps": [ @@ -618,8 +563,8 @@ "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutTask", "name": "completeManagedRollout", - "startTime": 1630103156465, - "endTime": 1630103156559, + "startTime": 1633463446362, + "endTime": 1633463446473, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -630,8 +575,8 @@ "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.StartManagedRolloutTask", "name": "startManagedRollout", - "startTime": 1630102893034, - "endTime": 1630102893509, + "startTime": 1633463208851, + "endTime": 1633463208990, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -642,8 +587,8 @@ "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", "name": "completeRolloutStep", - "startTime": 1630103020776, - "endTime": 1630103020912, + "startTime": 1633463357924, + "endTime": 1633463358154, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -654,8 +599,8 @@ "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", "name": "awaitReadyRolloutStep", - "startTime": 1630102893743, - "endTime": 1630102893987, + "startTime": 1633463209408, + "endTime": 1633463209593, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -666,8 +611,8 @@ "id": "1", "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.ParallelDeployStage.CompleteParallelDeployTask", "name": "completeParallelDeploy", - "startTime": 1630103020613, - "endTime": 1630103020667, + "startTime": 1633463357395, + "endTime": 1633463357565, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -678,8 +623,8 @@ "id": "1", "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", "name": "determineSourceServerGroup", - "startTime": 1630102894978, - "endTime": 1630102896143, + "startTime": 1633463210881, + "endTime": 1633463211059, "status": "SUCCEEDED", "stageStart": true, "stageEnd": false, @@ -690,8 +635,8 @@ "id": "2", "implementingClass": 
"com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", "name": "determineHealthProviders", - "startTime": 1630102896166, - "endTime": 1630102896683, + "startTime": 1633463211110, + "endTime": 1633463211324, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -702,8 +647,8 @@ "id": "3", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", "name": "snapshotSourceServerGroup", - "startTime": 1630102896709, - "endTime": 1630102896759, + "startTime": 1633463211390, + "endTime": 1633463211526, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -714,8 +659,8 @@ "id": "4", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", "name": "createServerGroup", - "startTime": 1630102896777, - "endTime": 1630102897253, + "startTime": 1633463211604, + "endTime": 1633463211834, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -726,8 +671,8 @@ "id": "5", "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", "name": "monitorDeploy", - "startTime": 1630102897314, - "endTime": 1630102902879, + "startTime": 1633463211886, + "endTime": 1633463222315, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -738,8 +683,8 @@ "id": "6", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", "name": "tagServerGroup", - "startTime": 1630102902898, - "endTime": 1630102904715, + "startTime": 1633463222367, + "endTime": 1633463222672, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -750,8 +695,8 @@ "id": "7", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", "name": "waitForUpInstances", - "startTime": 1630102904735, - "endTime": 1630102906149, + "startTime": 1633463222722, + "endTime": 1633463355316, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -762,8 +707,8 @@ "id": "8", "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", "name": "jarDiffs", - "startTime": 1630102906168, - "endTime": 1630102909279, + "startTime": 1633463355422, + "endTime": 1633463355601, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -774,8 +719,8 @@ "id": "9", "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", "name": "getCommits", - "startTime": 1630102909302, - "endTime": 1630102909699, + "startTime": 1633463355688, + "endTime": 1633463355900, "status": "SUCCEEDED", "stageStart": false, "stageEnd": true, @@ -784,10 +729,10 @@ }, { "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630103010916, - "endTime": 1630103011344, + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.ApplySourceServerGroupCapacityTask", + "name": "restoreMinCapacity", + "startTime": 1633463356354, + "endTime": 1633463356468, "status": "SUCCEEDED", "stageStart": true, "stageEnd": false, @@ -796,10 +741,10 @@ }, { "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.ShrinkClusterTask", - "name": "shrinkCluster", - "startTime": 1630103011366, - "endTime": 1630103013020, + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "waitForCapacityMatch", + "startTime": 1633463356518, + "endTime": 1633463356619, 
"status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -808,58 +753,58 @@ }, { "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorShrinkCluster", - "startTime": 1630103013039, - "endTime": 1630103018413, + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", + "name": "forceCacheRefresh", + "startTime": 1633463356680, + "endTime": 1633463356875, "status": "SUCCEEDED", "stageStart": false, - "stageEnd": false, + "stageEnd": true, "loopStart": false, "loopEnd": false }, { - "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103018435, - "endTime": 1630103018531, + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", + "name": "completeRolloutStep", + "startTime": 1633463445866, + "endTime": 1633463446098, "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, + "stageStart": true, + "stageEnd": true, "loopStart": false, "loopEnd": false }, { - "id": "5", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.WaitForClusterShrinkTask", - "name": "waitForClusterShrink", - "startTime": 1630103018556, - "endTime": 1630103019550, + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", + "name": "awaitReadyRolloutStep", + "startTime": 1633463358635, + "endTime": 1633463358823, "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, + "stageStart": true, + "stageEnd": true, "loopStart": false, "loopEnd": false }, { - "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103019571, - "endTime": 1630103019672, + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.ParallelDeployStage.CompleteParallelDeployTask", + "name": "completeParallelDeploy", + "startTime": 1633463445256, + "endTime": 1633463445445, "status": "SUCCEEDED", - "stageStart": false, + "stageStart": true, "stageEnd": true, "loopStart": false, "loopEnd": false }, { "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630102910356, - "endTime": 1630102910598, + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", + "name": "determineSourceServerGroup", + "startTime": 1633463359942, + "endTime": 1633463360177, "status": "SUCCEEDED", "stageStart": true, "stageEnd": false, @@ -868,10 +813,10 @@ }, { "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.DisableClusterTask", - "name": "disableCluster", - "startTime": 1630102910619, - "endTime": 1630102912401, + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", + "name": "determineHealthProviders", + "startTime": 1633463360235, + "endTime": 1633463360392, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -880,10 +825,10 @@ }, { "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorDisableCluster", - "startTime": 1630102912423, - "endTime": 1630102917815, + "implementingClass": 
"com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", + "name": "snapshotSourceServerGroup", + "startTime": 1633463360479, + "endTime": 1633463360610, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -892,10 +837,10 @@ }, { "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630102917835, - "endTime": 1630102917939, + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", + "name": "createServerGroup", + "startTime": 1633463360667, + "endTime": 1633463360915, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -904,10 +849,10 @@ }, { "id": "5", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.WaitForClusterDisableTask", - "name": "waitForClusterDisable", - "startTime": 1630102917962, - "endTime": 1630103010421, + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "monitorDeploy", + "startTime": 1633463360972, + "endTime": 1633463371292, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -916,34 +861,34 @@ }, { "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103010444, - "endTime": 1630103010719, + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", + "name": "tagServerGroup", + "startTime": 1633463371370, + "endTime": 1633463371898, "status": "SUCCEEDED", "stageStart": false, - "stageEnd": true, + "stageEnd": false, "loopStart": false, "loopEnd": false }, { - "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.ApplySourceServerGroupCapacityTask", - "name": "restoreMinCapacity", - "startTime": 1630103019882, - "endTime": 1630103019961, + "id": "7", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", + "name": "waitForUpInstances", + "startTime": 1633463371969, + "endTime": 1633463442968, "status": "SUCCEEDED", - "stageStart": true, + "stageStart": false, "stageEnd": false, "loopStart": false, "loopEnd": false }, { - "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "waitForCapacityMatch", - "startTime": 1630103019986, - "endTime": 1630103020026, + "id": "8", + "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", + "name": "jarDiffs", + "startTime": 1633463443050, + "endTime": 1633463443160, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -951,299 +896,11 @@ "loopEnd": false }, { - "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103020051, - "endTime": 1630103020305, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": true, - "loopStart": false, - "loopEnd": false - }, - { - "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", - "name": "completeRolloutStep", - "startTime": 1630103156283, - "endTime": 1630103156401, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": true, - "loopStart": false, - "loopEnd": false - }, - { - "id": "1", - "implementingClass": 
"com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", - "name": "awaitReadyRolloutStep", - "startTime": 1630103021088, - "endTime": 1630103021259, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": true, - "loopStart": false, - "loopEnd": false - }, - { - "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.ParallelDeployStage.CompleteParallelDeployTask", - "name": "completeParallelDeploy", - "startTime": 1630103156130, - "endTime": 1630103156173, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": true, - "loopStart": false, - "loopEnd": false - }, - { - "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", - "name": "determineSourceServerGroup", - "startTime": 1630103021820, - "endTime": 1630103022918, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630103022941, - "endTime": 1630103023424, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", - "name": "snapshotSourceServerGroup", - "startTime": 1630103023444, - "endTime": 1630103023486, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", - "name": "createServerGroup", - "startTime": 1630103023509, - "endTime": 1630103023820, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "5", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorDeploy", - "startTime": 1630103023839, - "endTime": 1630103029275, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", - "name": "tagServerGroup", - "startTime": 1630103029293, - "endTime": 1630103031034, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "7", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", - "name": "waitForUpInstances", - "startTime": 1630103031054, - "endTime": 1630103042409, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "8", - "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", - "name": "jarDiffs", - "startTime": 1630103042566, - "endTime": 1630103045348, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "9", - "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", - "name": "getCommits", - "startTime": 1630103045371, - "endTime": 1630103045749, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": true, - "loopStart": false, - "loopEnd": false - }, - { - "id": "1", - "implementingClass": 
"com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630103146319, - "endTime": 1630103146924, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.ShrinkClusterTask", - "name": "shrinkCluster", - "startTime": 1630103146945, - "endTime": 1630103148539, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorShrinkCluster", - "startTime": 1630103148560, - "endTime": 1630103153937, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103153961, - "endTime": 1630103154048, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "5", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.WaitForClusterShrinkTask", - "name": "waitForClusterShrink", - "startTime": 1630103154069, - "endTime": 1630103155122, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103155145, - "endTime": 1630103155242, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": true, - "loopStart": false, - "loopEnd": false - }, - { - "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630103046349, - "endTime": 1630103046590, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.DisableClusterTask", - "name": "disableCluster", - "startTime": 1630103046609, - "endTime": 1630103048218, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorDisableCluster", - "startTime": 1630103048239, - "endTime": 1630103053692, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103053711, - "endTime": 1630103053814, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "5", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.WaitForClusterDisableTask", - "name": "waitForClusterDisable", - "startTime": 1630103053833, - "endTime": 1630103145916, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - 
{ - "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103145952, - "endTime": 1630103146215, + "id": "9", + "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", + "name": "getCommits", + "startTime": 1633463443216, + "endTime": 1633463443465, "status": "SUCCEEDED", "stageStart": false, "stageEnd": true, @@ -1254,8 +911,8 @@ "id": "1", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.ApplySourceServerGroupCapacityTask", "name": "restoreMinCapacity", - "startTime": 1630103155434, - "endTime": 1630103155483, + "startTime": 1633463443996, + "endTime": 1633463444146, "status": "SUCCEEDED", "stageStart": true, "stageEnd": false, @@ -1266,8 +923,8 @@ "id": "2", "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", "name": "waitForCapacityMatch", - "startTime": 1630103155503, - "endTime": 1630103155547, + "startTime": 1633463444206, + "endTime": 1633463444354, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -1278,8 +935,8 @@ "id": "3", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", "name": "forceCacheRefresh", - "startTime": 1630103155567, - "endTime": 1630103155809, + "startTime": 1633463444504, + "endTime": 1633463444758, "status": "SUCCEEDED", "stageStart": false, "stageEnd": true, @@ -1287,124 +944,90 @@ "loopEnd": false } ], - "buildTime": 1630102892253, - "startTime": 1630102892644, - "endTime": 1630103156753, + "buildTime": 1633463208417, + "startTime": 1633463208480, + "endTime": 1633463446589, "execution": { "type": "ORCHESTRATION", - "id": "01FE4V8BPR7B9K1FGD54T2K25Q", + "id": "01FH8ZX2F1F3YWGQTH0R9BQKR3", "application": "emburnstest", - "buildTime": 1630102892253, + "buildTime": 1633463208417, "canceled": false, "limitConcurrent": false, "keepWaitingPipelines": false, "stages": [ { - "id": "01FE4V8BPR9WCJX5MCQ9WN81BX", - "refId": "0", + "id": "01FH8ZX2F181G1G04H09KMYNSR", + "refId": "1", "type": "managedRollout", "name": "managedRollout", - "startTime": 1630102892747, - "endTime": 1630103156591, + "startTime": 1633463208531, + "endTime": 1633463446532, "status": "SUCCEEDED", "context": { "input": { "selectionStrategy": "ALPHABETICAL", "clusterDefinitions": [ { - "stack": "temporal", - "credentials": "titustestvpc", - "targetHealthyDeployPercentage": 100, - "repository": "spinnaker/basic", - "constraints": { - "hard": {}, - "soft": {} - }, - "capacity": { - "min": 0, - "desired": 0, - "max": 0 - }, - "freeFormDetails": "local", - "cloudProvider": "titus", - "tag": "latest", - "inService": true, - "iamProfile": "arn:aws:iam::1", "registry": "testregistry", - "disruptionBudget": { - "timeWindows": [ - { - "hourlyTimeWindows": [ - { - "endHour": 16, - "startHour": 10 - } - ], - "days": [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday" - ], - "timeZone": "PST" - } - ], - "containerHealthProviders": [ - { - "name": "eureka" - } - ], - "availabilityPercentageLimit": { - "percentageOfHealthyContainers": 95 - }, - "ratePercentagePerInterval": { - "intervalMs": 600000, - "percentageLimitPerInterval": 5 - }, - "rateUnlimited": false + "stack": "eggo", + "scaling": { + }, - "imageId": "spinnaker/basic:latest", + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "targetHealthyDeployPercentage": 100, + "loadBalancers": [ + + ], "resources": { "disk": 10000, - "memory": 
512, + "memory": 5000, "networkMbps": 128, "cpu": 1, "gpu": 0 + }, + "overrides": { + }, "env": { - "EC2_REGION": "us-west-2", - "NETFLIX_HOME_REGION": "us-west-2", - "NETFLIX_REGION": "us-west-2", - "SPINNAKER_ACCOUNT": "titustestvpc" + }, - "labels": { - "interestingHealthProviderNames": "Titus" + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } }, - "application": "emburnstest", - "serviceJobProcesses": { - "disableDecreaseDesired": false, - "disableIncreaseDesired": false + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 }, - "targetGroups": [], - "containerAttributes": { - "titusParameter.agent.subnets": "subnet-1,subnet-2,subnet-3", - "titusParameter.agent.accountId": "1", - "titusParameter.agent.assignIPv6Address": "true" + "tags": { + }, - "organization": "spinnaker", - "migrationPolicy": { - "type": "systemDefault" + "application": "emburnstest", + "targetGroups": [ + + ], + "cloudProvider": "titus", + "containerAttributes": { + }, "securityGroups": [ "nf-datacenter", "nf-infrastructure", "emburnstest" ], + "migrationPolicy": { + "type": "systemDefault" + }, "entryPoint": "", - "strategy": "highlander", - "user": "emburns@netflix.com", - "account": "titustestvpc", + "tag": "master-h97.cd22e76", + "inService": true, "capacityGroup": "emburnstest" } ], @@ -1412,7 +1035,9 @@ { "cloudProvider": "titus", "location": { - "sublocations": [], + "sublocations": [ + + ], "region": "us-east-1", "account": "titustestvpc" } @@ -1420,22 +1045,27 @@ { "cloudProvider": "titus", "location": { - "sublocations": [], + "sublocations": [ + + ], "region": "us-west-2", "account": "titustestvpc" } } ] - } + }, + "reason": "Diff detected at 2021-10-05T19:46:48.154072Z[UTC]" + }, + "outputs": { + }, - "outputs": {}, "tasks": [ { "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutTask", "name": "completeManagedRollout", - "startTime": 1630103156465, - "endTime": 1630103156559, + "startTime": 1633463446362, + "endTime": 1633463446473, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -1443,24 +1073,28 @@ "loopEnd": false } ], - "requisiteStageRefIds": [] + "requisiteStageRefIds": [ + + ] }, { - "id": "01FE4V8C9RRV25R9KVF9SM8A7T", - "refId": "0<1", + "id": "01FH8ZX2MSQ7BJJSW0RCFZXH3W", + "refId": "1<1", "type": "startManagedRollout", "name": "Start Managed Rollout", - "startTime": 1630102892943, - "endTime": 1630102893536, + "startTime": 1633463208684, + "endTime": 1633463209026, "status": "SUCCEEDED", "context": { "selectionStrategy": "ALPHABETICAL", - "rolloutWorkflowId": "rollout:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8BPR9WCJX5MCQ9WN81BX", + "rolloutWorkflowId": "rollout:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH8ZX2F181G1G04H09KMYNSR", "targets": [ { "cloudProvider": "titus", "location": { - "sublocations": [], + "sublocations": [ + + ], "region": "us-east-1", "account": "titustestvpc" } @@ -1468,21 +1102,25 @@ { "cloudProvider": "titus", "location": { - "sublocations": [], + "sublocations": [ + + ], "region": "us-west-2", "account": "titustestvpc" } } ] }, - "outputs": {}, + "outputs": { + + }, "tasks": [ { "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.StartManagedRolloutTask", "name": "startManagedRollout", - "startTime": 1630102893034, - "endTime": 1630102893509, + "startTime": 1633463208851, + "endTime": 1633463208990, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -1491,30 +1129,34 @@ } ], "syntheticStageOwner": "STAGE_BEFORE", - 
"parentStageId": "01FE4V8BPR9WCJX5MCQ9WN81BX", - "requisiteStageRefIds": [] + "parentStageId": "01FH8ZX2F181G1G04H09KMYNSR", + "requisiteStageRefIds": [ + + ] }, { - "id": "01FE4V8C9Y5518RHY4THEFG8RK", - "refId": "0<2", + "id": "01FH8ZX2MSBGDW3FK0F37Y91GW", + "refId": "1<2", "type": "initManagedRolloutStep", "name": "Run next Rollout Step", - "startTime": 1630102893571, - "endTime": 1630103020932, + "startTime": 1633463209080, + "endTime": 1633463358235, "status": "SUCCEEDED", "context": { - "rolloutWorkflowId": "rollout:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8BPR9WCJX5MCQ9WN81BX" + "rolloutWorkflowId": "rollout:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH8ZX2F181G1G04H09KMYNSR" }, "outputs": { "completedRolloutStep": { - "id": "90b14632-8957-3d78-a75a-ec0e1b872409", + "id": "3add7639-3a42-3bda-abfe-df9af902c928", "targets": [ { "cloudProvider": "titus", "location": { "account": "titustestvpc", "region": "us-east-1", - "sublocations": [] + "sublocations": [ + + ] } } ] @@ -1525,8 +1167,8 @@ "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", "name": "completeRolloutStep", - "startTime": 1630103020776, - "endTime": 1630103020912, + "startTime": 1633463357924, + "endTime": 1633463358154, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -1535,28 +1177,30 @@ } ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4V8BPR9WCJX5MCQ9WN81BX", + "parentStageId": "01FH8ZX2F181G1G04H09KMYNSR", "requisiteStageRefIds": [ - "0<1" + "1<1" ] }, { - "id": "01FE4V8D1P6GRY68YWMYV5R4ZR", - "refId": "0<2<1", + "id": "01FH8ZX36BSNNTGMRV5KR8KAQ8", + "refId": "1<2<1", "type": "waitForNextRolloutStep", "name": "Wait For Next Rollout Step", - "startTime": 1630102893660, - "endTime": 1630102894017, + "startTime": 1633463209279, + "endTime": 1633463209714, "status": "SUCCEEDED", "context": { - "rolloutWorkflowId": "rollout:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8BPR9WCJX5MCQ9WN81BX", + "rolloutWorkflowId": "rollout:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH8ZX2F181G1G04H09KMYNSR", "rolloutStep": { - "id": "90b14632-8957-3d78-a75a-ec0e1b872409", + "id": "3add7639-3a42-3bda-abfe-df9af902c928", "targets": [ { "cloudProvider": "titus", "location": { - "sublocations": [], + "sublocations": [ + + ], "region": "us-east-1", "account": "titustestvpc" } @@ -1564,14 +1208,16 @@ ] } }, - "outputs": {}, + "outputs": { + + }, "tasks": [ { "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", "name": "awaitReadyRolloutStep", - "startTime": 1630102893743, - "endTime": 1630102893987, + "startTime": 1633463209408, + "endTime": 1633463209593, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -1580,96 +1226,94 @@ } ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4V8C9Y5518RHY4THEFG8RK", - "requisiteStageRefIds": [] + "parentStageId": "01FH8ZX2MSBGDW3FK0F37Y91GW", + "requisiteStageRefIds": [ + + ] }, { - "id": "01FE4V8D1P7W7NE3HMZXPTK2J4", - "refId": "0<2<2", + "id": "01FH8ZX36BY0ENERYK51MKW1Y3", + "refId": "1<2<2", "type": "doManagedRollout", "name": "Deploy Rollout Step", - "startTime": 1630102894040, - "endTime": 1630103020732, + "startTime": 1633463209822, + "endTime": 1633463357746, "status": "SUCCEEDED", - "context": {}, - "outputs": {}, - "tasks": [], + "context": { + + }, + "outputs": { + + }, + "tasks": [ + + ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4V8C9Y5518RHY4THEFG8RK", + "parentStageId": "01FH8ZX2MSBGDW3FK0F37Y91GW", "requisiteStageRefIds": [ - "0<2<1" + "1<2<1" 
] }, { - "id": "01FE4V8DEY2BBY9Q2ZFE6S2S2K", - "refId": "0<2<2<1", + "id": "01FH8ZX3V71K36NEWFQ0V8HT7M", + "refId": "1<2<2<1", "type": "deploy", "name": "Deploy us-east-1", - "startTime": 1630102894079, - "endTime": 1630103020688, + "startTime": 1633463209919, + "endTime": 1633463357617, "status": "SUCCEEDED", "context": { + "targets": [ + { + "cloudProvider": "titus", + "location": { + "sublocations": [ + + ], + "region": "us-east-1", + "account": "titustestvpc" + } + } + ], "clusters": [ { - "stack": "temporal", + "stack": "eggo", "credentials": "titustestvpc", "targetHealthyDeployPercentage": 100, - "repository": "spinnaker/basic", "constraints": { - "hard": {}, - "soft": {} + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } }, + "network": "default", "capacity": { - "min": 0, - "desired": 0, - "max": 0 + "min": 1, + "desired": 1, + "max": 1 }, - "freeFormDetails": "local", "cloudProvider": "titus", - "tag": "latest", + "tag": "master-h97.cd22e76", "inService": true, - "iamProfile": "arn:aws:iam::1", "registry": "testregistry", - "disruptionBudget": { - "timeWindows": [ - { - "hourlyTimeWindows": [ - { - "endHour": 16, - "startHour": 10 - } - ], - "days": [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday" - ], - "timeZone": "PST" - } - ], - "containerHealthProviders": [ - { - "name": "eureka" - } - ], - "availabilityPercentageLimit": { - "percentageOfHealthyContainers": 95 - }, - "ratePercentagePerInterval": { - "intervalMs": 600000, - "percentageLimitPerInterval": 5 - }, - "rateUnlimited": false + "scaling": { + }, - "imageId": "spinnaker/basic:latest", + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "loadBalancers": [ + + ], "resources": { "disk": 10000, - "memory": 512, + "memory": 5000, "networkMbps": 128, "cpu": 1, "gpu": 0 + }, + "overrides": { + }, "env": { "EC2_REGION": "us-east-1", @@ -1677,46 +1321,41 @@ "NETFLIX_REGION": "us-east-1", "SPINNAKER_ACCOUNT": "titustestvpc" }, - "labels": { - "interestingHealthProviderNames": "Titus" + "tags": { + }, "application": "emburnstest", - "serviceJobProcesses": { - "disableDecreaseDesired": false, - "disableIncreaseDesired": false - }, - "targetGroups": [], + "targetGroups": [ + + ], "containerAttributes": { - "titusParameter.agent.subnets": "subnet-1,subnet-2,subnet-3", - "titusParameter.agent.accountId": "1", - "titusParameter.agent.assignIPv6Address": "true" - }, - "organization": "spinnaker", - "migrationPolicy": { - "type": "systemDefault" + }, "securityGroups": [ "nf-datacenter", "nf-infrastructure", "emburnstest" ], + "migrationPolicy": { + "type": "systemDefault" + }, "entryPoint": "", - "strategy": "highlander", "region": "us-east-1", - "user": "emburns@netflix.com", "account": "titustestvpc", "capacityGroup": "emburnstest" } ] }, - "outputs": {}, + "outputs": { + + }, "tasks": [ { "id": "1", "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.ParallelDeployStage.CompleteParallelDeployTask", "name": "completeParallelDeploy", - "startTime": 1630103020613, - "endTime": 1630103020667, + "startTime": 1633463357395, + "endTime": 1633463357565, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -1725,415 +1364,230 @@ } ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4V8D1P7W7NE3HMZXPTK2J4", - "requisiteStageRefIds": [] + "parentStageId": "01FH8ZX36BY0ENERYK51MKW1Y3", + "requisiteStageRefIds": [ + + ] }, { - "id": "01FE4V8DHRC7DKJRERY56XQ0T9", - "refId": "0<2<2<1<1", + "id": "01FH8ZX40JW83C6J8D6FE29FFD", + "refId": "1<2<2<1<1", "type": "createServerGroup", 
"name": "Deploy in us-east-1", - "startTime": 1630102894171, - "endTime": 1630103020564, + "startTime": 1633463210159, + "endTime": 1633463357246, "status": "SUCCEEDED", "context": { - "outputs": {}, + "outputs": { + + }, "notification.type": "upsertentitytags", "deploy.account.name": "titustestvpc", - "stack": "temporal", + "zeroDesiredCapacityCount": 0, + "stack": "eggo", + "lastCapacityCheck": { + "outOfService": 0, + "up": 0, + "failed": 0, + "starting": 0, + "down": 0, + "succeeded": 0, + "unknown": 1 + }, "credentials": "titustestvpc", "targetHealthyDeployPercentage": 100, "source": { - "serverGroupName": "emburnstest-temporal-local-v002", - "asgName": "emburnstest-temporal-local-v002", - "region": "us-east-1", - "account": "titustestvpc" + }, "type": "createServerGroup", - "repository": "spinnaker/basic", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "sublocations": [ + + ], + "region": "us-east-1", + "account": "titustestvpc" + } + } + ], "constraints": { - "hard": {}, - "soft": {} + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } }, + "currentInstanceCount": 1, + "network": "default", "capacity": { - "min": 0, - "desired": 0, - "max": 0 + "min": 1, + "desired": 1, + "max": 1 }, - "freeFormDetails": "local", + "targetDesiredSize": 1, "cloudProvider": "titus", "kato.result.expected": true, "deploy.server.groups": { "us-east-1": [ - "emburnstest-temporal-local-v003" + "emburnstest-eggo-v000" ] }, - "tag": "latest", + "tag": "master-h97.cd22e76", "kato.last.task.id": { - "id": "01FE4V8QWRSCNJ8A4CVRW6S43Z" - }, - "inService": true, - "iamProfile": "arn:aws:iam::1", - "registry": "testregistry", - "disruptionBudget": { - "timeWindows": [ - { - "hourlyTimeWindows": [ - { - "endHour": 16, - "startHour": 10 - } - ], - "days": [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday" - ], - "timeZone": "PST" - } - ], - "containerHealthProviders": [ - { - "name": "eureka" - } - ], - "availabilityPercentageLimit": { - "percentageOfHealthyContainers": 95 - }, - "ratePercentagePerInterval": { - "intervalMs": 600000, - "percentageLimitPerInterval": 5 - }, - "rateUnlimited": false - }, - "imageId": "spinnaker/basic:latest", - "workflowExecution": { - "namespace": "spinnaker", - "workflowType": "WorkflowDispatcher", - "runId": "ec91b115-0577-498b-adac-fc3330f321cb", - "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8DHRC7DKJRERY56XQ0T9/ec91b115-0577-498b-adac-fc3330f321cb/summary", - "workflowId": "stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8DHRC7DKJRERY56XQ0T9", - "uiTarget": "https://temporal" - }, - "resources": { - "disk": 10000, - "memory": 512, - "networkMbps": 128, - "cpu": 1, - "gpu": 0 - }, - "workflowResult": { - "childWorkflowExecutionInfos": { - "b6b047721aa67ca08111bf81b46ed0d240d055d59763390dfbfe7c55c52150c3": { - "namespace": "spinnaker", - "workflowType": "StageContextCloudOperationRunner", - "runId": "8fe4e3a0-8ba6-4e32-963d-1ffc79f108f9", - "permalink": "https://temporal/namespaces/spinnaker/workflows/b6b047721aa67ca08111bf81b46ed0d240d055d59763390dfbfe7c55c52150c3/8fe4e3a0-8ba6-4e32-963d-1ffc79f108f9/summary", - "workflowId": "b6b047721aa67ca08111bf81b46ed0d240d055d59763390dfbfe7c55c52150c3", - "uiTarget": "https://temporal" - } - }, - "childWorkflowExecutionResults": { - "b6b047721aa67ca08111bf81b46ed0d240d055d59763390dfbfe7c55c52150c3": { - "output": { - "jobUri": "e3e07d6c-679f-425a-91ec-3c89c93eec3f", - "serverGroupNames": [ - "us-east-1:emburnstest-temporal-local-v003" - ], - "outputType": 
"titusServerGroupDeployOperation", - "ancestorServerGroupNameByRegion": { - "us-east-1": "emburnstest-temporal-local-v002" - }, - "serverGroupNameByRegion": { - "us-east-1": "emburnstest-temporal-local-v003" - } - }, - "childWorkflowExecutionInfos": {}, - "childWorkflowExecutionResults": {}, - "logs": [], - "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", - "isCompleted": true - } - }, - "logs": [ - "Retrieved application data from front50 for application: emburnstest", - "Referring to source server group for details: emburnstest-temporal-local-v002", - "Resolved security groups: sg-1,sg-2,sg-3", - "Prepared job request for: emburnstest-temporal-local-v003 with imageId spinnaker/basic:latest", - "Submitting job with name: emburnstest-temporal-local-v003, attempt: 1", - "Successfully created job with name: emburnstest-temporal-local-v003, jobUri: e3e07d6c-679f-425a-91ec-3c89c93eec3f", - "Entity tags with titus:servergroup:emburnstest-temporal-local-v003:titustestvpc:us-east-1 not found in front50", - "Deleting tags with titus:servergroup:emburnstest-temporal-local-v003:titustestvpc:us-east-1 from ElasticSearch" - ], - "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", - "isCompleted": true - }, - "env": { - "EC2_REGION": "us-east-1", - "NETFLIX_HOME_REGION": "us-east-1", - "NETFLIX_REGION": "us-east-1", - "SPINNAKER_ACCOUNT": "titustestvpc" - }, - "labels": { - "interestingHealthProviderNames": "Titus" - }, - "application": "emburnstest", - "serviceJobProcesses": { - "disableDecreaseDesired": false, - "disableIncreaseDesired": false - }, - "targetGroups": [], - "containerAttributes": { - "titusParameter.agent.subnets": "subnet-1,subnet-2,subnet-3", - "titusParameter.agent.accountId": "1", - "titusParameter.agent.assignIPv6Address": "true" - }, - "organization": "spinnaker", - "name": "Deploy in us-east-1", - "migrationPolicy": { - "type": "systemDefault" - }, - "securityGroups": [ - "nf-datacenter", - "nf-infrastructure", - "emburnstest", - "nf-infrastructure", - "nf-datacenter" - ], - "kato.tasks": [ - { - "resultObjects": [ - { - "jobUri": "e3e07d6c-679f-425a-91ec-3c89c93eec3f", - "serverGroupNames": [ - "us-east-1:emburnstest-temporal-local-v003" - ], - "outputType": "titusServerGroupDeployOperation", - "ancestorServerGroupNameByRegion": { - "us-east-1": "emburnstest-temporal-local-v002" - }, - "serverGroupNameByRegion": { - "us-east-1": "emburnstest-temporal-local-v003" - } - } - ], - "history": [], - "id": "n/a", - "status": {} - } - ], - "entryPoint": "", - "strategy": "highlander", - "region": "us-east-1", - "user": "emburns@netflix.com", - "account": "titustestvpc", - "capacityGroup": "emburnstest" - }, - "outputs": {}, - "tasks": [ - { - "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", - "name": "determineSourceServerGroup", - "startTime": 1630102894978, - "endTime": 1630102896143, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630102896166, - "endTime": 1630102896683, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", - "name": "snapshotSourceServerGroup", - "startTime": 
1630102896709, - "endTime": 1630102896759, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", - "name": "createServerGroup", - "startTime": 1630102896777, - "endTime": 1630102897253, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "5", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorDeploy", - "startTime": 1630102897314, - "endTime": 1630102902879, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", - "name": "tagServerGroup", - "startTime": 1630102902898, - "endTime": 1630102904715, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "7", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", - "name": "waitForUpInstances", - "startTime": 1630102904735, - "endTime": 1630102906149, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "8", - "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", - "name": "jarDiffs", - "startTime": 1630102906168, - "endTime": 1630102909279, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false + "id": "01FH8ZXG947GM3CNJ9E6Q0T6VC" }, - { - "id": "9", - "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", - "name": "getCommits", - "startTime": 1630102909302, - "endTime": 1630102909699, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": true, - "loopStart": false, - "loopEnd": false - } - ], - "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4V8DEY2BBY9Q2ZFE6S2S2K", - "requisiteStageRefIds": [] - }, - { - "id": "01FE4V8WT3HYYP3BZGHSH4MHGR", - "refId": "0<2<2<1<1>1", - "type": "shrinkCluster", - "name": "shrinkCluster", - "startTime": 1630102909813, - "endTime": 1630103019696, - "status": "SUCCEEDED", - "context": { - "outputs": {}, - "cluster": "emburnstest-temporal-local", - "notification.type": "destroyservergroup", - "deploy.account.name": "titustestvpc", - "force.cache.refresh.errors": [], - "credentials": "titustestvpc", + "inService": true, + "registry": "testregistry", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", "workflowExecution": { "namespace": "spinnaker", "workflowType": "WorkflowDispatcher", - "runId": "5ac577dd-ced2-4a0e-b9f6-1aab3c8fb66a", - "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8WT3HYYP3BZGHSH4MHGR/5ac577dd-ced2-4a0e-b9f6-1aab3c8fb66a/summary", - "workflowId": "stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8WT3HYYP3BZGHSH4MHGR", - "uiTarget": "https://temporal.test.netflix.net" - }, - "processed.server.groups": [], - "retainLargerOverNewer": false, - "refreshed.server.groups": [], + "runId": "a26d98d2-7053-43ea-a204-edbe201638e8", + "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH8ZX40JW83C6J8D6FE29FFD/a26d98d2-7053-43ea-a204-edbe201638e8/summary", + "workflowId": 
"stage:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH8ZX40JW83C6J8D6FE29FFD", + "uiTarget": "https://temporal" + }, + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, "workflowResult": { "childWorkflowExecutionInfos": { - "5bb1fdd98bd0cc5262f748904a2b21def1522bc3c62986e0f6d4788b53754430": { + "b80f65dd58f33eca39af44ddb38e042dc609352f046b43033ec0d36cfa852d12": { "namespace": "spinnaker", "workflowType": "StageContextCloudOperationRunner", - "runId": "98b7d129-1e55-4256-9b1f-7ac33f8680c1", - "permalink": "https://temporal.test.netflix.net/namespaces/spinnaker/workflows/5bb1fdd98bd0cc5262f748904a2b21def1522bc3c62986e0f6d4788b53754430/98b7d129-1e55-4256-9b1f-7ac33f8680c1/summary", - "workflowId": "5bb1fdd98bd0cc5262f748904a2b21def1522bc3c62986e0f6d4788b53754430", + "runId": "122744ae-95c9-4fc8-9de9-bec7a88838b1", + "permalink": "https://temporal/namespaces/spinnaker/workflows/b80f65dd58f33eca39af44ddb38e042dc609352f046b43033ec0d36cfa852d12/122744ae-95c9-4fc8-9de9-bec7a88838b1/summary", + "workflowId": "b80f65dd58f33eca39af44ddb38e042dc609352f046b43033ec0d36cfa852d12", "uiTarget": "https://temporal" } }, "childWorkflowExecutionResults": { - "5bb1fdd98bd0cc5262f748904a2b21def1522bc3c62986e0f6d4788b53754430": { + "b80f65dd58f33eca39af44ddb38e042dc609352f046b43033ec0d36cfa852d12": { "output": { - "outputType": "voidOperation" + "jobUri": "69973db2-88e0-4c44-8f84-1dac7834859f", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v000" + ], + "outputType": "titusServerGroupDeployOperation", + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v000" + } }, - "childWorkflowExecutionInfos": {}, - "childWorkflowExecutionResults": {}, - "logs": [], + "childWorkflowExecutionInfos": { + + }, + "childWorkflowExecutionResults": { + + }, + "logs": [ + + ], "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", "isCompleted": true } }, "logs": [ - "Deleting Titus job titustestvpc:us-east-1:2291409d-2aa7-4f67-a6fb-42bf1cf87dd7, initiated by emburns@netflix.com", - "Deleted Titus job titustestvpc:us-east-1:2291409d-2aa7-4f67-a6fb-42bf1cf87dd7", - "Deleting tags with titus:servergroup:emburnstest-temporal-local-v002:titustestvpc:us-east-1 from ElasticSearch and front50" + "Retrieved application data from front50 for application: emburnstest", + "Resolved disruption budget.", + "Prepared job request for: emburnstest-eggo-v000 with imageId emburns/spin-titus-demo:master-h97.cd22e76", + "Submitting job with name: emburnstest-eggo-v000, attempt: 1", + "Successfully created job with name: emburnstest-eggo-v000, jobUri: 69973db2-88e0-4c44-8f84-1dac7834859f", + "Invalid source asg null", + "Entity tags with titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-east-1 not found in front50", + "Deleting tags with titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-east-1 from ElasticSearch" ], "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", "isCompleted": true }, - "zones": [], - "shrinkToSize": 1, - "cloudProvider": "titus", - "allowDeleteActive": true, - "deploy.server.groups": { - "us-east-1": [ - "emburnstest-temporal-local-v002" - ] + "env": { + "EC2_REGION": "us-east-1", + "NETFLIX_HOME_REGION": "us-east-1", + "NETFLIX_REGION": "us-east-1", + "SPINNAKER_ACCOUNT": "titustestvpc" + }, + "tags": { + + }, + "capacitySnapshot": { + "minSize": 1, + "maxSize": 1, + "desiredCapacity": 1 + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "containerAttributes": { + + }, + "name": "Deploy in us-east-1", + 
"commits": [ + + ], + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest", + "nf-infrastructure", + "nf-datacenter" + ], + "migrationPolicy": { + "type": "systemDefault" }, "kato.tasks": [ { "resultObjects": [ { - "outputType": "voidOperation" + "jobUri": "69973db2-88e0-4c44-8f84-1dac7834859f", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v000" + ], + "outputType": "titusServerGroupDeployOperation", + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v000" + } } ], - "history": [], + "history": [ + + ], "id": "n/a", - "status": {} + "status": { + + } } ], - "region": "us-east-1" + "entryPoint": "", + "region": "us-east-1", + "account": "titustestvpc", + "capacityGroup": "emburnstest" + }, + "outputs": { + }, - "outputs": {}, "tasks": [ { "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630103010916, - "endTime": 1630103011344, + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", + "name": "determineSourceServerGroup", + "startTime": 1633463210881, + "endTime": 1633463211059, "status": "SUCCEEDED", "stageStart": true, "stageEnd": false, @@ -2142,10 +1596,10 @@ }, { "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.ShrinkClusterTask", - "name": "shrinkCluster", - "startTime": 1630103011366, - "endTime": 1630103013020, + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", + "name": "determineHealthProviders", + "startTime": 1633463211110, + "endTime": 1633463211324, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2154,10 +1608,10 @@ }, { "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorShrinkCluster", - "startTime": 1630103013039, - "endTime": 1630103018413, + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", + "name": "snapshotSourceServerGroup", + "startTime": 1633463211390, + "endTime": 1633463211526, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2166,10 +1620,10 @@ }, { "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103018435, - "endTime": 1630103018531, + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", + "name": "createServerGroup", + "startTime": 1633463211604, + "endTime": 1633463211834, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2178,10 +1632,10 @@ }, { "id": "5", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.WaitForClusterShrinkTask", - "name": "waitForClusterShrink", - "startTime": 1630103018556, - "endTime": 1630103019550, + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "monitorDeploy", + "startTime": 1633463211886, + "endTime": 1633463222315, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2190,137 +1644,10 @@ }, { "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103019571, - "endTime": 1630103019672, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": 
true, - "loopStart": false, - "loopEnd": false - } - ], - "syntheticStageOwner": "STAGE_AFTER", - "parentStageId": "01FE4V8DHRC7DKJRERY56XQ0T9", - "requisiteStageRefIds": [] - }, - { - "id": "01FE4V8X3JAQREV4GE62HG41NN", - "refId": "0<2<2<1<1>1<1", - "type": "disableCluster", - "name": "disableCluster", - "startTime": 1630102910103, - "endTime": 1630103010740, - "status": "SUCCEEDED", - "context": { - "outputs": {}, - "cluster": "emburnstest-temporal-local", - "notification.type": "disableservergroup", - "deploy.account.name": "titustestvpc", - "force.cache.refresh.errors": [], - "credentials": "titustestvpc", - "workflowExecution": { - "namespace": "spinnaker", - "workflowType": "WorkflowDispatcher", - "runId": "327762be-be09-4faf-90ef-0e71370496c4", - "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8X3JAQREV4GE62HG41NN/327762be-be09-4faf-90ef-0e71370496c4/summary", - "workflowId": "stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8X3JAQREV4GE62HG41NN", - "uiTarget": "https://temporal" - }, - "processed.server.groups": [], - "retainLargerOverNewer": false, - "refreshed.server.groups": [], - "workflowResult": { - "childWorkflowExecutionInfos": { - "a6566f65b68c754a74fde2184bacd17dd813720641703fa9359382559c85ecb3": { - "namespace": "spinnaker", - "workflowType": "StageContextCloudOperationRunner", - "runId": "f2894d57-4268-4f24-b526-cfae8e16f0a0", - "permalink": "https://temporal/namespaces/spinnaker/workflows/a6566f65b68c754a74fde2184bacd17dd813720641703fa9359382559c85ecb3/f2894d57-4268-4f24-b526-cfae8e16f0a0/summary", - "workflowId": "a6566f65b68c754a74fde2184bacd17dd813720641703fa9359382559c85ecb3", - "uiTarget": "https://temporal" - } - }, - "childWorkflowExecutionResults": { - "a6566f65b68c754a74fde2184bacd17dd813720641703fa9359382559c85ecb3": { - "output": { - "outputType": "disableTitusServerGroupOperation", - "skippedInstances": [] - }, - "childWorkflowExecutionInfos": {}, - "childWorkflowExecutionResults": {}, - "logs": [], - "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", - "isCompleted": true - } - }, - "logs": [ - "Disabling server group us-east-1/emburnstest-temporal-local-v002", - "Finished Disabling server group emburnstest-temporal-local-v002" - ], - "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", - "isCompleted": true - }, - "preferLargerOverNewer": false, - "zones": [], - "remainingEnabledServerGroups": 1, - "continueIfClusterNotFound": false, - "shrinkToSize": 1, - "cloudProvider": "titus", - "allowDeleteActive": true, - "deploy.server.groups": { - "us-east-1": [ - "emburnstest-temporal-local-v002" - ] - }, - "kato.tasks": [ - { - "resultObjects": [ - { - "outputType": "disableTitusServerGroupOperation", - "skippedInstances": [] - } - ], - "history": [], - "id": "n/a", - "status": {} - } - ], - "region": "us-east-1" - }, - "outputs": {}, - "tasks": [ - { - "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630102910356, - "endTime": 1630102910598, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.DisableClusterTask", - "name": "disableCluster", - "startTime": 1630102910619, - "endTime": 1630102912401, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "3", - "implementingClass": 
"com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorDisableCluster", - "startTime": 1630102912423, - "endTime": 1630102917815, + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", + "name": "tagServerGroup", + "startTime": 1633463222367, + "endTime": 1633463222672, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2328,11 +1655,11 @@ "loopEnd": false }, { - "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630102917835, - "endTime": 1630102917939, + "id": "7", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", + "name": "waitForUpInstances", + "startTime": 1633463222722, + "endTime": 1633463355316, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2340,11 +1667,11 @@ "loopEnd": false }, { - "id": "5", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.WaitForClusterDisableTask", - "name": "waitForClusterDisable", - "startTime": 1630102917962, - "endTime": 1630103010421, + "id": "8", + "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", + "name": "jarDiffs", + "startTime": 1633463355422, + "endTime": 1633463355601, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2352,11 +1679,11 @@ "loopEnd": false }, { - "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103010444, - "endTime": 1630103010719, + "id": "9", + "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", + "name": "getCommits", + "startTime": 1633463355688, + "endTime": 1633463355900, "status": "SUCCEEDED", "stageStart": false, "stageEnd": true, @@ -2365,34 +1692,48 @@ } ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4V8WT3HYYP3BZGHSH4MHGR", - "requisiteStageRefIds": [] + "parentStageId": "01FH8ZX3V71K36NEWFQ0V8HT7M", + "requisiteStageRefIds": [ + + ] }, { - "id": "01FE4V8WTDK4AFHT7JN2HKZGF4", - "refId": "0<2<2<1<1>2", + "id": "01FH901JHGNKX85ZK7SEERRYT3", + "refId": "1<2<2<1<1>1", "type": "applySourceServerGroupCapacity", "name": "restoreMinCapacityFromSnapshot", - "startTime": 1630103019715, - "endTime": 1630103020522, + "startTime": 1633463356048, + "endTime": 1633463357051, "status": "SUCCEEDED", "context": { - "force.cache.refresh.errors": [], + "force.cache.refresh.errors": [ + + ], "credentials": "titustestvpc", - "processed.server.groups": [], + "processed.server.groups": [ + + ], "cloudProvider": "titus", - "deploy.server.groups": {}, - "refreshed.server.groups": [], - "zones": [] + "deploy.server.groups": { + + }, + "refreshed.server.groups": [ + + ], + "zones": [ + + ] + }, + "outputs": { + }, - "outputs": {}, "tasks": [ { "id": "1", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.ApplySourceServerGroupCapacityTask", "name": "restoreMinCapacity", - "startTime": 1630103019882, - "endTime": 1630103019961, + "startTime": 1633463356354, + "endTime": 1633463356468, "status": "SUCCEEDED", "stageStart": true, "stageEnd": false, @@ -2403,8 +1744,8 @@ "id": "2", "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", "name": "waitForCapacityMatch", - "startTime": 1630103019986, - "endTime": 1630103020026, + "startTime": 
1633463356518, + "endTime": 1633463356619, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2415,8 +1756,8 @@ "id": "3", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", "name": "forceCacheRefresh", - "startTime": 1630103020051, - "endTime": 1630103020305, + "startTime": 1633463356680, + "endTime": 1633463356875, "status": "SUCCEEDED", "stageStart": false, "stageEnd": true, @@ -2425,32 +1766,34 @@ } ], "syntheticStageOwner": "STAGE_AFTER", - "parentStageId": "01FE4V8DHRC7DKJRERY56XQ0T9", + "parentStageId": "01FH8ZX40JW83C6J8D6FE29FFD", "requisiteStageRefIds": [ - "0<2<2<1<1>1" + ] }, { - "id": "01FE4V8CA0QHDTAPY76QVWJSBN", - "refId": "0<3", + "id": "01FH8ZX2MSJ7A8SMX6K9F9CPCR", + "refId": "1<3", "type": "initManagedRolloutStep", "name": "Run next Rollout Step", - "startTime": 1630103020953, - "endTime": 1630103156421, + "startTime": 1633463358286, + "endTime": 1633463446211, "status": "SUCCEEDED", "context": { - "rolloutWorkflowId": "rollout:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8BPR9WCJX5MCQ9WN81BX" + "rolloutWorkflowId": "rollout:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH8ZX2F181G1G04H09KMYNSR" }, "outputs": { "completedRolloutStep": { - "id": "6873fdd8-c7da-3c69-b389-651662752192", + "id": "c7178e73-e67a-35ff-aedc-504c3b178973", "targets": [ { "cloudProvider": "titus", "location": { "account": "titustestvpc", "region": "us-west-2", - "sublocations": [] + "sublocations": [ + + ] } } ] @@ -2461,8 +1804,8 @@ "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", "name": "completeRolloutStep", - "startTime": 1630103156283, - "endTime": 1630103156401, + "startTime": 1633463445866, + "endTime": 1633463446098, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -2471,28 +1814,30 @@ } ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4V8BPR9WCJX5MCQ9WN81BX", + "parentStageId": "01FH8ZX2F181G1G04H09KMYNSR", "requisiteStageRefIds": [ - "0<2" + "1<2" ] }, { - "id": "01FE4VC9EAATDRFZNGDJW6DEE1", - "refId": "0<3<1", + "id": "01FH901MWMWFR2RD95XS28AMNT", + "refId": "1<3<1", "type": "waitForNextRolloutStep", "name": "Wait For Next Rollout Step", - "startTime": 1630103021029, - "endTime": 1630103021282, + "startTime": 1633463358459, + "endTime": 1633463358907, "status": "SUCCEEDED", "context": { - "rolloutWorkflowId": "rollout:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4V8BPR9WCJX5MCQ9WN81BX", + "rolloutWorkflowId": "rollout:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH8ZX2F181G1G04H09KMYNSR", "rolloutStep": { - "id": "6873fdd8-c7da-3c69-b389-651662752192", + "id": "c7178e73-e67a-35ff-aedc-504c3b178973", "targets": [ { "cloudProvider": "titus", "location": { - "sublocations": [], + "sublocations": [ + + ], "region": "us-west-2", "account": "titustestvpc" } @@ -2500,14 +1845,16 @@ ] } }, - "outputs": {}, + "outputs": { + + }, "tasks": [ { "id": "1", "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", "name": "awaitReadyRolloutStep", - "startTime": 1630103021088, - "endTime": 1630103021259, + "startTime": 1633463358635, + "endTime": 1633463358823, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -2516,96 +1863,94 @@ } ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4V8CA0QHDTAPY76QVWJSBN", - "requisiteStageRefIds": [] + "parentStageId": "01FH8ZX2MSJ7A8SMX6K9F9CPCR", + "requisiteStageRefIds": [ + + ] }, { - "id": "01FE4VC9EAAN1AN3D6N2VRRY1N", - "refId": "0<3<2", + "id": "01FH901MWMQ9Q6G99SYV6HM09G", + 
"refId": "1<3<2", "type": "doManagedRollout", "name": "Deploy Rollout Step", - "startTime": 1630103021303, - "endTime": 1630103156242, + "startTime": 1633463358990, + "endTime": 1633463445688, "status": "SUCCEEDED", - "context": {}, - "outputs": {}, - "tasks": [], + "context": { + + }, + "outputs": { + + }, + "tasks": [ + + ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4V8CA0QHDTAPY76QVWJSBN", + "parentStageId": "01FH8ZX2MSJ7A8SMX6K9F9CPCR", "requisiteStageRefIds": [ - "0<3<1" + "1<3<1" ] }, { - "id": "01FE4VC9QV7A4YBDC9KNPR6JAB", - "refId": "0<3<2<1", + "id": "01FH901NH6KZRXSDQZ1JGRGABG", + "refId": "1<3<2<1", "type": "deploy", "name": "Deploy us-west-2", - "startTime": 1630103021324, - "endTime": 1630103156195, + "startTime": 1633463359135, + "endTime": 1633463445518, "status": "SUCCEEDED", "context": { + "targets": [ + { + "cloudProvider": "titus", + "location": { + "sublocations": [ + + ], + "region": "us-west-2", + "account": "titustestvpc" + } + } + ], "clusters": [ { - "stack": "temporal", + "stack": "eggo", "credentials": "titustestvpc", "targetHealthyDeployPercentage": 100, - "repository": "spinnaker/basic", "constraints": { - "hard": {}, - "soft": {} + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } }, + "network": "default", "capacity": { - "min": 0, - "desired": 0, - "max": 0 + "min": 1, + "desired": 1, + "max": 1 }, - "freeFormDetails": "local", "cloudProvider": "titus", - "tag": "latest", + "tag": "master-h97.cd22e76", "inService": true, - "iamProfile": "arn:aws:iam::1", "registry": "testregistry", - "disruptionBudget": { - "timeWindows": [ - { - "hourlyTimeWindows": [ - { - "endHour": 16, - "startHour": 10 - } - ], - "days": [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday" - ], - "timeZone": "PST" - } - ], - "containerHealthProviders": [ - { - "name": "eureka" - } - ], - "availabilityPercentageLimit": { - "percentageOfHealthyContainers": 95 - }, - "ratePercentagePerInterval": { - "intervalMs": 600000, - "percentageLimitPerInterval": 5 - }, - "rateUnlimited": false + "scaling": { + }, - "imageId": "spinnaker/basic:latest", + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "loadBalancers": [ + + ], "resources": { "disk": 10000, - "memory": 512, + "memory": 5000, "networkMbps": 128, "cpu": 1, "gpu": 0 + }, + "overrides": { + }, "env": { "EC2_REGION": "us-west-2", @@ -2613,46 +1958,41 @@ "NETFLIX_REGION": "us-west-2", "SPINNAKER_ACCOUNT": "titustestvpc" }, - "labels": { - "interestingHealthProviderNames": "Titus" + "tags": { + }, "application": "emburnstest", - "serviceJobProcesses": { - "disableDecreaseDesired": false, - "disableIncreaseDesired": false - }, - "targetGroups": [], + "targetGroups": [ + + ], "containerAttributes": { - "titusParameter.agent.subnets": "subnet-1,subnet-2,subnet-3", - "titusParameter.agent.accountId": "1", - "titusParameter.agent.assignIPv6Address": "true" - }, - "organization": "spinnaker", - "migrationPolicy": { - "type": "systemDefault" + }, "securityGroups": [ "nf-datacenter", "nf-infrastructure", "emburnstest" ], + "migrationPolicy": { + "type": "systemDefault" + }, "entryPoint": "", - "strategy": "highlander", "region": "us-west-2", - "user": "emburns@netflix.com", "account": "titustestvpc", "capacityGroup": "emburnstest" } ] }, - "outputs": {}, + "outputs": { + + }, "tasks": [ { "id": "1", "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.ParallelDeployStage.CompleteParallelDeployTask", "name": "completeParallelDeploy", - "startTime": 1630103156130, - "endTime": 
1630103156173, + "startTime": 1633463445256, + "endTime": 1633463445445, "status": "SUCCEEDED", "stageStart": true, "stageEnd": true, @@ -2661,147 +2001,154 @@ } ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4VC9EAAN1AN3D6N2VRRY1N", - "requisiteStageRefIds": [] + "parentStageId": "01FH901MWMQ9Q6G99SYV6HM09G", + "requisiteStageRefIds": [ + + ] }, { - "id": "01FE4VC9T0YNAW74JSCA9M0B1Z", - "refId": "0<3<2<1<1", + "id": "01FH901NQ4W19BHYBKHXRHZ5K1", + "refId": "1<3<2<1<1", "type": "createServerGroup", "name": "Deploy in us-west-2", - "startTime": 1630103021398, - "endTime": 1630103156080, + "startTime": 1633463359269, + "endTime": 1633463445044, "status": "SUCCEEDED", "context": { - "outputs": {}, + "outputs": { + + }, "notification.type": "upsertentitytags", "deploy.account.name": "titustestvpc", - "stack": "temporal", + "zeroDesiredCapacityCount": 0, + "stack": "eggo", + "lastCapacityCheck": { + "outOfService": 0, + "up": 0, + "failed": 0, + "starting": 0, + "down": 0, + "succeeded": 0, + "unknown": 1 + }, "credentials": "titustestvpc", "targetHealthyDeployPercentage": 100, "source": { - "serverGroupName": "emburnstest-temporal-local-v001", - "asgName": "emburnstest-temporal-local-v001", - "region": "us-west-2", - "account": "titustestvpc" + }, "type": "createServerGroup", - "repository": "spinnaker/basic", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "sublocations": [ + + ], + "region": "us-west-2", + "account": "titustestvpc" + } + } + ], "constraints": { - "hard": {}, - "soft": {} + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } }, + "currentInstanceCount": 1, + "network": "default", "capacity": { - "min": 0, - "desired": 0, - "max": 0 + "min": 1, + "desired": 1, + "max": 1 }, - "freeFormDetails": "local", + "targetDesiredSize": 1, "cloudProvider": "titus", "kato.result.expected": true, "deploy.server.groups": { "us-west-2": [ - "emburnstest-temporal-local-v002" + "emburnstest-eggo-v000" ] }, - "tag": "latest", + "tag": "master-h97.cd22e76", "kato.last.task.id": { - "id": "01FE4VCK8FS7JF2P4K8K78FH4A" + "id": "01FH902203BKRMXMKXP9E6460E" }, "inService": true, - "iamProfile": "arn:aws:iam::1", "registry": "testregistry", - "disruptionBudget": { - "timeWindows": [ - { - "hourlyTimeWindows": [ - { - "endHour": 16, - "startHour": 10 - } - ], - "days": [ - "Monday", - "Tuesday", - "Wednesday", - "Thursday", - "Friday" - ], - "timeZone": "PST" - } - ], - "containerHealthProviders": [ - { - "name": "eureka" - } - ], - "availabilityPercentageLimit": { - "percentageOfHealthyContainers": 95 - }, - "ratePercentagePerInterval": { - "intervalMs": 600000, - "percentageLimitPerInterval": 5 - }, - "rateUnlimited": false + "scaling": { + }, - "imageId": "spinnaker/basic:latest", + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", "workflowExecution": { "namespace": "spinnaker", "workflowType": "WorkflowDispatcher", - "runId": "d3571382-ee9e-467c-9001-268abce9d2c5", - "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4VC9T0YNAW74JSCA9M0B1Z/d3571382-ee9e-467c-9001-268abce9d2c5/summary", - "workflowId": "stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4VC9T0YNAW74JSCA9M0B1Z", + "runId": "049953b3-25be-434b-98f1-90b524408c3c", + "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH901NQ4W19BHYBKHXRHZ5K1/049953b3-25be-434b-98f1-90b524408c3c/summary", + "workflowId": "stage:01FH8ZX2F1F3YWGQTH0R9BQKR3:01FH901NQ4W19BHYBKHXRHZ5K1", "uiTarget": "https://temporal" }, + 
"loadBalancers": [ + + ], "resources": { "disk": 10000, - "memory": 512, + "memory": 5000, "networkMbps": 128, "cpu": 1, "gpu": 0 + }, + "overrides": { + }, "workflowResult": { "childWorkflowExecutionInfos": { - "d62ef3801f69003c5a824bb9979a3fdfc2b79c35f93f11cc8b2a5d1a6ca2be11": { + "b4b4e51d996ef88fac830813b80fc1daf735479ec58399bb7b2fa17827b84d4e": { "namespace": "spinnaker", "workflowType": "StageContextCloudOperationRunner", - "runId": "5561ef2e-011e-47f8-a6f0-688b3c6698ed", - "permalink": "https://temporal/namespaces/spinnaker/workflows/d62ef3801f69003c5a824bb9979a3fdfc2b79c35f93f11cc8b2a5d1a6ca2be11/5561ef2e-011e-47f8-a6f0-688b3c6698ed/summary", - "workflowId": "d62ef3801f69003c5a824bb9979a3fdfc2b79c35f93f11cc8b2a5d1a6ca2be11", + "runId": "e769c158-cb41-429d-8759-f52f70e2bb14", + "permalink": "https://temporal/namespaces/spinnaker/workflows/b4b4e51d996ef88fac830813b80fc1daf735479ec58399bb7b2fa17827b84d4e/e769c158-cb41-429d-8759-f52f70e2bb14/summary", + "workflowId": "b4b4e51d996ef88fac830813b80fc1daf735479ec58399bb7b2fa17827b84d4e", "uiTarget": "https://temporal" } }, "childWorkflowExecutionResults": { - "d62ef3801f69003c5a824bb9979a3fdfc2b79c35f93f11cc8b2a5d1a6ca2be11": { + "b4b4e51d996ef88fac830813b80fc1daf735479ec58399bb7b2fa17827b84d4e": { "output": { - "jobUri": "1539c6e7-8bc4-4659-a2ed-f036e24fe729", + "jobUri": "52c7569b-0898-4302-a44d-90f440e94a76", "serverGroupNames": [ - "us-west-2:emburnstest-temporal-local-v002" + "us-west-2:emburnstest-eggo-v000" ], "outputType": "titusServerGroupDeployOperation", - "ancestorServerGroupNameByRegion": { - "us-west-2": "emburnstest-temporal-local-v001" - }, "serverGroupNameByRegion": { - "us-west-2": "emburnstest-temporal-local-v002" + "us-west-2": "emburnstest-eggo-v000" } }, - "childWorkflowExecutionInfos": {}, - "childWorkflowExecutionResults": {}, - "logs": [], + "childWorkflowExecutionInfos": { + + }, + "childWorkflowExecutionResults": { + + }, + "logs": [ + + ], "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", "isCompleted": true } }, "logs": [ "Retrieved application data from front50 for application: emburnstest", - "Referring to source server group for details: emburnstest-temporal-local-v001", - "Resolved security groups: sg-1,sg-2,sg-3", - "Prepared job request for: emburnstest-temporal-local-v002 with imageId spinnaker/basic:latest", - "Submitting job with name: emburnstest-temporal-local-v002, attempt: 1", - "Successfully created job with name: emburnstest-temporal-local-v002, jobUri: 1539c6e7-8bc4-4659-a2ed-f036e24fe729", - "Entity tags with titus:servergroup:emburnstest-temporal-local-v002:titustestvpc:us-west-2 not found in front50", - "Deleting tags with titus:servergroup:emburnstest-temporal-local-v002:titustestvpc:us-west-2 from ElasticSearch" + "Resolved disruption budget.", + "Resolved security groups: sg-6b98a80e,sg-1398a876,sg-0514c958346cc16a4", + "Prepared job request for: emburnstest-eggo-v000 with imageId emburns/spin-titus-demo:master-h97.cd22e76", + "Submitting job with name: emburnstest-eggo-v000, attempt: 1", + "Successfully created job with name: emburnstest-eggo-v000, jobUri: 52c7569b-0898-4302-a44d-90f440e94a76", + "Invalid source asg null", + "Entity tags with titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-west-2 not found in front50", + "Deleting tags with titus:servergroup:emburnstest-eggo-v000:titustestvpc:us-west-2 from ElasticSearch" ], "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", "isCompleted": true @@ -2812,25 +2159,25 @@ "NETFLIX_REGION": "us-west-2", "SPINNAKER_ACCOUNT": 
"titustestvpc" }, - "labels": { - "interestingHealthProviderNames": "Titus" + "tags": { + }, - "application": "emburnstest", - "serviceJobProcesses": { - "disableDecreaseDesired": false, - "disableIncreaseDesired": false + "capacitySnapshot": { + "minSize": 1, + "maxSize": 1, + "desiredCapacity": 1 }, - "targetGroups": [], + "application": "emburnstest", + "targetGroups": [ + + ], "containerAttributes": { - "titusParameter.agent.subnets": "subnet-1,subnet-2,subnet-3", - "titusParameter.agent.accountId": "1", - "titusParameter.agent.assignIPv6Address": "true" + }, - "organization": "spinnaker", "name": "Deploy in us-west-2", - "migrationPolicy": { - "type": "systemDefault" - }, + "commits": [ + + ], "securityGroups": [ "nf-datacenter", "nf-infrastructure", @@ -2838,43 +2185,47 @@ "nf-infrastructure", "nf-datacenter" ], + "migrationPolicy": { + "type": "systemDefault" + }, "kato.tasks": [ { "resultObjects": [ { - "jobUri": "1539c6e7-8bc4-4659-a2ed-f036e24fe729", + "jobUri": "52c7569b-0898-4302-a44d-90f440e94a76", "serverGroupNames": [ - "us-west-2:emburnstest-temporal-local-v002" + "us-west-2:emburnstest-eggo-v000" ], "outputType": "titusServerGroupDeployOperation", - "ancestorServerGroupNameByRegion": { - "us-west-2": "emburnstest-temporal-local-v001" - }, "serverGroupNameByRegion": { - "us-west-2": "emburnstest-temporal-local-v002" + "us-west-2": "emburnstest-eggo-v000" } } ], - "history": [], + "history": [ + + ], "id": "n/a", - "status": {} + "status": { + + } } ], "entryPoint": "", - "strategy": "highlander", "region": "us-west-2", - "user": "emburns@netflix.com", "account": "titustestvpc", "capacityGroup": "emburnstest" }, - "outputs": {}, + "outputs": { + + }, "tasks": [ { "id": "1", "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", "name": "determineSourceServerGroup", - "startTime": 1630103021820, - "endTime": 1630103022918, + "startTime": 1633463359942, + "endTime": 1633463360177, "status": "SUCCEEDED", "stageStart": true, "stageEnd": false, @@ -2885,8 +2236,8 @@ "id": "2", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", "name": "determineHealthProviders", - "startTime": 1630103022941, - "endTime": 1630103023424, + "startTime": 1633463360235, + "endTime": 1633463360392, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2897,8 +2248,8 @@ "id": "3", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", "name": "snapshotSourceServerGroup", - "startTime": 1630103023444, - "endTime": 1630103023486, + "startTime": 1633463360479, + "endTime": 1633463360610, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2909,8 +2260,8 @@ "id": "4", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", "name": "createServerGroup", - "startTime": 1630103023509, - "endTime": 1630103023820, + "startTime": 1633463360667, + "endTime": 1633463360915, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2921,8 +2272,8 @@ "id": "5", "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", "name": "monitorDeploy", - "startTime": 1630103023839, - "endTime": 1630103029275, + "startTime": 1633463360972, + "endTime": 1633463371292, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2933,8 +2284,8 @@ "id": "6", "implementingClass": 
"com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", "name": "tagServerGroup", - "startTime": 1630103029293, - "endTime": 1630103031034, + "startTime": 1633463371370, + "endTime": 1633463371898, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2945,8 +2296,8 @@ "id": "7", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", "name": "waitForUpInstances", - "startTime": 1630103031054, - "endTime": 1630103042409, + "startTime": 1633463371969, + "endTime": 1633463442968, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2957,8 +2308,8 @@ "id": "8", "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", "name": "jarDiffs", - "startTime": 1630103042566, - "endTime": 1630103045348, + "startTime": 1633463443050, + "endTime": 1633463443160, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -2969,8 +2320,8 @@ "id": "9", "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", "name": "getCommits", - "startTime": 1630103045371, - "endTime": 1630103045749, + "startTime": 1633463443216, + "endTime": 1633463443465, "status": "SUCCEEDED", "stageStart": false, "stageEnd": true, @@ -2979,356 +2330,48 @@ } ], "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4VC9QV7A4YBDC9KNPR6JAB", - "requisiteStageRefIds": [] + "parentStageId": "01FH901NH6KZRXSDQZ1JGRGABG", + "requisiteStageRefIds": [ + + ] }, { - "id": "01FE4VD1MAEEDAKK2TMVZNWRK3", - "refId": "0<3<2<1<1>1", - "type": "shrinkCluster", - "name": "shrinkCluster", - "startTime": 1630103045815, - "endTime": 1630103155264, + "id": "01FH9048453H8PC7Z2ZD7M1DNK", + "refId": "1<3<2<1<1>1", + "type": "applySourceServerGroupCapacity", + "name": "restoreMinCapacityFromSnapshot", + "startTime": 1633463443714, + "endTime": 1633463444903, "status": "SUCCEEDED", "context": { - "outputs": {}, - "cluster": "emburnstest-temporal-local", - "notification.type": "destroyservergroup", - "deploy.account.name": "titustestvpc", - "force.cache.refresh.errors": [], - "credentials": "titustestvpc", - "workflowExecution": { - "namespace": "spinnaker", - "workflowType": "WorkflowDispatcher", - "runId": "a0900d17-0e88-4684-b5f0-8edf3ed210cd", - "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4VD1MAEEDAKK2TMVZNWRK3/a0900d17-0e88-4684-b5f0-8edf3ed210cd/summary", - "workflowId": "stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4VD1MAEEDAKK2TMVZNWRK3", - "uiTarget": "https://temporal" - }, - "processed.server.groups": [], - "retainLargerOverNewer": false, - "refreshed.server.groups": [], - "workflowResult": { - "childWorkflowExecutionInfos": { - "2db9ad192f28a8e5a82cb69058769b88fa6fa74aecb5cfebab155efcadc03eb3": { - "namespace": "spinnaker", - "workflowType": "StageContextCloudOperationRunner", - "runId": "51de8f6b-74b6-4ee5-b862-d6c86245d380", - "permalink": "https://temporal/namespaces/spinnaker/workflows/2db9ad192f28a8e5a82cb69058769b88fa6fa74aecb5cfebab155efcadc03eb3/51de8f6b-74b6-4ee5-b862-d6c86245d380/summary", - "workflowId": "2db9ad192f28a8e5a82cb69058769b88fa6fa74aecb5cfebab155efcadc03eb3", - "uiTarget": "https://temporal" - } - }, - "childWorkflowExecutionResults": { - "2db9ad192f28a8e5a82cb69058769b88fa6fa74aecb5cfebab155efcadc03eb3": { - "output": { - "outputType": "voidOperation" - }, - "childWorkflowExecutionInfos": {}, - "childWorkflowExecutionResults": {}, - "logs": [], - "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", - "isCompleted": true - } - 
}, - "logs": [ - "Deleting Titus job titustestvpc:us-west-2:a59c3bc2-9941-42d9-a7f6-8d558cc26b13, initiated by emburns@netflix.com", - "Deleted Titus job titustestvpc:us-west-2:a59c3bc2-9941-42d9-a7f6-8d558cc26b13", - "Deleting tags with titus:servergroup:emburnstest-temporal-local-v001:titustestvpc:us-west-2 from ElasticSearch and front50" - ], - "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", - "isCompleted": true - }, - "zones": [], - "shrinkToSize": 1, - "cloudProvider": "titus", - "allowDeleteActive": true, - "deploy.server.groups": { - "us-west-2": [ - "emburnstest-temporal-local-v001" - ] - }, - "kato.tasks": [ - { - "resultObjects": [ - { - "outputType": "voidOperation" - } - ], - "history": [], - "id": "n/a", - "status": {} - } + "force.cache.refresh.errors": [ + ], - "region": "us-west-2" - }, - "outputs": {}, - "tasks": [ - { - "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630103146319, - "endTime": 1630103146924, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.ShrinkClusterTask", - "name": "shrinkCluster", - "startTime": 1630103146945, - "endTime": 1630103148539, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorShrinkCluster", - "startTime": 1630103148560, - "endTime": 1630103153937, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103153961, - "endTime": 1630103154048, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "5", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.WaitForClusterShrinkTask", - "name": "waitForClusterShrink", - "startTime": 1630103154069, - "endTime": 1630103155122, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103155145, - "endTime": 1630103155242, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": true, - "loopStart": false, - "loopEnd": false - } - ], - "syntheticStageOwner": "STAGE_AFTER", - "parentStageId": "01FE4VC9T0YNAW74JSCA9M0B1Z", - "requisiteStageRefIds": [] - }, - { - "id": "01FE4VD1XCYHVGXXTDA4S2KXG0", - "refId": "0<3<2<1<1>1<1", - "type": "disableCluster", - "name": "disableCluster", - "startTime": 1630103046088, - "endTime": 1630103146276, - "status": "SUCCEEDED", - "context": { - "outputs": {}, - "cluster": "emburnstest-temporal-local", - "notification.type": "disableservergroup", - "deploy.account.name": "titustestvpc", - "force.cache.refresh.errors": [], "credentials": "titustestvpc", - "workflowExecution": { - "namespace": "spinnaker", - "workflowType": "WorkflowDispatcher", - "runId": "308915a3-987d-407e-8abe-8d8fecc9322d", - "permalink": 
"https://temporal/namespaces/spinnaker/workflows/stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4VD1XCYHVGXXTDA4S2KXG0/308915a3-987d-407e-8abe-8d8fecc9322d/summary", - "workflowId": "stage:01FE4V8BPR7B9K1FGD54T2K25Q:01FE4VD1XCYHVGXXTDA4S2KXG0", - "uiTarget": "https://temporal" - }, - "processed.server.groups": [], - "retainLargerOverNewer": false, - "refreshed.server.groups": [], - "workflowResult": { - "childWorkflowExecutionInfos": { - "919711d592093f268e3b76c538614f9e5fb45535203904c50e2f67dc73955088": { - "namespace": "spinnaker", - "workflowType": "StageContextCloudOperationRunner", - "runId": "2b3e0486-8a5c-4b5a-a49a-e48d668dbe0a", - "permalink": "https://temporal/namespaces/spinnaker/workflows/919711d592093f268e3b76c538614f9e5fb45535203904c50e2f67dc73955088/2b3e0486-8a5c-4b5a-a49a-e48d668dbe0a/summary", - "workflowId": "919711d592093f268e3b76c538614f9e5fb45535203904c50e2f67dc73955088", - "uiTarget": "https://temporal" - } - }, - "childWorkflowExecutionResults": { - "919711d592093f268e3b76c538614f9e5fb45535203904c50e2f67dc73955088": { - "output": { - "outputType": "disableTitusServerGroupOperation", - "skippedInstances": [] - }, - "childWorkflowExecutionInfos": {}, - "childWorkflowExecutionResults": {}, - "logs": [], - "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", - "isCompleted": true - } - }, - "logs": [ - "Disabling server group us-west-2/emburnstest-temporal-local-v001", - "Finished Disabling server group emburnstest-temporal-local-v001" - ], - "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", - "isCompleted": true - }, - "preferLargerOverNewer": false, - "zones": [], - "remainingEnabledServerGroups": 1, - "continueIfClusterNotFound": false, - "shrinkToSize": 1, + "processed.server.groups": [ + + ], "cloudProvider": "titus", - "allowDeleteActive": true, "deploy.server.groups": { - "us-west-2": [ - "emburnstest-temporal-local-v001" - ] + }, - "kato.tasks": [ - { - "resultObjects": [ - { - "outputType": "disableTitusServerGroupOperation", - "skippedInstances": [] - } - ], - "history": [], - "id": "n/a", - "status": {} - } + "refreshed.server.groups": [ + ], - "region": "us-west-2" + "zones": [ + + ] }, - "outputs": {}, - "tasks": [ - { - "id": "1", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", - "name": "determineHealthProviders", - "startTime": 1630103046349, - "endTime": 1630103046590, - "status": "SUCCEEDED", - "stageStart": true, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "2", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.cluster.DisableClusterTask", - "name": "disableCluster", - "startTime": 1630103046609, - "endTime": 1630103048218, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "3", - "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", - "name": "monitorDisableCluster", - "startTime": 1630103048239, - "endTime": 1630103053692, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "4", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103053711, - "endTime": 1630103053814, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "5", - "implementingClass": 
"com.netflix.spinnaker.orca.clouddriver.tasks.cluster.WaitForClusterDisableTask", - "name": "waitForClusterDisable", - "startTime": 1630103053833, - "endTime": 1630103145916, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": false, - "loopStart": false, - "loopEnd": false - }, - { - "id": "6", - "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", - "name": "forceCacheRefresh", - "startTime": 1630103145952, - "endTime": 1630103146215, - "status": "SUCCEEDED", - "stageStart": false, - "stageEnd": true, - "loopStart": false, - "loopEnd": false - } - ], - "syntheticStageOwner": "STAGE_BEFORE", - "parentStageId": "01FE4VD1MAEEDAKK2TMVZNWRK3", - "requisiteStageRefIds": [] - }, - { - "id": "01FE4VD1MB0QS1CRJ02ZYBF2F8", - "refId": "0<3<2<1<1>2", - "type": "applySourceServerGroupCapacity", - "name": "restoreMinCapacityFromSnapshot", - "startTime": 1630103155287, - "endTime": 1630103156019, - "status": "SUCCEEDED", - "context": { - "force.cache.refresh.errors": [], - "credentials": "titustestvpc", - "processed.server.groups": [], - "cloudProvider": "titus", - "deploy.server.groups": {}, - "refreshed.server.groups": [], - "zones": [] + "outputs": { + }, - "outputs": {}, "tasks": [ { "id": "1", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.ApplySourceServerGroupCapacityTask", "name": "restoreMinCapacity", - "startTime": 1630103155434, - "endTime": 1630103155483, + "startTime": 1633463443996, + "endTime": 1633463444146, "status": "SUCCEEDED", "stageStart": true, "stageEnd": false, @@ -3339,8 +2382,8 @@ "id": "2", "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", "name": "waitForCapacityMatch", - "startTime": 1630103155503, - "endTime": 1630103155547, + "startTime": 1633463444206, + "endTime": 1633463444354, "status": "SUCCEEDED", "stageStart": false, "stageEnd": false, @@ -3351,8 +2394,8 @@ "id": "3", "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", "name": "forceCacheRefresh", - "startTime": 1630103155567, - "endTime": 1630103155809, + "startTime": 1633463444504, + "endTime": 1633463444758, "status": "SUCCEEDED", "stageStart": false, "stageEnd": true, @@ -3361,45 +2404,55 @@ } ], "syntheticStageOwner": "STAGE_AFTER", - "parentStageId": "01FE4VC9T0YNAW74JSCA9M0B1Z", + "parentStageId": "01FH901NQ4W19BHYBKHXRHZ5K1", "requisiteStageRefIds": [ - "0<3<2<1<1>1" + ] } ], - "startTime": 1630102892644, - "endTime": 1630103156753, + "startTime": 1633463208480, + "endTime": 1633463446589, "status": "SUCCEEDED", "authentication": { "user": "emburns@netflix.com", - "allowedAccounts": [] + "allowedAccounts": [ + + ] }, - "origin": "unknown", + "origin": "keel", "trigger": { - "type": "manual", - "user": "emburns@netflix.com", - "parameters": {}, - "artifacts": [], - "notifications": [], - "rebake": false, - "dryRun": false, - "strategy": false, - "resolvedExpectedArtifacts": [], - "type": "manual", - "user": "emburns@netflix.com", - "parameters": {}, - "artifacts": [], - "notifications": [], + "type": "keel", + "correlationId": "titus:cluster:titustestvpc:emburnstest-eggo:managed-rollout", + "user": "keel", + "parameters": { + + }, + "artifacts": [ + + ], + "notifications": [ + + ], "rebake": false, "dryRun": false, "strategy": false, - "resolvedExpectedArtifacts": [], - "expectedArtifacts": [] + "resolvedExpectedArtifacts": [ + + ], + "expectedArtifacts": [ + + ] + }, + "description": "Deploy 
master-h97.cd22e76 to cluster emburnstest-eggo in titustestvpc/us-east-1,us-west-2 using a managed rollout", + "notifications": [ + + ], + "initialConfig": { + }, - "description": "Managing rollouts", - "notifications": [], - "initialConfig": {}, - "systemNotifications": [], + "systemNotifications": [ + + ], "partition": "us-west-2" } } diff --git a/keel-orca/src/test/resources/running-managed-rollout.json b/keel-orca/src/test/resources/running-managed-rollout.json new file mode 100644 index 0000000000..6444d45130 --- /dev/null +++ b/keel-orca/src/test/resources/running-managed-rollout.json @@ -0,0 +1,1803 @@ +{ + "id": "01FH8ZD7107CZN3V5YKE15RRVA", + "name": "Deploy master-h97.cd22e76 to cluster emburnstest-eggo in titustestvpc/us-east-1,us-west-2 using a managed rollout", + "application": "emburnstest", + "status": "RUNNING", + "variables": [ + { + "key": "input", + "value": { + "selectionStrategy": "ALPHABETICAL", + "clusterDefinitions": [ + { + "registry": "testregistry", + "stack": "eggo", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "targetHealthyDeployPercentage": 100, + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "env": { + + }, + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + }, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "tags": { + + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "cloudProvider": "titus", + "containerAttributes": { + + }, + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest" + ], + "migrationPolicy": { + "type": "systemDefault" + }, + "entryPoint": "", + "tag": "master-h97.cd22e76", + "inService": true, + "capacityGroup": "emburnstest" + } + ], + "targets": [ + { + "cloudProvider": "titus", + "location": { + "subLocations": [ + + ], + "region": "us-east-1", + "account": "titustestvpc" + } + }, + { + "cloudProvider": "titus", + "location": { + "subLocations": [ + + ], + "region": "us-west-2", + "account": "titustestvpc" + } + } + ] + } + }, + { + "key": "reason", + "value": "Diff detected at 2021-10-05T19:38:08.75631Z[UTC]" + }, + { + "key": "selectionStrategy", + "value": "ALPHABETICAL" + }, + { + "key": "rolloutWorkflowId", + "value": "rollout:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD710VC49HBF8G9VF7V28" + }, + { + "key": "targets", + "value": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ] + }, + { + "key": "rolloutStep", + "value": { + "id": "1deda3cf-3926-323c-97bd-2d86ab2676e6", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ] + } + }, + { + "key": "clusters", + "value": [ + { + "stack": "eggo", + "credentials": "titustestvpc", + "targetHealthyDeployPercentage": 100, + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + }, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "cloudProvider": "titus", + "tag": "master-h97.cd22e76", + "inService": true, + "registry": "testregistry", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "env": { + "EC2_REGION": "us-east-1", + "NETFLIX_HOME_REGION": "us-east-1", + 
"NETFLIX_REGION": "us-east-1", + "SPINNAKER_ACCOUNT": "titustestvpc" + }, + "tags": { + + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "containerAttributes": { + + }, + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest" + ], + "migrationPolicy": { + "type": "systemDefault" + }, + "entryPoint": "", + "region": "us-east-1", + "account": "titustestvpc", + "capacityGroup": "emburnstest" + } + ] + }, + { + "key": "outputs", + "value": { + + } + }, + { + "key": "notification.type", + "value": "upsertentitytags" + }, + { + "key": "deploy.account.name", + "value": "titustestvpc" + }, + { + "key": "zeroDesiredCapacityCount", + "value": 0 + }, + { + "key": "stack", + "value": "eggo" + }, + { + "key": "jarDiffs", + "value": { + "totalLibraries": 360, + "duplicates": [ + { + "displayDiff": "jetty-util: 6.1.26, 9.2.29.v20191105" + }, + { + "displayDiff": "annotations: 3.0.1, 4.1.1.4" + }, + { + "displayDiff": "ksclient-core-kafka: 2.1.10-javadoc, 2.1.10-shadow, 2.1.10-sources" + } + ], + "upgraded": [ + + ], + "downgraded": [ + + ], + "removed": [ + + ], + "added": [ + + ], + "unchanged": [ + + ], + "hasDiff": false, + "unknown": [ + + ] + } + }, + { + "key": "lastCapacityCheck", + "value": { + "outOfService": 0, + "up": 0, + "failed": 0, + "starting": 0, + "down": 0, + "succeeded": 0, + "unknown": 1 + } + }, + { + "key": "credentials", + "value": "titustestvpc" + }, + { + "key": "targetHealthyDeployPercentage", + "value": 100 + }, + { + "key": "source", + "value": { + "serverGroupName": "emburnstest-eggo-v006", + "asgName": "emburnstest-eggo-v006", + "region": "us-east-1", + "account": "titustestvpc" + } + }, + { + "key": "type", + "value": "createServerGroup" + }, + { + "key": "constraints", + "value": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + } + }, + { + "key": "currentInstanceCount", + "value": 1 + }, + { + "key": "network", + "value": "default" + }, + { + "key": "capacity", + "value": { + "min": 1, + "desired": 1, + "max": 1 + } + }, + { + "key": "targetDesiredSize", + "value": 1 + }, + { + "key": "cloudProvider", + "value": "titus" + }, + { + "key": "kato.result.expected", + "value": true + }, + { + "key": "deploy.server.groups", + "value": { + "us-east-1": [ + "emburnstest-eggo-v007" + ] + } + }, + { + "key": "tag", + "value": "master-h97.cd22e76" + }, + { + "key": "kato.last.task.id", + "value": { + "id": "01FH8ZDMNGW6R952HHSTBYQ32W" + } + }, + { + "key": "inService", + "value": true + }, + { + "key": "registry", + "value": "testregistry" + }, + { + "key": "scaling", + "value": { + + } + }, + { + "key": "imageId", + "value": "emburns/spin-titus-demo:master-h97.cd22e76" + }, + { + "key": "workflowExecution", + "value": { + "namespace": "spinnaker", + "workflowType": "WorkflowDispatcher", + "runId": "f40a8c6e-5872-439e-b01e-f1095307c656", + "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD8KZGXXM45M7N160NR1R/f40a8c6e-5872-439e-b01e-f1095307c656/summary", + "workflowId": "stage:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD8KZGXXM45M7N160NR1R", + "uiTarget": "https://temporal" + } + }, + { + "key": "loadBalancers", + "value": [ + + ] + }, + { + "key": "resources", + "value": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + } + }, + { + "key": "overrides", + "value": { + + } + }, + { + "key": "workflowResult", + "value": { + "childWorkflowExecutionInfos": { + "ecf66d3a950c66a202ec1f737e8d2793a11f301bcc2ab1d6d1470a38b53af981": { + "namespace": "spinnaker", + 
"workflowType": "StageContextCloudOperationRunner", + "runId": "d8f03b5f-94c6-4d46-8ed0-50a644a942b2", + "permalink": "https://temporal/namespaces/spinnaker/workflows/ecf66d3a950c66a202ec1f737e8d2793a11f301bcc2ab1d6d1470a38b53af981/d8f03b5f-94c6-4d46-8ed0-50a644a942b2/summary", + "workflowId": "ecf66d3a950c66a202ec1f737e8d2793a11f301bcc2ab1d6d1470a38b53af981", + "uiTarget": "https://temporal" + } + }, + "childWorkflowExecutionResults": { + "ecf66d3a950c66a202ec1f737e8d2793a11f301bcc2ab1d6d1470a38b53af981": { + "output": { + "jobUri": "019285cd-a78e-4052-954a-317c8c74ecc0", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v007" + ], + "outputType": "titusServerGroupDeployOperation", + "ancestorServerGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v006" + }, + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v007" + } + }, + "childWorkflowExecutionInfos": { + + }, + "childWorkflowExecutionResults": { + + }, + "logs": [ + + ], + "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", + "isCompleted": true + } + }, + "logs": [ + "Retrieved application data from front50 for application: emburnstest", + "Referring to source server group for details: emburnstest-eggo-v006", + "Resolved disruption budget.", + "Resolved security groups: sg-f0f19494,sg-f2f19496,sg-bbff21c9", + "Prepared job request for: emburnstest-eggo-v007 with imageId emburns/spin-titus-demo:master-h97.cd22e76", + "Submitting job with name: emburnstest-eggo-v007, attempt: 1", + "Successfully created job with name: emburnstest-eggo-v007, jobUri: 019285cd-a78e-4052-954a-317c8c74ecc0", + "Entity tags with titus:servergroup:emburnstest-eggo-v007:titustestvpc:us-east-1 not found in front50", + "Deleting tags with titus:servergroup:emburnstest-eggo-v007:titustestvpc:us-east-1 from ElasticSearch" + ], + "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", + "isCompleted": true + } + }, + { + "key": "env", + "value": { + "EC2_REGION": "us-east-1", + "NETFLIX_HOME_REGION": "us-east-1", + "NETFLIX_REGION": "us-east-1", + "SPINNAKER_ACCOUNT": "titustestvpc" + } + }, + { + "key": "tags", + "value": { + + } + }, + { + "key": "capacitySnapshot", + "value": { + "minSize": 1, + "maxSize": 1, + "desiredCapacity": 1 + } + }, + { + "key": "application", + "value": "emburnstest" + }, + { + "key": "targetGroups", + "value": [ + + ] + }, + { + "key": "containerAttributes", + "value": { + + } + }, + { + "key": "name", + "value": "Deploy in us-east-1" + }, + { + "key": "commits", + "value": [ + + ] + }, + { + "key": "securityGroups", + "value": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest", + "nf-infrastructure", + "nf-datacenter" + ] + }, + { + "key": "migrationPolicy", + "value": { + "type": "systemDefault" + } + }, + { + "key": "kato.tasks", + "value": [ + { + "resultObjects": [ + { + "jobUri": "019285cd-a78e-4052-954a-317c8c74ecc0", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v007" + ], + "outputType": "titusServerGroupDeployOperation", + "ancestorServerGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v006" + }, + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v007" + } + } + ], + "history": [ + + ], + "id": "n/a", + "status": { + + } + } + ] + }, + { + "key": "entryPoint", + "value": "" + }, + { + "key": "region", + "value": "us-east-1" + }, + { + "key": "account", + "value": "titustestvpc" + }, + { + "key": "capacityGroup", + "value": "emburnstest" + }, + { + "key": "force.cache.refresh.errors", + "value": [ + + ] + }, + { + "key": "processed.server.groups", + "value": [ + + ] + }, + { + 
"key": "refreshed.server.groups", + "value": [ + + ] + }, + { + "key": "zones", + "value": [ + + ] + } + ], + "steps": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutTask", + "name": "completeManagedRollout", + "status": "NOT_STARTED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.StartManagedRolloutTask", + "name": "startManagedRollout", + "startTime": 1633462689275, + "endTime": 1633462689491, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", + "name": "completeRolloutStep", + "startTime": 1633462818083, + "endTime": 1633462818291, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", + "name": "awaitReadyRolloutStep", + "startTime": 1633462689981, + "endTime": 1633462690183, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.ParallelDeployStage.CompleteParallelDeployTask", + "name": "completeParallelDeploy", + "startTime": 1633462817635, + "endTime": 1633462817760, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", + "name": "determineSourceServerGroup", + "startTime": 1633462691062, + "endTime": 1633462691248, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "2", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", + "name": "determineHealthProviders", + "startTime": 1633462691295, + "endTime": 1633462691436, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "3", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", + "name": "snapshotSourceServerGroup", + "startTime": 1633462691489, + "endTime": 1633462691585, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "4", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", + "name": "createServerGroup", + "startTime": 1633462691634, + "endTime": 1633462691840, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "5", + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "monitorDeploy", + "startTime": 1633462691895, + "endTime": 1633462702409, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "6", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", + "name": "tagServerGroup", + "startTime": 1633462702460, + "endTime": 
1633462702872, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "7", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", + "name": "waitForUpInstances", + "startTime": 1633462702962, + "endTime": 1633462814267, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "8", + "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", + "name": "jarDiffs", + "startTime": 1633462814315, + "endTime": 1633462815619, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "9", + "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", + "name": "getCommits", + "startTime": 1633462815713, + "endTime": 1633462815925, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.ApplySourceServerGroupCapacityTask", + "name": "restoreMinCapacity", + "startTime": 1633462816376, + "endTime": 1633462816545, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "2", + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "waitForCapacityMatch", + "startTime": 1633462816674, + "endTime": 1633462816869, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "3", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", + "name": "forceCacheRefresh", + "startTime": 1633462816925, + "endTime": 1633462817147, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", + "name": "completeRolloutStep", + "status": "NOT_STARTED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + }, + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", + "name": "awaitReadyRolloutStep", + "startTime": 1633462818683, + "status": "RUNNING", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "buildTime": 1633462688801, + "startTime": 1633462688877, + "execution": { + "type": "ORCHESTRATION", + "id": "01FH8ZD7107CZN3V5YKE15RRVA", + "application": "emburnstest", + "buildTime": 1633462688801, + "canceled": false, + "limitConcurrent": false, + "keepWaitingPipelines": false, + "stages": [ + { + "id": "01FH8ZD710VC49HBF8G9VF7V28", + "refId": "1", + "type": "managedRollout", + "name": "managedRollout", + "startTime": 1633462688938, + "status": "RUNNING", + "context": { + "input": { + "selectionStrategy": "ALPHABETICAL", + "clusterDefinitions": [ + { + "registry": "testregistry", + "stack": "eggo", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "targetHealthyDeployPercentage": 100, + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "env": { + + }, + "constraints": { + "hard": { + + }, + 
"soft": { + "ZoneBalance": "true" + } + }, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "tags": { + + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "cloudProvider": "titus", + "containerAttributes": { + + }, + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest" + ], + "migrationPolicy": { + "type": "systemDefault" + }, + "entryPoint": "", + "tag": "master-h97.cd22e76", + "inService": true, + "capacityGroup": "emburnstest" + } + ], + "targets": [ + { + "cloudProvider": "titus", + "location": { + "subLocations": [ + + ], + "region": "us-east-1", + "account": "titustestvpc" + } + }, + { + "cloudProvider": "titus", + "location": { + "subLocations": [ + + ], + "region": "us-west-2", + "account": "titustestvpc" + } + } + ] + }, + "reason": "Diff detected at 2021-10-05T19:38:08.75631Z[UTC]" + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutTask", + "name": "completeManagedRollout", + "status": "NOT_STARTED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8ZD77B1WAQCG5X2TE76HSR", + "refId": "1<1", + "type": "startManagedRollout", + "name": "Start Managed Rollout", + "startTime": 1633462689109, + "endTime": 1633462689596, + "status": "SUCCEEDED", + "context": { + "selectionStrategy": "ALPHABETICAL", + "rolloutWorkflowId": "rollout:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD710VC49HBF8G9VF7V28", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + }, + { + "cloudProvider": "titus", + "location": { + "region": "us-west-2", + "account": "titustestvpc" + } + } + ] + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.StartManagedRolloutTask", + "name": "startManagedRollout", + "startTime": 1633462689275, + "endTime": 1633462689491, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8ZD710VC49HBF8G9VF7V28", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8ZD77BPX0JZ6ZFN04T8CK7", + "refId": "1<2", + "type": "initManagedRolloutStep", + "name": "Run next Rollout Step", + "startTime": 1633462689684, + "endTime": 1633462818342, + "status": "SUCCEEDED", + "context": { + "rolloutWorkflowId": "rollout:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD710VC49HBF8G9VF7V28" + }, + "outputs": { + "completedRolloutStep": { + "id": "1deda3cf-3926-323c-97bd-2d86ab2676e6", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "account": "titustestvpc", + "region": "us-east-1" + } + } + ] + } + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", + "name": "completeRolloutStep", + "startTime": 1633462818083, + "endTime": 1633462818291, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8ZD710VC49HBF8G9VF7V28", + "requisiteStageRefIds": [ + "1<1" + ] + }, + { + "id": "01FH8ZD7YJS31TYH3CEDZR7XHB", + "refId": "1<2<1", + "type": "waitForNextRolloutStep", + "name": "Wait For Next Rollout Step", + "startTime": 1633462689844, + "endTime": 1633462690241, + "status": 
"SUCCEEDED", + "context": { + "rolloutWorkflowId": "rollout:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD710VC49HBF8G9VF7V28", + "rolloutStep": { + "id": "1deda3cf-3926-323c-97bd-2d86ab2676e6", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ] + } + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", + "name": "awaitReadyRolloutStep", + "startTime": 1633462689981, + "endTime": 1633462690183, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8ZD77BPX0JZ6ZFN04T8CK7", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8ZD7YJPTY9897P6JFR9ZCT", + "refId": "1<2<2", + "type": "doManagedRollout", + "name": "Deploy Rollout Step", + "startTime": 1633462690289, + "endTime": 1633462817908, + "status": "SUCCEEDED", + "context": { + + }, + "outputs": { + + }, + "tasks": [ + + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8ZD77BPX0JZ6ZFN04T8CK7", + "requisiteStageRefIds": [ + "1<2<1" + ] + }, + { + "id": "01FH8ZD8FTVRR9JGC1J4PFM54W", + "refId": "1<2<2<1", + "type": "deploy", + "name": "Deploy us-east-1", + "startTime": 1633462690371, + "endTime": 1633462817809, + "status": "SUCCEEDED", + "context": { + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ], + "clusters": [ + { + "stack": "eggo", + "credentials": "titustestvpc", + "targetHealthyDeployPercentage": 100, + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + }, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "cloudProvider": "titus", + "tag": "master-h97.cd22e76", + "inService": true, + "registry": "testregistry", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "env": { + "EC2_REGION": "us-east-1", + "NETFLIX_HOME_REGION": "us-east-1", + "NETFLIX_REGION": "us-east-1", + "SPINNAKER_ACCOUNT": "titustestvpc" + }, + "tags": { + + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "containerAttributes": { + + }, + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest" + ], + "migrationPolicy": { + "type": "systemDefault" + }, + "entryPoint": "", + "region": "us-east-1", + "account": "titustestvpc", + "capacityGroup": "emburnstest" + } + ] + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.ParallelDeployStage.CompleteParallelDeployTask", + "name": "completeParallelDeploy", + "startTime": 1633462817635, + "endTime": 1633462817760, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8ZD7YJPTY9897P6JFR9ZCT", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8ZD8KZGXXM45M7N160NR1R", + "refId": "1<2<2<1<1", + "type": "createServerGroup", + "name": "Deploy in us-east-1", + "startTime": 1633462690490, + "endTime": 1633462817441, + "status": "SUCCEEDED", + "context": { + "outputs": { + + }, + "notification.type": "upsertentitytags", + "deploy.account.name": "titustestvpc", + 
"zeroDesiredCapacityCount": 0, + "stack": "eggo", + "jarDiffs": { + "totalLibraries": 360, + "duplicates": [ + { + "displayDiff": "jetty-util: 6.1.26, 9.2.29.v20191105" + }, + { + "displayDiff": "annotations: 3.0.1, 4.1.1.4" + }, + { + "displayDiff": "ksclient-core-kafka: 2.1.10-javadoc, 2.1.10-shadow, 2.1.10-sources" + } + ], + "upgraded": [ + + ], + "downgraded": [ + + ], + "removed": [ + + ], + "added": [ + + ], + "unchanged": [ + + ], + "hasDiff": false, + "unknown": [ + + ] + }, + "lastCapacityCheck": { + "outOfService": 0, + "up": 0, + "failed": 0, + "starting": 0, + "down": 0, + "succeeded": 0, + "unknown": 1 + }, + "credentials": "titustestvpc", + "targetHealthyDeployPercentage": 100, + "source": { + "serverGroupName": "emburnstest-eggo-v006", + "asgName": "emburnstest-eggo-v006", + "region": "us-east-1", + "account": "titustestvpc" + }, + "type": "createServerGroup", + "targets": [ + { + "cloudProvider": "titus", + "location": { + "region": "us-east-1", + "account": "titustestvpc" + } + } + ], + "constraints": { + "hard": { + + }, + "soft": { + "ZoneBalance": "true" + } + }, + "currentInstanceCount": 1, + "network": "default", + "capacity": { + "min": 1, + "desired": 1, + "max": 1 + }, + "targetDesiredSize": 1, + "cloudProvider": "titus", + "kato.result.expected": true, + "deploy.server.groups": { + "us-east-1": [ + "emburnstest-eggo-v007" + ] + }, + "tag": "master-h97.cd22e76", + "kato.last.task.id": { + "id": "01FH8ZDMNGW6R952HHSTBYQ32W" + }, + "inService": true, + "registry": "testregistry", + "scaling": { + + }, + "imageId": "emburns/spin-titus-demo:master-h97.cd22e76", + "workflowExecution": { + "namespace": "spinnaker", + "workflowType": "WorkflowDispatcher", + "runId": "f40a8c6e-5872-439e-b01e-f1095307c656", + "permalink": "https://temporal/namespaces/spinnaker/workflows/stage:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD8KZGXXM45M7N160NR1R/f40a8c6e-5872-439e-b01e-f1095307c656/summary", + "workflowId": "stage:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD8KZGXXM45M7N160NR1R", + "uiTarget": "https://temporal" + }, + "loadBalancers": [ + + ], + "resources": { + "disk": 10000, + "memory": 5000, + "networkMbps": 128, + "cpu": 1, + "gpu": 0 + }, + "overrides": { + + }, + "workflowResult": { + "childWorkflowExecutionInfos": { + "ecf66d3a950c66a202ec1f737e8d2793a11f301bcc2ab1d6d1470a38b53af981": { + "namespace": "spinnaker", + "workflowType": "StageContextCloudOperationRunner", + "runId": "d8f03b5f-94c6-4d46-8ed0-50a644a942b2", + "permalink": "https://temporal/namespaces/spinnaker/workflows/ecf66d3a950c66a202ec1f737e8d2793a11f301bcc2ab1d6d1470a38b53af981/d8f03b5f-94c6-4d46-8ed0-50a644a942b2/summary", + "workflowId": "ecf66d3a950c66a202ec1f737e8d2793a11f301bcc2ab1d6d1470a38b53af981", + "uiTarget": "https://temporal" + } + }, + "childWorkflowExecutionResults": { + "ecf66d3a950c66a202ec1f737e8d2793a11f301bcc2ab1d6d1470a38b53af981": { + "output": { + "jobUri": "019285cd-a78e-4052-954a-317c8c74ecc0", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v007" + ], + "outputType": "titusServerGroupDeployOperation", + "ancestorServerGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v006" + }, + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v007" + } + }, + "childWorkflowExecutionInfos": { + + }, + "childWorkflowExecutionResults": { + + }, + "logs": [ + + ], + "status": "WORKFLOW_EXECUTION_STATUS_UNSPECIFIED", + "isCompleted": true + } + }, + "logs": [ + "Retrieved application data from front50 for application: emburnstest", + "Referring to source server group for details: 
emburnstest-eggo-v006", + "Resolved disruption budget.", + "Resolved security groups: sg-f0f19494,sg-f2f19496,sg-bbff21c9", + "Prepared job request for: emburnstest-eggo-v007 with imageId emburns/spin-titus-demo:master-h97.cd22e76", + "Submitting job with name: emburnstest-eggo-v007, attempt: 1", + "Successfully created job with name: emburnstest-eggo-v007, jobUri: 019285cd-a78e-4052-954a-317c8c74ecc0", + "Entity tags with titus:servergroup:emburnstest-eggo-v007:titustestvpc:us-east-1 not found in front50", + "Deleting tags with titus:servergroup:emburnstest-eggo-v007:titustestvpc:us-east-1 from ElasticSearch" + ], + "status": "WORKFLOW_EXECUTION_STATUS_COMPLETED", + "isCompleted": true + }, + "env": { + "EC2_REGION": "us-east-1", + "NETFLIX_HOME_REGION": "us-east-1", + "NETFLIX_REGION": "us-east-1", + "SPINNAKER_ACCOUNT": "titustestvpc" + }, + "tags": { + + }, + "capacitySnapshot": { + "minSize": 1, + "maxSize": 1, + "desiredCapacity": 1 + }, + "application": "emburnstest", + "targetGroups": [ + + ], + "containerAttributes": { + + }, + "name": "Deploy in us-east-1", + "commits": [ + + ], + "securityGroups": [ + "nf-datacenter", + "nf-infrastructure", + "emburnstest", + "nf-infrastructure", + "nf-datacenter" + ], + "migrationPolicy": { + "type": "systemDefault" + }, + "kato.tasks": [ + { + "resultObjects": [ + { + "jobUri": "019285cd-a78e-4052-954a-317c8c74ecc0", + "serverGroupNames": [ + "us-east-1:emburnstest-eggo-v007" + ], + "outputType": "titusServerGroupDeployOperation", + "ancestorServerGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v006" + }, + "serverGroupNameByRegion": { + "us-east-1": "emburnstest-eggo-v007" + } + } + ], + "history": [ + + ], + "id": "n/a", + "status": { + + } + } + ], + "entryPoint": "", + "region": "us-east-1", + "account": "titustestvpc", + "capacityGroup": "emburnstest" + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.kato.pipeline.strategy.DetermineSourceServerGroupTask", + "name": "determineSourceServerGroup", + "startTime": 1633462691062, + "endTime": 1633462691248, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "2", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.DetermineHealthProvidersTask", + "name": "determineHealthProviders", + "startTime": 1633462691295, + "endTime": 1633462691436, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "3", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.CaptureSourceServerGroupCapacityTask", + "name": "snapshotSourceServerGroup", + "startTime": 1633462691489, + "endTime": 1633462691585, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "4", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.CreateServerGroupTask", + "name": "createServerGroup", + "startTime": 1633462691634, + "endTime": 1633462691840, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "5", + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "monitorDeploy", + "startTime": 1633462691895, + "endTime": 1633462702409, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "6", 
+ "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.AddServerGroupEntityTagsTask", + "name": "tagServerGroup", + "startTime": 1633462702460, + "endTime": 1633462702872, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "7", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.instance.WaitForUpInstancesTask", + "name": "waitForUpInstances", + "startTime": 1633462702962, + "endTime": 1633462814267, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "8", + "implementingClass": "com.netflix.spinnaker.orca.kato.tasks.JarDiffsTask", + "name": "jarDiffs", + "startTime": 1633462814315, + "endTime": 1633462815619, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "9", + "implementingClass": "com.netflix.spinnaker.orca.igor.tasks.GetCommitsTask", + "name": "getCommits", + "startTime": 1633462815713, + "endTime": 1633462815925, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8ZD8FTVRR9JGC1J4PFM54W", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8ZH37H9YMENN05MCTRCS2Q", + "refId": "1<2<2<1<1>1", + "type": "applySourceServerGroupCapacity", + "name": "restoreMinCapacityFromSnapshot", + "startTime": 1633462816093, + "endTime": 1633462817296, + "status": "SUCCEEDED", + "context": { + "force.cache.refresh.errors": [ + + ], + "credentials": "titustestvpc", + "processed.server.groups": [ + + ], + "cloudProvider": "titus", + "deploy.server.groups": { + + }, + "refreshed.server.groups": [ + + ], + "zones": [ + + ] + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.pipeline.providers.aws.ApplySourceServerGroupCapacityTask", + "name": "restoreMinCapacity", + "startTime": 1633462816376, + "endTime": 1633462816545, + "status": "SUCCEEDED", + "stageStart": true, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "2", + "implementingClass": "com.netflix.spinnaker.orca.temporal.stage.MonitorTemporalWorkflowTask", + "name": "waitForCapacityMatch", + "startTime": 1633462816674, + "endTime": 1633462816869, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": false, + "loopStart": false, + "loopEnd": false + }, + { + "id": "3", + "implementingClass": "com.netflix.spinnaker.orca.clouddriver.tasks.servergroup.ServerGroupCacheForceRefreshTask", + "name": "forceCacheRefresh", + "startTime": 1633462816925, + "endTime": 1633462817147, + "status": "SUCCEEDED", + "stageStart": false, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_AFTER", + "parentStageId": "01FH8ZD8KZGXXM45M7N160NR1R", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8ZD77BQC9QC909T50AB5EJ", + "refId": "1<3", + "type": "initManagedRolloutStep", + "name": "Run next Rollout Step", + "startTime": 1633462818391, + "status": "RUNNING", + "context": { + "rolloutWorkflowId": "rollout:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD710VC49HBF8G9VF7V28" + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.CompleteManagedRolloutStepTask", + "name": "completeRolloutStep", + "status": "NOT_STARTED", + "stageStart": true, + "stageEnd": 
true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8ZD710VC49HBF8G9VF7V28", + "requisiteStageRefIds": [ + "1<2" + ] + }, + { + "id": "01FH8ZH5MPNABYRKAAMW8E3DF5", + "refId": "1<3<1", + "type": "waitForNextRolloutStep", + "name": "Wait For Next Rollout Step", + "startTime": 1633462818551, + "status": "RUNNING", + "context": { + "rolloutWorkflowId": "rollout:01FH8ZD7107CZN3V5YKE15RRVA:01FH8ZD710VC49HBF8G9VF7V28" + }, + "outputs": { + + }, + "tasks": [ + { + "id": "1", + "implementingClass": "com.netflix.spinnaker.orca.managedrollout.tasks.AwaitReadyRolloutStepTask", + "name": "awaitReadyRolloutStep", + "startTime": 1633462818683, + "status": "RUNNING", + "stageStart": true, + "stageEnd": true, + "loopStart": false, + "loopEnd": false + } + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8ZD77BQC9QC909T50AB5EJ", + "requisiteStageRefIds": [ + + ] + }, + { + "id": "01FH8ZH5MPDSPC6XXQ4JN14SYC", + "refId": "1<3<2", + "type": "doManagedRollout", + "name": "Deploy Rollout Step", + "status": "NOT_STARTED", + "context": { + + }, + "outputs": { + + }, + "tasks": [ + + ], + "syntheticStageOwner": "STAGE_BEFORE", + "parentStageId": "01FH8ZD77BQC9QC909T50AB5EJ", + "requisiteStageRefIds": [ + "1<3<1" + ] + } + ], + "startTime": 1633462688877, + "status": "RUNNING", + "authentication": { + "user": "emburns@netflix.com", + "allowedAccounts": [ + + ] + }, + "origin": "keel", + "trigger": { + "type": "keel", + "correlationId": "titus:cluster:titustestvpc:emburnstest-eggo:managed-rollout", + "user": "keel", + "parameters": { + + }, + "artifacts": [ + + ], + "notifications": [ + + ], + "rebake": false, + "dryRun": false, + "strategy": false, + "resolvedExpectedArtifacts": [ + + ], + "expectedArtifacts": [ + + ] + }, + "description": "Deploy master-h97.cd22e76 to cluster emburnstest-eggo in titustestvpc/us-east-1,us-west-2 using a managed rollout", + "notifications": [ + + ], + "initialConfig": { + + }, + "systemNotifications": [ + + ], + "partition": "us-west-2" + } +} diff --git a/keel-scm/src/main/kotlin/com/netflix/spinnaker/keel/preview/PreviewEnvironmentCodeEventListener.kt b/keel-scm/src/main/kotlin/com/netflix/spinnaker/keel/preview/PreviewEnvironmentCodeEventListener.kt index 067f595c4e..321a247a5b 100644 --- a/keel-scm/src/main/kotlin/com/netflix/spinnaker/keel/preview/PreviewEnvironmentCodeEventListener.kt +++ b/keel-scm/src/main/kotlin/com/netflix/spinnaker/keel/preview/PreviewEnvironmentCodeEventListener.kt @@ -6,12 +6,11 @@ import com.netflix.spectator.api.Registry import com.netflix.spinnaker.keel.api.ArtifactReferenceProvider import com.netflix.spinnaker.keel.api.DeliveryConfig import com.netflix.spinnaker.keel.api.Dependent -import com.netflix.spinnaker.keel.api.Moniker -import com.netflix.spinnaker.keel.api.Monikered import com.netflix.spinnaker.keel.api.PreviewEnvironmentSpec import com.netflix.spinnaker.keel.api.Resource import com.netflix.spinnaker.keel.api.ResourceSpec -import com.netflix.spinnaker.keel.api.generateId +import com.netflix.spinnaker.keel.api.artifacts.ArtifactOriginFilter +import com.netflix.spinnaker.keel.api.artifacts.DeliveryArtifact import com.netflix.spinnaker.keel.api.titus.TitusClusterSpec import com.netflix.spinnaker.keel.docker.ReferenceProvider import com.netflix.spinnaker.keel.front50.Front50Cache @@ -97,27 +96,25 @@ class PreviewEnvironmentCodeEventListener( } log.debug("Processing PR finished event: $event") - matchingPreviewEnvironmentSpecs(event).forEach { (deliveryConfig, 
previewEnvSpecs) -> - previewEnvSpecs.forEach { previewEnvSpec -> - // Need to get a fully-hydrated delivery config here because the one we get above doesn't include environments - // for the sake of performance. - val hydratedDeliveryConfig = repository.getDeliveryConfig(deliveryConfig.name) - - hydratedDeliveryConfig.environments.filter { - it.isPreview && it.repoKey == event.repoKey && it.branch == event.pullRequestBranch - }.forEach { previewEnv -> - log.debug("Marking preview environment for deletion: ${previewEnv.name} in app ${deliveryConfig.application}, " + + event.matchingPreviewEnvironmentSpecs().forEach { (deliveryConfig, _) -> + // Need to get a fully-hydrated delivery config here because the one we get above doesn't include environments + // for the sake of performance. + val hydratedDeliveryConfig = repository.getDeliveryConfig(deliveryConfig.name) + + hydratedDeliveryConfig.environments.filter { + it.isPreview && it.repoKey == event.repoKey && it.branch == event.pullRequestBranch + }.forEach { previewEnv -> + log.debug("Marking preview environment for deletion: ${previewEnv.name} in app ${deliveryConfig.application}, " + + "branch ${event.pullRequestBranch} of repository ${event.repoKey}") + // Here we just mark preview environments for deletion. [ResourceActuator] will delete the associated resources + // and [EnvironmentCleaner] will delete the environments when empty. + try { + environmentDeletionRepository.markForDeletion(previewEnv) + event.emitCounterMetric(CODE_EVENT_COUNTER, PREVIEW_ENVIRONMENT_MARK_FOR_DELETION_SUCCESS, deliveryConfig.application) + } catch (e: Exception) { + log.error("Failed to mark preview environment for deletion: ${previewEnv.name} in app ${deliveryConfig.application}, " + "branch ${event.pullRequestBranch} of repository ${event.repoKey}") - // Here we just mark preview environments for deletion. [ResourceActuator] will delete the associated resources - // and [EnvironmentCleaner] will delete the environments when empty.
- try { - environmentDeletionRepository.markForDeletion(previewEnv) - event.emitCounterMetric(CODE_EVENT_COUNTER, PREVIEW_ENVIRONMENT_MARK_FOR_DELETION_SUCCESS, deliveryConfig.application) - } catch(e: Exception) { - log.error("Failed to mark preview environment for deletion:${previewEnv.name} in app ${deliveryConfig.application}, " + - "branch ${event.pullRequestBranch} of repository ${event.repoKey}") - event.emitCounterMetric(CODE_EVENT_COUNTER, PREVIEW_ENVIRONMENT_MARK_FOR_DELETION_ERROR, deliveryConfig.application) - } + event.emitCounterMetric(CODE_EVENT_COUNTER, PREVIEW_ENVIRONMENT_MARK_FOR_DELETION_ERROR, deliveryConfig.application) } } } @@ -142,22 +139,25 @@ class PreviewEnvironmentCodeEventListener( log.debug("Processing PR event: $event") val startTime = clock.instant() - if (event.pullRequestId == null || event.pullRequestId == "-1") { + if (event.pullRequestId == "-1") { log.debug("Ignoring PR event as it's not associated with a PR: $event") return } - matchingPreviewEnvironmentSpecs(event).forEach { (deliveryConfig, previewEnvSpecs) -> + event.matchingPreviewEnvironmentSpecs().forEach { (deliveryConfig, previewEnvSpecs) -> log.debug("Importing delivery config for app ${deliveryConfig.application} " + "from branch ${event.pullRequestBranch}") // We always want to dismiss the previous notifications, and if needed to create a new one notificationRepository.dismissNotification(DeliveryConfigImportFailed::class.java, deliveryConfig.application, event.pullRequestBranch) + val manifestPath = runBlocking { front50Cache.applicationByName(deliveryConfig.application).managedDelivery?.manifestPath } + val commitEvent = event.toCommitEvent() - val newDeliveryConfig = try { + + val deliveryConfigFromBranch = try { deliveryConfigImporter.import( codeEvent = commitEvent, manifestPath = manifestPath @@ -169,13 +169,20 @@ class PreviewEnvironmentCodeEventListener( } catch (e: Exception) { log.error("Error retrieving delivery config: $e", e) event.emitCounterMetric(CODE_EVENT_COUNTER, DELIVERY_CONFIG_RETRIEVAL_ERROR, deliveryConfig.application) - eventPublisher.publishDeliveryConfigImportFailed(deliveryConfig.application, event, clock.instant(), e.message ?: "Unknown", scmUtils.getPullRequestLink(event)) + eventPublisher.publishDeliveryConfigImportFailed( + deliveryConfig.application, + event, + event.pullRequestBranch, + clock.instant(), + e.message ?: "Unknown", + scmUtils.getPullRequestLink(event) + ) return@forEach } log.info("Creating/updating preview environments for application ${deliveryConfig.application} " + "from branch ${event.pullRequestBranch}") - createPreviewEnvironments(event, newDeliveryConfig, previewEnvSpecs) + createPreviewEnvironments(event, deliveryConfig, deliveryConfigFromBranch, previewEnvSpecs) event.emitDurationMetric(COMMIT_HANDLING_DURATION, startTime, deliveryConfig.application) } } @@ -183,8 +190,8 @@ class PreviewEnvironmentCodeEventListener( /** * Returns a map of [DeliveryConfig]s to the [PreviewEnvironmentSpec]s that match the code event. 
*/ - private fun matchingPreviewEnvironmentSpecs(event: CodeEvent): Map<DeliveryConfig, List<PreviewEnvironmentSpec>> { - val branchToMatch = if (event is PrEvent) event.pullRequestBranch else event.targetBranch + private fun CodeEvent.matchingPreviewEnvironmentSpecs(): Map<DeliveryConfig, List<PreviewEnvironmentSpec>> { + val branchToMatch = if (this is PrEvent) pullRequestBranch else targetBranch return repository .allDeliveryConfigs(ATTACH_PREVIEW_ENVIRONMENTS) .associateWith { deliveryConfig -> @@ -193,22 +200,22 @@ class PreviewEnvironmentCodeEventListener( front50Cache.applicationByName(deliveryConfig.application) } catch (e: Exception) { log.error("Error retrieving application ${deliveryConfig.application}: $e") - event.emitCounterMetric(CODE_EVENT_COUNTER, APPLICATION_RETRIEVAL_ERROR, deliveryConfig.application) + emitCounterMetric(CODE_EVENT_COUNTER, APPLICATION_RETRIEVAL_ERROR, deliveryConfig.application) null } } deliveryConfig.previewEnvironments.filter { previewEnvSpec -> - event.matchesApplicationConfig(appConfig) && previewEnvSpec.branch.matches(branchToMatch) + matchesApplicationConfig(appConfig) && previewEnvSpec.branch.matches(branchToMatch) } } .filterValues { it.isNotEmpty() } .also { if (it.isEmpty()) { - log.debug("No delivery configs with matching preview environments found for event: $event") - event.emitCounterMetric(CODE_EVENT_COUNTER, DELIVERY_CONFIG_NOT_FOUND) + log.debug("No delivery configs with matching preview environments found for code event: $this") + emitCounterMetric(CODE_EVENT_COUNTER, DELIVERY_CONFIG_NOT_FOUND) } else if (it.size > 1 || it.any { (_, previewEnvSpecs) -> previewEnvSpecs.size > 1 }) { - log.warn("Expected a single delivery config and preview env spec to match code event, found many: $event") + log.warn("Expected a single delivery config and preview env spec to match code event, found many: $this") } } } @@ -219,24 +226,44 @@ class PreviewEnvironmentCodeEventListener( */ private fun createPreviewEnvironments( prEvent: PrEvent, - deliveryConfig: DeliveryConfig, + originalConfig: DeliveryConfig, + configFromBranch: DeliveryConfig, previewEnvSpecs: List<PreviewEnvironmentSpec> ) { + // Need to get a fully-hydrated delivery config here because the one we get above doesn't include environments + // for the sake of performance.
+ val hydratedOriginalConfig = repository.getDeliveryConfig(originalConfig.name) + previewEnvSpecs.forEach { previewEnvSpec -> - val baseEnv = deliveryConfig.environments.find { it.name == previewEnvSpec.baseEnvironment } + val baseEnv = configFromBranch.environments.find { it.name == previewEnvSpec.baseEnvironment } ?: error("Environment '${previewEnvSpec.baseEnvironment}' referenced in preview environment spec not found.") + val suffix = prEvent.pullRequestBranch.shortHash + + // Before generating the preview environment, create artifacts with the same branch filter as the preview + // environment spec, which will be substituted in the preview resources + val previewArtifacts = baseEnv.resources.mapNotNull { resource -> + log.debug("Looking for artifact associated with resource {} in delivery config for {} from branch {}", + resource.id, configFromBranch.application, prEvent.pullRequestBranch) + resourceFactory.migrate(resource).findAssociatedArtifact(configFromBranch) + }.map { artifact -> + val previewArtifact = artifact.toPreviewArtifact(configFromBranch, previewEnvSpec) + log.debug("Generated preview artifact $previewArtifact with branch filter ${previewEnvSpec.branch}") + previewArtifact + }.toSet() + + // Add the preview artifacts to the updated config we need to store + var updatedConfig = hydratedOriginalConfig.run { copy(artifacts = artifacts + previewArtifacts) } + val previewEnv = baseEnv.copy( - // if the branch is "feature/abc", and the base environment is "test", then the environment - // would be named "test-feature-abc" - name = "${baseEnv.name}-${prEvent.pullRequestBranch.toPreviewName()}", + name = "${baseEnv.name}-$suffix", isPreview = true, constraints = emptySet(), postDeploy = emptyList(), verifyWith = previewEnvSpec.verifyWith, notifications = previewEnvSpec.notifications, resources = baseEnv.resources.mapNotNull { res -> - res.toPreviewResource(deliveryConfig, previewEnvSpec, prEvent.pullRequestBranch) + res.toPreviewResource(updatedConfig, previewEnvSpec, suffix) }.toSet() ).apply { addMetadata( @@ -247,48 +274,56 @@ class PreviewEnvironmentCodeEventListener( ) } - log.debug("Creating/updating preview environment ${previewEnv.name} for application ${deliveryConfig.application} " + + // Add the preview environment to the updated config we need to store + updatedConfig = updatedConfig.run { copy(environments = environments + previewEnv) } + + log.debug("Updating delivery config with preview environment ${previewEnv.name} for application ${configFromBranch.application} " + "from branch ${prEvent.pullRequestBranch}") try { - // TODO: run all these within a single transaction - previewEnv.resources.forEach { resource -> - repository.upsertResource(resource, deliveryConfig.name) - } - repository.storeEnvironment(deliveryConfig.name, previewEnv) - prEvent.emitCounterMetric(CODE_EVENT_COUNTER, PREVIEW_ENVIRONMENT_UPSERT_SUCCESS, deliveryConfig.application) + repository.upsertDeliveryConfig(updatedConfig) + prEvent.emitCounterMetric(CODE_EVENT_COUNTER, PREVIEW_ENVIRONMENT_UPSERT_SUCCESS, configFromBranch.application) } catch (e: Exception) { - log.error("Error storing/updating preview environment ${deliveryConfig.application}/${previewEnv.name}: $e", e) - prEvent.emitCounterMetric(CODE_EVENT_COUNTER, PREVIEW_ENVIRONMENT_UPSERT_ERROR, deliveryConfig.application) + log.error("Error storing/updating preview environment ${configFromBranch.application}/${previewEnv.name}: $e", e) + prEvent.emitCounterMetric(CODE_EVENT_COUNTER, PREVIEW_ENVIRONMENT_UPSERT_ERROR, 
configFromBranch.application) } } } + /** + * Creates a copy of the [DeliveryArtifact] with the [ArtifactOriginFilter] replaced to match the branch + * filter in the [PreviewEnvironmentSpec]. + */ + private fun DeliveryArtifact.toPreviewArtifact(deliveryConfig: DeliveryConfig, previewEnvSpec: PreviewEnvironmentSpec) = + // we convert to a map and back because the artifact sub-types are unknown at compile time + objectMapper.convertValue<MutableMap<String, Any?>>(this) + .let { + it["deliveryConfigName"] = deliveryConfig.name + it["reference"] = "$reference-preview-${previewEnvSpec.branch.toString().shortHash}" + it["from"] = ArtifactOriginFilter(branch = previewEnvSpec.branch) + it["isPreview"] = true + objectMapper.convertValue<DeliveryArtifact>(it) + } + /** * Converts a [Resource] to its preview environment version, i.e. a resource with the exact same - * characteristics but with its name/ID modified to include the specified [branch], and with + * characteristics but with its name/ID modified to include the specified [suffix], and with * its artifact reference (if applicable) updated to use the artifact matching the [PreviewEnvironmentSpec] * branch filter, if available. */ private fun Resource<*>.toPreviewResource( deliveryConfig: DeliveryConfig, previewEnvSpec: PreviewEnvironmentSpec, - branch: String + suffix: String ): Resource<*>? { // start by migrating the resource spec so we can rely on the latest implementation var previewResource = resourceFactory.migrate(this) - if (previewResource.spec !is Monikered) { - log.debug("Ignoring non-monikered resource ${this.id} since it might conflict with the base environment") - return null - } - log.debug("Copying resource ${this.id} to preview resource") - previewResource = previewResource as Resource - // add the branch detail to the moniker/name/id - previewResource = withBranchDetail(previewResource, branch) + // add the suffix to the moniker/name/id + previewResource = previewResource.deepRename(suffix) - // update artifact reference if applicable to match the branch filter of the preview environment + // update artifact reference if applicable to match the suffix filter of the preview environment if (previewResource.spec is ArtifactReferenceProvider) { log.debug("Attempting to replace artifact reference for resource ${this.id}") previewResource = previewResource.withBranchArtifact(deliveryConfig, previewEnvSpec) @@ -299,47 +334,24 @@ class PreviewEnvironmentCodeEventListener( // update dependency names that are part of the preview environment and so have new names if (previewResource.spec is Dependent) { log.debug("Attempting to update dependencies for resource ${this.id}") - previewResource = previewResource.withDependenciesRenamed(deliveryConfig, previewEnvSpec, branch) + previewResource = previewResource.withDependenciesRenamed(deliveryConfig, previewEnvSpec, suffix) } else { log.debug("Resource ${this.id} (${previewResource.spec::class.simpleName}) does not implement the Dependent interface") } log.debug("Copied resource ${this.id} to preview resource ${previewResource.id}") - return previewResource + return previewResource.basedOn(this) } - /** - * Adds the specified [branch] to the [Moniker.detail] field of the [ResourceSpec] while respecting resource - * naming constraints, and updates the resource ID to match.
- */ - internal fun withBranchDetail(resource: Resource, branch: String): Resource { - with(resource) { - val updatedMoniker = spec.moniker.withBranchDetail(branch) - return copy( - spec = this.spec.toMutableMap().let { newSpec -> - newSpec["moniker"] = updatedMoniker - objectMapper.convertValue(newSpec, spec::class.java) - } - ).run { - val newId = generateId(this.kind, this.spec).let { - // account for the case where the ID is not synthesized from the moniker - if (!it.contains(updatedMoniker.detail!!)) "$it-${updatedMoniker.detail}" else it - } - this.copy(metadata = mapOf( - // this is so the resource ID is updated with the new name (which is in the spec) - "id" to newId, - "application" to this.application, - "basedOn" to this.id - )) - } - } - } + private fun Resource<*>.basedOn(resource: Resource<*>) = copy( + metadata = metadata + mapOf("basedOn" to resource.id) + ) /** * Replaces the artifact reference in the resource spec with the one matching the [PreviewEnvironmentSpec] branch * filter, if such an artifact is defined in the delivery config. */ - private fun Resource.withBranchArtifact( + private fun Resource.withBranchArtifact( deliveryConfig: DeliveryConfig, previewEnvSpec: PreviewEnvironmentSpec ): Resource { @@ -380,22 +392,19 @@ class PreviewEnvironmentCodeEventListener( } /** - * Adds the specified [branchDetail] to the [Moniker.detail] field of the [ResourceSpec] of each dependency - * of this resource that is also managed. - * - * Limitation: this method supports renaming only resources whose specs are [Monikered]. + * Adds the specified [suffix] to the name of each dependency of this resource that is also managed. */ - private fun Resource.withDependenciesRenamed( + private fun Resource.withDependenciesRenamed( deliveryConfig: DeliveryConfig, previewEnvSpec: PreviewEnvironmentSpec, - branchDetail: String + suffix: String ): Resource { val baseEnvironment = deliveryConfig.findBaseEnvironment(previewEnvSpec) val updatedSpec = if (spec is Dependent) { val renamedDeps = (spec as Dependent).dependsOn.map { dep -> val candidate = baseEnvironment.resources.find { - it.spec is Monikered && it.named(dep.name) && dep.matchesKind(it.kind) + it.named(dep.name) && dep.matchesKind(it.kind) } if (candidate != null) { log.debug("Checking if dependency needs renaming: kind '${candidate.kind.kind}', name '${candidate.name}', application '$application'") @@ -405,7 +414,7 @@ class PreviewEnvironmentCodeEventListener( log.debug("Skipping dependency rename for default security group ${candidate.name} in resource ${this.name}") dep } else { - val newName = withBranchDetail(candidate as Resource, branchDetail).name + val newName = candidate.deepRename(suffix).name log.debug("Renaming ${dep.type} dependency ${candidate.name} to $newName in resource ${this.name}") dep.renamed(newName) } diff --git a/keel-scm/src/main/kotlin/com/netflix/spinnaker/keel/preview/utils.kt b/keel-scm/src/main/kotlin/com/netflix/spinnaker/keel/preview/utils.kt index 1bfb2a5120..4b98f1d9a8 100644 --- a/keel-scm/src/main/kotlin/com/netflix/spinnaker/keel/preview/utils.kt +++ b/keel-scm/src/main/kotlin/com/netflix/spinnaker/keel/preview/utils.kt @@ -181,37 +181,5 @@ private fun Collection?.namesForType(type: DependencyType): Set.toTags() = BasicTag(first, second) fun ApplicationEventPublisher.publishDeliveryConfigImportFailed( application: String, event: CodeEvent, + branch: String, timestamp: Instant, reason: String, link: String? 
@@ -46,7 +47,7 @@ fun ApplicationEventPublisher.publishDeliveryConfigImportFailed( DeliveryConfigImportFailed( triggeredAt = timestamp, application = application, - branch = targetBranch, + branch = branch, repoType = repoType, projectKey = projectKey, repoSlug = repoSlug, diff --git a/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/preview/PreviewEnvironmentCodeEventListenerTests.kt b/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/preview/PreviewEnvironmentCodeEventListenerTests.kt index 8c7ba39e9e..fabb7faab4 100644 --- a/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/preview/PreviewEnvironmentCodeEventListenerTests.kt +++ b/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/preview/PreviewEnvironmentCodeEventListenerTests.kt @@ -10,10 +10,11 @@ import com.netflix.spinnaker.keel.api.DependencyType.SECURITY_GROUP import com.netflix.spinnaker.keel.api.Dependent import com.netflix.spinnaker.keel.api.Environment import com.netflix.spinnaker.keel.api.Moniker -import com.netflix.spinnaker.keel.api.Monikered import com.netflix.spinnaker.keel.api.PreviewEnvironmentSpec import com.netflix.spinnaker.keel.api.Resource import com.netflix.spinnaker.keel.api.artifacts.ArtifactOriginFilter +import com.netflix.spinnaker.keel.api.artifacts.DEBIAN +import com.netflix.spinnaker.keel.api.artifacts.DOCKER import com.netflix.spinnaker.keel.api.artifacts.branchName import com.netflix.spinnaker.keel.api.artifacts.branchStartsWith import com.netflix.spinnaker.keel.api.ec2.ClusterDependencies @@ -60,12 +61,10 @@ import com.netflix.spinnaker.keel.scm.ScmUtils import com.netflix.spinnaker.keel.scm.toTags import com.netflix.spinnaker.keel.test.DummyArtifactReferenceResourceSpec import com.netflix.spinnaker.keel.test.DummyLocatableResourceSpec -import com.netflix.spinnaker.keel.test.DummyResourceSpec import com.netflix.spinnaker.keel.test.applicationLoadBalancer import com.netflix.spinnaker.keel.test.configuredTestObjectMapper import com.netflix.spinnaker.keel.test.debianArtifact import com.netflix.spinnaker.keel.test.dockerArtifact -import com.netflix.spinnaker.keel.test.locatableResource import com.netflix.spinnaker.keel.test.resource import com.netflix.spinnaker.keel.test.submittedResource import com.netflix.spinnaker.keel.test.titusCluster @@ -87,17 +86,18 @@ import strikt.api.expectThat import strikt.assertions.contains import strikt.assertions.containsExactlyInAnyOrder import strikt.assertions.containsKeys +import strikt.assertions.endsWith import strikt.assertions.isA import strikt.assertions.isEmpty import strikt.assertions.isEqualTo -import strikt.assertions.isLessThanOrEqualTo +import strikt.assertions.isTrue import strikt.assertions.one import java.time.Clock import java.time.Duration import io.mockk.coEvery as every import io.mockk.coVerify as verify -class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { +internal class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { class Fixture { private val objectMapper = configuredTestObjectMapper() private val clock: Clock = MutableClock() @@ -241,7 +241,7 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { application = "fnord", name = "myconfig", serviceAccount = "keel@keel.io", - artifacts = setOf(dockerFromMain, dockerFromBranch, debianFromMain, debianFromBranch), + artifacts = setOf(dockerFromMain, debianFromMain), environments = setOf( Environment( name = "test", @@ -266,7 +266,10 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { ) ) - val previewEnv = slot() + val previewEnvSpec = 
deliveryConfig.previewEnvironments.first() + val updatedConfig = slot() + val previewEnv by lazy { updatedConfig.captured.environments.find { it.isPreview }!! } + val previewArtifacts by lazy { updatedConfig.captured.artifacts.filter { it.isPreview } } fun setupMocks() { every { @@ -328,9 +331,9 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { ) } - every { repository.upsertResource(any(), any()) } just runs + every { repository.getDeliveryConfig(deliveryConfig.name) } returns deliveryConfig - every { repository.storeEnvironment(any(), capture(previewEnv)) } just runs + every { repository.upsertDeliveryConfig(capture(updatedConfig)) } answers { updatedConfig.captured } every { environmentDeletionRepository.markForDeletion(any()) } just runs @@ -441,131 +444,108 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { } } - test("a preview environment and associated resources are created/updated") { - previewEnv.captured.resources.forEach { previewResource -> - verify { - repository.upsertResource(previewResource, deliveryConfig.name) - } - } + test("the delivery config is updated in the database") { verify { - repository.storeEnvironment(deliveryConfig.name, previewEnv.captured) + repository.upsertDeliveryConfig(any()) + } + expectThat(updatedConfig.captured.environments.size > deliveryConfig.environments.size) + expectThat(updatedConfig.captured.artifacts.size > deliveryConfig.artifacts.size) + } + + test("the updated delivery config contains preview artifacts") { + expectThat(previewArtifacts) { + one { + get { name }.isEqualTo(dockerFromMain.name) + get { type }.isEqualTo(dockerFromMain.type) + get { from!!.branch }.isEqualTo(previewEnvSpec.branch) + } + one { + get { name }.isEqualTo(debianFromMain.name) + get { type }.isEqualTo(debianFromMain.type) + get { from!!.branch }.isEqualTo(previewEnvSpec.branch) + } } } + test("the updated delivery config contains the preview environment") { + expectThat(updatedConfig.captured.environments) + .one { + get { isPreview }.isTrue() + } + } + test("the preview environment has no constraints or post-deploy actions") { - expectThat(previewEnv.captured.constraints).isEmpty() - expectThat(previewEnv.captured.postDeploy).isEmpty() + expectThat(previewEnv.constraints).isEmpty() + expectThat(previewEnv.postDeploy).isEmpty() } test("the name of the preview environment is generated correctly") { val baseEnv = deliveryConfig.environments.first() - val branchDetail = prEvent.pullRequestBranch.toPreviewName() + val suffix = prEvent.pullRequestBranch.shortHash - expectThat(previewEnv.captured) { - get { name }.isEqualTo("${baseEnv.name}-$branchDetail") + expectThat(previewEnv) { + get { name }.isEqualTo("${baseEnv.name}-$suffix") } } test("relevant metadata is added to the preview environment") { - expectThat(previewEnv.captured.metadata).containsKeys("basedOn", "repoKey", "branch", "pullRequestId") + expectThat(previewEnv.metadata).containsKeys("basedOn", "repoKey", "branch", "pullRequestId") } test("resources are migrated to their latest version before processing") { - val baseEnv = deliveryConfig.environments.first() - verify(exactly = baseEnv.resources.size) { - resourceFactory.migrate(any()) + verify { + resourceFactory.migrate(clusterWithOldSpecVersion) } + @Suppress("DEPRECATION") expect { that(clusterWithOldSpecVersion.kind) .isEqualTo(EC2_CLUSTER_V1.kind) - that(previewEnv.captured.resources.find { it.basedOn == clusterWithOldSpecVersion.id }!!.kind) + that(previewEnv.resources.find { it.basedOn == 
clusterWithOldSpecVersion.id }!!.kind) .isEqualTo(EC2_CLUSTER_V1_1.kind) } } - test("the name of monikered resources is updated with branch detail") { + test("resource names/IDs are updated with branch hash") { val baseEnv = deliveryConfig.environments.first() - val baseResource = baseEnv.resources.first() as Resource - val previewResource = previewEnv.captured.resources.first() - - expectThat(previewResource.spec) - .isA() - .get { moniker } - .isEqualTo(subject.withBranchDetail(baseResource, prEvent.pullRequestBranch).spec.moniker) - } + val baseResource = baseEnv.resources.first() + val previewResource = previewEnv.resources.first() - test("updated resource names respect the max allowed length") { - // monikered resources with and without stack and detail - listOf( - locatableResource(moniker = Moniker(app = "fnord", stack = "stack", detail = "detail")), - locatableResource(moniker = Moniker(app = "fnord", stack = "stack")), - locatableResource(moniker = Moniker(app = "fnord")), - ).forEach { resource -> - val updatedName = subject.withBranchDetail(resource, "feature/a-very-long-branch-name").name - expectThat(updatedName.length) - .describedAs("length of preview resource name $updatedName (${updatedName.length})") - .isLessThanOrEqualTo(MAX_RESOURCE_NAME_LENGTH) + expectThat(previewResource).run { + isEqualTo(baseResource.deepRename(prEvent.pullRequestBranch.shortHash)) + get { name }.endsWith(prEvent.pullRequestBranch.shortHash) + get { id }.endsWith(prEvent.pullRequestBranch.shortHash) } } - test("updated resource names are DNS-compatible") { - val resource = locatableResource(moniker = Moniker(app = "fnord")) - val updatedName = subject.withBranchDetail(resource, "feature/a_branch_name_with_underscores").name - expectThat(updatedName).not().contains("_") - } - - test("the artifact reference in a resource is updated to match the preview environment branch filter") { - expectThat(previewEnv.captured.resources.find { it.basedOn == cluster.id }?.spec) + test("the artifact reference in a resource is updated to match the preview artifact") { + expectThat(previewEnv.resources.find { it.basedOn == cluster.id }?.spec) .isA() - .get { artifactReference }.isEqualTo(dockerFromBranch.reference) + .get { artifactReference }.isEqualTo(previewArtifacts.find { it.type == DOCKER }!!.reference) - expectThat(previewEnv.captured.resources.find { it.basedOn == clusterWithOldSpecVersion.id }?.spec) + expectThat(previewEnv.resources.find { it.basedOn == clusterWithOldSpecVersion.id }?.spec) // this also demonstrates that the old cluster spec gets migrated and now supports the standard artifact reference interface .isA() - .get { artifactReference }.isEqualTo(debianFromBranch.reference) + .get { artifactReference }.isEqualTo(previewArtifacts.find { it.type == DEBIAN }!!.reference) } test("the names of resource dependencies present in the preview environment are adjusted to match") { - val branchDetail = prEvent.pullRequestBranch.toPreviewName() + val suffix = prEvent.pullRequestBranch.shortHash val dependency = applicationLoadBalancer - expectThat(previewEnv.captured.resources.find { it.basedOn == clusterWithDependencies.id }?.spec) + expectThat(previewEnv.resources.find { it.basedOn == clusterWithDependencies.id }?.spec) .isA() .get { dependsOn.first { it.type == LOAD_BALANCER }.name } - .isEqualTo(dependency.spec.moniker.withBranchDetail(branchDetail).name) + .isEqualTo(dependency.spec.moniker.withSuffix(suffix).name) } test("the names of the default security groups are not changed in the dependencies") { 
- expectThat(previewEnv.captured.resources.find { it.basedOn == clusterWithDependencies.id }?.spec) + expectThat(previewEnv.resources.find { it.basedOn == clusterWithDependencies.id }?.spec) .isA() .get { dependsOn.filter { it.type == SECURITY_GROUP }.map { it.name }.toSet() } .containsExactlyInAnyOrder(defaultAppSecurityGroup.name, defaultElbSecurityGroup.name) } } - - context("without an artifact in the delivery config matching the branch filter") { - modifyFixture { - deliveryConfig = deliveryConfig.run { - copy( - artifacts = artifacts.map { - if (it == dockerFromBranch) dockerWithNonMatchingFilter else it - }.toSet() - ) - } - } - - before { - setupMocks() // to pick up the updated delivery config above - subject.handlePrEvent(prEvent) - } - - test("the artifact reference in a resource is not updated") { - expectThat(previewEnv.captured.resources.find { it.basedOn == cluster.id }?.spec) - .isA() - .get { artifactReference }.isEqualTo(dockerFromMain.reference) - } - } } context("an app with custom manifest path") { @@ -631,7 +611,7 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { every { repository.getDeliveryConfig(deliveryConfig.name) } returns deliveryConfig.copy( - environments = deliveryConfig.environments + setOf(previewEnv.captured) + environments = deliveryConfig.environments + setOf(previewEnv) ) subject.handlePrFinished(prEvent) @@ -639,7 +619,7 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { test("the matching preview environment is marked for deletion") { verify { - environmentDeletionRepository.markForDeletion(previewEnv.captured) + environmentDeletionRepository.markForDeletion(previewEnv) } } @@ -672,9 +652,7 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { test("event is ignored") { verify(exactly = 0) { - repository.upsertResource(any(), deliveryConfig.name) - repository.upsertResource(any(), deliveryConfig.name) - repository.storeEnvironment(deliveryConfig.name, any()) + repository.upsertDeliveryConfig(any()) } verify { importer wasNot called @@ -724,9 +702,11 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { } test("an event is published") { + val failureEvent = slot() verify { - eventPublisher.publishEvent(any()) + eventPublisher.publishEvent(capture(failureEvent)) } + expectThat(failureEvent.captured.branch).isEqualTo(prOpenedEvent.pullRequestBranch) } } @@ -752,10 +732,10 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { } } - context("failure to upsert preview environment") { + context("failure to upsert delivery config") { modifyFixture { every { - repository.storeEnvironment(any(), any()) + repository.upsertDeliveryConfig(any()) } throws SystemException("oh noes!") } @@ -779,9 +759,7 @@ class PreviewEnvironmentCodeEventListenerTests : JUnit5Minutests { private fun TestContextBuilder.testEventIgnored() { test("event is ignored") { verify(exactly = 0) { - repository.upsertResource(any(), deliveryConfig.name) - repository.upsertResource(any(), deliveryConfig.name) - repository.storeEnvironment(deliveryConfig.name, any()) + repository.upsertDeliveryConfig(any()) } verify { importer wasNot called diff --git a/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/preview/ResourceRenamingTests.kt b/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/preview/ResourceRenamingTests.kt new file mode 100644 index 0000000000..2694a8bee5 --- /dev/null +++ b/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/preview/ResourceRenamingTests.kt @@ -0,0 +1,121 @@ +package 
com.netflix.spinnaker.keel.preview + +import com.netflix.spinnaker.keel.api.ec2.AllPorts +import com.netflix.spinnaker.keel.api.ec2.ApplicationLoadBalancerSpec.TargetGroup +import com.netflix.spinnaker.keel.api.ec2.ReferenceRule +import com.netflix.spinnaker.keel.api.ec2.SecurityGroupRule.Protocol.TCP +import com.netflix.spinnaker.keel.api.ec2.SecurityGroupRule.Protocol.UDP +import com.netflix.spinnaker.keel.test.applicationLoadBalancer +import com.netflix.spinnaker.keel.test.classicLoadBalancer +import com.netflix.spinnaker.keel.test.ec2Cluster +import com.netflix.spinnaker.keel.test.randomString +import com.netflix.spinnaker.keel.test.securityGroup +import com.netflix.spinnaker.keel.test.titusCluster +import org.junit.jupiter.api.Test +import strikt.api.expectThat +import strikt.assertions.all +import strikt.assertions.endsWith +import strikt.assertions.isEqualTo +import strikt.assertions.isLessThanOrEqualTo + +internal class ResourceRenamingTests { + private val securityGroup = securityGroup().run { + copy( + spec = spec.copy( + inboundRules = setOf( + ReferenceRule(protocol = TCP, name = name, portRange = AllPorts), + ReferenceRule(protocol = UDP, name = name, portRange = AllPorts) + ) + ) + ) + } + + private val applicationLoadBalancer = applicationLoadBalancer().run { + copy( + spec = spec.copy( + targetGroups = setOf( + TargetGroup(name = "tg1", port = 80), + TargetGroup(name = "tg2", port = 443) + ) + ) + ) + } + + private val classicLoadBalancer = classicLoadBalancer() + private val titusCluster = titusCluster() + private val ec2Cluster = ec2Cluster() + + @Test + fun `deep renaming a security group renames the security group and self-referencing ingress rules`() { + expectThat(securityGroup.deepRename("suffix")) + .run { + get { name }.endsWith("-suffix") + get { id }.endsWith("-suffix") + get { metadata["id"] as String }.endsWith("-suffix") + get { spec.inboundRules.filterIsInstance() } + .all { get { name }.endsWith("-suffix") } + } + } + + @Test + fun `deep renaming an ALB renames the ALB and target groups`() { + expectThat(applicationLoadBalancer.deepRename("suffix")) + .run { + get { name }.endsWith("-suffix") + get { id }.endsWith("-suffix") + get { metadata["id"] as String }.endsWith("-suffix") + get { spec.targetGroups } + .all { get { name }.endsWith("-suffix") } + } + } + + @Test + fun `deep renaming an ALB respects max name length`() { + val albWithLongName = applicationLoadBalancer.run { + copy( + spec = spec.copy( + moniker = spec.moniker.copy( + detail = randomString(32 - spec.moniker.toName().length - 1) + ) + ) + ) + } + expectThat(albWithLongName.name.length).isEqualTo(32) // max length + expectThat(albWithLongName.deepRename("suffix")) + .run { + // still includes the suffix, but does not go over max length + get { name }.endsWith("-suffix") + get { name.length }.isLessThanOrEqualTo(32) + } + } + + @Test + fun `deep renaming a CLB renames the CLB`() { + expectThat(classicLoadBalancer.deepRename("suffix")) + .run { + get { name }.endsWith("-suffix") + get { id }.endsWith("-suffix") + get { metadata["id"] as String }.endsWith("-suffix") + } + } + + @Test + fun `deep renaming a Titus cluster renames the cluster`() { + expectThat(titusCluster.deepRename("suffix")) + .run { + get { name }.endsWith("-suffix") + get { id }.endsWith("-suffix") + get { metadata["id"] as String }.endsWith("-suffix") + } + } + + @Test + fun `deep renaming an EC2 cluster renames the cluster`() { + expectThat(ec2Cluster.deepRename("suffix")) + .run { + get { name }.endsWith("-suffix") + get { 
id }.endsWith("-suffix") + get { metadata["id"] as String }.endsWith("-suffix") + } + } +} diff --git a/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/scm/DeliveryConfigImportListenerTests.kt b/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/scm/DeliveryConfigImportListenerTests.kt index d66e439827..f9bca48cd6 100644 --- a/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/scm/DeliveryConfigImportListenerTests.kt +++ b/keel-scm/src/test/kotlin/com/netflix/spinnaker/keel/scm/DeliveryConfigImportListenerTests.kt @@ -27,10 +27,12 @@ import io.mockk.called import io.mockk.just import io.mockk.mockk import io.mockk.runs +import io.mockk.slot import org.springframework.context.ApplicationEventPublisher import org.springframework.core.env.Environment import strikt.api.expectThat import strikt.assertions.contains +import strikt.assertions.isEqualTo import strikt.assertions.one import io.mockk.coEvery as every import io.mockk.coVerify as verify @@ -317,30 +319,34 @@ class DeliveryConfigImportListenerTests : JUnit5Minutests { setupMocks() } - context("failure to retrieve delivery config") { - modifyFixture { - every { - importer.import(commitEvent, manifestPath = any()) - } throws SystemException("oh noes!") - } - - before { - subject.handleCodeEvent(commitEvent) - } + listOf(commitEvent, prMergedEvent).forEach { event -> + context("failure to retrieve delivery config for $event") { + modifyFixture { + every { + importer.import(event, manifestPath = any()) + } throws SystemException("oh noes!") + } - test("a delivery config retrieval error is counted") { - val tags = mutableListOf>() - verify { - spectator.counter(CODE_EVENT_COUNTER, capture(tags)) + before { + subject.handleCodeEvent(event) } - expectThat(tags).one { - contains(DELIVERY_CONFIG_RETRIEVAL_ERROR.toTags()) + + test("a delivery config retrieval error is counted") { + val tags = mutableListOf>() + verify { + spectator.counter(CODE_EVENT_COUNTER, capture(tags)) + } + expectThat(tags).one { + contains(DELIVERY_CONFIG_RETRIEVAL_ERROR.toTags()) + } } - } - test("an event is published") { - verify { - eventPublisher.publishEvent(any()) + test("an event is published") { + val failureEvent = slot() + verify { + eventPublisher.publishEvent(capture(failureEvent)) + } + expectThat(failureEvent.captured.branch).isEqualTo(event.targetBranch) } } } diff --git a/keel-sql/keel-sql.gradle b/keel-sql/keel-sql.gradle index 6a0279e319..f646c09821 100644 --- a/keel-sql/keel-sql.gradle +++ b/keel-sql/keel-sql.gradle @@ -1,5 +1,5 @@ plugins { - id("nu.studer.jooq") version "6.0" + id("nu.studer.jooq") version "6.0.1" id("org.liquibase.gradle") version "2.0.4" } @@ -214,6 +214,11 @@ jooq { enumConverter = true includeExpression = "TASK_TRACKING.SUBJECT_TYPE" } + forcedType { + userType = "com.netflix.spinnaker.keel.rollout.RolloutStatus" + enumConverter = true + includeExpression = "FEATURE_ROLLOUT.STATUS" + } } } target { diff --git a/keel-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConfiguration.kt b/keel-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConfiguration.kt index f3020fbff4..63c7713546 100644 --- a/keel-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConfiguration.kt +++ b/keel-sql/src/main/kotlin/com/netflix/spinnaker/config/SqlConfiguration.kt @@ -15,6 +15,7 @@ import com.netflix.spinnaker.keel.sql.SqlDiffFingerprintRepository import com.netflix.spinnaker.keel.sql.SqlDismissibleNotificationRepository import com.netflix.spinnaker.keel.sql.SqlEnvironmentDeletionRepository import 
com.netflix.spinnaker.keel.sql.SqlEnvironmentLeaseRepository +import com.netflix.spinnaker.keel.sql.SqlFeatureRolloutRepository import com.netflix.spinnaker.keel.sql.SqlLifecycleEventRepository import com.netflix.spinnaker.keel.sql.SqlLifecycleMonitorRepository import com.netflix.spinnaker.keel.sql.SqlNotificationRepository @@ -250,4 +251,10 @@ class SqlConfiguration clock: Clock, objectMapper: ObjectMapper ) = SqlWorkQueueRepository(jooq, clock, objectMapper, SqlRetry(sqlRetryProperties)) + + @Bean + fun featureRolloutRepository( + jooq: DSLContext, + clock: Clock + ) = SqlFeatureRolloutRepository(jooq, SqlRetry(sqlRetryProperties), clock) } diff --git a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlActionRepository.kt b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlActionRepository.kt index 036da7c690..fbccf64af7 100644 --- a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlActionRepository.kt +++ b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlActionRepository.kt @@ -400,6 +400,7 @@ class SqlActionRepository( link: String?, type: ActionType ) { + log.debug("Updating action state for ${context.shortName()}: $status") with(context) { jooq .insertInto(ACTION_STATE) diff --git a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlArtifactRepository.kt b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlArtifactRepository.kt index 91cc38834e..74c6f94c45 100644 --- a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlArtifactRepository.kt +++ b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlArtifactRepository.kt @@ -105,6 +105,7 @@ class SqlArtifactRepository( .set(DELIVERY_ARTIFACT.TYPE, artifact.type) .set(DELIVERY_ARTIFACT.REFERENCE, artifact.reference) .set(DELIVERY_ARTIFACT.DELIVERY_CONFIG_NAME, artifact.deliveryConfigName) + .set(DELIVERY_ARTIFACT.IS_PREVIEW, artifact.isPreview) .set(DELIVERY_ARTIFACT.DETAILS, artifact.detailsAsJson()) .onDuplicateKeyUpdate() .set(DELIVERY_ARTIFACT.NAME, artifact.name) @@ -354,6 +355,7 @@ class SqlArtifactRepository( PublishedArtifact( name = name, type = type, + reference = artifact.reference, version = version, status = status, createdAt = createdAt, @@ -1928,6 +1930,19 @@ class SqlArtifactRepository( .fetchSingleInto() } + override fun getLatestApprovedInEnvArtifactVersion( + config: DeliveryConfig, + artifact: DeliveryArtifact, + environmentName: String + ): PublishedArtifact? 
{ + latestVersionApprovedIn(config, artifact, environmentName) + ?.let { version -> + return getArtifactVersion(artifact, version, null) + } + return null + } + + private fun priorVersionDeployedIn( environmentId: String, artifactId: String, diff --git a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlDeliveryConfigRepository.kt b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlDeliveryConfigRepository.kt index 4cf2727a4e..aa37672c6f 100644 --- a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlDeliveryConfigRepository.kt +++ b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlDeliveryConfigRepository.kt @@ -18,6 +18,7 @@ import com.netflix.spinnaker.keel.core.api.UID import com.netflix.spinnaker.keel.core.api.parseUID import com.netflix.spinnaker.keel.core.api.randomUID import com.netflix.spinnaker.keel.core.api.timestampAsInstant +import com.netflix.spinnaker.keel.events.ResourceState import com.netflix.spinnaker.keel.pause.PauseScope import com.netflix.spinnaker.keel.pause.PauseScope.APPLICATION import com.netflix.spinnaker.keel.persistence.DeliveryConfigRepository @@ -45,6 +46,7 @@ import com.netflix.spinnaker.keel.persistence.metamodel.Tables.EVENT import com.netflix.spinnaker.keel.persistence.metamodel.Tables.PAUSED import com.netflix.spinnaker.keel.persistence.metamodel.Tables.PREVIEW_ENVIRONMENT import com.netflix.spinnaker.keel.persistence.metamodel.Tables.RESOURCE +import com.netflix.spinnaker.keel.persistence.metamodel.Tables.RESOURCE_LAST_CHECKED import com.netflix.spinnaker.keel.persistence.metamodel.Tables.RESOURCE_VERSION import com.netflix.spinnaker.keel.resources.ResourceFactory import com.netflix.spinnaker.keel.sql.RetryCategory.READ @@ -532,6 +534,26 @@ class SqlDeliveryConfigRepository( } ?: emptySet() } + override fun resourceStatusesInEnvironment( + deliveryConfigName: String, + environmentName: String + ): Map = + jooq + .select(RESOURCE.ID, RESOURCE_LAST_CHECKED.STATUS) + .from(RESOURCE) + .join(RESOURCE_LAST_CHECKED) + .on(RESOURCE_LAST_CHECKED.RESOURCE_UID.eq(RESOURCE.UID)) + .join(ENVIRONMENT_RESOURCE) + .on(ENVIRONMENT_RESOURCE.RESOURCE_UID.eq(RESOURCE.UID)) + .join(ACTIVE_ENVIRONMENT) + .on(ACTIVE_ENVIRONMENT.UID.eq(ENVIRONMENT_RESOURCE.ENVIRONMENT_UID)) + .and(ACTIVE_ENVIRONMENT.VERSION.eq(ENVIRONMENT_RESOURCE.ENVIRONMENT_VERSION)) + .and(ACTIVE_ENVIRONMENT.NAME.eq(environmentName)) + .join(DELIVERY_CONFIG) + .on(DELIVERY_CONFIG.UID.eq(ACTIVE_ENVIRONMENT.DELIVERY_CONFIG_UID)) + .and(DELIVERY_CONFIG.NAME.eq(deliveryConfigName)) + .fetchMap(RESOURCE.ID, RESOURCE_LAST_CHECKED.STATUS) + override fun deliveryConfigFor(resourceId: String): DeliveryConfig = // TODO: this implementation could be more efficient by sharing code with get(name) sqlRetry.withRetry(READ) { @@ -780,40 +802,20 @@ class SqlDeliveryConfigRepository( override fun deleteConstraintState( deliveryConfigName: String, environmentName: String, + reference: String, + version: String, type: String - ) { + ): Int { val envUidSelect = envUid(deliveryConfigName, environmentName) - sqlRetry.withRetry(WRITE) { - jooq.select(CURRENT_CONSTRAINT.APPLICATION, CURRENT_CONSTRAINT.ENVIRONMENT_UID) - .from(CURRENT_CONSTRAINT) - .where( - CURRENT_CONSTRAINT.ENVIRONMENT_UID.eq(envUidSelect), - CURRENT_CONSTRAINT.TYPE.eq(type) - ) - .fetch { (application, envUid) -> - jooq.deleteFrom(CURRENT_CONSTRAINT) - .where( - CURRENT_CONSTRAINT.APPLICATION.eq(application), - CURRENT_CONSTRAINT.ENVIRONMENT_UID.eq(envUid), - CURRENT_CONSTRAINT.TYPE.eq(type) - ) - .execute() - } - - val ids: 
List = jooq.select(ENVIRONMENT_ARTIFACT_CONSTRAINT.UID) - .from(ENVIRONMENT_ARTIFACT_CONSTRAINT) + return sqlRetry.withRetry(WRITE) { + jooq.deleteFrom(ENVIRONMENT_ARTIFACT_CONSTRAINT) .where( ENVIRONMENT_ARTIFACT_CONSTRAINT.ENVIRONMENT_UID.eq(envUidSelect), - ENVIRONMENT_ARTIFACT_CONSTRAINT.TYPE.eq(type) + ENVIRONMENT_ARTIFACT_CONSTRAINT.TYPE.eq(type), + ENVIRONMENT_ARTIFACT_CONSTRAINT.ARTIFACT_VERSION.eq(version), + ENVIRONMENT_ARTIFACT_CONSTRAINT.ARTIFACT_REFERENCE.eq(reference), ) - .fetch(ENVIRONMENT_ARTIFACT_CONSTRAINT.UID) - .sorted() - - ids.chunked(DELETE_CHUNK_SIZE).forEach { - jooq.deleteFrom(ENVIRONMENT_ARTIFACT_CONSTRAINT) - .where(ENVIRONMENT_ARTIFACT_CONSTRAINT.UID.`in`(*it.toTypedArray())) - .execute() - } + .execute() } } @@ -1156,6 +1158,7 @@ class SqlDeliveryConfigRepository( } override fun triggerRecheck(application: String) { + log.info("Triggering delivery config recheck for application $application") val uid = sqlRetry.withRetry(READ) { jooq.select(DELIVERY_CONFIG.UID) .from(DELIVERY_CONFIG) diff --git a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlFeatureRolloutRepository.kt b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlFeatureRolloutRepository.kt new file mode 100644 index 0000000000..a155ed0b8a --- /dev/null +++ b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlFeatureRolloutRepository.kt @@ -0,0 +1,61 @@ +package com.netflix.spinnaker.keel.sql + +import com.netflix.spinnaker.keel.persistence.FeatureRolloutRepository +import com.netflix.spinnaker.keel.persistence.metamodel.Tables.FEATURE_ROLLOUT +import com.netflix.spinnaker.keel.rollout.RolloutStatus +import com.netflix.spinnaker.keel.rollout.RolloutStatus.IN_PROGRESS +import com.netflix.spinnaker.keel.rollout.RolloutStatus.NOT_STARTED +import com.netflix.spinnaker.keel.sql.RetryCategory.READ +import com.netflix.spinnaker.keel.sql.RetryCategory.WRITE +import org.jooq.DSLContext +import java.time.Clock + +class SqlFeatureRolloutRepository( + private val jooq: DSLContext, + private val sqlRetry: SqlRetry, + private val clock: Clock +) : FeatureRolloutRepository { + override fun markRolloutStarted(feature: String, resourceId: String) { + sqlRetry.withRetry(WRITE) { + jooq + .insertInto(FEATURE_ROLLOUT) + .set(FEATURE_ROLLOUT.FEATURE, feature) + .set(FEATURE_ROLLOUT.RESOURCE_ID, resourceId) + .set(FEATURE_ROLLOUT.STATUS, IN_PROGRESS) + .set(FEATURE_ROLLOUT.ATTEMPTS, 1) + .set(FEATURE_ROLLOUT.FIRST_ATTEMPT_AT, clock.instant()) + .set(FEATURE_ROLLOUT.LATEST_ATTEMPT_AT, clock.instant()) + .onDuplicateKeyUpdate() + .set(FEATURE_ROLLOUT.STATUS, IN_PROGRESS) + .set(FEATURE_ROLLOUT.ATTEMPTS, FEATURE_ROLLOUT.ATTEMPTS + 1) + .set(FEATURE_ROLLOUT.LATEST_ATTEMPT_AT, clock.instant()) + .execute() + } + } + + override fun rolloutStatus(feature: String, resourceId: String): Pair = + sqlRetry.withRetry(READ) { + jooq + .select(FEATURE_ROLLOUT.STATUS, FEATURE_ROLLOUT.ATTEMPTS) + .from(FEATURE_ROLLOUT) + .where(FEATURE_ROLLOUT.FEATURE.eq(feature)) + .and(FEATURE_ROLLOUT.RESOURCE_ID.eq(resourceId)) + .fetchOne { (status, attempts) -> status to attempts } ?: (NOT_STARTED to 0) + } + + override fun updateStatus(feature: String, resourceId: String, status: RolloutStatus) { + sqlRetry.withRetry(WRITE) { + jooq + .insertInto(FEATURE_ROLLOUT) + .set(FEATURE_ROLLOUT.FEATURE, feature) + .set(FEATURE_ROLLOUT.RESOURCE_ID, resourceId) + .set(FEATURE_ROLLOUT.STATUS, status) + .set(FEATURE_ROLLOUT.ATTEMPTS, 0) + .setNull(FEATURE_ROLLOUT.FIRST_ATTEMPT_AT) + .setNull(FEATURE_ROLLOUT.LATEST_ATTEMPT_AT) + 
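+ // on duplicate key only the status column is overwritten; attempts and the timestamps recorded by markRolloutStarted are left intact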
.onDuplicateKeyUpdate() + .set(FEATURE_ROLLOUT.STATUS, status) + .execute() + } + } +} diff --git a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlResourceRepository.kt b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlResourceRepository.kt index 4c74805395..e0ee7b37b6 100644 --- a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlResourceRepository.kt +++ b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlResourceRepository.kt @@ -175,6 +175,7 @@ open class SqlResourceRepository( .set(RESOURCE_VERSION.CREATED_AT, clock.instant()) .execute() } catch(e: Exception) { + log.error("Failed to insert resource version for ${resource.id}: $e", e) spectator.counter(resourceVersionInsertId.withTags( "success", "false", "application", resource.application, // Capture the app on fail cases to help repro diff --git a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlTaskTrackingRepository.kt b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlTaskTrackingRepository.kt index 17dd8f4ab1..baa223d6a4 100644 --- a/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlTaskTrackingRepository.kt +++ b/keel-sql/src/main/kotlin/com/netflix/spinnaker/keel/sql/SqlTaskTrackingRepository.kt @@ -3,6 +3,8 @@ package com.netflix.spinnaker.keel.sql import com.netflix.spinnaker.config.RetentionProperties import com.netflix.spinnaker.keel.api.TaskStatus import com.netflix.spinnaker.keel.api.TaskStatus.RUNNING +import com.netflix.spinnaker.keel.api.actuation.SubjectType +import com.netflix.spinnaker.keel.persistence.TaskForResource import com.netflix.spinnaker.keel.persistence.TaskRecord import com.netflix.spinnaker.keel.persistence.TaskTrackingRepository import com.netflix.spinnaker.keel.persistence.metamodel.Tables.TASK_TRACKING @@ -32,6 +34,7 @@ class SqlTaskTrackingRepository( .set(TASK_TRACKING.APPLICATION, task.application) .set(TASK_TRACKING.ENVIRONMENT_NAME, task.environmentName) .set(TASK_TRACKING.RESOURCE_ID, task.resourceId) + .set(TASK_TRACKING.ARTIFACT_VERSION, task.artifactVersion) .onDuplicateKeyIgnore() .execute() } @@ -46,13 +49,103 @@ class SqlTaskTrackingRepository( TASK_TRACKING.SUBJECT_TYPE, TASK_TRACKING.APPLICATION, TASK_TRACKING.ENVIRONMENT_NAME, - TASK_TRACKING.RESOURCE_ID + TASK_TRACKING.RESOURCE_ID, + TASK_TRACKING.ARTIFACT_VERSION ) .from(TASK_TRACKING) .where(TASK_TRACKING.ENDED_AT.isNull) .fetch() - .map { (taskId, taskName, subjectType, application, environmentName, resourceId) -> - TaskRecord(taskId, taskName, subjectType, application, environmentName, resourceId) + .map { (taskId, taskName, subjectType, application, environmentName, resourceId, version) -> + TaskRecord(taskId, taskName, subjectType, application, environmentName, resourceId, version) + } + .toSet() + } + + override fun getTasks(resourceId: String, limit: Int): Set = + sqlRetry.withRetry(READ) { + jooq + .select( + TASK_TRACKING.TASK_ID, + TASK_TRACKING.TASK_NAME, + TASK_TRACKING.STARTED_AT, + TASK_TRACKING.ENDED_AT, + TASK_TRACKING.ARTIFACT_VERSION + ) + .from(TASK_TRACKING) + .where(TASK_TRACKING.RESOURCE_ID.eq(resourceId)) + .limit(limit) + .fetch() + .map { (taskId, taskName, startedAt, endedAt, version) -> + TaskForResource(taskId, taskName, resourceId, startedAt, endedAt, version) + } + .toSet() + } + + /** + * This is the list of tasks that we show to users in the UI. + * + * We only want to show one "batch" of tasks for a resource Id. + * Clusters have a recorded version, so a 'batch' is updates that have the same artifact_version. 
+ * For resources without a version it's everything that was started within 30 seconds + * of the most recent task (because we launch groups of tasks at once). + */ + override fun getLatestBatchOfTasks(resourceId: String): Set { + val tasks = sqlRetry.withRetry(READ) { + jooq + .select( + TASK_TRACKING.TASK_ID, + TASK_TRACKING.TASK_NAME, + TASK_TRACKING.STARTED_AT, + TASK_TRACKING.ENDED_AT, + TASK_TRACKING.ARTIFACT_VERSION + ) + .from(TASK_TRACKING) + .where(TASK_TRACKING.RESOURCE_ID.eq(resourceId)) + .orderBy(TASK_TRACKING.STARTED_AT.desc()) + .limit(20) // probably more than the max number of tasks we launch at once? may need to tune + .fetch() + .map { (taskId, taskName, startedAt, endedAt, version) -> + TaskForResource(taskId, taskName, resourceId, startedAt, endedAt, version) + } + } + + val mostRecentStarted = tasks + .maxByOrNull { it.startedAt } + + // we start tasks within the same 30 seconds, so find the rest of the batch + // that goes with the most recently started task. + val batchCutoffTime = mostRecentStarted?.startedAt?.minusSeconds(30) + + val version = mostRecentStarted?.artifactVersion + return tasks + .getTaskBatch(batchCutoffTime) + .filter { task -> + task.artifactVersion == null || task.artifactVersion == version + }.toSet() + } + + private fun List.getTaskBatch(cutoff: Instant?) = + filter { it.endedAt == null || it.startedAt.isAfter(cutoff) }.toSet() + + override fun getInFlightTasks(application: String, environmentName: String): Set = + sqlRetry.withRetry(READ) { + jooq + .select( + TASK_TRACKING.TASK_ID, + TASK_TRACKING.TASK_NAME, + TASK_TRACKING.RESOURCE_ID, + TASK_TRACKING.STARTED_AT, + TASK_TRACKING.ENDED_AT, + TASK_TRACKING.ARTIFACT_VERSION + ) + .from(TASK_TRACKING) + .where(TASK_TRACKING.APPLICATION.eq(application)) + .and(TASK_TRACKING.ENVIRONMENT_NAME.eq(environmentName)) + .and(TASK_TRACKING.SUBJECT_TYPE.eq(SubjectType.RESOURCE)) //todo eb: verifications/constraints as well?
+ .and(TASK_TRACKING.ENDED_AT.isNull) + .fetch() + .map { (taskId, taskName, resourceId, startedAt, endedAt, version) -> + TaskForResource(taskId, taskName, resourceId, startedAt, endedAt, version) } .toSet() } diff --git a/keel-sql/src/main/resources/db/changelog/20210917-feature-rollout-table.yml b/keel-sql/src/main/resources/db/changelog/20210917-feature-rollout-table.yml new file mode 100644 index 0000000000..d6ea1a203a --- /dev/null +++ b/keel-sql/src/main/resources/db/changelog/20210917-feature-rollout-table.yml @@ -0,0 +1,35 @@ +databaseChangeLog: +- changeSet: + id: feature-rollout-table + author: fletch + changes: + - createTable: + tableName: feature-rollout + columns: + - column: + name: feature + type: varchar(255) + constraints: + - primaryKey: true + - nullable: false + - column: + name: resource_id + type: varchar(255) + constraints: + - primaryKey: true + - nullable: false + - column: + name: attempts + type: integer + constraints: + - nullable: false + - column: + name: first_attempt_at + type: datetime(3) + constraints: + - nullable: false + - column: + name: latest_attempt_at + type: datetime(3) + constraints: + - nullable: false diff --git a/keel-sql/src/main/resources/db/changelog/20210923-rename-feature-rollout-table.yml b/keel-sql/src/main/resources/db/changelog/20210923-rename-feature-rollout-table.yml new file mode 100644 index 0000000000..7af77a411c --- /dev/null +++ b/keel-sql/src/main/resources/db/changelog/20210923-rename-feature-rollout-table.yml @@ -0,0 +1,8 @@ +databaseChangeLog: +- changeSet: + id: rename-feature-rollout-table + author: fletch + changes: + - renameTable: + oldTableName: feature-rollout + newTableName: feature_rollout diff --git a/keel-sql/src/main/resources/db/changelog/20210928-add-status-to-feature-rollout.yml b/keel-sql/src/main/resources/db/changelog/20210928-add-status-to-feature-rollout.yml new file mode 100644 index 0000000000..a91abed812 --- /dev/null +++ b/keel-sql/src/main/resources/db/changelog/20210928-add-status-to-feature-rollout.yml @@ -0,0 +1,23 @@ +databaseChangeLog: +- changeSet: + id: add-status-to-feature-rollout + author: fletch + changes: + - addColumn: + tableName: feature_rollout + columns: + - column: + name: status + type: varchar(64) + constraints: + - nullable: false + value: IN_PROGRESS + afterColumn: resource_id + - dropNotNullConstraint: + tableName: feature_rollout + columnName: first_attempt_at + columnDataType: datetime(3) + - dropNotNullConstraint: + tableName: feature_rollout + columnName: latest_attempt_at + columnDataType: datetime(3) diff --git a/keel-sql/src/main/resources/db/changelog/20211007-task-tracking-indecies.yml b/keel-sql/src/main/resources/db/changelog/20211007-task-tracking-indecies.yml new file mode 100644 index 0000000000..f58050eacc --- /dev/null +++ b/keel-sql/src/main/resources/db/changelog/20211007-task-tracking-indecies.yml @@ -0,0 +1,11 @@ +databaseChangeLog: + - changeSet: + id: task-tracking-indices + author: emjburns + changes: + - createIndex: + tableName: task_tracking + columns: + - column: + name: resource_id + indexName: task_tracking_resource_id_idx diff --git a/keel-sql/src/main/resources/db/changelog/20211008-version-task-tracking.yml b/keel-sql/src/main/resources/db/changelog/20211008-version-task-tracking.yml new file mode 100644 index 0000000000..0ac69ef9c0 --- /dev/null +++ b/keel-sql/src/main/resources/db/changelog/20211008-version-task-tracking.yml @@ -0,0 +1,14 @@ +databaseChangeLog: + - changeSet: + id: task-tracking-indices + author: emjburns + changes: + - 
addColumn: + tableName: task_tracking + columns: + - column: + name: artifact_version + type: varchar(255) + constraints: + - nullable: true + afterColumn: resource_id diff --git a/keel-sql/src/main/resources/db/changelog/20211014-artifact-is-preview-column.yml b/keel-sql/src/main/resources/db/changelog/20211014-artifact-is-preview-column.yml new file mode 100644 index 0000000000..beadeaa8ad --- /dev/null +++ b/keel-sql/src/main/resources/db/changelog/20211014-artifact-is-preview-column.yml @@ -0,0 +1,15 @@ +databaseChangeLog: + - changeSet: + id: artifact-is-preview-column + author: lpollo + changes: + - addColumn: + tableName: delivery_artifact + columns: + - column: + name: is_preview + type: boolean + defaultValue: false + constraints: + - nullable: false + afterColumn: reference diff --git a/keel-sql/src/main/resources/db/databaseChangeLog.yml b/keel-sql/src/main/resources/db/databaseChangeLog.yml index cdeacb889a..41ef442cf7 100644 --- a/keel-sql/src/main/resources/db/databaseChangeLog.yml +++ b/keel-sql/src/main/resources/db/databaseChangeLog.yml @@ -368,3 +368,21 @@ databaseChangeLog: - include: file: changelog/20210909-task-tracking-resource-and-environment.yml relativeToChangelogFile: true +- include: + file: changelog/20210917-feature-rollout-table.yml + relativeToChangelogFile: true +- include: + file: changelog/20210923-rename-feature-rollout-table.yml + relativeToChangelogFile: true +- include: + file: changelog/20210928-add-status-to-feature-rollout.yml + relativeToChangelogFile: true +- include: + file: changelog/20211007-task-tracking-indecies.yml + relativeToChangelogFile: true +- include: + file: changelog/20211008-version-task-tracking.yml + relativeToChangelogFile: true +- include: + file: changelog/20211014-artifact-is-preview-column.yml + relativeToChangelogFile: true diff --git a/keel-sql/src/test/kotlin/com/netflix/spinnaker/keel/sql/SqlFeatureRolloutRepositoryTests.kt b/keel-sql/src/test/kotlin/com/netflix/spinnaker/keel/sql/SqlFeatureRolloutRepositoryTests.kt new file mode 100644 index 0000000000..b34bd178a3 --- /dev/null +++ b/keel-sql/src/test/kotlin/com/netflix/spinnaker/keel/sql/SqlFeatureRolloutRepositoryTests.kt @@ -0,0 +1,97 @@ +package com.netflix.spinnaker.keel.sql + +import com.netflix.spinnaker.keel.rollout.RolloutStatus +import com.netflix.spinnaker.keel.rollout.RolloutStatus.IN_PROGRESS +import com.netflix.spinnaker.keel.rollout.RolloutStatus.NOT_STARTED +import com.netflix.spinnaker.kork.sql.config.RetryProperties +import com.netflix.spinnaker.kork.sql.config.SqlRetryProperties +import com.netflix.spinnaker.kork.sql.test.SqlTestUtil.cleanupDb +import org.junit.jupiter.api.AfterEach +import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.EnumSource +import strikt.api.expectThat +import strikt.assertions.isEqualTo +import java.time.Clock + +internal class SqlFeatureRolloutRepositoryTests { + private val jooq = testDatabase.context + private val sqlRetry = RetryProperties(1, 0).let { SqlRetry(SqlRetryProperties(it, it)) } + private val subject = SqlFeatureRolloutRepository(jooq, sqlRetry, Clock.systemDefaultZone()) + + private val feature = "commencement-of-eschaton" + private val resourceId = "titus:cluster:prod:fnord-main" + + @AfterEach + fun flush() { + cleanupDb(jooq) + } + + @Test + fun `if a feature has never been rolled out to a resource the status is NOT_STARTED and the count is zero`() { + subject.rolloutStatus(feature, resourceId) + .also { result -> + expectThat(result) 
isEqualTo (NOT_STARTED to 0) + } + } + + @Test + fun `if a feature has been rolled out once to a resource the status is IN_PROGRESS and the count is one`() { + with(subject) { + markRolloutStarted(feature, resourceId) + rolloutStatus(feature, resourceId) + .also { result -> + expectThat(result) isEqualTo (IN_PROGRESS to 1) + } + } + } + + @ParameterizedTest(name = "if a feature rollout completed with status {arguments}, its status reflects that") + @EnumSource(RolloutStatus::class, names = ["SKIPPED", "FAILED", "SUCCESSFUL"]) + fun `if a feature rollout completed its status reflects that`(status: RolloutStatus) { + with(subject) { + markRolloutStarted(feature, resourceId) + updateStatus(feature, resourceId, status) + rolloutStatus(feature, resourceId) + .also { result -> + expectThat(result) isEqualTo (status to 1) + } + } + } + + @Test + fun `multiple rollout attempts increment the count`() { + val n = 5 + with(subject) { + repeat(n) { + markRolloutStarted(feature, resourceId) + } + rolloutStatus(feature, resourceId) + .also { result -> + expectThat(result) isEqualTo (IN_PROGRESS to n) + } + } + } + + @Test + fun `does not mix the counts for different features`() { + with(subject) { + markRolloutStarted(feature, resourceId) + rolloutStatus("a-different-feature", resourceId) + .also { result -> + expectThat(result) isEqualTo (NOT_STARTED to 0) + } + } + } + + @Test + fun `does not mix the counts for different resources`() { + with(subject) { + markRolloutStarted(feature, resourceId) + rolloutStatus(feature, "titus:cluster:test:fnord-test") + .also { result -> + expectThat(result) isEqualTo (NOT_STARTED to 0) + } + } + } +} diff --git a/keel-sql/src/test/kotlin/com/netflix/spinnaker/keel/sql/SqlTaskTrackingRepositoryTests.kt b/keel-sql/src/test/kotlin/com/netflix/spinnaker/keel/sql/SqlTaskTrackingRepositoryTests.kt index b36107ecd1..fc34c1d879 100644 --- a/keel-sql/src/test/kotlin/com/netflix/spinnaker/keel/sql/SqlTaskTrackingRepositoryTests.kt +++ b/keel-sql/src/test/kotlin/com/netflix/spinnaker/keel/sql/SqlTaskTrackingRepositoryTests.kt @@ -16,7 +16,7 @@ internal object SqlTaskTrackingRepositoryTests : TaskTrackingRepositoryTests = emptyMap(), @@ -75,7 +77,8 @@ data class TitusClusterSpec( dependencies: ClusterDependencies? = null, tags: Map = emptyMap(), scaling: TitusScalingSpec? 
= null, - overrides: Map = emptyMap() + overrides: Map = emptyMap(), + managedRollout: ManagedRolloutConfig = ManagedRolloutConfig() ) : this( moniker = moniker, deployWith = deployWith, @@ -95,7 +98,8 @@ data class TitusClusterSpec( scaling = scaling ), overrides = overrides, - container = container + container = container, + managedRollout = managedRollout ) override val id = "${locations.account}:$moniker" @@ -144,6 +148,9 @@ data class TitusClusterSpec( } deps }.toSet() + + override fun deepRename(suffix: String) = + copy(moniker = moniker.withSuffix(suffix)) } data class TitusServerGroupSpec( diff --git a/keel-titus-plugin/keel-titus-plugin.gradle b/keel-titus-plugin/keel-titus-plugin.gradle index e61e04ce7c..9cb1b36276 100644 --- a/keel-titus-plugin/keel-titus-plugin.gradle +++ b/keel-titus-plugin/keel-titus-plugin.gradle @@ -2,6 +2,7 @@ dependencies { api(project(":keel-api")) api(project(":keel-titus-api")) implementation(project(":keel-core")) // TODO: ideally not + implementation(project(":keel-optics")) implementation(project(":keel-clouddriver")) implementation(project(":keel-orca")) implementation(project(":keel-retrofit")) diff --git a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/config/TitusConfig.kt b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/config/TitusConfig.kt index 44e400bec9..1d376b32eb 100644 --- a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/config/TitusConfig.kt +++ b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/config/TitusConfig.kt @@ -22,10 +22,15 @@ import com.netflix.spinnaker.keel.api.plugins.Resolver import com.netflix.spinnaker.keel.api.support.EventPublisher import com.netflix.spinnaker.keel.clouddriver.CloudDriverCache import com.netflix.spinnaker.keel.clouddriver.CloudDriverService +import com.netflix.spinnaker.keel.environments.DependentEnvironmentFinder import com.netflix.spinnaker.keel.orca.ClusterExportHelper import com.netflix.spinnaker.keel.orca.OrcaService +import com.netflix.spinnaker.keel.persistence.FeatureRolloutRepository +import com.netflix.spinnaker.keel.titus.InstanceMetadataServiceResolver import com.netflix.spinnaker.keel.titus.TitusClusterHandler +import org.springframework.beans.factory.getBean import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty +import org.springframework.context.ApplicationContext import org.springframework.context.annotation.Bean import org.springframework.context.annotation.Configuration import java.time.Clock @@ -53,4 +58,25 @@ class TitusConfig { resolvers, clusterExportHelper ) + + @Bean + fun titusInstanceMetadataServiceResolver( + dependentEnvironmentFinder: DependentEnvironmentFinder, + applicationContext: ApplicationContext, + featureRolloutRepository: FeatureRolloutRepository, + eventPublisher: EventPublisher + ): InstanceMetadataServiceResolver { + // This is necessary to avoid a circular bean dependency as Resolver instances (like we're creating here) + // get wired into ResourceHandlers, but here the Resolver needs a capability provided by the ResourceHandler. 
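+ // the lazy delegate defers the ApplicationContext lookup, so the handler bean is only resolved on first use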
+ val clusterHandler by lazy { applicationContext.getBean() } + + return InstanceMetadataServiceResolver( + dependentEnvironmentFinder, + // although it looks like this could be optimized to clusterHandler::current that will cause the bean to get + // created right away, which will blow up with a circular dependency + { clusterHandler.current(it) }, + featureRolloutRepository, + eventPublisher + ) + } } diff --git a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/ContainerRunner.kt b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/ContainerRunner.kt index 5cace543e3..c74a14738c 100644 --- a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/ContainerRunner.kt +++ b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/ContainerRunner.kt @@ -37,6 +37,7 @@ class ContainerRunner( ): ActionState { @Suppress("UNCHECKED_CAST") val taskId = (oldState.metadata[TASKS] as Iterable?)?.last() + log.debug("Checking status for task $taskId") require(taskId is String) { "No task id found in previous container state" } diff --git a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/InstanceMetadataServiceResolver.kt b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/InstanceMetadataServiceResolver.kt new file mode 100644 index 0000000000..c94cf61342 --- /dev/null +++ b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/InstanceMetadataServiceResolver.kt @@ -0,0 +1,57 @@ +package com.netflix.spinnaker.keel.titus + +import com.netflix.spinnaker.keel.api.Resource +import com.netflix.spinnaker.keel.api.support.EventPublisher +import com.netflix.spinnaker.keel.api.titus.TITUS_CLUSTER_V1 +import com.netflix.spinnaker.keel.api.titus.TitusClusterSpec +import com.netflix.spinnaker.keel.api.titus.TitusServerGroup +import com.netflix.spinnaker.keel.api.titus.TitusServerGroupSpec +import com.netflix.spinnaker.keel.environments.DependentEnvironmentFinder +import com.netflix.spinnaker.keel.optics.mapValueLens +import com.netflix.spinnaker.keel.optics.resourceSpecLens +import com.netflix.spinnaker.keel.persistence.FeatureRolloutRepository +import com.netflix.spinnaker.keel.rollout.RolloutAwareResolver +import com.netflix.spinnaker.keel.titus.optics.titusClusterSpecContainerAttributesLens + +class InstanceMetadataServiceResolver( + dependentEnvironmentFinder: DependentEnvironmentFinder, + resourceToCurrentState: suspend (Resource) -> Map, + featureRolloutRepository: FeatureRolloutRepository, + eventPublisher: EventPublisher +) : RolloutAwareResolver>( + dependentEnvironmentFinder, + resourceToCurrentState, + featureRolloutRepository, + eventPublisher +) { + override val supportedKind = TITUS_CLUSTER_V1 + override val featureName = "imdsv2" + + override fun isExplicitlySpecified(resource: Resource) = + titusClusterResourceImdsRequireTokenLens.get(resource) != null + + override fun isAppliedTo(actualResource: Map) = + actualResource.values.all { it.containerAttributes["titusParameter.agent.imds.requireToken"] == "true" } + + override fun activate(resource: Resource) = + titusClusterResourceImdsRequireTokenLens.set(resource, "true") + + override fun deactivate(resource: Resource) = + titusClusterResourceImdsRequireTokenLens.set(resource, null) + + override val Map.exists: Boolean + get() = isNotEmpty() +} + +/** + * Lens for getting/setting the IMDSv2 flag in [TitusServerGroupSpec.containerAttributes]. 
+ */ +val titusClusterSpecImdsRequireTokenLens = + titusClusterSpecContainerAttributesLens + mapValueLens("titusParameter.agent.imds.requireToken") + +/** + * Composed lens that lets us get/set the deeply nested IMDSv2 flag in [TitusServerGroupSpec.containerAttributes] + * directly from the [Resource]. + */ +private val titusClusterResourceImdsRequireTokenLens = + resourceSpecLens() + titusClusterSpecImdsRequireTokenLens diff --git a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/TitusClusterHandler.kt b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/TitusClusterHandler.kt index a1951aa003..2d4b5db010 100644 --- a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/TitusClusterHandler.kt +++ b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/TitusClusterHandler.kt @@ -18,6 +18,9 @@ package com.netflix.spinnaker.keel.titus import com.fasterxml.jackson.module.kotlin.convertValue +import com.netflix.spinnaker.keel.actuation.RolloutLocation +import com.netflix.spinnaker.keel.actuation.RolloutTarget +import com.netflix.spinnaker.keel.api.ClusterDeployStrategy import com.netflix.spinnaker.keel.api.Exportable import com.netflix.spinnaker.keel.api.Moniker import com.netflix.spinnaker.keel.api.NoStrategy @@ -27,7 +30,6 @@ import com.netflix.spinnaker.keel.api.ResourceDiff import com.netflix.spinnaker.keel.api.SimpleLocations import com.netflix.spinnaker.keel.api.SimpleRegionSpec import com.netflix.spinnaker.keel.api.actuation.Job -import com.netflix.spinnaker.keel.api.actuation.Task import com.netflix.spinnaker.keel.api.actuation.TaskLauncher import com.netflix.spinnaker.keel.api.artifacts.DeliveryArtifact import com.netflix.spinnaker.keel.api.artifacts.TagVersionStrategy @@ -45,6 +47,7 @@ import com.netflix.spinnaker.keel.api.ec2.ServerGroup.InstanceCounts import com.netflix.spinnaker.keel.api.ec2.StepAdjustment import com.netflix.spinnaker.keel.api.ec2.StepScalingPolicy import com.netflix.spinnaker.keel.api.ec2.TargetTrackingPolicy +import com.netflix.spinnaker.keel.api.ec2.hasScalingPolicies import com.netflix.spinnaker.keel.api.plugins.BaseClusterHandler import com.netflix.spinnaker.keel.api.plugins.CurrentImages import com.netflix.spinnaker.keel.api.plugins.ImageInRegion @@ -73,10 +76,11 @@ import com.netflix.spinnaker.keel.clouddriver.model.StepAdjustmentModel import com.netflix.spinnaker.keel.clouddriver.model.TitusActiveServerGroup import com.netflix.spinnaker.keel.clouddriver.model.TitusScaling.Policy.StepPolicy import com.netflix.spinnaker.keel.clouddriver.model.TitusScaling.Policy.TargetPolicy +import com.netflix.spinnaker.keel.clouddriver.model.toActive import com.netflix.spinnaker.keel.core.api.DEFAULT_SERVICE_ACCOUNT import com.netflix.spinnaker.keel.core.orcaClusterMoniker import com.netflix.spinnaker.keel.core.serverGroup -import com.netflix.spinnaker.keel.diff.toIndividualDiffs +import com.netflix.spinnaker.keel.diff.DefaultResourceDiff import com.netflix.spinnaker.keel.docker.DigestProvider import com.netflix.spinnaker.keel.docker.ReferenceProvider import com.netflix.spinnaker.keel.events.ResourceHealthEvent @@ -134,16 +138,11 @@ class TitusClusterHandler( .byRegion() override suspend fun actuationInProgress(resource: Resource): Boolean = - resource - .spec - .locations - .regions - .map { it.name } - .any { region -> - orcaService - .getCorrelatedExecutions("${resource.id}:$region", resource.serviceAccount) - .isNotEmpty() - } + generateCorrelationIds(resource).any { correlationId -> + orcaService + 
.getCorrelatedExecutions(correlationId) + .isNotEmpty() + } override fun getDesiredRegion(diff: ResourceDiff): String = diff.desired.location.region @@ -179,67 +178,77 @@ class TitusClusterHandler( CurrentImages(supportedKind.kind, images, resource.id) } - override suspend fun upsert( - resource: Resource, - resourceDiff: ResourceDiff> - ): List = - coroutineScope { - resourceDiff - .toIndividualDiffs() - .filter { diff -> diff.hasChanges() } - .map { diff -> - val desired = diff.desired - var tags: Set = emptySet() - - var tagToUse: String? = null - val version = when { - diff.isCapacityOnly() -> null - else -> { - // calculate the version for the digest - tags = getTagsForDigest(desired.container, desired.location.account) - if (tags.size == 1) { - tagToUse = tags.first() // only one tag, so use it to deploy - tags.first() - } else { - log.debug("Container digest ${desired.container} has multiple tags: $tags") - // unclear which "version" to print if there is more than one, so use a shortened version of the digest - desired.container.digest.subSequence(0, 7) - } - } - } - val (description, stages) = when { - diff.isCapacityOnly() -> "Resize server group ${desired.moniker} in ${desired.location.account}/${desired.location.region}" to diff.resizeServerGroupJob() - diff.isAutoScalingOnly() -> "Modify auto-scaling of server group ${desired.moniker} in ${desired.location.account}/${desired.location.region}" to diff.modifyScalingPolicyJob() - diff.isEnabledOnly() -> diff.disableOtherServerGroupJob(resource, version.toString()).let { stages -> - "Disable extra active server group ${stages.first()["asgName"]} in ${desired.location.account}/${desired.location.region}" to stages - } - else -> "Deploy $version to server group ${desired.moniker} in ${desired.location.account}/${desired.location.region}" to diff.upsertServerGroupJob( - resource, - tagToUse - ) - } + override fun getDesiredAccount(diff: ResourceDiff): String = + diff.desired.location.account - log.info("Upserting server group using task: {}", stages) + override fun ResourceDiff.moniker(): Moniker = + desired.moniker - val result = async { - taskLauncher.submitJob( - resource = resource, - description = description, - correlationId = "${resource.id}:${desired.location.region}", - stages = stages - ) - } + override fun TitusServerGroup.moniker(): Moniker = + moniker - if (diff.willDeployNewVersion()) { - tags.forEach { tag -> - notifyArtifactDeploying(resource, tag) - } - } - return@map result - } - .map { it.await() } + override fun ResourceDiff.version(resource: Resource): String { + val tags = runBlocking { + getTagsForDigest(desired.container, desired.location.account) + } + return if (tags.size == 1) { + tags.first() // only one tag, so use it to deploy + } else { + log.debug("Container digest ${desired.container} has multiple tags: $tags") + // unclear which "version" to print if there is more than one, so use a shortened version of the digest + desired.container.digest.subSequence(0, 7).toString() } + } + + override fun ResourceDiff.getDeployingVersions(resource: Resource): Set = + runBlocking { + getTagsForDigest(desired.container, desired.location.account) + } + + override fun correlationId(resource: Resource, diff: ResourceDiff): String = + "${resource.id}:${diff.desired.location.region}" + + override fun Resource.isStaggeredDeploy(): Boolean = + spec.deployWith.isStaggered + + override fun Resource.isManagedRollout(): Boolean = + spec.managedRollout.enabled + + override fun Resource.regions(): List = + 
spec.locations.regions.map { it.name } + + override fun Resource.account(): String = + spec.locations.account + + override fun Resource.moniker() = + spec.moniker + + override fun ResourceDiff.hasScalingPolicies(): Boolean = + desired.scaling.hasScalingPolicies() + + override fun ResourceDiff.isCapacityOrAutoScalingOnly(): Boolean = + current != null && + affectedRootPropertyTypes.all { it == Capacity::class.java || it == Scaling::class.java } && + current!!.scaling.suspendedProcesses == desired.scaling.suspendedProcesses + + override fun ResourceDiff.hasScalingPolicyDiff(): Boolean = + current != null && affectedRootPropertyTypes.contains(Scaling::class.java) && + ( + current!!.scaling.targetTrackingPolicies != desired.scaling.targetTrackingPolicies || + current!!.scaling.stepScalingPolicies != desired.scaling.stepScalingPolicies + ) + + override suspend fun getServerGroupsByRegion(resource: Resource): Map> = + getExistingServerGroupsByRegion(resource) + .mapValues { regionalList -> + regionalList.value.map { serverGroup: ClouddriverTitusServerGroup -> + serverGroup.toActive().toTitusServerGroup() + } + } + + override fun Resource.getDeployWith(): ClusterDeployStrategy = + spec.deployWith override suspend fun export(exportable: Exportable): TitusClusterSpec { val serverGroups = cloudDriverService.getActiveServerGroups( @@ -359,14 +368,16 @@ class TitusClusterHandler( false } - private suspend fun ResourceDiff.disableOtherServerGroupJob( + override fun ResourceDiff.disableOtherServerGroupJob( resource: Resource, desiredVersion: String - ): List { + ): Job { val current = requireNotNull(current) { "Current server group must not be null when generating a disable job" } - val existingServerGroups: Map> = getExistingServerGroupsByRegion(resource) + val existingServerGroups: Map> = runBlocking { + getExistingServerGroupsByRegion(resource) + } val sgInRegion = existingServerGroups.getOrDefault(current.location.region, emptyList()).filterNot { it.disabled } if (sgInRegion.size < 2) { @@ -408,14 +419,39 @@ class TitusClusterHandler( "region" to sgToDisable.region, "serverGroupName" to sgToDisable.name, "asgName" to sgToDisable.name - ).let(::listOf) + ) } - private fun ResourceDiff.resizeServerGroupJob(): List { + override fun ResourceDiff.rollbackServerGroupJob( + resource: Resource, + rollbackServerGroup: TitusServerGroup + ): Job = + mutableMapOf( + "rollbackType" to "EXPLICIT", + "rollbackContext" to mapOf( + "rollbackServerGroupName" to current?.moniker?.serverGroup, + "restoreServerGroupName" to rollbackServerGroup.moniker.serverGroup, + "targetHealthyRollbackPercentage" to 100, + "delayBeforeDisableSeconds" to 0 + ), + "targetGroups" to desired.dependencies.targetGroups, + "securityGroups" to desired.dependencies.securityGroupNames, + "platformHealthOnlyShowOverride" to false, + "reason" to "rollin' back", + "type" to "rollbackServerGroup", + "moniker" to current?.moniker?.orcaClusterMoniker, + "region" to desired.location.region, + "credentials" to desired.location.account, + "cloudProvider" to TITUS_CLOUD_PROVIDER, + "user" to DEFAULT_SERVICE_ACCOUNT + ) + + override fun ResourceDiff.resizeServerGroupJob(): Job { val current = requireNotNull(current) { "Current server group must not be null when generating a resize job" } return mapOf( + "refId" to "1", "type" to "resizeServerGroup", "capacity" to mapOf( "min" to desired.capacity.min, @@ -427,7 +463,7 @@ class TitusClusterHandler( "moniker" to current.moniker.orcaClusterMoniker, "region" to current.location.region, "serverGroupName" to 
current.name - ).let(::listOf) + ) } /** @@ -437,7 +473,7 @@ class TitusClusterHandler( * Scaling policies are treated as immutable by keel once applied. If an existing * policy is modified, it will be deleted and reapplied via a single task. */ - private fun ResourceDiff.modifyScalingPolicyJob(startingRefId: Int = 0): List { + override fun ResourceDiff.modifyScalingPolicyJob(startingRefId: Int): List { var (refId, stages) = toDeletePolicyJob(startingRefId) val newTargetPolicies = current?.run { @@ -606,16 +642,11 @@ class TitusClusterHandler( } } - /** - * If a tag is provided, deploys by tag. - * Otherwise, deploys by digest. - */ - private fun ResourceDiff.upsertServerGroupJob( - resource: Resource, + private fun ResourceDiff.generateImageJson( tag: String? - ): List = + ) = with(desired) { - val image = if (tag == null) { + if (tag == null || desired.container.digest.startsWith(tag)) { mapOf( "digest" to container.digest, "imageId" to "${container.organization}/${container.image}:${container.digest}" @@ -626,9 +657,26 @@ class TitusClusterHandler( "imageId" to "${container.organization}/${container.image}:$tag" ) } + } + + /** + * If a tag is provided, deploys by tag. + * Otherwise, deploys by digest. + */ + override fun ResourceDiff.upsertServerGroupJob( + resource: Resource, + startingRefId: Int, + version: String? + ): Job = + with(desired) { + val image = generateImageJson(version) mapOf( - "refId" to "1", + "refId" to (startingRefId + 1).toString(), + "requisiteStageRefIds" to when (startingRefId) { + 0 -> emptyList() + else -> listOf(startingRefId.toString()) + }, "application" to moniker.app, "credentials" to location.account, "region" to location.region, @@ -679,28 +727,80 @@ class TitusClusterHandler( job + resource.spec.deployWith.toOrcaJobProperties("Titus") + mapOf("metadata" to mapOf("resource" to resource.id)) } - .let(::listOf) - .let { job -> - if (affectedRootPropertyTypes.any { it == Scaling::class.java }) { - job + modifyScalingPolicyJob(startingRefId = 2) - } else { - job - } - } - private fun ResourceDiff.willDeployNewVersion(): Boolean = - !isCapacityOnly() && !isEnabledOnly() && !isAutoScalingOnly() + override fun Resource.upsertServerGroupManagedRolloutJob( + diffs: List>, + version: String? + ): Job { + val image = diffs.first().generateImageJson(version) // image json should be the same for all regions. + + return mapOf( + "refId" to "1", + "type" to "managedRollout", + "input" to mapOf( + "selectionStrategy" to spec.managedRollout?.selectionStrategy, + "targets" to spec.generateRolloutTargets(diffs), + "clusterDefinitions" to listOf(toManagedRolloutClusterDefinition(image)) + ), + "reason" to "Diff detected at ${clock.instant().iso()}", + ) + } + + // todo eb: scaling policies? + private fun Resource.toManagedRolloutClusterDefinition(image: Map) = + with(spec) { + val dependencies = resolveDependencies() + mapOf( + "application" to moniker.app, + "stack" to moniker.stack, + "freeFormDetails" to moniker.detail, + "inService" to true, + "targetHealthyDeployPercentage" to 100, // TODO: any reason to do otherwise? 
+ "cloudProvider" to TITUS_CLOUD_PROVIDER, + "network" to "default", + "registry" to runBlocking { getRegistryForTitusAccount(locations.account) }, + "capacity" to resolveCapacity(), + "capacityGroup" to resolveCapacityGroup(), + "securityGroups" to dependencies.securityGroupNames, + "loadBalancers" to dependencies.loadBalancerNames, + "targetGroups" to dependencies.targetGroups, + "entryPoint" to resolveEntryPoint(), + "env" to resolveEnv(), + "containerAttributes" to resolveContainerAttributes(), + "tags" to resolveTags(), + "resources" to resolveResources(), + "constraints" to resolveConstraints(), + "migrationPolicy" to resolveMigrationPolicy(), + "scaling" to resolveScaling(), //todo eb: is this even right? + "overrides" to spec.overrides + ) + image + spec.deployWith.toOrcaJobProperties("Titus") + } + + private fun TitusClusterSpec.generateRolloutTargets(diffs: List>): List> = + diffs + .map { + mapper.convertValue( + RolloutTarget( + TITUS_CLOUD_PROVIDER, + RolloutLocation( + locations.account, + getDesiredRegion(it), + emptyList() + ) + ) + ) + } /** * @return `true` if the only changes in the diff are to capacity. */ - private fun ResourceDiff.isCapacityOnly(): Boolean = + override fun ResourceDiff.isCapacityOnly(): Boolean = current != null && affectedRootPropertyTypes.all { Capacity::class.java.isAssignableFrom(it) } /** * @return `true` if the only changes in the diff are to scaling. */ - private fun ResourceDiff.isAutoScalingOnly(): Boolean = + override fun ResourceDiff.isAutoScalingOnly(): Boolean = current != null && affectedRootPropertyTypes.any { it == Scaling::class.java } && affectedRootPropertyTypes.all { Capacity::class.java.isAssignableFrom(it) || it == Scaling::class.java } && @@ -948,12 +1048,12 @@ class TitusClusterHandler( ) ) - private suspend fun getAwsAccountNameForTitusAccount(titusAccount: String): String = - cloudDriverService.getAccountInformation(titusAccount, DEFAULT_SERVICE_ACCOUNT)["awsAccount"]?.toString() + fun getAwsAccountNameForTitusAccount(titusAccount: String): String = + cloudDriverCache.credentialBy(titusAccount).attributes["awsAccount"] as? String ?: throw TitusAccountConfigurationException(titusAccount, "awsAccount") - private suspend fun getRegistryForTitusAccount(titusAccount: String): String = - cloudDriverService.getAccountInformation(titusAccount, DEFAULT_SERVICE_ACCOUNT)["registry"]?.toString() + fun getRegistryForTitusAccount(titusAccount: String): String = + cloudDriverCache.credentialBy(titusAccount).attributes["registry"] as? 
String ?: throw RegistryNotFoundException(titusAccount) fun TitusServerGroup.securityGroupIds(): Collection = diff --git a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/_titusClusters.kt b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/_titusClusters.kt index 9267e1122a..6cb0f55937 100644 --- a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/_titusClusters.kt +++ b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/_titusClusters.kt @@ -23,9 +23,10 @@ internal fun Iterable.byRegion(): Map AutoScalingCapacity(1, 1, 1) capacity == null -> DefaultCapacity(1, 1, 1) @@ -36,37 +37,55 @@ internal fun TitusClusterSpec.resolveCapacity(region: String): Capacity { internal val NETFLIX_CONTAINER_ENV_VARS = arrayOf("EC2_REGION", "NETFLIX_REGION", "NETFLIX_HOME_REGION") -internal fun TitusClusterSpec.resolveEnv(region: String) = - emptyMap() + defaults.env + overrides[region]?.env + - // These are Netflix-specific but wouldn't hurt elsewhere - NETFLIX_CONTAINER_ENV_VARS.associateWith { region } +internal fun TitusClusterSpec.resolveEnv(region: String? = null): Map { + val regionalVars: Map = region?.let { NETFLIX_CONTAINER_ENV_VARS.associateWith { region }} ?: emptyMap() + return defaults.env + regionalVars + (region?.let { overrides[it] }?.env ?: emptyMap()) +} -internal fun TitusClusterSpec.resolveContainerAttributes(region: String) = - emptyMap() + defaults.containerAttributes + overrides[region]?.containerAttributes +internal fun TitusClusterSpec.resolveContainerAttributes(region: String? = null): Map = + emptyMap() + + defaults.containerAttributes + + (region?.let { overrides[it] }?.containerAttributes ?: emptyMap()) -internal fun TitusClusterSpec.resolveResources(region: String): TitusServerGroup.Resources { +internal fun TitusClusterSpec.resolveResources(region: String? = null): TitusServerGroup.Resources { val default by lazy { Resources() } + return TitusServerGroup.Resources( - cpu = overrides[region]?.resources?.cpu ?: defaults.resources?.cpu ?: default.cpu, - disk = overrides[region]?.resources?.disk ?: defaults.resources?.disk ?: default.disk, - gpu = overrides[region]?.resources?.gpu ?: defaults.resources?.gpu ?: default.gpu, - memory = overrides[region]?.resources?.memory ?: defaults.resources?.memory ?: default.memory, - networkMbps = overrides[region]?.resources?.networkMbps ?: defaults.resources?.networkMbps - ?: default.networkMbps + cpu = region?.let { overrides[it] }?.resources?.cpu + ?: defaults.resources?.cpu + ?: default.cpu, + disk = region?.let { overrides[it] }?.resources?.disk + ?: defaults.resources?.disk + ?: default.disk, + gpu = region?.let { overrides[it] }?.resources?.gpu + ?: defaults.resources?.gpu + ?: default.gpu, + memory = region?.let { overrides[it] }?.resources?.memory + ?: defaults.resources?.memory + ?: default.memory, + networkMbps = region?.let { overrides[it] }?.resources?.networkMbps + ?: defaults.resources?.networkMbps + ?: default.networkMbps ) } internal fun TitusClusterSpec.resolveIamProfile(region: String) = overrides[region]?.iamProfile ?: defaults.iamProfile ?: moniker.app + "InstanceProfile" -internal fun TitusClusterSpec.resolveEntryPoint(region: String) = - overrides[region]?.entryPoint ?: defaults.entryPoint ?: "" +internal fun TitusClusterSpec.resolveEntryPoint(region: String? 
= null) = + when (region) { + null -> defaults.entryPoint ?: "" + else -> overrides[region]?.entryPoint ?: defaults.entryPoint ?: "" + } -internal fun TitusClusterSpec.resolveCapacityGroup(region: String) = - overrides[region]?.capacityGroup ?: defaults.capacityGroup ?: moniker.app +internal fun TitusClusterSpec.resolveCapacityGroup(region: String? = null) = + when (region) { + null -> defaults.capacityGroup ?: moniker.app + else -> overrides[region]?.capacityGroup ?: defaults.capacityGroup ?: moniker.app + } -internal fun TitusClusterSpec.resolveConstraints(region: String) = - overrides[region]?.constraints ?: defaults.constraints ?: TitusServerGroup.Constraints() +internal fun TitusClusterSpec.resolveConstraints(region: String? = null) = + region?.let { overrides[it] }?.constraints ?: defaults.constraints ?: TitusServerGroup.Constraints() internal fun resolveContainerProvider(container: ContainerProvider): DigestProvider { if (container is DigestProvider) { @@ -78,20 +97,20 @@ internal fun resolveContainerProvider(container: ContainerProvider): DigestProvi } } -internal fun TitusClusterSpec.resolveMigrationPolicy(region: String) = - overrides[region]?.migrationPolicy ?: defaults.migrationPolicy - ?: TitusServerGroup.MigrationPolicy() +internal fun TitusClusterSpec.resolveMigrationPolicy(region: String? = null) = + region?.let { overrides[it] }?.migrationPolicy ?: defaults.migrationPolicy ?: TitusServerGroup.MigrationPolicy() -internal fun TitusClusterSpec.resolveDependencies(region: String): ClusterDependencies = +internal fun TitusClusterSpec.resolveDependencies(region: String? = null): ClusterDependencies = ClusterDependencies( - loadBalancerNames = defaults.dependencies?.loadBalancerNames + overrides[region]?.dependencies?.loadBalancerNames, - securityGroupNames = defaults.dependencies?.securityGroupNames + overrides[region]?.dependencies?.securityGroupNames, - targetGroups = defaults.dependencies?.targetGroups + overrides[region]?.dependencies?.targetGroups + loadBalancerNames = defaults.dependencies?.loadBalancerNames + (region?.let { overrides[it] }?.dependencies?.loadBalancerNames ?: emptySet()), + securityGroupNames = defaults.dependencies?.securityGroupNames + (region?.let { overrides[it] }?.dependencies?.securityGroupNames ?: emptySet()), + targetGroups = defaults.dependencies?.targetGroups + (region?.let { overrides[it] }?.dependencies?.targetGroups ?: emptySet()) ) -private fun TitusClusterSpec.resolveScaling(region: String) = +fun TitusClusterSpec.resolveScaling(region: String? = null) = // TODO: could be smarter here and merge policies from defaults and override - (overrides[region]?.scaling ?: defaults.scaling)?.run { + (region?.let { overrides[it] }?.scaling ?: defaults.scaling) + ?.run { // we set the warmup to ZERO as Titus doesn't use the warmup setting Scaling( targetTrackingPolicies = targetTrackingPolicies.map { it.copy(warmup = null, scaleOutCooldown = it.scaleOutCooldown ?: DEFAULT_AUTOSCALE_SCALE_OUT_COOLDOWN, scaleInCooldown = it.scaleInCooldown ?: DEFAULT_AUTOSCALE_SCALE_IN_COOLDOWN) }.toSet(), @@ -99,6 +118,10 @@ private fun TitusClusterSpec.resolveScaling(region: String) = ) } ?: Scaling() + +fun TitusClusterSpec.resolveTags(region: String? 
= null) = + defaults.tags + (region?.let { overrides[it] }?.tags ?: emptyMap()) + internal fun TitusClusterSpec.resolve(): Set = locations.regions.map { TitusServerGroup( @@ -118,7 +141,7 @@ internal fun TitusClusterSpec.resolve(): Set = containerAttributes = resolveContainerAttributes(it.name), migrationPolicy = resolveMigrationPolicy(it.name), resources = resolveResources(it.name), - tags = defaults.tags + overrides[it.name]?.tags, + tags = resolveTags(it.name), artifactName = artifactName, artifactVersion = artifactVersion, scaling = resolveScaling(it.name) diff --git a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/optics/titusOptics.kt b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/optics/titusOptics.kt new file mode 100644 index 0000000000..47e2450762 --- /dev/null +++ b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/optics/titusOptics.kt @@ -0,0 +1,58 @@ +package com.netflix.spinnaker.keel.titus.optics + +import arrow.optics.Lens +import com.netflix.spinnaker.keel.api.Moniker +import com.netflix.spinnaker.keel.api.SimpleLocations +import com.netflix.spinnaker.keel.api.titus.TitusClusterSpec +import com.netflix.spinnaker.keel.api.titus.TitusServerGroupSpec +import com.netflix.spinnaker.keel.optics.monikerStackLens +import com.netflix.spinnaker.keel.optics.simpleLocationsAccountLens + +/** + * Lens for getting/setting [TitusClusterSpec.moniker]. + */ +val titusClusterSpecMonikerLens: Lens = Lens( + get = TitusClusterSpec::moniker, + set = { spec, moniker -> spec.copy(moniker = moniker) } +) + +/** + * Composed lens for getting/setting the [Moniker.stack] of a [TitusClusterSpec]. + */ +val titusClusterSpecStackLens = titusClusterSpecMonikerLens + monikerStackLens + +/** + * Lens for getting/setting [TitusClusterSpec.locations]. + */ +val titusClusterSpecLocationsLens: Lens = Lens( + get = TitusClusterSpec::locations, + set = { spec, locations -> spec.copy(locations = locations) } +) + +/** + * Lens for getting/setting [TitusClusterSpec.defaults]. + */ +val titusClusterSpecDefaultsLens: Lens = Lens( + get = TitusClusterSpec::defaults, + set = { spec, defaults -> spec.copy(_defaults = defaults) } +) + +/** + * Composed lens for getting/setting the [SimpleLocations.account] of a [TitusClusterSpec]. + */ +val titusClusterSpecAccountLens = + titusClusterSpecLocationsLens + simpleLocationsAccountLens + +/** + * Lens for getting/setting [TitusServerGroupSpec.containerAttributes]. + */ +val titusServerGroupSpecContainerAttributesLens: Lens> = Lens( + get = { it.containerAttributes ?: emptyMap() }, + set = { titusServerGroupSpec, containerAttributes -> titusServerGroupSpec.copy(containerAttributes = containerAttributes) } +) + +/** + * Composed lens for getting/setting the [TitusServerGroupSpec.containerAttributes] of a [TitusClusterSpec.moniker]. 
+ */ +val titusClusterSpecContainerAttributesLens = + titusClusterSpecDefaultsLens + titusServerGroupSpecContainerAttributesLens diff --git a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/verification/TestContainerVerificationEvaluator.kt b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/verification/TestContainerVerificationEvaluator.kt index 544e41ee0c..db17042945 100644 --- a/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/verification/TestContainerVerificationEvaluator.kt +++ b/keel-titus-plugin/src/main/kotlin/com/netflix/spinnaker/keel/titus/verification/TestContainerVerificationEvaluator.kt @@ -52,7 +52,10 @@ class TestContainerVerificationEvaluator( verification: Verification, oldState: ActionState ): ActionState = - runBlocking { containerRunner.getNewState(oldState, linkStrategy) } + runBlocking { + log.debug("Getting new verification state for ${context.shortName()}") + containerRunner.getNewState(oldState, linkStrategy) + } override fun start(context: ArtifactInEnvironmentContext, verification: Verification): Map { require(verification is TestContainerVerification) { diff --git a/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/InstanceMetadataServiceResolverTests.kt b/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/InstanceMetadataServiceResolverTests.kt new file mode 100644 index 0000000000..6633b4f52f --- /dev/null +++ b/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/InstanceMetadataServiceResolverTests.kt @@ -0,0 +1,97 @@ +package com.netflix.spinnaker.keel.titus + +import com.netflix.spinnaker.keel.api.Moniker +import com.netflix.spinnaker.keel.api.Resource +import com.netflix.spinnaker.keel.api.SimpleLocations +import com.netflix.spinnaker.keel.api.SimpleRegionSpec +import com.netflix.spinnaker.keel.api.ec2.Capacity.DefaultCapacity +import com.netflix.spinnaker.keel.api.support.EventPublisher +import com.netflix.spinnaker.keel.api.titus.TITUS_CLUSTER_V1 +import com.netflix.spinnaker.keel.api.titus.TitusClusterSpec +import com.netflix.spinnaker.keel.api.titus.TitusServerGroup +import com.netflix.spinnaker.keel.api.titus.TitusServerGroup.Location +import com.netflix.spinnaker.keel.docker.DigestProvider +import com.netflix.spinnaker.keel.docker.ReferenceProvider +import com.netflix.spinnaker.keel.environments.DependentEnvironmentFinder +import com.netflix.spinnaker.keel.persistence.FeatureRolloutRepository +import com.netflix.spinnaker.keel.rollout.RolloutAwareResolverTests +import com.netflix.spinnaker.keel.titus.optics.titusClusterSpecAccountLens +import com.netflix.spinnaker.keel.titus.optics.titusClusterSpecStackLens +import strikt.api.Assertion +import strikt.assertions.get +import strikt.assertions.hasEntry +import strikt.assertions.isNotEqualTo +import strikt.assertions.isNotNull +import java.util.UUID + +internal class InstanceMetadataServiceResolverTests : + RolloutAwareResolverTests, InstanceMetadataServiceResolver>() { + + override fun createResolver( + dependentEnvironmentFinder: DependentEnvironmentFinder, + resourceToCurrentState: suspend (Resource) -> Map, + featureRolloutRepository: FeatureRolloutRepository, + eventPublisher: EventPublisher + ) = InstanceMetadataServiceResolver( + dependentEnvironmentFinder, + resourceToCurrentState, + featureRolloutRepository, + eventPublisher + ) + + override val kind = TITUS_CLUSTER_V1.kind + + override val spec = TitusClusterSpec( + moniker = Moniker( + app = "fnord" + ), + locations = SimpleLocations( + 
account = "prod", + regions = setOf( + SimpleRegionSpec(name = "us-west-2") + ) + ), + container = ReferenceProvider(reference = "fnord-docker") + ) + + override val previousEnvironmentSpec = + titusClusterSpecAccountLens.set(titusClusterSpecStackLens.set(spec, "test"), "test") + + override val nonExistentResolvedResource = emptyMap() + + override fun TitusClusterSpec.withFeatureApplied() = + titusClusterSpecImdsRequireTokenLens.set(this, "true") + + override fun TitusClusterSpec.withFeatureNotApplied() = + titusClusterSpecImdsRequireTokenLens.set(this, "false") + + override fun TitusClusterSpec.toResolvedType(featureActive: Boolean) = + locations.regions.map(SimpleRegionSpec::name).associateWith { region -> + TitusServerGroup( + name = "${moniker}-v001", + location = Location( + account = locations.account, + region = region + ), + capacity = DefaultCapacity(1, 1, 1), + capacityGroup = "capacityGroup", + container = DigestProvider("fnord", "docker", UUID.randomUUID().toString()), + id = UUID.randomUUID().toString(), + containerAttributes = if (featureActive) mapOf("titusParameter.agent.imds.requireToken" to "true") else emptyMap() + ) + } + + override fun Assertion.Builder>.featureIsApplied() = + apply { + get { spec.defaults.containerAttributes } + .isNotNull() + .hasEntry("titusParameter.agent.imds.requireToken", "true") + } + + override fun Assertion.Builder>.featureIsNotApplied(): Assertion.Builder> = + apply { + get { spec.defaults.containerAttributes } + .isNotNull() + .get("titusParameter.agent.imds.requireToken") isNotEqualTo "true" // it can be false or it can be not present + } +} diff --git a/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusBaseClusterHandlerTests.kt b/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusBaseClusterHandlerTests.kt index 5b227f8438..61e83db00b 100644 --- a/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusBaseClusterHandlerTests.kt +++ b/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusBaseClusterHandlerTests.kt @@ -1,10 +1,15 @@ package com.netflix.spinnaker.keel.titus.resource +import com.netflix.spinnaker.keel.api.Highlander +import com.netflix.spinnaker.keel.api.ManagedRolloutConfig import com.netflix.spinnaker.keel.api.Moniker +import com.netflix.spinnaker.keel.api.RedBlack import com.netflix.spinnaker.keel.api.Resource import com.netflix.spinnaker.keel.api.ResourceDiff +import com.netflix.spinnaker.keel.api.SelectionStrategy import com.netflix.spinnaker.keel.api.SimpleLocations import com.netflix.spinnaker.keel.api.SimpleRegionSpec +import com.netflix.spinnaker.keel.api.StaggeredRegion import com.netflix.spinnaker.keel.api.actuation.TaskLauncher import com.netflix.spinnaker.keel.api.ec2.Capacity import com.netflix.spinnaker.keel.api.ec2.ClusterSpec @@ -17,6 +22,9 @@ import com.netflix.spinnaker.keel.api.titus.TitusServerGroup import com.netflix.spinnaker.keel.api.titus.TitusServerGroupSpec import com.netflix.spinnaker.keel.clouddriver.CloudDriverCache import com.netflix.spinnaker.keel.clouddriver.CloudDriverService +import com.netflix.spinnaker.keel.clouddriver.model.Credential +import com.netflix.spinnaker.keel.clouddriver.model.DockerImage +import com.netflix.spinnaker.keel.core.serverGroup import com.netflix.spinnaker.keel.diff.DefaultResourceDiff import com.netflix.spinnaker.keel.docker.DigestProvider import com.netflix.spinnaker.keel.orca.ClusterExportHelper @@ -24,15 +32,30 @@ import 
com.netflix.spinnaker.keel.orca.OrcaService import com.netflix.spinnaker.keel.titus.TitusClusterHandler import com.netflix.spinnaker.keel.titus.byRegion import com.netflix.spinnaker.keel.titus.resolve +import io.mockk.coEvery +import io.mockk.every import io.mockk.mockk import io.mockk.spyk import java.time.Clock +import java.time.Duration class TitusBaseClusterHandlerTests : BaseClusterHandlerTests() { - private val cloudDriverService: CloudDriverService = mockk() - private val cloudDriverCache: CloudDriverCache = mockk() + val cloudDriverService: CloudDriverService = mockk(relaxed = true) { + coEvery { findDockerImages(any(),any(),any()) } returns listOf( + DockerImage( + account = "account", repository = "repo", tag = "butter", digest = "1234567890" + ) + ) + } + val cloudDriverCache: CloudDriverCache = mockk() { + every { credentialBy("account") } returns Credential( + name = "account", + type = "titus", + environment = "testenv", + attributes = mutableMapOf("awsAccount" to "awsAccount", "registry" to "testregistry") + ) + } private val orcaService: OrcaService = mockk() - private val taskLauncher: TaskLauncher = mockk() private val clusterExportHelper: ClusterExportHelper = mockk() val metadata = mapOf("id" to "1234", "application" to "waffles", "serviceAccount" to "me@you.com" ) @@ -43,16 +66,23 @@ class TitusBaseClusterHandlerTests : BaseClusterHandlerTests): List = resource.spec.locations.regions.map { it.name }.toList() - override fun createSpyHandler(resolvers: List>, clock: Clock, eventPublisher: EventPublisher): TitusClusterHandler = + override fun createSpyHandler( + resolvers: List>, + clock: Clock, + eventPublisher: EventPublisher, + taskLauncher: TaskLauncher, + ): TitusClusterHandler = spyk(TitusClusterHandler( cloudDriverService = cloudDriverService, cloudDriverCache = cloudDriverCache, @@ -76,7 +106,29 @@ class TitusBaseClusterHandlerTests : BaseClusterHandlerTests { + val spec = baseSpec.copy( + locations = SimpleLocations( + account = "account", + regions = setOf(SimpleRegionSpec("east"), SimpleRegionSpec("west")) + ), + deployWith = RedBlack( + stagger = listOf( + StaggeredRegion( + region = "east", + pauseTime = Duration.ofMinutes(1) + ) + ) ) ) return Resource( @@ -86,22 +138,103 @@ class TitusBaseClusterHandlerTests : BaseClusterHandlerTests> { - val currentServerGroups = getSingleRegionCluster().spec.resolve() + override fun getMultiRegionManagedRolloutCluster(): Resource { + val spec = baseSpec.copy( + locations = SimpleLocations( + account = "account", + regions = setOf(SimpleRegionSpec("east"), SimpleRegionSpec("west")) + ), + managedRollout = ManagedRolloutConfig(enabled = true, selectionStrategy = SelectionStrategy.ALPHABETICAL) + ) + return Resource( + kind = TITUS_CLUSTER_V1.kind, + metadata = metadata, + spec = spec + ) + } + + override fun getDiffInMoreThanEnabled(resource: Resource): ResourceDiff> { + val currentServerGroups = resource.spec.resolve() .byRegion() - val desiredServerGroups = getSingleRegionCluster().spec.resolve() + val desiredServerGroups = resource.spec.resolve() .map { it.withDoubleCapacity().withManyEnabled() }.byRegion() return DefaultResourceDiff(desiredServerGroups, currentServerGroups) } - override fun getDiffOnlyInEnabled(): ResourceDiff> { - val currentServerGroups = getSingleRegionCluster().spec.resolve() + override fun getDiffOnlyInEnabled(resource: Resource): ResourceDiff> { + val currentServerGroups = resource.spec.resolve() .byRegion() - val desiredServerGroups = getSingleRegionCluster().spec.resolve() + val 
desiredServerGroups = resource.spec.resolve() .map { it.withManyEnabled() }.byRegion() return DefaultResourceDiff(desiredServerGroups, currentServerGroups) } + override fun getDiffInCapacity(resource: Resource): ResourceDiff> { + val current = resource.spec.resolve().byRegion() + val desired = resource.spec.resolve().map { it.withDoubleCapacity() }.byRegion() + return DefaultResourceDiff(desired, current) + } + + override fun getDiffInImage(resource: Resource, version: String?): ResourceDiff> { + val current = resource.spec.resolve().byRegion() + val desired = resource.spec.resolve().map { it.withADifferentImage(version ?: "1255555555555555") }.byRegion() + return DefaultResourceDiff(desired, current) + } + + override fun getCreateAndModifyDiff(resource: Resource): ResourceDiff> { + val current = resource.spec.resolve().byRegion() + val desired = resource.spec.resolve().map { + when(it.location.region) { + "east" -> it.withADifferentImage("1255555555555555") + else -> it.withDoubleCapacity() + } + }.byRegion() + return DefaultResourceDiff(desired, current) + } + + override fun getDiffForRollback( + resource: Resource, + version: String, + currentMoniker: Moniker + ): ResourceDiff> { + val current = resource.spec.resolve().map { it.withMoniker(currentMoniker) }.byRegion() + val desired = resource.spec.resolve().map { it.withADifferentImage(version) }.byRegion() + return DefaultResourceDiff(desired, current) + } + + override fun getDiffForRollbackPlusCapacity( + resource: Resource, + version: String, + currentMoniker: Moniker + ): ResourceDiff> { + val current = resource.spec.resolve().map { it.withMoniker(currentMoniker) }.byRegion() + val desired = resource.spec.resolve().map { it.withADifferentImage(version).withZeroCapacity() }.byRegion() + return DefaultResourceDiff(desired, current) + } + + // this needs to return server groups with an actual server group moniker + override fun getRollbackServerGroupsByRegion( + resource: Resource, + version: String, + rollbackMoniker: Moniker + ): Map> = + resource + .spec + .resolve() + .map { it.withADifferentImage(version).withMoniker(rollbackMoniker) } + .associate { it.location.region to listOf(it) } + + override fun getRollbackServerGroupsByRegionZeroCapacity( + resource: Resource, + version: String, + rollbackMoniker: Moniker + ): Map> = + resource + .spec + .resolve() + .map { it.withADifferentImage(version).withMoniker(rollbackMoniker).withZeroCapacity() } + .associate { it.location.region to listOf(it) } + private fun TitusServerGroup.withDoubleCapacity(): TitusServerGroup = copy( capacity = Capacity.DefaultCapacity( @@ -111,8 +244,23 @@ class TitusBaseClusterHandlerTests : BaseClusterHandlerTests() - val cloudDriverCache = mockk() + val cloudDriverCache = mockk() { + every { credentialBy(titusAccount) } returns titusAccountCredential + } val orcaService = mockk() val resolvers = emptyList>() val repository = mockk() @@ -83,9 +96,6 @@ internal class TitusClusterExportTests : JUnit5Minutests { val sg1East = SecurityGroupSummary("keel", "sg-279585936", "vpc-1") val sg2East = SecurityGroupSummary("keel-elb", "sg-610264122", "vpc-1") - val titusAccount = "titustest" - val awsAccount = "test" - val container = DigestProvider( organization = "spinnaker", image = "keel", @@ -183,10 +193,7 @@ internal class TitusClusterExportTests : JUnit5Minutests { every { securityGroupByName(awsAccount, "us-east-1", sg1East.name) } returns sg1East every { securityGroupByName(awsAccount, "us-east-1", sg2East.name) } returns sg2East - coEvery { 
cloudDriverService.getAccountInformation(titusAccount) } returns mapOf( - "awsAccount" to awsAccount, - "registry" to awsAccount + "registry" - ) + every { cloudDriverCache.credentialBy(titusAccount) } returns titusAccountCredential } coEvery { orcaService.orchestrate(resource.serviceAccount, any()) } returns TaskRefResponse("/tasks/${UUID.randomUUID()}") every { repository.environmentFor(any()) } returns Environment("test") @@ -209,7 +216,7 @@ internal class TitusClusterExportTests : JUnit5Minutests { coEvery { cloudDriverService.titusActiveServerGroup(any(), "us-east-1") } returns activeServerGroupResponseEast coEvery { cloudDriverService.titusActiveServerGroup(any(), "us-west-2") } returns activeServerGroupResponseWest coEvery { cloudDriverService.findDockerImages("testregistry", (spec.container as DigestProvider).repository()) } returns images - coEvery { cloudDriverService.getAccountInformation(titusAccount) } returns mapOf("registry" to "testregistry") + every { cloudDriverCache.credentialBy(titusAccount) } returns titusAccountCredential } context("exported titus cluster spec") { @@ -323,7 +330,7 @@ internal class TitusClusterExportTests : JUnit5Minutests { .withDoubleCapacity() coEvery { cloudDriverService.findDockerImages("testregistry", container.repository()) } returns images - coEvery { cloudDriverService.getAccountInformation(titusAccount) } returns mapOf("registry" to "testregistry") + every { cloudDriverCache.credentialBy(titusAccount) } returns titusAccountCredential } context("exported titus cluster spec") { diff --git a/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusClusterHandlerTests.kt b/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusClusterHandlerTests.kt index 08e9cb8de9..b3463e5f17 100644 --- a/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusClusterHandlerTests.kt +++ b/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusClusterHandlerTests.kt @@ -43,9 +43,11 @@ import com.netflix.spinnaker.keel.api.titus.TitusServerGroup import com.netflix.spinnaker.keel.api.titus.TitusServerGroupSpec import com.netflix.spinnaker.keel.clouddriver.CloudDriverCache import com.netflix.spinnaker.keel.clouddriver.CloudDriverService +import com.netflix.spinnaker.keel.clouddriver.model.Credential import com.netflix.spinnaker.keel.clouddriver.model.DockerImage import com.netflix.spinnaker.keel.clouddriver.model.SecurityGroupSummary import com.netflix.spinnaker.keel.clouddriver.model.ServerGroupCollection +import com.netflix.spinnaker.keel.core.orcaClusterMoniker import com.netflix.spinnaker.keel.diff.DefaultResourceDiff import com.netflix.spinnaker.keel.docker.DigestProvider import com.netflix.spinnaker.keel.events.ResourceHealthEvent @@ -94,8 +96,21 @@ import io.mockk.coVerify as verify // todo eb: we could probably have generic cluster tests // where you provide the correct info for the spec and active server groups class TitusClusterHandlerTests : JUnit5Minutests { - val cloudDriverService = mockk() - val cloudDriverCache = mockk() + val titusAccount = "titustest" + val awsAccount = "test" + val titusAccountCredential = Credential( + name = titusAccount, + type = "titus", + environment = "testenv", + attributes = mutableMapOf("awsAccount" to awsAccount, "registry" to awsAccount + "registry") + ) + + val cloudDriverService = mockk() { + every { listTitusServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection(titusAccount, emptySet()) + } 
+ val cloudDriverCache = mockk() { + every { credentialBy(titusAccount) } returns titusAccountCredential + } val orcaService = mockk() val resolvers = emptyList>() val repository = mockk() @@ -116,9 +131,6 @@ class TitusClusterHandlerTests : JUnit5Minutests { val sg1East = SecurityGroupSummary("keel", "sg-279585936", "vpc-1") val sg2East = SecurityGroupSummary("keel-elb", "sg-610264122", "vpc-1") - val titusAccount = "titustest" - val awsAccount = "test" - val digestProvider = DigestProvider( organization = "spinnaker", image = "keel", @@ -226,11 +238,7 @@ class TitusClusterHandlerTests : JUnit5Minutests { every { securityGroupById(awsAccount, "us-east-1", sg2East.id) } returns sg2East every { securityGroupByName(awsAccount, "us-east-1", sg1East.name) } returns sg1East every { securityGroupByName(awsAccount, "us-east-1", sg2East.name) } returns sg2East - - every { cloudDriverService.getAccountInformation(titusAccount) } returns mapOf( - "awsAccount" to awsAccount, - "registry" to awsAccount + "registry" - ) + every { cloudDriverCache.credentialBy(titusAccount) } returns titusAccountCredential } every { repository.environmentFor(any()) } returns Environment("test") every { @@ -428,8 +436,14 @@ class TitusClusterHandlerTests : JUnit5Minutests { } context("a diff has been detected") { - context("the diff is only in capacity") { + before { + every { cloudDriverService.findDockerImages("testregistry", "spinnaker/keel", any(), any(), any()) } returns + listOf( + DockerImage("testregistry", "spinnaker/keel", "master-h2.blah", "sha:1111") + ) + } + context("the diff is only in capacity") { val modified = setOf( serverGroupEast.copy(name = activeServerGroupResponseEast.name), serverGroupWest.copy(name = activeServerGroupResponseWest.name).withDoubleCapacity() @@ -442,6 +456,7 @@ class TitusClusterHandlerTests : JUnit5Minutests { test("resolving diff resizes the current server group") { val slot = slot() every { orcaService.orchestrate(resource.serviceAccount, capture(slot)) } answers { TaskRefResponse(ULID().nextULID())} + every { cloudDriverService.listTitusServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection(titusAccount, emptySet()) runBlocking { upsert(resource, diff) @@ -523,6 +538,7 @@ class TitusClusterHandlerTests : JUnit5Minutests { test("resolving diff clones the current server group by tag") { val slot = slot() every { orcaService.orchestrate(resource.serviceAccount, capture(slot)) } answers { TaskRefResponse(ULID().nextULID()) } + every { cloudDriverService.listTitusServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection(titusAccount, emptySet()) runBlocking { upsert(resource, diff) @@ -556,6 +572,7 @@ class TitusClusterHandlerTests : JUnit5Minutests { test("events are fired for the artifact deploying") { val slot = slot() every { orcaService.orchestrate(resource.serviceAccount, capture(slot)) } answers { TaskRefResponse(ULID().nextULID()) } + every { cloudDriverService.listTitusServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection(titusAccount, emptySet()) runBlocking { upsert(resource, diff) @@ -568,6 +585,7 @@ class TitusClusterHandlerTests : JUnit5Minutests { test("resolving diff clones the current server group by digest") { val slot = slot() every { orcaService.orchestrate(resource.serviceAccount, capture(slot)) } answers { TaskRefResponse(ULID().nextULID()) } + every { cloudDriverService.listTitusServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection(titusAccount, emptySet()) 
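The test fixtures above replace the old getAccountInformation map lookup with CloudDriverCache.credentialBy, and stub the answers directly inside the mockk construction block. A minimal sketch of that stubbing style follows, written against hypothetical interface slices rather than the real keel-clouddriver types.

import io.mockk.coEvery
import io.mockk.every
import io.mockk.mockk

// Hypothetical slices of the interfaces the tests stub.
interface CloudDriverCache { fun credentialBy(name: String): Map<String, Any?> }
interface CloudDriverService { suspend fun findDockerImages(registry: String, repository: String): List<String> }

// Block-style stubbing keeps the canned answers next to the mock declaration.
val cloudDriverCache: CloudDriverCache = mockk {
  every { credentialBy("titustest") } returns mapOf("awsAccount" to "test", "registry" to "testregistry")
}

// relaxed = true returns harmless defaults for anything not stubbed explicitly;
// coEvery is the suspend-aware variant of every.
val cloudDriverService: CloudDriverService = mockk(relaxed = true) {
  coEvery { findDockerImages("testregistry", "spinnaker/keel") } returns listOf("master-h2.blah")
}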
runBlocking { upsert(resource, diff) @@ -590,6 +608,7 @@ class TitusClusterHandlerTests : JUnit5Minutests { test("the default deploy strategy is used") { val slot = slot() every { orcaService.orchestrate(resource.serviceAccount, capture(slot)) } answers { TaskRefResponse(ULID().nextULID()) } + every { cloudDriverService.listTitusServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection(titusAccount, emptySet()) val deployWith = RedBlack() runBlocking { @@ -609,6 +628,7 @@ class TitusClusterHandlerTests : JUnit5Minutests { test("the deploy strategy is configured") { val slot = slot() every { orcaService.orchestrate(resource.serviceAccount, capture(slot)) } answers { TaskRefResponse(ULID().nextULID()) } + every { cloudDriverService.listTitusServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection(titusAccount, emptySet()) val deployWith = RedBlack( resizePreviousToZero = true, @@ -633,6 +653,7 @@ class TitusClusterHandlerTests : JUnit5Minutests { test("a different deploy strategy is used") { val slot = slot() every { orcaService.orchestrate(resource.serviceAccount, capture(slot)) } answers { TaskRefResponse(ULID().nextULID()) } + every { cloudDriverService.listTitusServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection(titusAccount, emptySet()) runBlocking { upsert(resource.copy(spec = resource.spec.copy(deployWith = Highlander())), diff) @@ -653,6 +674,7 @@ class TitusClusterHandlerTests : JUnit5Minutests { before { every { cloudDriverService.findDockerImages("testregistry", "spinnaker/keel", any(), any(), any()) } returns listOf(DockerImage("testregistry", "spinnaker/keel", "master-h2.blah", "sha:1111")) + every { cloudDriverService.listTitusServerGroups(any(), any(), any(), any(), any()) } returns ServerGroupCollection(titusAccount, emptySet()) } val modified = setOf( serverGroupEast.copy(name = activeServerGroupResponseEast.name).withDifferentRuntimeOptions(), @@ -732,13 +754,13 @@ class TitusClusterHandlerTests : JUnit5Minutests { mapOf( "type" to "destroyServerGroup", "asgName" to it.name, - "moniker" to it.moniker, + "moniker" to it.moniker.orcaClusterMoniker, "serverGroupName" to it.name, "region" to it.region, "credentials" to allServerGroups.accountName, "cloudProvider" to "titus", "user" to resource.serviceAccount, - "completeOtherBranchesThenFail" to false, + "completeOtherBranchesThenFail" to true, "continuePipeline" to false, "failPipeline" to false, ) diff --git a/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusClusterScalingPolicyTests.kt b/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusClusterScalingPolicyTests.kt index f12bfb4b23..b90d3f3921 100644 --- a/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusClusterScalingPolicyTests.kt +++ b/keel-titus-plugin/src/test/kotlin/com/netflix/spinnaker/keel/titus/resource/TitusClusterScalingPolicyTests.kt @@ -16,9 +16,11 @@ import com.netflix.spinnaker.keel.api.titus.TITUS_CLOUD_PROVIDER import com.netflix.spinnaker.keel.api.titus.TITUS_CLUSTER_V1 import com.netflix.spinnaker.keel.api.titus.TitusClusterSpec import com.netflix.spinnaker.keel.api.titus.TitusScalingSpec +import com.netflix.spinnaker.keel.clouddriver.CloudDriverCache import com.netflix.spinnaker.keel.clouddriver.CloudDriverService import com.netflix.spinnaker.keel.clouddriver.model.Capacity import com.netflix.spinnaker.keel.clouddriver.model.Constraints +import 
com.netflix.spinnaker.keel.clouddriver.model.Credential import com.netflix.spinnaker.keel.clouddriver.model.CustomizedMetricSpecificationModel import com.netflix.spinnaker.keel.clouddriver.model.DockerImage import com.netflix.spinnaker.keel.clouddriver.model.InstanceCounts @@ -164,11 +166,6 @@ class TitusClusterScalingPolicyTests { ) val cloudDriverService = mockk() { - every { getAccountInformation(account, any()) } returns mapOf( - "awsAccount" to "test", - "registry" to "testregistry" - ) - every { findDockerImages("testregistry", "fnord/fnord", any(), any(), any(), any()) } returns listOf( DockerImage( account = "testregistry", @@ -179,6 +176,15 @@ class TitusClusterScalingPolicyTests { ) } + val cloudDriverCache: CloudDriverCache = mockk { + every { credentialBy(account) } returns Credential( + name = account, + type = "titus", + environment = "testenv", + attributes = mutableMapOf("awsAccount" to "test", "registry" to "testregistry") + ) + } + fun CloudDriverService.stubActiveServerGroup(serverGroup: TitusActiveServerGroup) { every { titusActiveServerGroup( @@ -208,7 +214,7 @@ class TitusClusterScalingPolicyTests { val stages = slot>() val taskLauncher = mockk() { every { - submitJob(any(), any(), any(), capture(stages)) + submitJob(any(), any(), any(), capture(stages), any()) } answers { Task(randomUID().toString(), arg(1)) } @@ -216,7 +222,7 @@ class TitusClusterScalingPolicyTests { val handler = TitusClusterHandler( cloudDriverService = cloudDriverService, - cloudDriverCache = mockk(), + cloudDriverCache = cloudDriverCache, orcaService = mockk(), clock = Clock.systemDefaultZone(), taskLauncher = taskLauncher, diff --git a/keel-web/keel-web.gradle b/keel-web/keel-web.gradle index d3f715d9e4..a674fe9cf2 100644 --- a/keel-web/keel-web.gradle +++ b/keel-web/keel-web.gradle @@ -26,6 +26,7 @@ dependencies { api(project(":keel-lemur")) api(project(":keel-notifications")) api(project(":keel-front50")) + api(project(":keel-orca")) implementation(project(":keel-bakery-plugin")) implementation(project(":keel-ec2-plugin")) @@ -47,8 +48,11 @@ dependencies { implementation("io.spinnaker.kork:kork-plugins") implementation("com.slack.api:bolt-servlet:1.6.0") implementation("com.graphql-java:graphql-java-extended-scalars:16.0.1") - implementation(platform('com.netflix.graphql.dgs:graphql-dgs-platform-dependencies:4.3.2')) + + // DGS dependencies + implementation(platform('com.netflix.graphql.dgs:graphql-dgs-platform-dependencies:4.9.0')) implementation("com.netflix.graphql.dgs:graphql-dgs-spring-boot-starter") + implementation 'com.fasterxml.jackson.datatype:jackson-datatype-joda' runtimeOnly("io.spinnaker.kork:kork-runtime") runtimeOnly("io.springfox:springfox-boot-starter:3.0.0") diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/config/KeelConfigurationFinalizer.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/config/KeelConfigurationFinalizer.kt index 746ee1220c..8a1dda9096 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/config/KeelConfigurationFinalizer.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/config/KeelConfigurationFinalizer.kt @@ -11,7 +11,6 @@ import com.netflix.spinnaker.keel.api.ResourceSpec import com.netflix.spinnaker.keel.api.RollingPush import com.netflix.spinnaker.keel.api.constraints.StatefulConstraintEvaluator import com.netflix.spinnaker.keel.api.constraints.StatelessConstraintEvaluator -import com.netflix.spinnaker.keel.api.ec2.ApplicationLoadBalancerSpec import com.netflix.spinnaker.keel.api.ec2.ApplicationLoadBalancerSpec.Action import 
com.netflix.spinnaker.keel.api.plugins.ArtifactSupplier import com.netflix.spinnaker.keel.api.plugins.ConstraintEvaluator diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/services/AdminService.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/admin/AdminService.kt similarity index 97% rename from keel-web/src/main/kotlin/com/netflix/spinnaker/keel/services/AdminService.kt rename to keel-web/src/main/kotlin/com/netflix/spinnaker/keel/admin/AdminService.kt index efeb6ae963..6597d605ee 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/services/AdminService.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/admin/AdminService.kt @@ -1,9 +1,9 @@ -package com.netflix.spinnaker.keel.services +package com.netflix.spinnaker.keel.admin import com.netflix.spinnaker.keel.api.ArtifactInEnvironmentContext import com.netflix.spinnaker.keel.api.StatefulConstraint -import com.netflix.spinnaker.keel.api.actuation.ExecutionSummary -import com.netflix.spinnaker.keel.api.actuation.ExecutionSummaryService +import com.netflix.spinnaker.keel.actuation.ExecutionSummary +import com.netflix.spinnaker.keel.actuation.ExecutionSummaryService import com.netflix.spinnaker.keel.api.constraints.ConstraintStatus.OVERRIDE_FAIL import com.netflix.spinnaker.keel.api.plugins.ArtifactSupplier import com.netflix.spinnaker.keel.api.plugins.supporting @@ -67,7 +67,7 @@ class AdminService( * Removes the stored state we have for any stateful constraints for an environment * so they will evaluate again */ - fun forceConstraintReevaluation(application: String, environment: String, type: String? = null) { + fun forceConstraintReevaluation(application: String, environment: String, reference: String, version: String, type: String? = null) { log.info("[app=$application, env=$environment] Forcing reevaluation of stateful constraints.") if (type != null) { log.info("[app=$application, env=$environment] Forcing only type $type") @@ -80,7 +80,7 @@ class AdminService( if (constraint is StatefulConstraint) { if (type == null || type == constraint.type) { log.info("[app=$application, env=$environment] Deleting constraint state for ${constraint.type}.") - repository.deleteConstraintState(deliveryConfig.name, environment, constraint.type) + repository.deleteConstraintState(deliveryConfig.name, environment, reference, version, constraint.type) } } } diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ApplicationContext.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ApplicationContext.kt index ea939c3476..c43e753fa7 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ApplicationContext.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ApplicationContext.kt @@ -23,7 +23,9 @@ class ApplicationContext() { var allVersions: Map> = emptyMap() fun getArtifactVersions(deliveryArtifact: DeliveryArtifact, environmentName: String): List? 
{ - return allVersions[ArtifactAndEnvironment(artifact = deliveryArtifact, environmentName = environmentName)] + return if (allVersions.isNotEmpty()) { + allVersions[ArtifactAndEnvironment(artifact = deliveryArtifact, environmentName = environmentName)] + } else null } fun getConfig(): DeliveryConfig { diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ApplicationFetcher.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ApplicationFetcher.kt index 409901b10b..0c18f40824 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ApplicationFetcher.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ApplicationFetcher.kt @@ -8,7 +8,7 @@ import com.netflix.graphql.dgs.context.DgsContext import com.netflix.graphql.dgs.exceptions.DgsEntityNotFoundException import com.netflix.spinnaker.keel.api.Environment import com.netflix.spinnaker.keel.api.action.ActionType -import com.netflix.spinnaker.keel.api.actuation.ExecutionSummaryService +import com.netflix.spinnaker.keel.actuation.ExecutionSummaryService import com.netflix.spinnaker.keel.artifacts.ArtifactVersionLinks import com.netflix.spinnaker.keel.auth.AuthorizationSupport import com.netflix.spinnaker.keel.core.api.DependsOnConstraint @@ -24,7 +24,6 @@ import com.netflix.spinnaker.keel.graphql.types.MdComparisonLinks import com.netflix.spinnaker.keel.graphql.types.MdConstraint import com.netflix.spinnaker.keel.graphql.types.MdEnvironment import com.netflix.spinnaker.keel.graphql.types.MdEnvironmentState -import com.netflix.spinnaker.keel.graphql.types.MdExecutionSummary import com.netflix.spinnaker.keel.graphql.types.MdGitMetadata import com.netflix.spinnaker.keel.graphql.types.MdLifecycleStep import com.netflix.spinnaker.keel.graphql.types.MdNotification @@ -32,15 +31,12 @@ import com.netflix.spinnaker.keel.graphql.types.MdPackageDiff import com.netflix.spinnaker.keel.graphql.types.MdPausedInfo import com.netflix.spinnaker.keel.graphql.types.MdPinnedVersion import com.netflix.spinnaker.keel.graphql.types.MdPullRequest -import com.netflix.spinnaker.keel.graphql.types.MdResource -import com.netflix.spinnaker.keel.graphql.types.MdResourceActuationState -import com.netflix.spinnaker.keel.graphql.types.MdResourceActuationStatus -import com.netflix.spinnaker.keel.graphql.types.MdResourceTask import com.netflix.spinnaker.keel.graphql.types.MdVersionVeto import com.netflix.spinnaker.keel.pause.ActuationPauser import com.netflix.spinnaker.keel.persistence.DismissibleNotificationRepository import com.netflix.spinnaker.keel.persistence.KeelRepository import com.netflix.spinnaker.keel.persistence.NoDeliveryConfigForApplication +import com.netflix.spinnaker.keel.persistence.TaskTrackingRepository import com.netflix.spinnaker.keel.scm.ScmUtils import com.netflix.spinnaker.keel.services.ResourceStatusService import graphql.execution.DataFetcherResult @@ -59,16 +55,17 @@ import java.util.concurrent.CompletableFuture class ApplicationFetcher( private val authorizationSupport: AuthorizationSupport, private val keelRepository: KeelRepository, - private val resourceStatusService: ResourceStatusService, private val actuationPauser: ActuationPauser, private val artifactVersionLinks: ArtifactVersionLinks, private val applicationFetcherSupport: ApplicationFetcherSupport, private val notificationRepository: DismissibleNotificationRepository, private val scmUtils: ScmUtils, - private val executionSummaryService: ExecutionSummaryService ) { - @DgsData(parentType = DgsConstants.QUERY.TYPE_NAME, field = 
DgsConstants.QUERY.Application) + @DgsData.List( + DgsData(parentType = DgsConstants.QUERY.TYPE_NAME, field = DgsConstants.QUERY.Application), + DgsData(parentType = DgsConstants.QUERY.TYPE_NAME, field = DgsConstants.QUERY.Md_application) + ) @PreAuthorize("""@authorizationSupport.hasApplicationPermission('READ', 'APPLICATION', #appName)""") fun application(dfe: DataFetchingEnvironment, @InputArgument("appName") appName: String): MdApplication { val config = try { @@ -86,7 +83,10 @@ class ApplicationFetcher( ) } - @DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.Environments) + @DgsData.List( + DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.Environments), + DgsData(parentType = DgsConstants.MD_APPLICATION.TYPE_NAME, field = DgsConstants.MD_APPLICATION.Environments) + ) fun environments(dfe: DgsDataFetchingEnvironment): List> { val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) return config.environments.sortedWith { env1, env2 -> @@ -123,7 +123,10 @@ class ApplicationFetcher( } } - @DgsData(parentType = DgsConstants.MDENVIRONMENT.TYPE_NAME, field = DgsConstants.MDENVIRONMENT.GitMetadata) + @DgsData.List( + DgsData(parentType = DgsConstants.MDENVIRONMENT.TYPE_NAME, field = DgsConstants.MDENVIRONMENT.GitMetadata), + DgsData(parentType = DgsConstants.MD_ENVIRONMENT.TYPE_NAME, field = DgsConstants.MD_ENVIRONMENT.GitMetadata) + ) fun environmentGitMetadata(dfe: DgsDataFetchingEnvironment): MdGitMetadata? { val env: Environment = dfe.getLocalContext() return if (env.isPreview) { @@ -145,40 +148,28 @@ class ApplicationFetcher( } } - @DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.IsPaused) + @DgsData.List( + DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.IsPaused), + DgsData(parentType = DgsConstants.MD_APPLICATION.TYPE_NAME, field = DgsConstants.MD_APPLICATION.IsPaused), + ) fun isPaused(dfe: DgsDataFetchingEnvironment): Boolean { val app: MdApplication = dfe.getSource() return actuationPauser.applicationIsPaused(app.name) } - @DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.PausedInfo) + @DgsData.List( + DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.PausedInfo), + DgsData(parentType = DgsConstants.MD_APPLICATION.TYPE_NAME, field = DgsConstants.MD_APPLICATION.PausedInfo), + ) fun pausedInfo(dfe: DgsDataFetchingEnvironment): MdPausedInfo? 
{ val app: MdApplication = dfe.getSource() return actuationPauser.getApplicationPauseInfo(app.name)?.toDgsPaused() } - @DgsData(parentType = DgsConstants.MDRESOURCE.TYPE_NAME, field = DgsConstants.MDRESOURCE.State) - fun resourceStatus(dfe: DgsDataFetchingEnvironment): MdResourceActuationState { - val resource: MdResource = dfe.getSource() - val state = resourceStatusService.getActuationState(resource.id) - return MdResourceActuationState( - status = MdResourceActuationStatus.valueOf(state.status.name), - reason = state.reason, - event = state.eventMessage, - tasks = state.tasks?.map { - MdResourceTask(id = it.id, name = it.name) - } - ) - } - - @DgsData(parentType = DgsConstants.MDRESOURCETASK.TYPE_NAME, field = DgsConstants.MDRESOURCETASK.Summary) - fun taskSummary(dfe: DgsDataFetchingEnvironment): MdExecutionSummary { - val task: MdResourceTask = dfe.getSource() - val summary = executionSummaryService.getSummary(task.id) - return summary.toDgs() - } - - @DgsData(parentType = DgsConstants.MDARTIFACT.TYPE_NAME, field = DgsConstants.MDARTIFACT.Versions) + @DgsData.List( + DgsData(parentType = DgsConstants.MDARTIFACT.TYPE_NAME, field = DgsConstants.MDARTIFACT.Versions), + DgsData(parentType = DgsConstants.MD_ARTIFACT.TYPE_NAME, field = DgsConstants.MD_ARTIFACT.Versions), + ) fun versions( dfe: DataFetchingEnvironment, @InputArgument("statuses", collectionType = MdArtifactStatusInEnvironment::class) statuses: List?, @@ -213,7 +204,10 @@ class ApplicationFetcher( } } - @DgsData(parentType = DgsConstants.MDGITMETADATA.TYPE_NAME, field = DgsConstants.MDGITMETADATA.ComparisonLinks) + @DgsData.List( + DgsData(parentType = DgsConstants.MDGITMETADATA.TYPE_NAME, field = DgsConstants.MDGITMETADATA.ComparisonLinks), + DgsData(parentType = DgsConstants.MD_GITMETADATA.TYPE_NAME, field = DgsConstants.MD_GITMETADATA.ComparisonLinks), + ) fun comparisonLinks(dfe: DataFetchingEnvironment): MdComparisonLinks? { val diffContext = applicationFetcherSupport.getDiffContext(dfe) @@ -233,15 +227,24 @@ class ApplicationFetcher( } } - @DgsData( - parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, - field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.PackageDiff + @DgsData.List( + DgsData( + parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, + field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.PackageDiff + ), + DgsData( + parentType = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, + field = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.PackageDiff + ), ) fun packageDiff(dfe: DataFetchingEnvironment): MdPackageDiff? { return applicationFetcherSupport.getDebianPackageDiff(dfe) } - @DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.LifecycleSteps) + @DgsData.List( + DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.LifecycleSteps), + DgsData(parentType = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.LifecycleSteps), + ) fun lifecycleSteps(dfe: DataFetchingEnvironment): CompletableFuture>? 
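Each fetcher above is now registered twice through @DgsData.List: once for the legacy Md* parent type and once for the renamed MD_* type, so both schema spellings resolve through the same method. A minimal sketch of the pattern, with placeholder type and field names where the real code uses the generated DgsConstants:

import com.netflix.graphql.dgs.DgsComponent
import com.netflix.graphql.dgs.DgsData
import com.netflix.graphql.dgs.DgsDataFetchingEnvironment

@DgsComponent
class PausedFetcherSketch {

  // One implementation serves both the old and the new GraphQL parent types.
  @DgsData.List(
    DgsData(parentType = "MdApplication", field = "isPaused"),
    DgsData(parentType = "MD_Application", field = "isPaused"),
  )
  fun isPaused(dfe: DgsDataFetchingEnvironment): Boolean {
    // Placeholder body: the real fetcher asks ActuationPauser about the app in dfe.getSource().
    return false
  }
}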
{ val dataLoader: DataLoader> = dfe.getDataLoader(LifecycleEventsByVersionDataLoader.Descriptor.name) val artifact: MdArtifactVersionInEnvironment = dfe.getSource() @@ -255,7 +258,10 @@ class ApplicationFetcher( ) } - @DgsData(parentType = DgsConstants.MDARTIFACT.TYPE_NAME, field = DgsConstants.MDARTIFACT.PinnedVersion) + @DgsData.List( + DgsData(parentType = DgsConstants.MDARTIFACT.TYPE_NAME, field = DgsConstants.MDARTIFACT.PinnedVersion), + DgsData(parentType = DgsConstants.MD_ARTIFACT.TYPE_NAME, field = DgsConstants.MD_ARTIFACT.PinnedVersion), + ) fun pinnedVersion(dfe: DataFetchingEnvironment): CompletableFuture? { val dataLoader: DataLoader = dfe.getDataLoader(PinnedVersionInEnvironmentDataLoader.Descriptor.name) val artifact: MdArtifact = dfe.getSource() @@ -267,7 +273,29 @@ class ApplicationFetcher( )) } - @DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.Constraints) + @DgsData.List( + DgsData(parentType = DgsConstants.MDARTIFACT.TYPE_NAME, field = DgsConstants.MDARTIFACT.LatestApprovedVersion), + DgsData(parentType = DgsConstants.MD_ARTIFACT.TYPE_NAME, field = DgsConstants.MD_ARTIFACT.LatestApprovedVersion), + ) + fun latestApprovedVersion(dfe: DataFetchingEnvironment): MdArtifactVersionInEnvironment? { + val artifact: MdArtifact = dfe.getSource() + val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) + val deliveryArtifact = config.matchingArtifactByReference(artifact.reference) ?: return null + + //[gyardeni + rhorev] please note - some values (like MdComparisonLinks) will not be retrieved for MdArtifactVersionInEnvironment + //due to our current dgs model. + keelRepository.getLatestApprovedInEnvArtifactVersion(config, deliveryArtifact, artifact.environment) + ?.let { + return it.toDgs(artifact.environment) + } + + return null + } + + @DgsData.List( + DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.Constraints), + DgsData(parentType = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.Constraints), + ) fun artifactConstraints(dfe: DataFetchingEnvironment): CompletableFuture>? { val dataLoader: DataLoader> = dfe.getDataLoader(ConstraintsDataLoader.Descriptor.name) val artifact: MdArtifactVersionInEnvironment = dfe.getSource() @@ -282,7 +310,10 @@ class ApplicationFetcher( } } - @DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.Verifications) + @DgsData.List( + DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.Verifications), + DgsData(parentType = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.Verifications), + ) fun artifactVerifications(dfe: DataFetchingEnvironment): CompletableFuture>? 
{ val dataLoader: DataLoader> = dfe.getDataLoader(ActionsDataLoader.Descriptor.name) val artifact: MdArtifactVersionInEnvironment = dfe.getSource() @@ -298,7 +329,10 @@ class ApplicationFetcher( } } - @DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.PostDeploy) + @DgsData.List( + DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.PostDeploy), + DgsData(parentType = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.PostDeploy), + ) fun artifactPostDeploy(dfe: DataFetchingEnvironment): CompletableFuture>? { val dataLoader: DataLoader> = dfe.getDataLoader(ActionsDataLoader.Descriptor.name) val artifact: MdArtifactVersionInEnvironment = dfe.getSource() @@ -314,16 +348,10 @@ class ApplicationFetcher( } } - @DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.Resources) - fun artifactResources(dfe: DataFetchingEnvironment): List? { - val artifact: MdArtifactVersionInEnvironment = dfe.getSource() - val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) - return artifact.environment?.let { - config.resourcesUsing(artifact.reference, artifact.environment).map { it.toDgs(config, artifact.environment) } - } - } - - @DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.Veto) + @DgsData.List( + DgsData(parentType = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MDARTIFACTVERSIONINENVIRONMENT.Veto), + DgsData(parentType = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.TYPE_NAME, field = DgsConstants.MD_ARTIFACTVERSIONINENVIRONMENT.Veto), + ) fun versionVetoed(dfe: DataFetchingEnvironment): CompletableFuture? { val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) val dataLoader: DataLoader = dfe.getDataLoader(VetoedDataLoader.Descriptor.name) @@ -342,14 +370,20 @@ class ApplicationFetcher( /** * Fetches the list of dismissible notifications for the application in context. */ - @DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.Notifications) + @DgsData.List( + DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.Notifications), + DgsData(parentType = DgsConstants.MD_APPLICATION.TYPE_NAME, field = DgsConstants.MD_APPLICATION.Notifications), + ) fun applicationNotifications(dfe: DataFetchingEnvironment): List? { val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) return notificationRepository.notificationHistory(config.application, true, setOf(WARNING, ERROR)) .map { it.toDgs() } } - @DgsData(parentType = DgsConstants.MDENVIRONMENT.TYPE_NAME, field = DgsConstants.MDENVIRONMENT.IsDeleting) + @DgsData.List( + DgsData(parentType = DgsConstants.MDENVIRONMENT.TYPE_NAME, field = DgsConstants.MDENVIRONMENT.IsDeleting), + DgsData(parentType = DgsConstants.MD_ENVIRONMENT.TYPE_NAME, field = DgsConstants.MD_ENVIRONMENT.IsDeleting), + ) fun environmentIsDeleting(dfe: DataFetchingEnvironment): CompletableFuture? 
{ val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) val dataLoader: DataLoader = dfe.getDataLoader(EnvironmentDeletionStatusLoader.NAME) @@ -358,8 +392,6 @@ class ApplicationFetcher( } return environment?.let { dataLoader.load(environment) } } - -// add function for putting the resources on the artifactVersion } fun Environment.dependsOn(another: Environment) = diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ConfigFetcher.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ConfigFetcher.kt index 2b6aee68f4..8eb5dfdddb 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ConfigFetcher.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ConfigFetcher.kt @@ -18,18 +18,33 @@ class ConfigFetcher( private val deliveryConfigImporter: DeliveryConfigImporter ) { - @DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.Config) + @DgsData.List( + DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.Config), + DgsData(parentType = DgsConstants.MD_APPLICATION.TYPE_NAME, field = DgsConstants.MD_APPLICATION.Config), + ) fun config(dfe: DgsDataFetchingEnvironment): MdConfig { val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) return MdConfig( id = "${config.application}-${config.name}", updatedAt = config.updatedAt, rawConfig = config.rawConfig, - processedConfig = yamlMapper.writeValueAsString(config.copy(rawConfig = null)) + previewEnvironmentsConfigured = config.previewEnvironments.isNotEmpty() ) } - @DgsData(parentType = DgsConstants.MDCONFIG.TYPE_NAME, field = DgsConstants.MDCONFIG.RawConfig) + @DgsData.List( + DgsData(parentType = DgsConstants.MDCONFIG.TYPE_NAME, field = DgsConstants.MDCONFIG.ProcessedConfig), + DgsData(parentType = DgsConstants.MD_CONFIG.TYPE_NAME, field = DgsConstants.MD_CONFIG.ProcessedConfig), + ) + fun processedConfig(dfe: DgsDataFetchingEnvironment): String? { + val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) + return yamlMapper.writeValueAsString(config.copy(rawConfig = null)) + } + + @DgsData.List( + DgsData(parentType = DgsConstants.MDCONFIG.TYPE_NAME, field = DgsConstants.MDCONFIG.RawConfig), + DgsData(parentType = DgsConstants.MD_CONFIG.TYPE_NAME, field = DgsConstants.MD_CONFIG.RawConfig), + ) fun rawConfig(dfe: DgsDataFetchingEnvironment): String? 
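In ConfigFetcher above, the serialized processedConfig is no longer built eagerly inside config(); it moves into its own field fetcher, so the YAML dump of a large delivery config is only produced when a query actually selects that field. A rough sketch of that shape, with hypothetical stand-ins for MdConfig, the delivery config, and the context lookup:

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.dataformat.yaml.YAMLMapper
import com.netflix.graphql.dgs.DgsComponent
import com.netflix.graphql.dgs.DgsData
import com.netflix.graphql.dgs.DgsDataFetchingEnvironment

// Hypothetical stand-ins for the delivery config and its GraphQL wrapper.
data class DeliveryConfigSketch(val application: String, val name: String, val rawConfig: String? = null)
data class ConfigView(val id: String)

@DgsComponent
class ConfigFetcherSketch(private val yamlMapper: ObjectMapper = YAMLMapper()) {

  // Cheap parent fetcher: identifiers only, no serialization work.
  @DgsData(parentType = "MdApplication", field = "config")
  fun config(dfe: DgsDataFetchingEnvironment): ConfigView {
    val config = loadConfig(dfe)
    return ConfigView(id = "${config.application}-${config.name}")
  }

  // Expensive child fetcher: runs only when the query selects processedConfig.
  @DgsData(parentType = "MdConfig", field = "processedConfig")
  fun processedConfig(dfe: DgsDataFetchingEnvironment): String? {
    val config = loadConfig(dfe)
    return yamlMapper.writeValueAsString(config.copy(rawConfig = null))
  }

  // Placeholder for the context lookup the real fetchers do via ApplicationFetcherSupport.
  private fun loadConfig(dfe: DgsDataFetchingEnvironment): DeliveryConfigSketch =
    DeliveryConfigSketch(application = "fnord", name = "fnord-manifest")
}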
{ val rawConfig = dfe.getSource().rawConfig val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/GitIntegration.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/GitIntegration.kt index a4b49fbf55..6945322aeb 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/GitIntegration.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/GitIntegration.kt @@ -35,7 +35,10 @@ class GitIntegration( private val deliveryConfigUpserter: DeliveryConfigUpserter, private val importer: DeliveryConfigImporter, ) { - @DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.GitIntegration) + @DgsData.List( + DgsData(parentType = DgsConstants.MDAPPLICATION.TYPE_NAME, field = DgsConstants.MDAPPLICATION.GitIntegration), + DgsData(parentType = DgsConstants.MD_APPLICATION.TYPE_NAME, field = DgsConstants.MD_APPLICATION.GitIntegration), + ) fun gitIntegration(dfe: DgsDataFetchingEnvironment): MdGitIntegration { val app: MdApplication = dfe.getSource() val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) @@ -44,7 +47,10 @@ class GitIntegration( }.toGitIntegration() } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.UpdateGitIntegration) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.UpdateGitIntegration), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_updateGitIntegration), + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #payload.application) and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #payload.application)""" @@ -69,7 +75,10 @@ class GitIntegration( return updatedFront50App.toGitIntegration() } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.ImportDeliveryConfig) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.ImportDeliveryConfig), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_importDeliveryConfig), + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #application) and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #application)""" @@ -78,7 +87,7 @@ class GitIntegration( @InputArgument application: String, ): Boolean { val front50App = runBlocking { - front50Cache.applicationByName(application) + front50Cache.applicationByName(application, invalidateCache = true) } val defaultBranch = scmUtils.getDefaultBranch(front50App) val deliveryConfig = @@ -94,14 +103,19 @@ class GitIntegration( } private fun Application.toGitIntegration(): MdGitIntegration { - val branch = scmUtils.getDefaultBranch(this) - return MdGitIntegration( - id = "${name}-git-integration", - repository = "${repoProjectKey}/${repoSlug}", - branch = branch, - isEnabled = managedDelivery?.importDeliveryConfig, - manifestPath = managedDelivery?.manifestPath ?: DEFAULT_MANIFEST_PATH, - link = scmUtils.getBranchLink(repoType, repoProjectKey, repoSlug, branch), - ) + try { + scmUtils.getDefaultBranch(this) + } catch (e: Exception) { + throw DgsEntityNotFoundException("Unable to retrieve your app's git repo details. 
Please check the app config.") + }.let { branch -> + return MdGitIntegration( + id = "${name}-git-integration", + repository = "${repoProjectKey}/${repoSlug}", + branch = branch, + isEnabled = managedDelivery?.importDeliveryConfig, + manifestPath = managedDelivery?.manifestPath ?: DEFAULT_MANIFEST_PATH, + link = scmUtils.getBranchLink(repoType, repoProjectKey, repoSlug, branch), + ) + } } } diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/Mutations.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/Mutations.kt index fb19d4444d..5fa16f1cad 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/Mutations.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/Mutations.kt @@ -3,6 +3,7 @@ package com.netflix.spinnaker.keel.dgs import com.netflix.graphql.dgs.DgsComponent import com.netflix.graphql.dgs.DgsData import com.netflix.graphql.dgs.InputArgument +import com.netflix.graphql.dgs.exceptions.DgsEntityNotFoundException import com.netflix.spinnaker.keel.api.ArtifactInEnvironmentContext import com.netflix.spinnaker.keel.api.action.ActionType import com.netflix.spinnaker.keel.api.constraints.ConstraintStatus @@ -12,12 +13,14 @@ import com.netflix.spinnaker.keel.core.api.EnvironmentArtifactPin import com.netflix.spinnaker.keel.core.api.EnvironmentArtifactVeto import com.netflix.spinnaker.keel.exceptions.InvalidConstraintException import com.netflix.spinnaker.keel.graphql.DgsConstants +import com.netflix.spinnaker.keel.graphql.types.MD_ConstraintStatusPayload import com.netflix.spinnaker.keel.graphql.types.MdAction import com.netflix.spinnaker.keel.graphql.types.MdArtifactVersionActionPayload import com.netflix.spinnaker.keel.graphql.types.MdConstraintStatus import com.netflix.spinnaker.keel.graphql.types.MdConstraintStatusPayload import com.netflix.spinnaker.keel.graphql.types.MdDismissNotificationPayload import com.netflix.spinnaker.keel.graphql.types.MdMarkArtifactVersionAsGoodPayload +import com.netflix.spinnaker.keel.graphql.types.MdRestartConstraintEvaluationPayload import com.netflix.spinnaker.keel.graphql.types.MdRetryArtifactActionPayload import com.netflix.spinnaker.keel.graphql.types.MdToggleResourceManagementPayload import com.netflix.spinnaker.keel.graphql.types.MdUnpinArtifactVersionPayload @@ -48,14 +51,28 @@ class Mutations( private val log by lazy { LoggerFactory.getLogger(Mutations::class.java) } } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = "recheckUnhappyResource") - fun recheckUnhappyResource( - @InputArgument resourceId: String - ) { - unhappyVeto.clearVeto(resourceId) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.RestartConstraintEvaluation), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_restartConstraintEvaluation), + ) + @PreAuthorize( + """@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #payload.application) + and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #payload.application)""" + ) + fun restartConstraintEvaluation( + @InputArgument payload: MdRestartConstraintEvaluationPayload, + ): Boolean { + val config = deliveryConfigRepository.getByApplication(payload.application) + if (deliveryConfigRepository.getConstraintState(config.name, payload.environment, payload.version, payload.type, payload.reference) == null) { + throw DgsEntityNotFoundException("Constraint ${payload.type} not found for version ${payload.version} of ${payload.reference}") + } + return 
deliveryConfigRepository.deleteConstraintState(config.name, payload.environment, payload.reference, payload.version, payload.type) > 0 } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.UpdateConstraintStatus) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.UpdateConstraintStatus), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_updateConstraintStatus), + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #payload.application) and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #payload.application)""" @@ -77,7 +94,10 @@ class Mutations( } } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.ToggleManagement) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.ToggleManagement), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_toggleManagement) + ) @PreAuthorize("@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #application)") fun toggleManagement( @InputArgument application: String, @@ -93,7 +113,10 @@ class Mutations( return true } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.PinArtifactVersion) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.PinArtifactVersion), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_pinArtifactVersion), + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #payload.application) and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #payload.application)""" @@ -106,7 +129,10 @@ class Mutations( return true } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.UnpinArtifactVersion) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.UnpinArtifactVersion), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_unpinArtifactVersion), + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #payload.application) and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #payload.application)""" @@ -124,7 +150,10 @@ class Mutations( return true } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.MarkArtifactVersionAsBad) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.MarkArtifactVersionAsBad), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_markArtifactVersionAsBad), + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #payload.application) and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #payload.application)""" @@ -137,7 +166,10 @@ class Mutations( return true } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.MarkArtifactVersionAsGood) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.MarkArtifactVersionAsGood), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_markArtifactVersionAsGood) + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 
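The restartConstraintEvaluation mutation above guards the delete with an existence check, so callers get a not-found error instead of a silent no-op, and it reports success by checking that the delete removed at least one row. A rough sketch of that flow against a hypothetical repository slice (the real calls are on DeliveryConfigRepository):

// Hypothetical repository slice mirroring the argument order used in the patch.
interface ConstraintStateStore {
  fun getConstraintState(configName: String, environment: String, version: String, type: String, reference: String): Any?
  fun deleteConstraintState(configName: String, environment: String, reference: String, version: String, type: String): Int
}

class ConstraintNotFound(message: String) : RuntimeException(message)

fun restartConstraintEvaluation(
  store: ConstraintStateStore,
  configName: String,
  environment: String,
  reference: String,
  version: String,
  type: String
): Boolean {
  // Fail loudly if there is nothing to restart.
  store.getConstraintState(configName, environment, version, type, reference)
    ?: throw ConstraintNotFound("Constraint $type not found for version $version of $reference")
  // deleteConstraintState returns the number of rows removed; > 0 means the restart took effect.
  return store.deleteConstraintState(configName, environment, reference, version, type) > 0
}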
'APPLICATION', #payload.application) and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #payload.application)""" @@ -155,7 +187,10 @@ class Mutations( return true } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.RetryArtifactVersionAction) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.RetryArtifactVersionAction), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_retryArtifactVersionAction), + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #payload.application) and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #payload.application)""" @@ -193,7 +228,10 @@ class Mutations( /** * Dismisses a notification, given it's ID. */ - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.DismissNotification) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.DismissNotification), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_dismissNotification), + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 'APPLICATION', #payload.application) and @authorizationSupport.hasServiceAccountAccess('APPLICATION', #payload.application)""" @@ -206,7 +244,10 @@ class Mutations( return notificationRepository.dismissNotificationById(payload.application, ULID.parseULID(payload.id), user) } - @DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.ToggleResourceManagement) + @DgsData.List( + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.ToggleResourceManagement), + DgsData(parentType = DgsConstants.MUTATION.TYPE_NAME, field = DgsConstants.MUTATION.Md_toggleResourceManagement), + ) @PreAuthorize( """@authorizationSupport.hasApplicationPermission('WRITE', 'RESOURCE', #payload.id) and @authorizationSupport.hasServiceAccountAccess('RESOURCE', #payload.id)""" diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ResourceDetailsFetcher.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ResourceDetailsFetcher.kt index dedc4f922f..d67162c412 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ResourceDetailsFetcher.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ResourceDetailsFetcher.kt @@ -18,7 +18,10 @@ class ResourceDetailsFetcher( /** * Returns the raw definition of the resource in scope in YAML format. This will include metadata added by Keel. */ - @DgsData(parentType = DgsConstants.MDRESOURCE.TYPE_NAME, field = DgsConstants.MDRESOURCE.RawDefinition) + @DgsData.List( + DgsData(parentType = DgsConstants.MDRESOURCE.TYPE_NAME, field = DgsConstants.MDRESOURCE.RawDefinition), + DgsData(parentType = DgsConstants.MD_RESOURCE.TYPE_NAME, field = DgsConstants.MD_RESOURCE.RawDefinition), + ) fun rawDefinition(dfe: DgsDataFetchingEnvironment): String? 
{ val resource: MdResource = dfe.getSource() val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ResourceFetcher.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ResourceFetcher.kt new file mode 100644 index 0000000000..b0670ea1ea --- /dev/null +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/ResourceFetcher.kt @@ -0,0 +1,88 @@ +package com.netflix.spinnaker.keel.dgs + +import com.netflix.graphql.dgs.DgsComponent +import com.netflix.graphql.dgs.DgsData +import com.netflix.graphql.dgs.DgsDataFetchingEnvironment +import com.netflix.spinnaker.keel.actuation.ExecutionSummaryService +import com.netflix.spinnaker.keel.artifacts.ArtifactVersionLinks +import com.netflix.spinnaker.keel.auth.AuthorizationSupport +import com.netflix.spinnaker.keel.graphql.DgsConstants +import com.netflix.spinnaker.keel.graphql.types.MdArtifact +import com.netflix.spinnaker.keel.graphql.types.MdExecutionSummary +import com.netflix.spinnaker.keel.graphql.types.MdResource +import com.netflix.spinnaker.keel.graphql.types.MdResourceActuationState +import com.netflix.spinnaker.keel.graphql.types.MdResourceActuationStatus +import com.netflix.spinnaker.keel.graphql.types.MdResourceTask +import com.netflix.spinnaker.keel.pause.ActuationPauser +import com.netflix.spinnaker.keel.persistence.DismissibleNotificationRepository +import com.netflix.spinnaker.keel.persistence.KeelRepository +import com.netflix.spinnaker.keel.persistence.TaskTrackingRepository +import com.netflix.spinnaker.keel.scm.ScmUtils +import com.netflix.spinnaker.keel.services.ResourceStatusService +import graphql.schema.DataFetchingEnvironment + +/** + * Fetches details about resources, as defined in [schema.graphql] + */ +@DgsComponent +class ResourceFetcher( + private val authorizationSupport: AuthorizationSupport, + private val keelRepository: KeelRepository, + private val resourceStatusService: ResourceStatusService, + private val actuationPauser: ActuationPauser, + private val artifactVersionLinks: ArtifactVersionLinks, + private val applicationFetcherSupport: ApplicationFetcherSupport, + private val notificationRepository: DismissibleNotificationRepository, + private val scmUtils: ScmUtils, + private val executionSummaryService: ExecutionSummaryService, + private val taskTrackingRepository: TaskTrackingRepository +) { + @DgsData.List( + DgsData(parentType = DgsConstants.MDARTIFACT.TYPE_NAME, field = DgsConstants.MDARTIFACT.Resources), + DgsData(parentType = DgsConstants.MD_ARTIFACT.TYPE_NAME, field = DgsConstants.MD_ARTIFACT.Resources), + ) + fun artifactResources(dfe: DataFetchingEnvironment): List? 
{ + val artifact: MdArtifact = dfe.getSource() + val config = applicationFetcherSupport.getDeliveryConfigFromContext(dfe) + return artifact.environment?.let { + config.resourcesUsing(artifact.reference, artifact.environment).map { it.toDgs(config, artifact.environment) } + } + } + + @DgsData.List( + DgsData(parentType = DgsConstants.MDRESOURCE.TYPE_NAME, field = DgsConstants.MDRESOURCE.State), + DgsData(parentType = DgsConstants.MD_RESOURCE.TYPE_NAME, field = DgsConstants.MD_RESOURCE.State), + ) + fun resourceStatus(dfe: DgsDataFetchingEnvironment): MdResourceActuationState { + val resource: MdResource = dfe.getSource() + val state = resourceStatusService.getActuationState(resource.id) + return MdResourceActuationState( + resourceId = resource.id, + status = MdResourceActuationStatus.valueOf(state.status.name), + reason = state.reason, + event = state.eventMessage + ) + } + + @DgsData.List( + DgsData(parentType = DgsConstants.MDRESOURCEACTUATIONSTATE.TYPE_NAME, field = DgsConstants.MDRESOURCEACTUATIONSTATE.Tasks), + DgsData(parentType = DgsConstants.MD_RESOURCEACTUATIONSTATE.TYPE_NAME, field = DgsConstants.MD_RESOURCEACTUATIONSTATE.Tasks), + ) + fun resourceTask(dfe: DgsDataFetchingEnvironment): List<MdResourceTask> { + val resourceState: MdResourceActuationState = dfe.getSource() + val tasks = taskTrackingRepository.getLatestBatchOfTasks(resourceId = resourceState.resourceId) + return tasks.map { it.toDgs() } + } + + + @DgsData.List( + DgsData(parentType = DgsConstants.MDRESOURCETASK.TYPE_NAME, field = DgsConstants.MDRESOURCETASK.Summary), + DgsData(parentType = DgsConstants.MD_RESOURCETASK.TYPE_NAME, field = DgsConstants.MD_RESOURCETASK.Summary), + ) + fun taskSummary(dfe: DgsDataFetchingEnvironment): MdExecutionSummary { + val task: MdResourceTask = dfe.getSource() + val summary = executionSummaryService.getSummary(task.id) + return summary.toDgs() + } + +} diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/conversions.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/conversions.kt index 54152acd16..0b556f6dd9 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/conversions.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/dgs/conversions.kt @@ -8,21 +8,21 @@ import com.netflix.spinnaker.keel.api.Resource import com.netflix.spinnaker.keel.api.SimpleLocations import com.netflix.spinnaker.keel.api.SubnetAwareLocations import com.netflix.spinnaker.keel.api.TaskStatus -import com.netflix.spinnaker.keel.api.actuation.ExecutionSummary -import com.netflix.spinnaker.keel.api.actuation.RolloutStatus -import com.netflix.spinnaker.keel.api.actuation.RolloutTarget -import com.netflix.spinnaker.keel.api.actuation.RolloutTargetWithStatus -import com.netflix.spinnaker.keel.api.actuation.Stage +import com.netflix.spinnaker.keel.actuation.ExecutionSummary +import com.netflix.spinnaker.keel.actuation.RolloutStatus +import com.netflix.spinnaker.keel.actuation.RolloutTargetWithStatus +import com.netflix.spinnaker.keel.actuation.Stage import com.netflix.spinnaker.keel.api.artifacts.GitMetadata +import com.netflix.spinnaker.keel.api.artifacts.PublishedArtifact import com.netflix.spinnaker.keel.bakery.diff.PackageDiff import com.netflix.spinnaker.keel.graphql.types.MdArtifact +import com.netflix.spinnaker.keel.graphql.types.MdArtifactVersionInEnvironment import com.netflix.spinnaker.keel.graphql.types.MdCommitInfo import com.netflix.spinnaker.keel.graphql.types.MdDeployLocation import com.netflix.spinnaker.keel.graphql.types.MdDeployTarget import
com.netflix.spinnaker.keel.graphql.types.MdEventLevel import com.netflix.spinnaker.keel.graphql.types.MdExecutionSummary import com.netflix.spinnaker.keel.graphql.types.MdGitMetadata -import com.netflix.spinnaker.keel.graphql.types.MdLifecycleEventStatus import com.netflix.spinnaker.keel.graphql.types.MdLocation import com.netflix.spinnaker.keel.graphql.types.MdMoniker import com.netflix.spinnaker.keel.graphql.types.MdNotification @@ -32,11 +32,13 @@ import com.netflix.spinnaker.keel.graphql.types.MdPackageDiff import com.netflix.spinnaker.keel.graphql.types.MdPausedInfo import com.netflix.spinnaker.keel.graphql.types.MdPullRequest import com.netflix.spinnaker.keel.graphql.types.MdResource +import com.netflix.spinnaker.keel.graphql.types.MdResourceTask import com.netflix.spinnaker.keel.graphql.types.MdRolloutTargetStatus import com.netflix.spinnaker.keel.graphql.types.MdStageDetail import com.netflix.spinnaker.keel.graphql.types.MdTaskStatus import com.netflix.spinnaker.keel.notifications.DismissibleNotification import com.netflix.spinnaker.keel.pause.Pause +import com.netflix.spinnaker.keel.persistence.TaskForResource fun GitMetadata.toDgs(): MdGitMetadata = @@ -131,13 +133,22 @@ fun DismissibleNotification.toDgs() = dismissedBy = dismissedBy ) -fun ExecutionSummary.toDgs() = MdExecutionSummary( - status = status.toDgs(), - currentStage = currentStage?.toDgs(), - stages = stages.map { it.toDgs() }, - deployTargets = deployTargets.map { it.toDgs() }, - error = error -) +fun ExecutionSummary.toDgs() = + MdExecutionSummary( + id = "$id:summary", + status = status.toDgs(), + currentStage = currentStage?.toDgs(), + stages = stages.map { it.toDgs() }, + deployTargets = deployTargets.map { it.toDgs() }, + error = error + ) + +fun TaskForResource.toDgs() = + MdResourceTask( + id = id, + name = name, + running = endedAt == null + ) fun RolloutTargetWithStatus.toDgs() = MdDeployTarget( @@ -181,3 +192,18 @@ fun TaskStatus.toDgs(): MdTaskStatus = TaskStatus.BUFFERED -> MdTaskStatus.BUFFERED TaskStatus.SKIPPED -> MdTaskStatus.SKIPPED } + +fun PublishedArtifact.toDgs(environmentName: String) = + MdArtifactVersionInEnvironment( + id = "latest-approved-${environmentName}-${reference}-${version}", + version = version, + buildNumber = buildNumber, + createdAt = createdAt, + gitMetadata = if (gitMetadata == null) { + null + } else { + gitMetadata?.toDgs() + }, + environment = environmentName, + reference = reference, + ) diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/export/ExportService.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/export/ExportService.kt new file mode 100644 index 0000000000..7e55ee11fb --- /dev/null +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/export/ExportService.kt @@ -0,0 +1,413 @@ +package com.netflix.spinnaker.keel.export + +import com.fasterxml.jackson.annotation.JsonIgnore +import com.fasterxml.jackson.dataformat.yaml.YAMLMapper +import com.netflix.spinnaker.config.BaseUrlConfig +import com.netflix.spinnaker.keel.api.Constraint +import com.netflix.spinnaker.keel.api.Exportable +import com.netflix.spinnaker.keel.api.artifacts.DeliveryArtifact +import com.netflix.spinnaker.keel.api.ec2.EC2_CLUSTER_V1_1 +import com.netflix.spinnaker.keel.api.plugins.ResourceHandler +import com.netflix.spinnaker.keel.api.plugins.supporting +import com.netflix.spinnaker.keel.api.titus.TITUS_CLUSTER_V1 +import com.netflix.spinnaker.keel.artifacts.DebianArtifact +import com.netflix.spinnaker.keel.core.api.DEFAULT_SERVICE_ACCOUNT +import 
com.netflix.spinnaker.keel.core.api.DependsOnConstraint +import com.netflix.spinnaker.keel.core.api.ManualJudgementConstraint +import com.netflix.spinnaker.keel.core.api.SubmittedDeliveryConfig +import com.netflix.spinnaker.keel.core.api.SubmittedEnvironment +import com.netflix.spinnaker.keel.core.api.SubmittedResource +import com.netflix.spinnaker.keel.core.parseMoniker +import com.netflix.spinnaker.keel.filterNotNullValues +import com.netflix.spinnaker.keel.front50.Front50Cache +import com.netflix.spinnaker.keel.front50.model.DeployStage +import com.netflix.spinnaker.keel.front50.model.Pipeline +import com.netflix.spinnaker.keel.orca.ExecutionDetailResponse +import com.netflix.spinnaker.keel.orca.OrcaService +import kotlinx.coroutines.runBlocking +import org.slf4j.LoggerFactory +import org.springframework.stereotype.Component +import java.time.Duration +import java.time.Instant + +/** + * Encapsulates logic to export delivery configs from pipelines. + */ +@Component +class ExportService( + private val handlers: List>, + private val front50Cache: Front50Cache, + private val orcaService: OrcaService, + private val baseUrlConfig: BaseUrlConfig, + private val yamlMapper: YAMLMapper +) { + private val log by lazy { LoggerFactory.getLogger(javaClass) } + private val prettyPrinter by lazy { yamlMapper.writerWithDefaultPrettyPrinter() } + + companion object { + /** + * List of pipeline "shapes" that we know how to export. See [Pipeline.shape]. + */ + val EXPORTABLE_PIPELINE_SHAPES = listOf( + listOf("deploy"), + listOf("manualJudgment", "deploy"), + listOf("bake", "deploy", "manualJudgment", "deploy", "manualJudgment", "deploy"), + listOf("findImage", "deploy", "manualJudgment", "deploy", "manualJudgment", "deploy"), + listOf("deploy", "manualJudgment", "deploy", "manualJudgment", "deploy"), + listOf("deploy", "deploy", "deploy"), + listOf("findImage", "deploy"), + listOf("findImage", "manualJudgement", "deploy"), + listOf("findImageFromTags", "deploy"), + listOf("findImageFromTags", "manualJudgement", "deploy"), + listOf("bake", "deploy") + ) + + val SUPPORTED_TRIGGER_TYPES = listOf("docker", "jenkins", "rocket", "pipeline") + + val PROVIDERS_TO_CLUSTER_KINDS = mapOf( + "aws" to EC2_CLUSTER_V1_1.kind, + "titus" to TITUS_CLUSTER_V1.kind + ) + + val SPEL_REGEX = Regex("\\$\\{.+\\}") + } + + /** + * Given an application name, look up all the associated pipelines and attempt to build a delivery config + * that represents the corresponding environments, artifacts and delivery flow. + * + * Supports only a sub-set of well-known pipeline patterns (see [EXPORTABLE_PIPELINE_SHAPES]). 
+ */ + suspend fun exportFromPipelines( + applicationName: String, + maxAgeDays: Long = 6 * 30 + ): ExportResult { + log.info("Exporting delivery config from pipelines for application $applicationName (max age: $maxAgeDays days)") + + val pipelines = runBlocking { + front50Cache.pipelinesByApplication(applicationName) + } + val application = runBlocking { + front50Cache.applicationByName(applicationName) + } + val serviceAccount = application.email ?: DEFAULT_SERVICE_ACCOUNT + val nonExportablePipelines = pipelines.findNonExportable(maxAgeDays) + val exportablePipelines = pipelines - nonExportablePipelines.keys + val pipelinesToDeploysAndClusters = exportablePipelines.toDeploysAndClusters(serviceAccount) + val environments = mutableSetOf() + val artifacts = mutableSetOf() + + // Map clusters to the environments they represent, saving the corresponding artifacts in the process + val pipelinesToEnvironments = pipelinesToDeploysAndClusters.mapValues { (pipeline, deploysAndClusters) -> + deploysAndClusters.mapNotNull { (deploy, cluster) -> + log.debug("Attempting to build environment for cluster ${cluster.moniker}") + + val provider = cluster.cloudProvider + val kind = PROVIDERS_TO_CLUSTER_KINDS[provider] ?: error("Unsupported cluster cloud provider '$provider'") + val handler = handlers.supporting(kind) + val spec = try { + handler.export(cluster) + } catch (e: Exception) { + log.error("Unable to export cluster ${cluster.moniker}: $e. Ignoring.") + return@mapNotNull null + } + + if (spec == null) { + log.warn("No cluster exported for kind $kind") + return@mapNotNull null + } + + val environmentName = with(cluster) { + when { + !moniker.stack.isNullOrEmpty() -> moniker.stack!! + "test" in account -> "testing" + "prod" in account -> "production" + else -> account + } + } + + val manualJudgement = if (pipeline.hasManualJudgment(deploy)) { + log.debug("Adding manual judgment constraint for environment $environmentName based on manual judgment stage in pipeline ${pipeline.name}") + setOf(ManualJudgementConstraint()) + } else { + emptySet() + } + + val environment = SubmittedEnvironment( + name = environmentName, + resources = setOf( + SubmittedResource( + kind = kind, + spec = spec + ) + ), + constraints = manualJudgement + ) + + log.debug("Exported environment $environmentName from cluster [${cluster.moniker}]") + environments.add(environment) + + // Export the artifact matching the cluster while we're at it + val artifact = handler.exportArtifact(cluster) + log.debug("Exported artifact $artifact from cluster ${cluster.moniker}") + artifacts.add(artifact) + + environment + } + } + + // Now look at the pipeline triggers and dependencies between pipelines and add constraints + val finalEnvironments = pipelinesToEnvironments.flatMap { (pipeline, envs) -> + envs.map { environment -> + val constraints = triggersToEnvironmentConstraints(applicationName, pipeline, environment, pipelinesToEnvironments) + environment.copy( + constraints = environment.constraints + constraints + ).addMetadata( + "exportedFrom" to pipeline.link(baseUrlConfig.baseUrl) + ) + } + }.toSet() + .dedupe() + + val finalArtifacts = artifacts.dedupe() + + val deliveryConfig = SubmittedDeliveryConfig( + name = applicationName, + application = applicationName, + serviceAccount = serviceAccount, + artifacts = finalArtifacts, + environments = finalEnvironments.sensibleOrder() + ) + + val result = PipelineExportResult( + deliveryConfig = deliveryConfig, + exported = pipelinesToEnvironments, + skipped = nonExportablePipelines, + baseUrl = 
baseUrlConfig.baseUrl + ) + + log.info("Successfully exported delivery config:\n${prettyPrinter.writeValueAsString(result)}") + return result + } + + /** + * Finds non-exportable pipelines in the list and associates them with the reason why they're not. + */ + private fun List<Pipeline>.findNonExportable(maxAgeDays: Long): Map<Pipeline, String> { + val lastExecutions = runBlocking { + associateWith { pipeline -> + orcaService.getExecutions(pipeline.id).firstOrNull() + } + } + + return associateWith { pipeline -> + val lastExecution = lastExecutions[pipeline] + log.debug("Checking pipeline [${pipeline.name}], last execution: ${lastExecution?.buildTime}") + + when { + pipeline.disabled -> "pipeline disabled" + pipeline.fromTemplate -> "pipeline is from template" + pipeline.hasParallelStages -> "pipeline has parallel stages" + pipeline.shape !in EXPORTABLE_PIPELINE_SHAPES -> "pipeline doesn't match supported flows" + (lastExecution == null || lastExecution.olderThan(maxAgeDays)) -> "pipeline hasn't run in the last $maxAgeDays days" + else -> null + } + }.filterNotNullValues() + } + + + /** + * Extracts cluster information from deploy stages in the list of pipelines, and returns the pipelines + * mapped to those deploy stages and respective clusters represented as [Exportable] objects. + */ + private fun List<Pipeline>.toDeploysAndClusters(serviceAccount: String) = + associateWith { pipeline -> + pipeline + .stages + .filterIsInstance<DeployStage>() + .flatMap { deploy -> + deploy + .clusters + .groupBy { "${it.provider}/${it.account}/${it.name}" } + .map { (key, clusters) -> + val regions = clusters.mapNotNull { it.region }.toSet() + val (provider, account, name) = key.split("/") + deploy to Exportable( + cloudProvider = provider, + account = account, + moniker = parseMoniker(name.replace(SPEL_REGEX, "REDACTED_SPEL")), + regions = regions, + kind = PROVIDERS_TO_CLUSTER_KINDS[provider]!!, + user = serviceAccount + ) + } + } + } + + /** + * Attempts to extract [Constraint]s from the pipeline triggers for the specified [environment]. + */ + private fun triggersToEnvironmentConstraints( + application: String, + pipeline: Pipeline, + environment: SubmittedEnvironment, + pipelinesToEnvironments: Map<Pipeline, List<SubmittedEnvironment>> + ): Set<Constraint> { + val constraints = mutableSetOf<Constraint>() + + val triggers = pipeline.triggers + .filter { it.type in SUPPORTED_TRIGGER_TYPES && it.enabled } + + if (triggers.isEmpty()) { + // if there's no supported trigger, the pipeline is triggered manually, i.e. the equivalent of a manual judgment + log.debug("Pipeline '${pipeline.name}' for environment ${environment.name} has no supported triggers enabled. " + + "Adding manual-judgment constraint.") + constraints.add(ManualJudgementConstraint()) + return constraints + } + + triggers.forEach { trigger -> + when (trigger.type) { + "docker", "jenkins", "rocket" -> { + log.debug("Pipeline '${pipeline.name}' for environment ${environment.name} has CI trigger. 
" + + "This will be handled automatically by artifact detection and approval.") + } + "pipeline" -> { + // if trigger is a pipeline trigger, find the upstream environment matching that pipeline to make a depends-on + // constraint + val upstreamEnvironment = pipelinesToEnvironments.entries.find { (pipeline, _) -> + application == trigger.application + pipeline.id == trigger.pipeline + } + ?.let { (_, envs) -> + // use the last environment within the matching pipeline (which would match the last deploy, + // in case there's more than one) + envs.last() + } + + if (upstreamEnvironment != null) { + log.debug("Pipeline '${pipeline.name}' for environment ${environment.name} has pipeline trigger. " + + "Adding matching depends-on constraint on upstream environment ${upstreamEnvironment.name}.") + constraints.add(DependsOnConstraint(upstreamEnvironment.name)) + } else { + log.warn("Pipeline '${pipeline.name}' for environment ${environment.name} has pipeline trigger, " + + "but upstream environment not found. Adding manual-judgement constraint.") + constraints.add(ManualJudgementConstraint()) + } + } + else -> log.warn("Ignoring unsupported trigger type ${trigger.type} in pipeline '${pipeline.name}' for export") + } + } + + return constraints + } + + /** + * Removes duplicate environments in the set by merging together environments with the same name. + */ + @JvmName("dedupeEnvironments") + private fun Set.dedupe(): Set { + val dedupedEnvironments = mutableSetOf() + forEach { environment -> + val previouslyFound = dedupedEnvironments.find { it.name == environment.name } + val dedupedEnvironment = when { + previouslyFound != null -> { + log.debug("Merging matching environments $environment and $previouslyFound") + dedupedEnvironments.remove(previouslyFound) + previouslyFound.run { + copy( + resources = resources + environment.resources, + constraints = constraints + environment.constraints + ) + } + } + else -> environment + } + dedupedEnvironments.add(dedupedEnvironment) + } + return dedupedEnvironments + } + + /** + * Removes duplicate artifacts in the set by merging together artifacts with the same type, name and origin. + */ + private fun Set.dedupe(): Set { + val dedupedArtifacts = mutableSetOf() + forEach { artifact -> + val previouslyFound = dedupedArtifacts.find { it.type == artifact.type && it.name == artifact.name && it.from == artifact.from } + val dedupedArtifact = when { + previouslyFound is DebianArtifact && artifact is DebianArtifact -> { + log.debug("Merging matching artifacts $artifact and $previouslyFound") + dedupedArtifacts.remove(previouslyFound) + previouslyFound.run { + copy( + vmOptions = vmOptions.copy(regions = vmOptions.regions + artifact.vmOptions.regions) + ) + } + } + else -> artifact + } + dedupedArtifacts.add(dedupedArtifact) + } + return dedupedArtifacts + } + + /** + * Attempts to order the set of environments in a more sensible fashion than just randomly. 
+ */ + private fun Set<SubmittedEnvironment>.sensibleOrder() = + toSortedSet { env1, env2 -> + val env1DependsOn = env1.constraints.filterIsInstance<DependsOnConstraint>().firstOrNull() + val env2DependsOn = env2.constraints.filterIsInstance<DependsOnConstraint>().firstOrNull() + when { + env1DependsOn?.environment == env2.name -> 1 + env2DependsOn?.environment == env1.name -> -1 + else -> when { + env1.constraints.isEmpty() -> -1 + env2.constraints.isEmpty() -> 1 + else -> env1.name.compareTo(env2.name) + } + } + } + + private fun ExecutionDetailResponse.olderThan(days: Long) = + buildTime.isBefore(Instant.now() - Duration.ofDays(days)) +} + +interface ExportResult + +data class ExportErrorResult( + val error: String, + val detail: Any? = null +) : Exception(error, detail as? Throwable), ExportResult + +data class PipelineExportResult( + val deliveryConfig: SubmittedDeliveryConfig, + @JsonIgnore + val exported: Map<Pipeline, List<SubmittedEnvironment>>, + @JsonIgnore + val skipped: Map<Pipeline, String>, + @JsonIgnore + val baseUrl: String +): ExportResult { + val pipelines: Map = mapOf( + "exported" to exported.entries.map { (pipeline, environments) -> + mapOf( + "name" to pipeline.name, + "link" to pipeline.link(baseUrl), + "shape" to pipeline.shape.joinToString(" -> "), + "environments" to environments.map { it.name } + ) + }, + "skipped" to skipped.map { (pipeline, reason) -> + mapOf( + "name" to pipeline.name, + "link" to pipeline.link(baseUrl), + "shape" to pipeline.shape.joinToString(" -> "), + "reason" to reason + ) + } + ) +} + +private fun Pipeline.link(baseUrl: String) = "$baseUrl/#/applications/${application}/executions/configure/${id}" diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/rest/AdminController.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/rest/AdminController.kt index 42b10f6358..724d1ac33e 100--- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/rest/AdminController.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/rest/AdminController.kt @@ -1,6 +1,6 @@ package com.netflix.spinnaker.keel.rest -import com.netflix.spinnaker.keel.services.AdminService +import com.netflix.spinnaker.keel.admin.AdminService import com.netflix.spinnaker.keel.yaml.APPLICATION_YAML_VALUE import kotlinx.coroutines.GlobalScope import kotlinx.coroutines.launch @@ -62,9 +62,11 @@ class AdminController( fun forceConstraintReevaluation( @PathVariable("application") application: String, @PathVariable("environment") environment: String, + @PathVariable("reference") reference: String, + @PathVariable("version") version: String, @RequestParam("type", required = false) type: String?
= null ) { - adminService.forceConstraintReevaluation(application, environment, type) + adminService.forceConstraintReevaluation(application, environment, reference, version, type) } data class ReferencePayload( diff --git a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/rest/ExportController.kt b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/rest/ExportController.kt index 9735567c2f..7fef3a25c9 100644 --- a/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/rest/ExportController.kt +++ b/keel-web/src/main/kotlin/com/netflix/spinnaker/keel/rest/ExportController.kt @@ -8,46 +8,72 @@ import com.netflix.spinnaker.keel.api.plugins.supporting import com.netflix.spinnaker.keel.clouddriver.CloudDriverCache import com.netflix.spinnaker.keel.core.api.SubmittedResource import com.netflix.spinnaker.keel.core.parseMoniker +import com.netflix.spinnaker.keel.export.ExportErrorResult +import com.netflix.spinnaker.keel.export.ExportResult +import com.netflix.spinnaker.keel.export.ExportService import com.netflix.spinnaker.keel.logging.TracingSupport.Companion.blankMDC import com.netflix.spinnaker.keel.logging.TracingSupport.Companion.withTracingContext +import com.netflix.spinnaker.keel.retrofit.isNotFound import com.netflix.spinnaker.keel.yaml.APPLICATION_YAML_VALUE import kotlinx.coroutines.runBlocking import org.apache.maven.artifact.versioning.DefaultArtifactVersion import org.slf4j.LoggerFactory +import org.springframework.http.HttpStatus.INTERNAL_SERVER_ERROR +import org.springframework.http.HttpStatus.NOT_FOUND import org.springframework.http.MediaType.APPLICATION_JSON_VALUE +import org.springframework.http.ResponseEntity +import org.springframework.security.access.prepost.PreAuthorize import org.springframework.util.comparator.NullSafeComparator +import org.springframework.web.bind.annotation.ExceptionHandler import org.springframework.web.bind.annotation.GetMapping import org.springframework.web.bind.annotation.PathVariable import org.springframework.web.bind.annotation.RequestHeader import org.springframework.web.bind.annotation.RequestMapping +import org.springframework.web.bind.annotation.RequestParam import org.springframework.web.bind.annotation.RestController @RestController @RequestMapping(path = ["/export"]) class ExportController( private val handlers: List>, - private val cloudDriverCache: CloudDriverCache + private val cloudDriverCache: CloudDriverCache, + private val exportService: ExportService ) { private val log by lazy { LoggerFactory.getLogger(javaClass) } - /** - * Assist for mapping between Deck and Clouddriver cloudProvider names - * and Keel's plugin namespace. 
- */ - private val cloudProviderOverrides = mapOf( - "aws" to "ec2" - ) + companion object { + val versionSuffix = """@v(\d+)$""".toRegex() + private val versionPrefix = """^v""".toRegex() + val versionComparator: Comparator = NullSafeComparator( + Comparator { s1, s2 -> + DefaultArtifactVersion(s1?.replace(versionPrefix, "")).compareTo( + DefaultArtifactVersion(s2?.replace(versionPrefix, "")) + ) + }, + true // null is considered lower + ) - private val typeToKind = mapOf( - "classicloadbalancer" to "classic-load-balancer", - "classicloadbalancers" to "classic-load-balancer", - "applicationloadbalancer" to "application-load-balancer", - "applicationloadbalancers" to "application-load-balancer", - "securitygroup" to "security-group", - "securitygroups" to "security-group", - "cluster" to "cluster", - "clusters" to "cluster" - ) + /** + * Assist for mapping between Deck and Clouddriver cloudProvider names + * and Keel's plugin namespace. + */ + private val CLOUD_PROVIDER_OVERRIDES = mapOf( + "aws" to "ec2" + ) + + private val TYPE_TO_KIND = mapOf( + "classicloadbalancer" to "classic-load-balancer", + "classicloadbalancers" to "classic-load-balancer", + "applicationloadbalancer" to "application-load-balancer", + "applicationloadbalancers" to "application-load-balancer", + "securitygroup" to "security-group", + "securitygroups" to "security-group", + "cluster" to "cluster", + "clusters" to "cluster" + ) + + const val DEFAULT_PIPELINE_EXPORT_MAX_DAYS: Long = 180 + } /** * This route is location-less; given a resource name that can be monikered, @@ -107,9 +133,32 @@ class ExportController( } } + @GetMapping( + path = ["/{application}"], + produces = [APPLICATION_JSON_VALUE, APPLICATION_YAML_VALUE] + ) + @PreAuthorize("""@authorizationSupport.hasApplicationPermission('READ', 'APPLICATION', #application)""") + fun get( + @PathVariable("application") application: String, + @RequestParam("maxAgeDays") maxAgeDays: Long? + ): ExportResult { + return runBlocking { + exportService.exportFromPipelines(application, maxAgeDays ?: DEFAULT_PIPELINE_EXPORT_MAX_DAYS) + } + } + + @ExceptionHandler(Exception::class) + fun onException(e: Exception): ResponseEntity { + log.error(e.message, e) + return when (e.cause?.isNotFound ?: e.isNotFound) { + true -> ResponseEntity.status(NOT_FOUND).body(ExportErrorResult(e.message ?: "not found")) + else -> ResponseEntity.status(INTERNAL_SERVER_ERROR).body(ExportErrorResult(e.message ?: "unknown error")) + } + } + fun parseKind(cloudProvider: String, type: String) = type.toLowerCase().let { t1 -> - val group = cloudProviderOverrides[cloudProvider] ?: cloudProvider + val group = CLOUD_PROVIDER_OVERRIDES[cloudProvider] ?: cloudProvider var version: String? 
= null val normalizedType = if (versionSuffix.containsMatchIn(t1)) { version = versionSuffix.find(t1)!!.groups[1]?.value @@ -117,7 +166,7 @@ class ExportController( } else { t1 }.let { t2 -> - typeToKind.getOrDefault(t2, t2) + TYPE_TO_KIND.getOrDefault(t2, t2) } if (version == null) { @@ -149,19 +198,4 @@ class ExportController( kind = kind ) } - - companion object { - val versionSuffix = - """@v(\d+)$""".toRegex() - private val versionPrefix = - """^v""".toRegex() - val versionComparator: Comparator = NullSafeComparator( - Comparator { s1, s2 -> - DefaultArtifactVersion(s1?.replace(versionPrefix, "")).compareTo( - DefaultArtifactVersion(s2?.replace(versionPrefix, "")) - ) - }, - true // null is considered lower - ) - } } diff --git a/keel-web/src/main/resources/schema/schema.graphql b/keel-web/src/main/resources/schema/schema.graphql index 68160a1ae5..778d546884 100644 --- a/keel-web/src/main/resources/schema/schema.graphql +++ b/keel-web/src/main/resources/schema/schema.graphql @@ -3,8 +3,446 @@ scalar JSON type Query @extends { application(appName: String!): MdApplication + md_application(appName: String!): MD_Application } +type MD_Application { + id: String! + name: String! + account: String! + isPaused: Boolean + pausedInfo: MD_PausedInfo + environments: [MD_Environment!]! + notifications: [MD_Notification!] + gitIntegration: MD_GitIntegration + config: MD_Config +} + +type MD_GitIntegration { + id: String! + repository: String + branch: String + isEnabled: Boolean + manifestPath: String + link: String +} + +type MD_Config { + id: ID! + updatedAt: InstantTime + rawConfig: String + processedConfig: String + previewEnvironmentsConfigured: Boolean +} + +type MD_Environment { + id: ID! + name: String! + state: MD_EnvironmentState! + isPreview: Boolean + isDeleting: Boolean + gitMetadata: MD_GitMetadata + basedOn: String +} + +type MD_EnvironmentState { + id: String! + resources: [MD_Resource!] + artifacts: [MD_Artifact!] +} + +type MD_PinnedVersion { + id: String! + name: String! + reference: String! + version: String! + gitMetadata: MD_GitMetadata + buildNumber: String + pinnedAt: InstantTime + pinnedBy: String + comment: String +} + +type MD_PausedInfo { + id: String! + by: String + at: InstantTime + comment: String +} + +type MD_Artifact { + id: String! + environment: String! + name: String! + type: String! + reference: String! + versions(statuses: [MD_ArtifactStatusInEnvironment!], versions: [String!], limit: Int): [MD_ArtifactVersionInEnvironment!] + pinnedVersion: MD_PinnedVersion + latestApprovedVersion: MD_ArtifactVersionInEnvironment + resources: [MD_Resource!] +} + +type MD_ArtifactVersionInEnvironment { + id: String! + version: String! + buildNumber: String + createdAt: InstantTime + deployedAt: InstantTime + gitMetadata: MD_GitMetadata + packageDiff: MD_PackageDiff + environment: String + reference: String! + status: MD_ArtifactStatusInEnvironment + lifecycleSteps: [MD_LifecycleStep!] + constraints: [MD_Constraint!] + verifications: [MD_Action!] + postDeploy: [MD_Action!] + veto: MD_VersionVeto + isCurrent: Boolean +} + +type MD_VersionVeto { + vetoedBy: String + vetoedAt: InstantTime + comment: String +} + +enum MD_LifecycleEventScope { + PRE_DEPLOYMENT +} + +enum MD_LifecycleEventType { + BAKE, + BUILD +} + +enum MD_LifecycleEventStatus { + NOT_STARTED, + RUNNING, + SUCCEEDED, + FAILED, + ABORTED, + UNKNOWN +} + +type MD_LifecycleStep { + scope: MD_LifecycleEventScope + type: MD_LifecycleEventType! + id: String + status: MD_LifecycleEventStatus! 
+ text: String + link: String + startedAt: InstantTime + completedAt: InstantTime + artifactVersion: String +} + +type MD_GitMetadata { + commit: String + author: String + project: String + branch: String + repoName: String + pullRequest: MD_PullRequest + commitInfo: MD_CommitInfo + comparisonLinks: MD_ComparisonLinks +} + +type MD_ComparisonLinks { + toPreviousVersion: String + toCurrentVersion: String +} + +type MD_PullRequest { + number: String + link: String +} + +type MD_CommitInfo { + sha: String + link: String + message: String +} + +type MD_PackageDiff { + added: [MD_PackageAndVersion!] + removed: [MD_PackageAndVersion!] + changed: [MD_PackageAndVersionChange!] +} + +type MD_PackageAndVersion { + package: String! + version: String! +} + +type MD_PackageAndVersionChange { + package: String! + oldVersion: String! + newVersion: String! +} + +enum MD_ResourceActuationStatus { + PROCESSING + UP_TO_DATE + ERROR + WAITING + NOT_MANAGED + DELETING +} + +type MD_ResourceActuationState { + resourceId: String! + status: MD_ResourceActuationStatus! + reason: String + event: String + tasks: [MD_ResourceTask!] +} + +type MD_ResourceTask { + id: String! + name: String! + running: Boolean! + summary: MD_ExecutionSummary +} + +type MD_ExecutionSummary { + status: MD_TaskStatus! + currentStage: MD_StageDetail, + stages: [MD_StageDetail!] + deployTargets: [MD_DeployTarget!] + error: String +} + +type MD_DeployTarget { + cloudProvider: String + location: MD_DeployLocation + status: MD_RolloutTargetStatus +} +enum MD_RolloutTargetStatus { + NOT_STARTED, RUNNING, SUCCEEDED, FAILED +} + +type MD_DeployLocation { + account: String + region: String + sublocations: [String!] +} + +type MD_StageDetail { + id: String + type: String + name: String + startTime: InstantTime + endTime: InstantTime + status: MD_TaskStatus + refId: String + requisiteStageRefIds: [String!] +} + +type MD_Resource { + id: String! + kind: String! + moniker: MD_Moniker + state: MD_ResourceActuationState + artifact: MD_Artifact + displayName: String + location: MD_Location + rawDefinition: String +} + +type MD_Moniker { + app: String + stack: String + detail: String +} + +type MD_Location { + account: String + regions: [String!] +} + +enum MD_ConstraintStatus { + BLOCKED + PENDING + PASS + FAIL + FORCE_PASS +} + +enum MD_ArtifactStatusInEnvironment { + PENDING, + APPROVED, + DEPLOYING, + CURRENT, + PREVIOUS + VETOED, + SKIPPED +} + +type MD_Constraint { + type: String! + status: MD_ConstraintStatus! + startedAt: InstantTime + judgedAt: InstantTime + judgedBy: String + comment: String + attributes: JSON +} + +enum MD_ActionStatus { + NOT_EVALUATED + PENDING + PASS + FAIL + FORCE_PASS +} + +enum MD_ActionType { + VERIFICATION + POST_DEPLOY +} + +type MD_Action { + id: String! + actionId: String! + type: String! # Deprecated + status: MD_ActionStatus! + startedAt: InstantTime + completedAt: InstantTime + link: String + actionType: MD_ActionType! 
+} + +type Mutation @extends { + md_updateConstraintStatus(payload: MD_ConstraintStatusPayload!): Boolean + md_restartConstraintEvaluation(payload: MD_RestartConstraintEvaluationPayload!): Boolean + md_toggleManagement(application: ID!, isPaused: Boolean!, comment: String): Boolean + md_pinArtifactVersion(payload: MD_ArtifactVersionActionPayload!): Boolean + md_markArtifactVersionAsBad(payload: MD_ArtifactVersionActionPayload!): Boolean + md_unpinArtifactVersion(payload: MD_UnpinArtifactVersionPayload!): Boolean + md_markArtifactVersionAsGood(payload: MD_MarkArtifactVersionAsGoodPayload!): Boolean + md_retryArtifactVersionAction(payload: MD_RetryArtifactActionPayload): MD_Action + md_dismissNotification(payload: MD_DismissNotificationPayload!): Boolean + md_updateGitIntegration(payload: MD_UpdateGitIntegrationPayload): MD_GitIntegration + md_toggleResourceManagement(payload: MD_ToggleResourceManagementPayload): Boolean + md_importDeliveryConfig(application: String!): Boolean + + #### Deprecated + + updateConstraintStatus(payload: MdConstraintStatusPayload!): Boolean + restartConstraintEvaluation(payload: MdRestartConstraintEvaluationPayload!): Boolean + toggleManagement(application: ID!, isPaused: Boolean!, comment: String): Boolean + pinArtifactVersion(payload: MdArtifactVersionActionPayload!): Boolean + markArtifactVersionAsBad(payload: MdArtifactVersionActionPayload!): Boolean + unpinArtifactVersion(payload: MdUnpinArtifactVersionPayload!): Boolean + markArtifactVersionAsGood(payload: MdMarkArtifactVersionAsGoodPayload!): Boolean + retryArtifactVersionAction(payload: MdRetryArtifactActionPayload): MdAction + dismissNotification(payload: MdDismissNotificationPayload!): Boolean + updateGitIntegration(payload: MdUpdateGitIntegrationPayload): MdGitIntegration + toggleResourceManagement(payload: MdToggleResourceManagementPayload): Boolean + importDeliveryConfig(application: String!): Boolean +} + +input MD_RestartConstraintEvaluationPayload { + application: String! + environment: String! + type: String! + reference: String! + version: String! +} + + +input MD_ToggleResourceManagementPayload { + id: ID! + isPaused: Boolean! +} + +input MD_UpdateGitIntegrationPayload { + application: String! + isEnabled: Boolean + manifestPath: String +} + +input MD_RetryArtifactActionPayload { + application: String! + environment: String! + reference: String! + version: String! + actionId: String! + actionType: MD_ActionType! +} + +input MD_ConstraintStatusPayload { + application: String! + environment: String! + type: String! + version: String! + reference: String! + status: MD_ConstraintStatus! +} + +input MD_ArtifactVersionActionPayload { + application: String! + environment: String! + reference: String! + comment: String! + version: String! +} + +input MD_MarkArtifactVersionAsGoodPayload { + application: String! + environment: String! + reference: String! + version: String! +} + +input MD_UnpinArtifactVersionPayload { + application: String! + environment: String! + reference: String! +} + +type MD_Notification { + id: String! + level: MD_EventLevel! + message: String! 
+ triggeredAt: InstantTime + triggeredBy: String + environment: String + link: String + isActive: Boolean + dismissedAt: InstantTime + dismissedBy: String +} + +enum MD_EventLevel { + SUCCESS, INFO, WARNING, ERROR +} + +enum MD_TaskStatus { + NOT_STARTED, + RUNNING, + PAUSED, + SUSPENDED, + SUCCEEDED, + FAILED_CONTINUE, + TERMINAL, + CANCELED, + REDIRECT, + STOPPED, + BUFFERED, + SKIPPED +} + +input MD_DismissNotificationPayload { + application: String! + id: String! +} + + +########### Deprecated + + type MdApplication { id: String! name: String! @@ -31,6 +469,7 @@ type MdConfig { updatedAt: InstantTime rawConfig: String processedConfig: String + previewEnvironmentsConfigured: Boolean } type MdEnvironment { @@ -76,6 +515,8 @@ type MdArtifact { reference: String! versions(statuses: [MdArtifactStatusInEnvironment!], versions: [String!], limit: Int): [MdArtifactVersionInEnvironment!] pinnedVersion: MdPinnedVersion + latestApprovedVersion: MdArtifactVersionInEnvironment + resources: [MdResource!] } type MdArtifactVersionInEnvironment { @@ -84,7 +525,6 @@ type MdArtifactVersionInEnvironment { buildNumber: String createdAt: InstantTime deployedAt: InstantTime - resources: [MdResource!] gitMetadata: MdGitMetadata packageDiff: MdPackageDiff environment: String @@ -188,6 +628,7 @@ enum MdResourceActuationStatus { } type MdResourceActuationState { + resourceId: String! status: MdResourceActuationStatus! reason: String event: String @@ -197,15 +638,16 @@ type MdResourceActuationState { type MdResourceTask { id: String! name: String! + running: Boolean! summary: MdExecutionSummary } type MdExecutionSummary { + id: ID! status: MdTaskStatus! currentStage: MdStageDetail, stages: [MdStageDetail!] deployTargets: [MdDeployTarget!] - completedDeployTargets: [MdDeployTarget!] error: String } @@ -309,20 +751,15 @@ type MdAction { actionType: MdActionType! } -type Mutation @extends { - updateConstraintStatus(payload: MdConstraintStatusPayload!): Boolean - toggleManagement(application: ID!, isPaused: Boolean!, comment: String): Boolean - pinArtifactVersion(payload: MdArtifactVersionActionPayload!): Boolean - markArtifactVersionAsBad(payload: MdArtifactVersionActionPayload!): Boolean - unpinArtifactVersion(payload: MdUnpinArtifactVersionPayload!): Boolean - markArtifactVersionAsGood(payload: MdMarkArtifactVersionAsGoodPayload!): Boolean - retryArtifactVersionAction(payload: MdRetryArtifactActionPayload): MdAction - dismissNotification(payload: MdDismissNotificationPayload!): Boolean - updateGitIntegration(payload: MdUpdateGitIntegrationPayload): MdGitIntegration - toggleResourceManagement(payload: MdToggleResourceManagementPayload): Boolean - importDeliveryConfig(application: String!): Boolean +input MdRestartConstraintEvaluationPayload { + application: String! + environment: String! + type: String! + reference: String! + version: String! } + input MdToggleResourceManagementPayload { id: ID! isPaused: Boolean! 
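For orientation, a client request against the new md_-prefixed schema above might look roughly like the following sketch. The operation names, the field selection, and the comment string are illustrative assumptions that mirror the shape of the basicQuery.graphql test query added further below; they are not part of the schema change itself.

# Illustrative sketch only: reads resource actuation state and tasks via the new md_application query
query fetchApplicationState($appName: String!) {
  md_application(appName: $appName) {
    name
    environments {
      name
      state {
        resources {
          id
          kind
          state {
            status
            tasks { id name running }
          }
        }
      }
    }
  }
}

# Illustrative sketch only: the new mutation name that coexists with the deprecated toggleManagement
mutation pauseApplication($application: ID!) {
  md_toggleManagement(application: $application, isPaused: true, comment: "paused for investigation")
}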
diff --git a/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/services/AdminServiceTests.kt b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/admin/AdminServiceTests.kt similarity index 89% rename from keel-web/src/test/kotlin/com/netflix/spinnaker/keel/services/AdminServiceTests.kt rename to keel-web/src/test/kotlin/com/netflix/spinnaker/keel/admin/AdminServiceTests.kt index de417dd8cb..d763299588 100644 --- a/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/services/AdminServiceTests.kt +++ b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/admin/AdminServiceTests.kt @@ -1,8 +1,8 @@ -package com.netflix.spinnaker.keel.services +package com.netflix.spinnaker.keel.admin import com.netflix.spinnaker.keel.api.DeliveryConfig import com.netflix.spinnaker.keel.api.Environment -import com.netflix.spinnaker.keel.api.actuation.ExecutionSummaryService +import com.netflix.spinnaker.keel.actuation.ExecutionSummaryService import com.netflix.spinnaker.keel.api.artifacts.DeliveryArtifact import com.netflix.spinnaker.keel.api.artifacts.PublishedArtifact import com.netflix.spinnaker.keel.api.plugins.ArtifactSupplier @@ -14,6 +14,7 @@ import com.netflix.spinnaker.keel.core.api.TimeWindowConstraint import com.netflix.spinnaker.keel.front50.Front50Cache import com.netflix.spinnaker.keel.front50.Front50Service import com.netflix.spinnaker.keel.front50.model.Application +import com.netflix.spinnaker.keel.front50.model.GenericStage import com.netflix.spinnaker.keel.front50.model.ManagedDeliveryConfig import com.netflix.spinnaker.keel.front50.model.Pipeline import com.netflix.spinnaker.keel.front50.model.Stage @@ -60,8 +61,10 @@ class AdminServiceTests : JUnit5Minutests { ) ) + val artifactReference = "myartifact" + val artifact = mockk { - every { reference } returns "myartifact" + every { reference } returns artifactReference } val deliveryConfig = DeliveryConfig( @@ -86,7 +89,7 @@ class AdminServiceTests : JUnit5Minutests { application = front50Application.name, disabled = false, triggers = listOf(Trigger(type = "trigger", enabled = true, application = front50Application.name)), - _stages = listOf(Stage(type = "importDeliveryConfig", name = "Import config", refId = "1")) + _stages = listOf(GenericStage(type = "importDeliveryConfig", name = "Import config", refId = "1")) ) val executionSummaryService: ExecutionSummaryService = mockk() @@ -126,20 +129,22 @@ class AdminServiceTests : JUnit5Minutests { } context("forcing environment constraint reevaluation") { + val version = "v0" test("clears state only for stateful constraints") { - subject.forceConstraintReevaluation(application, environment.name) - verify(exactly = 1) { repository.deleteConstraintState(deliveryConfig.name, environment.name, "manual-judgement") } - verify(exactly = 1) { repository.deleteConstraintState(deliveryConfig.name, environment.name, "pipeline") } - verify(exactly = 1) { repository.deleteConstraintState(deliveryConfig.name, environment.name, "allowed-times") } + subject.forceConstraintReevaluation(application, environment.name, artifactReference, version) + + verify(exactly = 1) { repository.deleteConstraintState(deliveryConfig.name, environment.name,artifactReference, version, "manual-judgement") } + verify(exactly = 1) { repository.deleteConstraintState(deliveryConfig.name, environment.name, artifactReference, version, "pipeline") } + verify(exactly = 1) { repository.deleteConstraintState(deliveryConfig.name, environment.name, artifactReference, version, "allowed-times") } } test("clears a specific constraint type when 
asked to") { - subject.forceConstraintReevaluation(application, environment.name, "pipeline") + subject.forceConstraintReevaluation(application, environment.name, artifact.reference, version, "pipeline") - verify(exactly = 0) { repository.deleteConstraintState(deliveryConfig.name, environment.name, "manual-judgement") } - verify(exactly = 1) { repository.deleteConstraintState(deliveryConfig.name, environment.name, "pipeline") } - verify(exactly = 0) { repository.deleteConstraintState(deliveryConfig.name, environment.name, "allowed-times") } + verify(exactly = 0) { repository.deleteConstraintState(deliveryConfig.name, environment.name, artifactReference, version, "manual-judgement") } + verify(exactly = 1) { repository.deleteConstraintState(deliveryConfig.name, environment.name, artifactReference, version, "pipeline") } + verify(exactly = 0) { repository.deleteConstraintState(deliveryConfig.name, environment.name, artifactReference, version, "allowed-times") } } } @@ -212,7 +217,7 @@ class AdminServiceTests : JUnit5Minutests { before { every { front50Cache.pipelinesByApplication(front50Application.name) } returns listOf( importPipeline.copy( - _stages = listOf(Stage(type = "coolStage", name = "cool", refId = "1")) + _stages = listOf(GenericStage(type = "coolStage", name = "cool", refId = "1")) ) ) } diff --git a/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/dgs/BasicQueryTests.kt b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/dgs/BasicQueryTests.kt new file mode 100644 index 0000000000..e1eb9867f6 --- /dev/null +++ b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/dgs/BasicQueryTests.kt @@ -0,0 +1,199 @@ +package com.netflix.spinnaker.keel.dgs + +import com.fasterxml.jackson.dataformat.yaml.YAMLMapper +import com.netflix.graphql.dgs.DgsQueryExecutor +import com.netflix.graphql.dgs.autoconfig.DgsAutoConfiguration +import com.netflix.spinnaker.keel.actuation.ExecutionSummaryService +import com.netflix.spinnaker.keel.api.Moniker +import com.netflix.spinnaker.keel.api.SubnetAwareLocations +import com.netflix.spinnaker.keel.api.SubnetAwareRegionSpec +import com.netflix.spinnaker.keel.api.artifacts.VirtualMachineOptions +import com.netflix.spinnaker.keel.api.ec2.ClusterSpec +import com.netflix.spinnaker.keel.api.ec2.EC2_CLUSTER_V1_1 +import com.netflix.spinnaker.keel.artifacts.ArtifactVersionLinks +import com.netflix.spinnaker.keel.artifacts.DebianArtifact +import com.netflix.spinnaker.keel.auth.AuthorizationSupport +import com.netflix.spinnaker.keel.core.api.PromotionStatus +import com.netflix.spinnaker.keel.core.api.PublishedArtifactInEnvironment +import com.netflix.spinnaker.keel.front50.Front50Cache +import com.netflix.spinnaker.keel.front50.Front50Service +import com.netflix.spinnaker.keel.igor.DeliveryConfigImporter +import com.netflix.spinnaker.keel.lifecycle.LifecycleEventRepository +import com.netflix.spinnaker.keel.pause.ActuationPauser +import com.netflix.spinnaker.keel.persistence.DeliveryConfigRepository +import com.netflix.spinnaker.keel.persistence.DismissibleNotificationRepository +import com.netflix.spinnaker.keel.persistence.EnvironmentDeletionRepository +import com.netflix.spinnaker.keel.persistence.KeelRepository +import com.netflix.spinnaker.keel.persistence.TaskTrackingRepository +import com.netflix.spinnaker.keel.scm.ScmUtils +import com.netflix.spinnaker.keel.services.ApplicationService +import com.netflix.spinnaker.keel.services.ResourceStatusService +import com.netflix.spinnaker.keel.test.deliveryConfig +import com.netflix.spinnaker.keel.test.resource 
+import com.netflix.spinnaker.keel.upsert.DeliveryConfigUpserter +import com.netflix.spinnaker.keel.veto.unhappy.UnhappyVeto +import com.ninjasquad.springmockk.MockkBean +import io.mockk.every +import org.junit.jupiter.api.BeforeEach +import org.junit.jupiter.api.Test +import org.springframework.beans.factory.annotation.Autowired +import org.springframework.boot.test.context.SpringBootTest +import strikt.api.expectCatching +import strikt.assertions.isEqualTo +import strikt.assertions.isSuccess + +@SpringBootTest( + classes = [DgsAutoConfiguration::class, DgsTestConfig::class], +) +class BasicQueryTests { + + @Autowired + lateinit var dgsQueryExecutor: DgsQueryExecutor + + @MockkBean + lateinit var authorizationSupport: AuthorizationSupport + + @MockkBean + lateinit var keelRepository: KeelRepository + + @MockkBean + lateinit var actuationPauser: ActuationPauser + + @MockkBean + lateinit var artifactVersionLinks: ArtifactVersionLinks + + @MockkBean + lateinit var notificationRepository: DismissibleNotificationRepository + + @MockkBean + lateinit var scmUtils: ScmUtils + + @MockkBean + lateinit var executionSummaryService: ExecutionSummaryService + + @MockkBean + lateinit var yamlMapper: YAMLMapper + + @MockkBean + lateinit var deliveryConfigImporter: DeliveryConfigImporter + + @MockkBean + lateinit var environmentDeletionRepository: EnvironmentDeletionRepository + + @MockkBean + lateinit var front50Service: Front50Service + + @MockkBean + lateinit var front50Cache: Front50Cache + + @MockkBean + lateinit var deliveryConfigUpserter: DeliveryConfigUpserter + + @MockkBean + lateinit var lifecycleEventRepository: LifecycleEventRepository + + @MockkBean + lateinit var applicationService: ApplicationService + + @MockkBean + lateinit var unhappyVeto: UnhappyVeto + + @MockkBean + lateinit var deliveryConfigRepository: DeliveryConfigRepository + + @MockkBean + lateinit var resourceStatusService: ResourceStatusService + + @MockkBean + lateinit var taskTrackingRepository: TaskTrackingRepository + + private val artifact = DebianArtifact( + name = "fnord", + deliveryConfigName = "fnord", + vmOptions = VirtualMachineOptions(baseOs = "bionic", regions = setOf("us-west-2")) + ) + + private val resource = resource( + kind = EC2_CLUSTER_V1_1.kind, + spec = ClusterSpec( + moniker = Moniker("fnord"), + artifactReference = "fnord", + locations = SubnetAwareLocations( + account = "test", + vpc = "vpc0", + subnet = "internal (vpc0)", + regions = setOf( + SubnetAwareRegionSpec( + name = "us-east-1", + availabilityZones = setOf() + ) + ) + ) + ) + ) + private val deliveryConfig = deliveryConfig(artifact = artifact, resources = setOf(resource)) + + @BeforeEach + fun setup() { + every { + keelRepository.getDeliveryConfigForApplication(any()) + } returns deliveryConfig + + every { + keelRepository.getAllVersionsForEnvironment(artifact, deliveryConfig, "test") + } returns listOf( + PublishedArtifactInEnvironment( + artifact.toArtifactVersion(version = "v1"), + status = PromotionStatus.CURRENT, + environmentName = "test" + ) + ) + } + + fun getQuery(path: String) = javaClass.getResource(path).readText().trimIndent() + + + @Test + fun basicTest() { + expectCatching { + dgsQueryExecutor.executeAndExtractJsonPath( + getQuery("/dgs/basicQuery.graphql"), + "data.md_application.environments[0].name", + mapOf("appName" to "fnord") + ) + }.isSuccess().isEqualTo("test") + } + + @Test + fun BasicTestQueryDeprecated() { + expectCatching { + dgsQueryExecutor.executeAndExtractJsonPath( + getQuery("/dgs/deprecatedBasicQuery.graphql"), 
+ "data.application.environments[0].name", + mapOf("appName" to "fnord") + ) + }.isSuccess().isEqualTo("test") + } + + @Test + fun artifactVersionStatus() { + expectCatching { + dgsQueryExecutor.executeAndExtractJsonPath( + getQuery("/dgs/basicQuery.graphql"), + "data.md_application.environments[0].state.artifacts[0].versions[0].status", + mapOf("appName" to "fnord") + ) + }.isSuccess().isEqualTo("CURRENT") + } + + @Test + fun artifactVersionStatusDeprecated() { + expectCatching { + dgsQueryExecutor.executeAndExtractJsonPath( + getQuery("/dgs/deprecatedBasicQuery.graphql"), + "data.application.environments[0].state.artifacts[0].versions[0].status", + mapOf("appName" to "fnord") + ) + }.isSuccess().isEqualTo("CURRENT") + } +} diff --git a/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/dgs/DgsTestConfig.kt b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/dgs/DgsTestConfig.kt index 2183df41a3..ccc18440ed 100644 --- a/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/dgs/DgsTestConfig.kt +++ b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/dgs/DgsTestConfig.kt @@ -1,8 +1,24 @@ package com.netflix.spinnaker.keel.dgs +import com.netflix.spinnaker.keel.auth.AuthorizationSupport +import com.netflix.spinnaker.keel.bakery.BakeryMetadataService +import com.netflix.spinnaker.keel.clouddriver.CloudDriverService +import com.netflix.spinnaker.keel.persistence.KeelRepository +import com.ninjasquad.springmockk.MockkBean +import io.mockk.mockk +import org.springframework.context.annotation.Bean import org.springframework.context.annotation.ComponentScan import org.springframework.context.annotation.Configuration @Configuration @ComponentScan(basePackages = ["com.netflix.spinnaker.keel.dgs"]) -class DgsTestConfig +class DgsTestConfig { + + val cloudDriverService: CloudDriverService = mockk(relaxUnitFun = true) + val bakeryMetadataService: BakeryMetadataService = mockk(relaxUnitFun = true) + + @Bean + fun applicationFetcherSupport() = ApplicationFetcherSupport(cloudDriverService, bakeryMetadataService) + +} + diff --git a/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/rest/AdminControllerTests.kt b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/rest/AdminControllerTests.kt index adfda30229..458f4c86c3 100644 --- a/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/rest/AdminControllerTests.kt +++ b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/rest/AdminControllerTests.kt @@ -3,7 +3,7 @@ package com.netflix.spinnaker.keel.rest import com.netflix.spinnaker.keel.core.api.ApplicationSummary import com.netflix.spinnaker.keel.pause.ActuationPauser import com.netflix.spinnaker.keel.persistence.KeelRepository -import com.netflix.spinnaker.keel.services.AdminService +import com.netflix.spinnaker.keel.admin.AdminService import com.ninjasquad.springmockk.MockkBean import dev.minutest.junit.JUnit5Minutests import dev.minutest.rootContext diff --git a/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/rest/ExportControllerTests.kt b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/rest/ExportControllerTests.kt index bd6f0cf9c8..998ba6a4c5 100644 --- a/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/rest/ExportControllerTests.kt +++ b/keel-web/src/test/kotlin/com/netflix/spinnaker/keel/rest/ExportControllerTests.kt @@ -13,7 +13,8 @@ internal class ExportControllerTests : JUnit5Minutests { class Fixture { val subject = ExportController( handlers = listOf(DummyResourceHandlerV1, DummyResourceHandlerV2), - cloudDriverCache = mockk(relaxed = true) + cloudDriverCache = mockk(relaxed = 
true), + exportService = mockk(relaxed = true) ) } diff --git a/keel-web/src/test/resources/dgs/basicQuery.graphql b/keel-web/src/test/resources/dgs/basicQuery.graphql new file mode 100644 index 0000000000..776661f684 --- /dev/null +++ b/keel-web/src/test/resources/dgs/basicQuery.graphql @@ -0,0 +1,18 @@ +query fetchApplication($appName: String!) { + md_application(appName: $appName) { + name + environments { + name + state { + artifacts { + name + type + versions(statuses: [CURRENT]) { + status + version + } + } + } + } + } +} diff --git a/keel-web/src/test/resources/dgs/deprecatedBasicQuery.graphql b/keel-web/src/test/resources/dgs/deprecatedBasicQuery.graphql new file mode 100644 index 0000000000..457341426c --- /dev/null +++ b/keel-web/src/test/resources/dgs/deprecatedBasicQuery.graphql @@ -0,0 +1,18 @@ +query fetchMdApplication($appName: String!) { + application(appName: $appName) { + name + environments { + name + state { + artifacts { + name + type + versions(statuses: [CURRENT]) { + status + version + } + } + } + } + } +} diff --git a/settings.gradle b/settings.gradle index e35c898230..92e1e69953 100644 --- a/settings.gradle +++ b/settings.gradle @@ -42,6 +42,7 @@ include( "keel-lemur", "keel-network", "keel-notifications", + "keel-optics", "keel-orca", "keel-retrofit", "keel-retrofit-test-support",