From d8c250f96da005b21bf407333206db0ba06c641d Mon Sep 17 00:00:00 2001 From: Oleksandr Popov Date: Wed, 31 May 2023 11:19:37 +0300 Subject: [PATCH 001/152] DCA-2021: Check if product is run on proper hardware to be able to execute given concurrency --- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 4 +-- app/util/pre_run/environment_checker.py | 48 +++++++++++++++++++++++++ 5 files changed, 53 insertions(+), 5 deletions(-) diff --git a/app/bitbucket.yml b/app/bitbucket.yml index cd6c0880f..716ae4d50 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -24,7 +24,7 @@ settings: services: - module: shellexec prepare: - - python util/pre_run/environment_checker.py + - python util/pre_run/environment_checker.py bitbucket - python util/pre_run/check_for_updates.py - python util/pre_run/git_client_check.py - python util/data_preparation/bitbucket_prepare_data.py diff --git a/app/confluence.yml b/app/confluence.yml index 8c93c1deb..162670c48 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -40,7 +40,7 @@ settings: services: - module: shellexec prepare: - - python util/pre_run/environment_checker.py + - python util/pre_run/environment_checker.py confluence - python util/pre_run/check_for_updates.py - python util/data_preparation/confluence_prepare_data.py shutdown: diff --git a/app/jira.yml b/app/jira.yml index 9ba30ae3b..469284a36 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -40,7 +40,7 @@ settings: services: - module: shellexec prepare: - - python util/pre_run/environment_checker.py + - python util/pre_run/environment_checker.py jira - python util/pre_run/check_for_updates.py - python util/data_preparation/jira_prepare_data.py shutdown: diff --git a/app/jsm.yml b/app/jsm.yml index 45ea3c745..201f0b470 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -19,7 +19,7 @@ settings: ramp-up: 3m # time to spin all concurrent users total_actions_per_hour_agents: 5000 total_actions_per_hour_customers: 15000 - insight: False # Set True to enable Insight specific tests + insight: True # Set True to enable Insight specific tests WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 @@ -56,7 +56,7 @@ settings: services: - module: shellexec prepare: - - python util/pre_run/environment_checker.py + - python util/pre_run/environment_checker.py jsm - python util/pre_run/check_for_updates.py - python util/data_preparation/jsm_prepare_data.py shutdown: diff --git a/app/util/pre_run/environment_checker.py b/app/util/pre_run/environment_checker.py index 9d0c987aa..9a2326433 100644 --- a/app/util/pre_run/environment_checker.py +++ b/app/util/pre_run/environment_checker.py @@ -1,5 +1,18 @@ +import sys from sys import version_info +from util.analytics.analytics_utils import get_first_elem +from util.analytics.application_info import ApplicationSelector +from util.analytics.analytics import MIN_DEFAULTS +from util.conf import JIRA_SETTINGS, CONFLUENCE_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS + +APPS_SETTINGS = { + "JIRA": {"settings": JIRA_SETTINGS, 'processors': 6}, + "CONFLUENCE": {"settings": CONFLUENCE_SETTINGS, 'processors': 4}, + "BITBUCKET": {"settings": BITBUCKET_SETTINGS, 'processors': 4}, + "JSM": {"settings": JSM_SETTINGS, 'processors': 6} + } + SUPPORTED_PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11"] python_full_version = '.'.join(map(str, version_info[0:3])) @@ -13,3 +26,38 @@ from util.conf import TOOLKIT_VERSION # noqa E402 print("Data Center App Performance Toolkit version: {}".format(TOOLKIT_VERSION)) + + +def 
get_application_info(app_name): + app_name_upper = app_name.upper() + if app_name_upper in APPS_SETTINGS: + app = ApplicationSelector(app_name).application + deployment_type = app.deployment + processors = int(app.processors) + + app_settings = APPS_SETTINGS[app_name_upper]["settings"] + min_defaults = MIN_DEFAULTS.get(app_name.lower()) + + if deployment_type == "terraform": + check_config(processors, app_name_upper, app_settings, min_defaults) + + +def check_config(processors, app_name_upper, app_settings, min_defaults): + is_jsm_or_insight = app_name_upper in ["JSM", "INSIGHT"] + + if ((not is_jsm_or_insight and app_settings.concurrency == min_defaults['concurrency']) or + (is_jsm_or_insight and + app_settings.customers_concurrency == min_defaults['customer_concurrency'] and + app_settings.agents_concurrency == min_defaults['agent_concurrency'])): + if processors < APPS_SETTINGS[app_name_upper]['processors']: + raise SystemExit("You are using enterprise-scale load against a development environment. " + "Please check your instance configurations or decrease the load.") + + +def main(): + app_name = get_first_elem(sys.argv) + get_application_info(app_name) + + +if __name__ == "__main__": + main() From abc0dbb0a9ef7ed487f372c33ad143f00395d4b9 Mon Sep 17 00:00:00 2001 From: Oleksandr Popov Date: Wed, 31 May 2023 11:20:45 +0300 Subject: [PATCH 002/152] Fix of jsm.yml file , set insight to False --- app/jsm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jsm.yml b/app/jsm.yml index 201f0b470..e85fa9ec7 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -19,7 +19,7 @@ settings: ramp-up: 3m # time to spin all concurrent users total_actions_per_hour_agents: 5000 total_actions_per_hour_customers: 15000 - insight: True # Set True to enable Insight specific tests + insight: False # Set True to enable Insight specific tests WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 From 37e82b829cfd5e76d24bc21cdc1110b50fe6467a Mon Sep 17 00:00:00 2001 From: Oleksandr Popov Date: Wed, 31 May 2023 15:50:55 +0300 Subject: [PATCH 003/152] Moved logic to execution_compliance_check.py (renamed check_for_updates.py) --- app/bitbucket.yml | 4 +- app/confluence.yml | 4 +- app/jira.yml | 4 +- app/jsm.yml | 4 +- app/util/pre_run/check_for_updates.py | 21 ------ app/util/pre_run/environment_checker.py | 48 ------------- .../pre_run/execution_compliance_check.py | 72 +++++++++++++++++++ 7 files changed, 80 insertions(+), 77 deletions(-) delete mode 100644 app/util/pre_run/check_for_updates.py create mode 100644 app/util/pre_run/execution_compliance_check.py diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 716ae4d50..c30c2717c 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -24,8 +24,8 @@ settings: services: - module: shellexec prepare: - - python util/pre_run/environment_checker.py bitbucket - - python util/pre_run/check_for_updates.py + - python util/pre_run/environment_checker.py + - python util/pre_run/execution_compliance_check.py bitbucket - python util/pre_run/git_client_check.py - python util/data_preparation/bitbucket_prepare_data.py shutdown: diff --git a/app/confluence.yml b/app/confluence.yml index 162670c48..a2ae1e394 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -40,8 +40,8 @@ settings: services: - module: shellexec prepare: - - python util/pre_run/environment_checker.py confluence - - python util/pre_run/check_for_updates.py + - python util/pre_run/environment_checker.py + - python util/pre_run/execution_compliance_check.py confluence - 
python util/data_preparation/confluence_prepare_data.py shutdown: - python util/post_run/jmeter_post_check.py diff --git a/app/jira.yml b/app/jira.yml index 469284a36..f63729f98 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -40,8 +40,8 @@ settings: services: - module: shellexec prepare: - - python util/pre_run/environment_checker.py jira - - python util/pre_run/check_for_updates.py + - python util/pre_run/environment_checker.py + - python util/pre_run/execution_compliance_check.py jira - python util/data_preparation/jira_prepare_data.py shutdown: - python util/post_run/jmeter_post_check.py diff --git a/app/jsm.yml b/app/jsm.yml index e85fa9ec7..2d41b10b9 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -56,8 +56,8 @@ settings: services: - module: shellexec prepare: - - python util/pre_run/environment_checker.py jsm - - python util/pre_run/check_for_updates.py + - python util/pre_run/environment_checker.py + - python util/pre_run/execution_compliance_check.py jsm - python util/data_preparation/jsm_prepare_data.py shutdown: - python util/post_run/jmeter_post_check.py diff --git a/app/util/pre_run/check_for_updates.py b/app/util/pre_run/check_for_updates.py deleted file mode 100644 index 692a637e5..000000000 --- a/app/util/pre_run/check_for_updates.py +++ /dev/null @@ -1,21 +0,0 @@ -from util.common_util import get_latest_version, get_current_version, get_unsupported_version - -latest_version = get_latest_version() -current_version = get_current_version() -unsupported_version = get_unsupported_version() - -if latest_version is None: - print('Warning: failed to get the latest version') -elif unsupported_version is None: - print('Warning: failed to get the unsupported version') -elif current_version <= unsupported_version: - raise SystemExit(f"DCAPT version {current_version} is no longer supported. " - f"Consider an upgrade to the latest version: {latest_version}") -elif current_version < latest_version: - print(f"Warning: DCAPT version {current_version} is outdated. 
" - f"Consider upgrade to the latest version: {latest_version}.") -elif current_version == latest_version: - print(f"Info: DCAPT version {current_version} is the latest.") -else: - print(f"Info: DCAPT version {current_version} " - f"is ahead of the latest production version: {latest_version}.") diff --git a/app/util/pre_run/environment_checker.py b/app/util/pre_run/environment_checker.py index 9a2326433..9d0c987aa 100644 --- a/app/util/pre_run/environment_checker.py +++ b/app/util/pre_run/environment_checker.py @@ -1,18 +1,5 @@ -import sys from sys import version_info -from util.analytics.analytics_utils import get_first_elem -from util.analytics.application_info import ApplicationSelector -from util.analytics.analytics import MIN_DEFAULTS -from util.conf import JIRA_SETTINGS, CONFLUENCE_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS - -APPS_SETTINGS = { - "JIRA": {"settings": JIRA_SETTINGS, 'processors': 6}, - "CONFLUENCE": {"settings": CONFLUENCE_SETTINGS, 'processors': 4}, - "BITBUCKET": {"settings": BITBUCKET_SETTINGS, 'processors': 4}, - "JSM": {"settings": JSM_SETTINGS, 'processors': 6} - } - SUPPORTED_PYTHON_VERSIONS = ["3.8", "3.9", "3.10", "3.11"] python_full_version = '.'.join(map(str, version_info[0:3])) @@ -26,38 +13,3 @@ from util.conf import TOOLKIT_VERSION # noqa E402 print("Data Center App Performance Toolkit version: {}".format(TOOLKIT_VERSION)) - - -def get_application_info(app_name): - app_name_upper = app_name.upper() - if app_name_upper in APPS_SETTINGS: - app = ApplicationSelector(app_name).application - deployment_type = app.deployment - processors = int(app.processors) - - app_settings = APPS_SETTINGS[app_name_upper]["settings"] - min_defaults = MIN_DEFAULTS.get(app_name.lower()) - - if deployment_type == "terraform": - check_config(processors, app_name_upper, app_settings, min_defaults) - - -def check_config(processors, app_name_upper, app_settings, min_defaults): - is_jsm_or_insight = app_name_upper in ["JSM", "INSIGHT"] - - if ((not is_jsm_or_insight and app_settings.concurrency == min_defaults['concurrency']) or - (is_jsm_or_insight and - app_settings.customers_concurrency == min_defaults['customer_concurrency'] and - app_settings.agents_concurrency == min_defaults['agent_concurrency'])): - if processors < APPS_SETTINGS[app_name_upper]['processors']: - raise SystemExit("You are using enterprise-scale load against a development environment. 
" - "Please check your instance configurations or decrease the load.") - - -def main(): - app_name = get_first_elem(sys.argv) - get_application_info(app_name) - - -if __name__ == "__main__": - main() diff --git a/app/util/pre_run/execution_compliance_check.py b/app/util/pre_run/execution_compliance_check.py new file mode 100644 index 000000000..89b7b1560 --- /dev/null +++ b/app/util/pre_run/execution_compliance_check.py @@ -0,0 +1,72 @@ +import sys + +from util.common_util import get_latest_version, get_current_version, get_unsupported_version +from util.analytics.analytics_utils import get_first_elem +from util.analytics.application_info import ApplicationSelector +from util.analytics.analytics import MIN_DEFAULTS +from util.conf import JIRA_SETTINGS, CONFLUENCE_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS + +APPS_SETTINGS = { + "JIRA": {"settings": JIRA_SETTINGS, 'processors': 6}, + "CONFLUENCE": {"settings": CONFLUENCE_SETTINGS, 'processors': 4}, + "BITBUCKET": {"settings": BITBUCKET_SETTINGS, 'processors': 4}, + "JSM": {"settings": JSM_SETTINGS, 'processors': 6} +} + + +def check_dcapt_version(): + latest_version = get_latest_version() + current_version = get_current_version() + unsupported_version = get_unsupported_version() + + if latest_version is None: + print('Warning: failed to get the latest version') + elif unsupported_version is None: + print('Warning: failed to get the unsupported version') + elif current_version <= unsupported_version: + raise SystemExit(f"DCAPT version {current_version} is no longer supported. " + f"Consider an upgrade to the latest version: {latest_version}") + elif current_version < latest_version: + print(f"Warning: DCAPT version {current_version} is outdated. " + f"Consider upgrade to the latest version: {latest_version}.") + elif current_version == latest_version: + print(f"Info: DCAPT version {current_version} is the latest.") + else: + print(f"Info: DCAPT version {current_version} " + f"is ahead of the latest production version: {latest_version}.") + + +def validate_application_config(processors, app_name_upper, app_settings, min_defaults): + is_jsm_or_insight = app_name_upper in ["JSM", "INSIGHT"] + + if ((not is_jsm_or_insight and app_settings.concurrency == min_defaults['concurrency']) or + (is_jsm_or_insight and + app_settings.customers_concurrency == min_defaults['customer_concurrency'] and + app_settings.agents_concurrency == min_defaults['agent_concurrency'])): + if processors < APPS_SETTINGS[app_name_upper]['processors']: + raise SystemExit("You are using enterprise-scale load against a development environment. 
" + "Please check your instance configurations or decrease the load.") + + +def analyze_application_configuration(app_name): + app_name_upper = app_name.upper() + if app_name_upper in APPS_SETTINGS: + app = ApplicationSelector(app_name).application + deployment_type = app.deployment + processors = int(app.processors) + + app_settings = APPS_SETTINGS[app_name_upper]["settings"] + min_defaults = MIN_DEFAULTS.get(app_name.lower()) + + if deployment_type == "terraform": + validate_application_config(processors, app_name_upper, app_settings, min_defaults) + + +def main(): + check_dcapt_version() + app_name = get_first_elem(sys.argv) + analyze_application_configuration(app_name) + + +if __name__ == "__main__": + main() From c8ebe77735bbaebe8d238f2d25e1cdf243f3ab4a Mon Sep 17 00:00:00 2001 From: Oleksandr Popov Date: Tue, 6 Jun 2023 11:13:06 +0300 Subject: [PATCH 004/152] Fixes and improvements after code review --- app/bamboo.yml | 1 + app/crowd.yml | 2 +- .../pre_run/execution_compliance_check.py | 49 +++++++++++++------ 3 files changed, 35 insertions(+), 17 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index c239e1557..c9d006347 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -40,6 +40,7 @@ services: - module: shellexec prepare: - python util/pre_run/environment_checker.py + - python util/pre_run/execution_compliance_check.py bamboo - python util/data_preparation/bamboo_prepare_data.py shutdown: - python util/post_run/jmeter_post_check.py diff --git a/app/crowd.yml b/app/crowd.yml index 9ab60e7cc..b1b356dc9 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -37,7 +37,7 @@ services: - module: shellexec prepare: - python util/pre_run/environment_checker.py - - python util/pre_run/check_for_updates.py + - python util/pre_run/execution_compliance_check.py crowd - python util/data_preparation/crowd_prepare_data.py - python util/data_preparation/crowd_sync_check.py shutdown: diff --git a/app/util/pre_run/execution_compliance_check.py b/app/util/pre_run/execution_compliance_check.py index 89b7b1560..774d12e09 100644 --- a/app/util/pre_run/execution_compliance_check.py +++ b/app/util/pre_run/execution_compliance_check.py @@ -4,13 +4,16 @@ from util.analytics.analytics_utils import get_first_elem from util.analytics.application_info import ApplicationSelector from util.analytics.analytics import MIN_DEFAULTS -from util.conf import JIRA_SETTINGS, CONFLUENCE_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS +from util.conf import JIRA_SETTINGS, CONFLUENCE_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS, BAMBOO_SETTINGS, \ + CROWD_SETTINGS APPS_SETTINGS = { - "JIRA": {"settings": JIRA_SETTINGS, 'processors': 6}, - "CONFLUENCE": {"settings": CONFLUENCE_SETTINGS, 'processors': 4}, - "BITBUCKET": {"settings": BITBUCKET_SETTINGS, 'processors': 4}, - "JSM": {"settings": JSM_SETTINGS, 'processors': 6} + "JIRA": {"settings": JIRA_SETTINGS}, + "CONFLUENCE": {"settings": CONFLUENCE_SETTINGS}, + "BITBUCKET": {"settings": BITBUCKET_SETTINGS}, + "JSM": {"settings": JSM_SETTINGS}, + "BAMBOO": {"settings": BAMBOO_SETTINGS}, + "CROWD": {"settings": CROWD_SETTINGS}, } @@ -37,35 +40,49 @@ def check_dcapt_version(): def validate_application_config(processors, app_name_upper, app_settings, min_defaults): - is_jsm_or_insight = app_name_upper in ["JSM", "INSIGHT"] + is_jsm = app_name_upper == "JSM" - if ((not is_jsm_or_insight and app_settings.concurrency == min_defaults['concurrency']) or - (is_jsm_or_insight and + if ((not is_jsm and app_settings.concurrency == min_defaults['concurrency']) or + (is_jsm and 
app_settings.customers_concurrency == min_defaults['customer_concurrency'] and app_settings.agents_concurrency == min_defaults['agent_concurrency'])): - if processors < APPS_SETTINGS[app_name_upper]['processors']: - raise SystemExit("You are using enterprise-scale load against a development environment. " - "Please check your instance configurations or decrease the load.") + if processors < 4: + concurrency = app_settings.concurrency if not is_jsm else (app_settings.customers_concurrency, + app_settings.agents_concurrency) + raise SystemExit( + f"You are trying to run an enterprise-scale load test with {concurrency} against the " + f"instance with a weaker configuration than recommended. " + f"Kindly consider decreasing the load in your {app_name_upper.lower()}.yml file or " + f"using or re-installing the appropriate environment.") def analyze_application_configuration(app_name): app_name_upper = app_name.upper() if app_name_upper in APPS_SETTINGS: app = ApplicationSelector(app_name).application - deployment_type = app.deployment - processors = int(app.processors) + processors = app.processors + + try: + processors = int(processors) + except ValueError: + print(f"Warning: You are using a server instance for running enterprise-scale load tests.") + return app_settings = APPS_SETTINGS[app_name_upper]["settings"] min_defaults = MIN_DEFAULTS.get(app_name.lower()) - if deployment_type == "terraform": - validate_application_config(processors, app_name_upper, app_settings, min_defaults) + validate_application_config(processors, app_name_upper, app_settings, min_defaults) def main(): check_dcapt_version() + if len(sys.argv) < 2: + raise SystemExit("Error: Please provide the application type as an argument.") app_name = get_first_elem(sys.argv) - analyze_application_configuration(app_name) + + # TODO: Add a check for CROWD configuration once the feature with processors is implemented in the product + if app_name.upper() != "CROWD": + analyze_application_configuration(app_name) if __name__ == "__main__": From 006f96da205c44e4c92a08f18cddc592d43b6aad Mon Sep 17 00:00:00 2001 From: Oleksandr Popov Date: Wed, 7 Jun 2023 10:25:10 +0300 Subject: [PATCH 005/152] typo fix --- app/util/pre_run/execution_compliance_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/pre_run/execution_compliance_check.py b/app/util/pre_run/execution_compliance_check.py index 774d12e09..91add6e27 100644 --- a/app/util/pre_run/execution_compliance_check.py +++ b/app/util/pre_run/execution_compliance_check.py @@ -65,7 +65,7 @@ def analyze_application_configuration(app_name): try: processors = int(processors) except ValueError: - print(f"Warning: You are using a server instance for running enterprise-scale load tests.") + print("Warning: You are using a server instance for running enterprise-scale load tests.") return app_settings = APPS_SETTINGS[app_name_upper]["settings"] From 21d022dbf83028f5c377e432ae010f5d315b84a6 Mon Sep 17 00:00:00 2001 From: Oleksandr Popov Date: Thu, 8 Jun 2023 09:36:42 +0300 Subject: [PATCH 006/152] small fix --- app/util/pre_run/execution_compliance_check.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app/util/pre_run/execution_compliance_check.py b/app/util/pre_run/execution_compliance_check.py index 91add6e27..a9385f036 100644 --- a/app/util/pre_run/execution_compliance_check.py +++ b/app/util/pre_run/execution_compliance_check.py @@ -1,7 +1,6 @@ import sys from util.common_util import get_latest_version, get_current_version, 
get_unsupported_version -from util.analytics.analytics_utils import get_first_elem from util.analytics.application_info import ApplicationSelector from util.analytics.analytics import MIN_DEFAULTS from util.conf import JIRA_SETTINGS, CONFLUENCE_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS, BAMBOO_SETTINGS, \ @@ -76,9 +75,10 @@ def analyze_application_configuration(app_name): def main(): check_dcapt_version() - if len(sys.argv) < 2: - raise SystemExit("Error: Please provide the application type as an argument.") - app_name = get_first_elem(sys.argv) + try: + app_name = sys.argv[1].lower() + except IndexError: + raise SystemExit("ERROR: execution_compliance_check.py expects application name as argument") # TODO: Add a check for CROWD configuration once the feature with processors is implemented in the product if app_name.upper() != "CROWD": From ae974cb0255491dda4527e5b0656a4051fdbb81d Mon Sep 17 00:00:00 2001 From: Oleksandr Popov Date: Wed, 14 Jun 2023 15:11:34 +0300 Subject: [PATCH 007/152] Small typo fix --- app/util/pre_run/execution_compliance_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/pre_run/execution_compliance_check.py b/app/util/pre_run/execution_compliance_check.py index a9385f036..5aafaa020 100644 --- a/app/util/pre_run/execution_compliance_check.py +++ b/app/util/pre_run/execution_compliance_check.py @@ -52,7 +52,7 @@ def validate_application_config(processors, app_name_upper, app_settings, min_de f"You are trying to run an enterprise-scale load test with {concurrency} against the " f"instance with a weaker configuration than recommended. " f"Kindly consider decreasing the load in your {app_name_upper.lower()}.yml file or " - f"using or re-installing the appropriate environment.") + f"using/re-installing the appropriate environment.") def analyze_application_configuration(app_name): From 86383ff490dd116444934e64e86743b698c05fb8 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Wed, 11 Oct 2023 17:48:11 +0200 Subject: [PATCH 008/152] Bump toolkit version to 8.0.0 --- app/util/conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/util/conf.py b/app/util/conf.py index 446cd862b..1d49a049c 100644 --- a/app/util/conf.py +++ b/app/util/conf.py @@ -2,8 +2,8 @@ from util.project_paths import JIRA_YML, CONFLUENCE_YML, BITBUCKET_YML, JSM_YML, CROWD_YML, BAMBOO_YML -TOOLKIT_VERSION = '7.6.0' -UNSUPPORTED_VERSION = '6.3.0' +TOOLKIT_VERSION = '8.0.0' +UNSUPPORTED_VERSION = '7.3.0' def read_yml_file(file): From 616342d5b702ad5832637e00706f1dbfcb3f3ce5 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 24 Oct 2023 11:31:09 +0300 Subject: [PATCH 009/152] refactor and fix environment checker --- app/bamboo.yml | 3 +- app/bitbucket.yml | 3 +- app/confluence.yml | 3 +- app/crowd.yml | 3 +- app/jira.yml | 3 +- app/jsm.yml | 3 +- app/util/analytics/application_info.py | 8 +++ app/util/api/confluence_clients.py | 12 ++-- app/util/conf.py | 1 + ...eck.py => environment_compliance_check.py} | 68 +++++++++++-------- 10 files changed, 67 insertions(+), 40 deletions(-) rename app/util/pre_run/{execution_compliance_check.py => environment_compliance_check.py} (50%) diff --git a/app/bamboo.yml b/app/bamboo.yml index c9d006347..08bcbf3ec 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -22,6 +22,7 @@ settings: LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. 
For more details please see our README. # Action percentage for JMeter load executor + environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. view_all_builds: 15 view_build_result: 15 view_build_configuration: 10 @@ -40,7 +41,7 @@ services: - module: shellexec prepare: - python util/pre_run/environment_checker.py - - python util/pre_run/execution_compliance_check.py bamboo + - python util/pre_run/environment_compliance_check.py bamboo - python util/data_preparation/bamboo_prepare_data.py shutdown: - python util/post_run/jmeter_post_check.py diff --git a/app/bitbucket.yml b/app/bitbucket.yml index c30c2717c..ea6a97758 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -21,11 +21,12 @@ settings: JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. services: - module: shellexec prepare: - python util/pre_run/environment_checker.py - - python util/pre_run/execution_compliance_check.py bitbucket + - python util/pre_run/environment_compliance_check.py bitbucket - python util/pre_run/git_client_check.py - python util/data_preparation/bitbucket_prepare_data.py shutdown: diff --git a/app/confluence.yml b/app/confluence.yml index a2ae1e394..c766eedee 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -21,6 +21,7 @@ settings: JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. # Action percentage for JMeter and Locust load executors extended_metrics: False view_page: 33 @@ -41,7 +42,7 @@ services: - module: shellexec prepare: - python util/pre_run/environment_checker.py - - python util/pre_run/execution_compliance_check.py confluence + - python util/pre_run/environment_compliance_check.py confluence - python util/data_preparation/confluence_prepare_data.py shutdown: - python util/post_run/jmeter_post_check.py diff --git a/app/crowd.yml b/app/crowd.yml index b1b356dc9..862db6776 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -33,11 +33,12 @@ settings: JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. services: - module: shellexec prepare: - python util/pre_run/environment_checker.py - - python util/pre_run/execution_compliance_check.py crowd + - python util/pre_run/environment_compliance_check.py crowd - python util/data_preparation/crowd_prepare_data.py - python util/data_preparation/crowd_sync_check.py shutdown: diff --git a/app/jira.yml b/app/jira.yml index f63729f98..ab98d9992 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -21,6 +21,7 @@ settings: JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. 
These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. # Action percentage for Jmeter and Locust load executors create_issue: 4 search_jql: 11 @@ -41,7 +42,7 @@ services: - module: shellexec prepare: - python util/pre_run/environment_checker.py - - python util/pre_run/execution_compliance_check.py jira + - python util/pre_run/environment_compliance_check.py jira - python util/data_preparation/jira_prepare_data.py shutdown: - python util/post_run/jmeter_post_check.py diff --git a/app/jsm.yml b/app/jsm.yml index 2d41b10b9..25407a18a 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -24,6 +24,7 @@ settings: JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. # Action percentage for Jmeter and Locust load executors agent_browse_projects: 10 agent_view_request: 24 @@ -57,7 +58,7 @@ services: - module: shellexec prepare: - python util/pre_run/environment_checker.py - - python util/pre_run/execution_compliance_check.py jsm + - python util/pre_run/environment_compliance_check.py jsm - python util/data_preparation/jsm_prepare_data.py shutdown: - python util/post_run/jmeter_post_check.py diff --git a/app/util/analytics/application_info.py b/app/util/analytics/application_info.py index 0bca6bc82..9a4d0d6f3 100644 --- a/app/util/analytics/application_info.py +++ b/app/util/analytics/application_info.py @@ -81,6 +81,8 @@ def version(self): @property def nodes_count(self): + if self.client.get_confluence_nodes() == 'Server': + return 'Server' return len(self.client.get_nodes()) def __issues_count(self): @@ -100,6 +102,8 @@ def version(self): @property def nodes_count(self): + if self.client.get_confluence_nodes() == 'Server': + return 'Server' return len(self.client.get_confluence_nodes()) @property @@ -124,6 +128,8 @@ def version(self): @property def nodes_count(self): + if self.client.get_confluence_nodes() == 'Server': + return 'Server' return self.client.get_bitbucket_nodes_count() @property @@ -141,6 +147,8 @@ def version(self): @property def nodes_count(self): + if self.client.get_confluence_nodes() == 'Server': + return 'Server' return len(self.client.get_nodes()) def __issues_count(self): diff --git a/app/util/api/confluence_clients.py b/app/util/api/confluence_clients.py index 23695597d..749bf28dd 100644 --- a/app/util/api/confluence_clients.py +++ b/app/util/api/confluence_clients.py @@ -129,16 +129,18 @@ def is_remote_api_enabled(self): return response.status_code == 200 def get_confluence_nodes(self): - api_url = f"{self.host}/rest/atlassian-cluster-monitoring/cluster/nodes" - response = self.get(api_url, error_msg='Could not get Confluence nodes count via API', - expected_status_codes=[200, 500]) - if response.status_code == 500 and 'NonClusterMonitoring' in response.text: + response = self.get(f'{self.host}/rest/zdu/cluster', error_msg='Could not get Confluence nodes count via API', + expected_status_codes=[200, 403, 500]) + if response.status_code == 403 and 'clustered installation' in response.text: return 'Server' - nodes = [node['nodeId'] for node in response.json()] + 
nodes = [node['id'] for node in response.json()['nodes']] return nodes def get_available_processors(self): try: + nodes = self.get_confluence_nodes() + if nodes == 'Server': + return nodes node_id = self.get_confluence_nodes()[0] api_url = f'{self.host}/rest/atlassian-cluster-monitoring/cluster/suppliers/data/com.atlassian.cluster' \ f'.monitoring.cluster-monitoring-plugin/runtime-information/{node_id}' diff --git a/app/util/conf.py b/app/util/conf.py index db0c93623..e713a62b7 100644 --- a/app/util/conf.py +++ b/app/util/conf.py @@ -27,6 +27,7 @@ def __init__(self, config_yml): self.analytics_collector = self.get_property('allow_analytics') self.load_executor = self.get_property('load_executor') self.secure = self.get_property('secure') + self.environment_compliance_check = self.get_property('environment_compliance_check') @property def server_url(self): diff --git a/app/util/pre_run/execution_compliance_check.py b/app/util/pre_run/environment_compliance_check.py similarity index 50% rename from app/util/pre_run/execution_compliance_check.py rename to app/util/pre_run/environment_compliance_check.py index 5aafaa020..e3ace8231 100644 --- a/app/util/pre_run/execution_compliance_check.py +++ b/app/util/pre_run/environment_compliance_check.py @@ -7,12 +7,12 @@ CROWD_SETTINGS APPS_SETTINGS = { - "JIRA": {"settings": JIRA_SETTINGS}, - "CONFLUENCE": {"settings": CONFLUENCE_SETTINGS}, - "BITBUCKET": {"settings": BITBUCKET_SETTINGS}, - "JSM": {"settings": JSM_SETTINGS}, - "BAMBOO": {"settings": BAMBOO_SETTINGS}, - "CROWD": {"settings": CROWD_SETTINGS}, + "JIRA": JIRA_SETTINGS, + "CONFLUENCE": CONFLUENCE_SETTINGS, + "BITBUCKET": BITBUCKET_SETTINGS, + "JSM": JSM_SETTINGS, + "BAMBOO": BAMBOO_SETTINGS, + "CROWD": CROWD_SETTINGS, } @@ -40,37 +40,40 @@ def check_dcapt_version(): def validate_application_config(processors, app_name_upper, app_settings, min_defaults): is_jsm = app_name_upper == "JSM" - - if ((not is_jsm and app_settings.concurrency == min_defaults['concurrency']) or - (is_jsm and - app_settings.customers_concurrency == min_defaults['customer_concurrency'] and - app_settings.agents_concurrency == min_defaults['agent_concurrency'])): + if not is_jsm: + current_concurrency = app_settings.concurrency + else: + current_concurrency = (app_settings.customers_concurrency, app_settings.agents_concurrency) + if ( + (not is_jsm and current_concurrency == min_defaults['concurrency']) or + (is_jsm and + current_concurrency == (min_defaults['customer_concurrency'], min_defaults['agent_concurrency'])) + ): + # If the number of processors is less than 4, raise a SystemExit with a warning message. if processors < 4: - concurrency = app_settings.concurrency if not is_jsm else (app_settings.customers_concurrency, - app_settings.agents_concurrency) raise SystemExit( - f"You are trying to run an enterprise-scale load test with {concurrency} against the " - f"instance with a weaker configuration than recommended. 
" - f"Kindly consider decreasing the load in your {app_name_upper.lower()}.yml file or " - f"using/re-installing the appropriate environment.") + f"ERROR: You are trying to run an enterprise-scale load test with concurrency: {current_concurrency} against the " + f"instance with a weaker configuration than recommended.\n" + f"Kindly consider decreasing the `concurrency`/`total_actions_per_hour` in your {app_name_upper.lower()}.yml file if this development environment.\n" + f"For enterprise-scale load make sure environment has a compliant configuration.\n" + f"To skip environment compliance check set `environment_compliance_check` variable to False in your {app_name_upper.lower()}.yml file.") def analyze_application_configuration(app_name): app_name_upper = app_name.upper() - if app_name_upper in APPS_SETTINGS: - app = ApplicationSelector(app_name).application - processors = app.processors + app = ApplicationSelector(app_name).application + processors = app.processors - try: - processors = int(processors) - except ValueError: - print("Warning: You are using a server instance for running enterprise-scale load tests.") - return + try: + processors = int(processors) + except ValueError: + print("Warning: You are using a server instance for running enterprise-scale load tests.") + return - app_settings = APPS_SETTINGS[app_name_upper]["settings"] - min_defaults = MIN_DEFAULTS.get(app_name.lower()) + app_settings = APPS_SETTINGS[app_name_upper] + min_defaults = MIN_DEFAULTS.get(app_name.lower()) + validate_application_config(processors, app_name_upper, app_settings, min_defaults) - validate_application_config(processors, app_name_upper, app_settings, min_defaults) def main(): @@ -82,7 +85,14 @@ def main(): # TODO: Add a check for CROWD configuration once the feature with processors is implemented in the product if app_name.upper() != "CROWD": - analyze_application_configuration(app_name) + if app_name.upper() in APPS_SETTINGS: + app_settings = APPS_SETTINGS[app_name.upper()] + if app_settings.environment_compliance_check: + analyze_application_configuration(app_name) + else: + raise SystemExit(f'ERROR: Unknown application: {app_name.upper()}. ' + f'Supported applications are {list(APPS_SETTINGS.keys())}') + if __name__ == "__main__": From 6874524bbb06cb221325772e65b7e22f208a78d9 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Thu, 26 Oct 2023 17:24:32 +0200 Subject: [PATCH 010/152] allign comments --- app/bamboo.yml | 4 ++-- app/bitbucket.yml | 20 ++++++++++---------- app/confluence.yml | 22 +++++++++++----------- app/crowd.yml | 28 ++++++++++++++-------------- app/jira.yml | 22 +++++++++++----------- app/jsm.yml | 24 ++++++++++++------------ 6 files changed, 60 insertions(+), 60 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 08bcbf3ec..341214857 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -3,7 +3,7 @@ settings: artifacts-dir: results/bamboo/%Y-%m-%d_%H-%M-%S aggregator: consolidator verbose: false - check-updates: false # disable bzt check for updates + check-updates: false # disable bzt check for updates env: application_hostname: test-bamboo.atlassian.com # Bamboo DC hostname without protocol and port e.g. test-bamboo.atlassian.com or localhost application_protocol: http # http or https @@ -21,8 +21,8 @@ settings: JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. 
For more details please see our README. + environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. # Action percentage for JMeter load executor - environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. view_all_builds: 15 view_build_result: 15 view_build_configuration: 10 diff --git a/app/bitbucket.yml b/app/bitbucket.yml index ea6a97758..aa62c3ef3 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -3,25 +3,25 @@ settings: artifacts-dir: results/bitbucket/%Y-%m-%d_%H-%M-%S aggregator: consolidator verbose: false - check-updates: false # disable bzt check for updates + check-updates: false # disable bzt check for updates env: application_hostname: test_bitbucket_instance.atlassian.com # Bitbucket DC hostname without protocol and port e.g. test-bitbucket.atlassian.com or localhost - application_protocol: http # http or https - application_port: 80 # 80, 443, 8080, 7990 etc - secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: /bitbucket # e.g. /bitbucket for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. Leave this value blank for url without postfix. + application_protocol: http # http or https + application_port: 80 # 80, 443, 8080, 7990 etc + secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate + application_postfix: /bitbucket # e.g. /bitbucket for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. Leave this value blank for url without postfix. admin_login: admin admin_password: admin - load_executor: jmeter # only jmeter executor is supported - concurrency: 20 # number of concurrent virtual users for jmeter scenario + load_executor: jmeter # only jmeter executor is supported + concurrency: 20 # number of concurrent virtual users for jmeter scenario test_duration: 50m - ramp-up: 10m # time to spin all concurrent users + ramp-up: 10m # time to spin all concurrent users total_actions_per_hour: 32700 WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. - environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. services: - module: shellexec prepare: diff --git a/app/confluence.yml b/app/confluence.yml index c766eedee..6c05945a5 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -3,27 +3,27 @@ settings: artifacts-dir: results/confluence/%Y-%m-%d_%H-%M-%S aggregator: consolidator verbose: false - check-updates: false # disable bzt check for updates + check-updates: false # disable bzt check for updates env: application_hostname: test_confluence_instance.atlassian.com # Confluence DC hostname without protocol, port and postfix e.g. 
test-confluence.atlassian.com or localhost - application_protocol: http # http or https - application_port: 80 # 80, 443, 8080, 1990, etc - secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: /confluence # e.g. /confluence for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. Leave this value blank for url without postfix. + application_protocol: http # http or https + application_port: 80 # 80, 443, 8080, 1990, etc + secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate + application_postfix: /confluence # e.g. /confluence for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. Leave this value blank for url without postfix. admin_login: admin admin_password: admin - load_executor: jmeter # jmeter and locust are supported. jmeter by default. - concurrency: 200 # number of concurrent virtual users for jmeter or locust scenario + load_executor: jmeter # jmeter and locust are supported. jmeter by default. + concurrency: 200 # number of concurrent virtual users for jmeter or locust scenario test_duration: 45m - ramp-up: 5m # time to spin all concurrent users + ramp-up: 5m # time to spin all concurrent users total_actions_per_hour: 20000 WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. - environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. - # Action percentage for JMeter and Locust load executors + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. extended_metrics: False + # Action percentage for JMeter and Locust load executors view_page: 33 view_dashboard: 9 view_blog: 13 diff --git a/app/crowd.yml b/app/crowd.yml index 862db6776..3fe46c702 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -3,37 +3,37 @@ settings: artifacts-dir: results/crowd/%Y-%m-%d_%H-%M-%S aggregator: consolidator verbose: false - check-updates: false # disable bzt check for updates + check-updates: false # disable bzt check for updates env: application_hostname: test_crowd_instance.atlassian.com # Crowd DC hostname without protocol and port e.g. test-crowd.atlassian.com or localhost - application_protocol: http # http or https - application_port: 80 # 80, 443, 8080, 4990, etc - secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /crowd in case of url like http://localhost:4990/crowd + application_protocol: http # http or https + application_port: 80 # 80, 443, 8080, 4990, etc + secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate + application_postfix: # e.g. 
/crowd in case of url like http://localhost:4990/crowd admin_login: admin admin_password: admin application_name: crowd application_password: 1111 load_executor: jmeter - concurrency: 1000 # number of concurrent threads to authenticate random users + concurrency: 1000 # number of concurrent threads to authenticate random users test_duration: 45m # 1 node scenario parameters - ramp-up: 20s # time to spin all concurrent threads - total_actions_per_hour: 180000 # number of total JMeter actions per hour + ramp-up: 20s # time to spin all concurrent threads + total_actions_per_hour: 180000 # number of total JMeter actions per hour # 2 nodes scenario parameters - # ramp-up: 10s # time to spin all concurrent threads - # total_actions_per_hour: 360000 # number of total JMeter actions per hour + # ramp-up: 10s # time to spin all concurrent threads + # total_actions_per_hour: 360000 # number of total JMeter actions per hour # 4 nodes scenario parameters - # ramp-up: 5s # time to spin all concurrent threads - # total_actions_per_hour: 720000 # number of total JMeter actions per hour + # ramp-up: 5s # time to spin all concurrent threads + # total_actions_per_hour: 720000 # number of total JMeter actions per hour JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. - environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. services: - module: shellexec prepare: diff --git a/app/jira.yml b/app/jira.yml index ab98d9992..ad6493f59 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -3,25 +3,25 @@ settings: artifacts-dir: results/jira/%Y-%m-%d_%H-%M-%S aggregator: consolidator verbose: false - check-updates: false # disable bzt check for updates + check-updates: false # disable bzt check for updates env: application_hostname: test_jira_instance.atlassian.com # Jira DC hostname without protocol and port e.g. test-jira.atlassian.com or localhost - application_protocol: http # http or https - application_port: 80 # 80, 443, 8080, 2990, etc - secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. + application_protocol: http # http or https + application_port: 80 # 80, 443, 8080, 2990, etc + secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate + application_postfix: # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. admin_login: admin admin_password: admin - load_executor: jmeter # jmeter and locust are supported. jmeter by default. - concurrency: 200 # number of concurrent virtual users for jmeter or locust scenario + load_executor: jmeter # jmeter and locust are supported. jmeter by default. 
+ concurrency: 200 # number of concurrent virtual users for jmeter or locust scenario test_duration: 45m - ramp-up: 3m # time to spin all concurrent users - total_actions_per_hour: 54500 # number of total JMeter/Locust actions per hour + ramp-up: 3m # time to spin all concurrent users + total_actions_per_hour: 54500 # number of total JMeter/Locust actions per hour WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. - environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. # Action percentage for Jmeter and Locust load executors create_issue: 4 search_jql: 11 diff --git a/app/jsm.yml b/app/jsm.yml index 25407a18a..2d607c291 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -3,28 +3,28 @@ settings: artifacts-dir: results/jsm/%Y-%m-%d_%H-%M-%S aggregator: consolidator verbose: false - check-updates: false # disable bzt check for updates + check-updates: false # disable bzt check for updates env: application_hostname: test_jsm_instance.atlassian.com # Jira Service Desk DC hostname without protocol and port e.g. test-jsm.atlassian.com or localhost - application_protocol: http # http or https - application_port: 80 # 80, 443, 8080, 2990, etc - secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. + application_protocol: http # http or https + application_port: 80 # 80, 443, 8080, 2990, etc + secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate + application_postfix: # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. admin_login: admin admin_password: admin - load_executor: jmeter # jmeter and locust are supported. jmeter by default. - concurrency_agents: 50 # number of concurrent virtual agent users for jmeter or locust scenario - concurrency_customers: 150 # number of concurrent virtual customer users for jmeter or locust scenario + load_executor: jmeter # jmeter and locust are supported. jmeter by default. + concurrency_agents: 50 # number of concurrent virtual agent users for jmeter or locust scenario + concurrency_customers: 150 # number of concurrent virtual customer users for jmeter or locust scenario test_duration: 45m - ramp-up: 3m # time to spin all concurrent users + ramp-up: 3m # time to spin all concurrent users total_actions_per_hour_agents: 5000 total_actions_per_hour_customers: 15000 - insight: False # Set True to enable Insight specific tests + insight: False # Set True to enable Insight specific tests WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. 
These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. - environment_compliance_check: True # Pre-test application environment validation. "False" to skip it. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. # Action percentage for Jmeter and Locust load executors agent_browse_projects: 10 agent_view_request: 24 From d4523ec8cdf0c43a5b072c758f218f1b0c0b9930 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Fri, 27 Oct 2023 16:48:59 +0200 Subject: [PATCH 011/152] -t flag explanation and java 17 update --- README.md | 10 +++++----- app/util/k8s/README.MD | 5 ++++- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index a3761d69f..da2d449fe 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ In case of technical questions, issues or problems with DC Apps Performance Tool #### Dependencies * Python 3.8, 3.9, 3.10 or 3.11 and pip -* JDK 11 +* JDK 17 * Google Chrome web browser * Git client (only for Bitbucket DC) @@ -41,7 +41,7 @@ If a first part of ChromeDriver version does not match with a first part of your Make sure that you have: * [Python](https://www.python.org/downloads/) (see [dependencies](#dependencies) section for supported versions) * pip -* [JDK 11](https://www.oracle.com/java/technologies/downloads/#java11) installed +* [JDK 17](https://www.oracle.com/java/technologies/downloads/#java17) installed * XCode Command Line Tools * Google Chrome web browser ``` @@ -81,7 +81,7 @@ pip install -r requirements.txt Make sure that you have: * [Python](https://www.python.org/downloads/) (see [dependencies](#dependencies) section for supported versions) * pip -* [JDK 11](https://www.oracle.com/java/technologies/downloads/#java11) installed +* [JDK 17](https://www.oracle.com/java/technologies/downloads/#java17) installed * Python developer package (e.g. 
`python3.9-dev` package for Python3.9) * Google Chrome web browser ``` @@ -99,7 +99,7 @@ We recommend using [virtualenv](https://virtualenv.pypa.io/en/latest/) for Tauru JDK setup (if missing): ``` sudo apt-get update -sudo apt-get install -y openjdk-11-jre-headless +sudo apt-get install -y openjdk-17-jre-headless ``` Chrome setup (if missing): ``` @@ -119,7 +119,7 @@ pip install -r requirements.txt ### Windows setup #### Installing Taurus manually -Make sure you have [Python](https://www.python.org/downloads/) (see [dependencies](#dependencies) section for supported versions), pip, and [JDK 11](https://www.oracle.com/java/technologies/downloads/#java11) installed: +Make sure you have [Python](https://www.python.org/downloads/) (see [dependencies](#dependencies) section for supported versions), pip, and [JDK 17](https://www.oracle.com/java/technologies/downloads/#java17) installed: ``` python --version or python3 --version pip --version diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index d612862e4..a2b49cddb 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -17,7 +17,7 @@ docker run --pull=always --env-file aws_envs \ docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars +-it atlassianlabs/terraform ./uninstall.sh -c config.tfvars ``` # Enterprise-scale environment @@ -35,6 +35,9 @@ docker run --pull=always --env-file aws_envs \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` ## Terminate enterprise-scale environment +Option `-t` deletes Terraform state files for all installed environment in the same region using the same AWS account. + +If state files are needed, e.g. there are other running clusters for other product, do not use `-t` flag in below command. 
``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ From 708788614c47f37101780ee9110edfc2d47a28ec Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 31 Oct 2023 10:56:35 +0200 Subject: [PATCH 012/152] fix comments --- app/util/analytics/application_info.py | 9 ++++----- app/util/api/confluence_clients.py | 2 +- app/util/pre_run/environment_compliance_check.py | 9 ++++----- 3 files changed, 9 insertions(+), 11 deletions(-) diff --git a/app/util/analytics/application_info.py b/app/util/analytics/application_info.py index 9a4d0d6f3..13de26957 100644 --- a/app/util/analytics/application_info.py +++ b/app/util/analytics/application_info.py @@ -81,7 +81,7 @@ def version(self): @property def nodes_count(self): - if self.client.get_confluence_nodes() == 'Server': + if self.client.get_nodes() == 'Server': return 'Server' return len(self.client.get_nodes()) @@ -128,9 +128,8 @@ def version(self): @property def nodes_count(self): - if self.client.get_confluence_nodes() == 'Server': - return 'Server' - return self.client.get_bitbucket_nodes_count() + nodes_count = self.client.get_bitbucket_nodes_count() + return nodes_count @property def dataset_information(self): @@ -147,7 +146,7 @@ def version(self): @property def nodes_count(self): - if self.client.get_confluence_nodes() == 'Server': + if self.client.get_nodes() == 'Server': return 'Server' return len(self.client.get_nodes()) diff --git a/app/util/api/confluence_clients.py b/app/util/api/confluence_clients.py index 749bf28dd..57db4e48c 100644 --- a/app/util/api/confluence_clients.py +++ b/app/util/api/confluence_clients.py @@ -140,7 +140,7 @@ def get_available_processors(self): try: nodes = self.get_confluence_nodes() if nodes == 'Server': - return nodes + return 'Server' node_id = self.get_confluence_nodes()[0] api_url = f'{self.host}/rest/atlassian-cluster-monitoring/cluster/suppliers/data/com.atlassian.cluster' \ f'.monitoring.cluster-monitoring-plugin/runtime-information/{node_id}' diff --git a/app/util/pre_run/environment_compliance_check.py b/app/util/pre_run/environment_compliance_check.py index e3ace8231..1f74dde24 100644 --- a/app/util/pre_run/environment_compliance_check.py +++ b/app/util/pre_run/environment_compliance_check.py @@ -40,10 +40,11 @@ def check_dcapt_version(): def validate_application_config(processors, app_name_upper, app_settings, min_defaults): is_jsm = app_name_upper == "JSM" - if not is_jsm: - current_concurrency = app_settings.concurrency - else: + if is_jsm: current_concurrency = (app_settings.customers_concurrency, app_settings.agents_concurrency) + else: + current_concurrency = app_settings.concurrency + if ( (not is_jsm and current_concurrency == min_defaults['concurrency']) or (is_jsm and @@ -75,7 +76,6 @@ def analyze_application_configuration(app_name): validate_application_config(processors, app_name_upper, app_settings, min_defaults) - def main(): check_dcapt_version() try: @@ -94,6 +94,5 @@ def main(): f'Supported applications are {list(APPS_SETTINGS.keys())}') - if __name__ == "__main__": main() From 498f224226fbbb376f7a35b9ba98933a57de1c1d Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 31 Oct 2023 11:08:54 +0200 Subject: [PATCH 013/152] added crowd --- app/crowd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/crowd.yml b/app/crowd.yml index 9490c6e8e..d36f529dc 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -9,7 +9,7 @@ settings: application_protocol: http # http or https application_port: 80 # 80, 443, 
8080, 4990, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /crowd in case of url like http://localhost:4990/crowd + application_postfix: /crowd # e.g. /crowd in case of url like http://localhost:4990/crowd admin_login: admin admin_password: admin application_name: crowd From 11c358496b444bf7eef77c881009cb438c356bf8 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 31 Oct 2023 11:10:41 +0200 Subject: [PATCH 014/152] added more or equal --- app/util/pre_run/environment_compliance_check.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/util/pre_run/environment_compliance_check.py b/app/util/pre_run/environment_compliance_check.py index 1f74dde24..c3153ac29 100644 --- a/app/util/pre_run/environment_compliance_check.py +++ b/app/util/pre_run/environment_compliance_check.py @@ -46,9 +46,9 @@ def validate_application_config(processors, app_name_upper, app_settings, min_de current_concurrency = app_settings.concurrency if ( - (not is_jsm and current_concurrency == min_defaults['concurrency']) or + (not is_jsm and current_concurrency >= min_defaults['concurrency']) or (is_jsm and - current_concurrency == (min_defaults['customer_concurrency'], min_defaults['agent_concurrency'])) + current_concurrency >= (min_defaults['customer_concurrency'], min_defaults['agent_concurrency'])) ): # If the number of processors is less than 4, raise a SystemExit with a warning message. if processors < 4: From c04eaba3971d825c593c016172e3f1956a19efd1 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 1 Nov 2023 06:01:01 +0100 Subject: [PATCH 015/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/bamboo.yml --- app/bamboo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 251343ba9..e2036401f 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -123,7 +123,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "118.0.5993.70" # Supports Chrome version 118. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "119.0.6045.105" # Supports Chrome version 119. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From f49a9617dd9c2a5c57feb1f9f54a287518cb1f16 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 1 Nov 2023 06:01:02 +0100 Subject: [PATCH 016/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/confluence.yml --- app/confluence.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/confluence.yml b/app/confluence.yml index ab2f7f73b..df696d8b3 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -117,7 +117,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "118.0.5993.70" # Supports Chrome version 118. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "119.0.6045.105" # Supports Chrome version 119. 
You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From f2bf35c46a5545fc51eb6aa25976287ac5ac6816 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 1 Nov 2023 06:01:02 +0100 Subject: [PATCH 017/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/bitbucket.yml --- app/bitbucket.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 6d530420a..16a305775 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -90,7 +90,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "118.0.5993.70" # Supports Chrome version 118. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "119.0.6045.105" # Supports Chrome version 119. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 595d9aef662f35b2a5969a4a379c7a6c419ac192 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 1 Nov 2023 06:01:02 +0100 Subject: [PATCH 018/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/jira.yml --- app/jira.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jira.yml b/app/jira.yml index 263184401..edca6d5f6 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -118,7 +118,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "118.0.5993.70" # Supports Chrome version 118. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "119.0.6045.105" # Supports Chrome version 119. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 8c035e85099eb325f616905b11da5857a2bcb1c5 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 1 Nov 2023 06:01:03 +0100 Subject: [PATCH 019/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/jsm.yml --- app/jsm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jsm.yml b/app/jsm.yml index 0a99988d3..5446490bf 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -170,7 +170,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "118.0.5993.70" # Supports Chrome version 118. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "119.0.6045.105" # Supports Chrome version 119. 
You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 66b54b8e4bbf5bee2549fcfd34508bf65ffa1105 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Mon, 6 Nov 2023 13:13:44 +0100 Subject: [PATCH 020/152] DCA-2133 Fix confluence prepare data 500 error --- app/util/data_preparation/confluence_prepare_data.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/app/util/data_preparation/confluence_prepare_data.py b/app/util/data_preparation/confluence_prepare_data.py index f891727c2..02de03902 100644 --- a/app/util/data_preparation/confluence_prepare_data.py +++ b/app/util/data_preparation/confluence_prepare_data.py @@ -44,8 +44,10 @@ def __create_data_set(rest_client, rpc_client): perf_user_api = ConfluenceRestClient(CONFLUENCE_SETTINGS.server_url, perf_user['username'], DEFAULT_USER_PASSWORD) pool = ThreadPool(processes=2) - async_pages = pool.apply_async(__get_pages, (perf_user_api, 5000)) - async_blogs = pool.apply_async(__get_blogs, (perf_user_api, 5000)) + + dcapt_dataset = bool(perf_user_api.search(limit=1, cql='type=page and text ~ PAGE_1')) + async_pages = pool.apply_async(__get_pages, (perf_user_api, 5000, dcapt_dataset)) + async_blogs = pool.apply_async(__get_blogs, (perf_user_api, 5000, dcapt_dataset)) async_pages.wait() async_blogs.wait() @@ -88,11 +90,10 @@ def __get_users(confluence_api, rpc_api, count): @print_timing('Getting pages') -def __get_pages(confluence_api, count): +def __get_pages(confluence_api, count, dcapt_dataset): pages_templates = [i for sublist in DATASET_PAGES_TEMPLATES.values() for i in sublist] pages_templates_count = len(pages_templates) pages_per_template = int(count / pages_templates_count) if count > pages_templates_count else 1 - dcapt_dataset = bool(confluence_api.search(limit=100, cql='type=page and text ~ PAGE_1')) total_pages = [] if dcapt_dataset: @@ -137,11 +138,10 @@ def __get_custom_pages(confluence_api, count, cql): @print_timing('Getting blogs') -def __get_blogs(confluence_api, count): +def __get_blogs(confluence_api, count, dcapt_dataset): blogs_templates = [i for sublist in DATASET_BLOGS_TEMPLATES.values() for i in sublist] blogs_templates_count = len(blogs_templates) blogs_per_template = int(count / blogs_templates_count) if count > blogs_templates_count else 1 - dcapt_dataset = bool(confluence_api.search(limit=100, cql='type=page and text ~ PAGE_1')) total_blogs = [] if dcapt_dataset: From b73a9811c887ed9d1d11f7a43e325e04838b7c95 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Mon, 6 Nov 2023 15:04:11 +0200 Subject: [PATCH 021/152] docs/ec2-limit-increase --- ...pps-performance-toolkit-user-guide-bamboo.md | 17 +++-------------- ...-performance-toolkit-user-guide-bitbucket.md | 17 +++-------------- ...performance-toolkit-user-guide-confluence.md | 17 +++-------------- ...apps-performance-toolkit-user-guide-crowd.md | 17 +++-------------- ...-apps-performance-toolkit-user-guide-jira.md | 17 +++-------------- ...c-apps-performance-toolkit-user-guide-jsm.md | 17 +++-------------- 6 files changed, 18 insertions(+), 84 deletions(-) diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index 3ef7f1a7c..9cc8f4bf0 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -27,20 +27,9 @@ test results for the Marketplace approval process. Preferably, use the below rec ## 1. 
Set up an enterprise-scale environment Bamboo Data Center on k8s #### EC2 CPU Limit -The installation of Bamboo requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. - -The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): - -| Parameter | Value | -|-----------------------|---------------------------------------------------------------------------------| -| Limit type | EC2 Instances | -| Severity | Urgent business impacting question | -| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | -| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | -| Limit | Instance Limit | -| New limit value | _The needed limit of CPU Cores_ | -| Case description | _Give a small description of your case_ | -Select the **Contact Option** and click **Submit** button. +The installation of Bamboo requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. #### Setup Bamboo Data Center with an enterprise-scale dataset on k8s diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index e68aeabd8..e31b8f202 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -181,20 +181,9 @@ Follow [Terminate development environment](https://github.com/atlassian/dc-app-p {{% /warning %}} #### EC2 CPU Limit -The installation of 4-nodes Bitbucket requires **48** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. - -The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): - -| Parameter | Value | -|-----------------------|---------------------------------------------------------------------------------| -| Limit type | EC2 Instances | -| Severity | Urgent business impacting question | -| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | -| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | -| Limit | Instance Limit | -| New limit value | _The needed limit of CPU Cores_ | -| Case description | _Give a small description of your case_ | -Select the **Contact Option** and click **Submit** button. 
+The installation of 4-nodes Bitbucket requires **48** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 52e86610e..eb98a43de 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -260,20 +260,9 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Confluence Data Center enterprise-scale environment with "large" dataset #### EC2 CPU Limit -The installation of 4-nodes Confluence requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. - -The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): - -| Parameter | Value | -|-----------------------|---------------------------------------------------------------------------------| -| Limit type | EC2 Instances | -| Severity | Urgent business impacting question | -| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | -| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | -| Limit | Instance Limit | -| New limit value | _The needed limit of CPU Cores_ | -| Case description | _Give a small description of your case_ | -Select the **Contact Option** and click **Submit** button. +The installation of 4-nodes Confluence requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. 
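The quota page referenced in the updated docs above can also be checked from a terminal. A minimal sketch, assuming the AWS CLI v2 with the `service-quotas` commands is configured for the target account, and that `L-1216C47A` (the "Running On-Demand Standard (A, C, D, H, I, M, R, T, Z) instances" quota) is the one being raised; adjust `--region` and `--desired-value` to your deployment region and the required number of CPU Cores:

``` bash
# Show the currently applied vCPU quota for On-Demand Standard instances
aws service-quotas get-service-quota \
  --service-code ec2 \
  --quota-code L-1216C47A \
  --region us-east-2

# Request an account-level quota increase (example value: 64 vCPUs)
aws service-quotas request-service-quota-increase \
  --service-code ec2 \
  --quota-code L-1216C47A \
  --desired-value 64 \
  --region us-east-2
```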
diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index 0f13b4f80..c2372fcc7 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -24,20 +24,9 @@ In this document, we cover the use of the Data Center App Performance Toolkit on ## 1. Set up an enterprise-scale environment Crowd Data Center on k8s #### EC2 CPU Limit -The installation of 4-nodes Crowd requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. - -The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): - -| Parameter | Value | -|-----------------------|---------------------------------------------------------------------------------| -| Limit type | EC2 Instances | -| Severity | Urgent business impacting question | -| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | -| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | -| Limit | Instance Limit | -| New limit value | _The needed limit of CPU Cores_ | -| Case description | _Give a small description of your case_ | -Select the **Contact Option** and click **Submit** button. +The installation of 4-nodes Crowd requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. #### Setup Crowd Data Center with an enterprise-scale dataset on k8s diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index 446c05a71..866e1534f 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -274,20 +274,9 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Jira Data Center enterprise-scale environment with "large" dataset #### EC2 CPU Limit -The installation of 4-nodes Jira requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. - -The limit can be increased by creating AWS Support ticket. 
To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): - -| Parameter | Value | -|-----------------------|---------------------------------------------------------------------------------| -| Limit type | EC2 Instances | -| Severity | Urgent business impacting question | -| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | -| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | -| Limit | Instance Limit | -| New limit value | _The needed limit of CPU Cores_ | -| Case description | _Give a small description of your case_ | -Select the **Contact Option** and click **Submit** button. +The installation of 4-nodes Jira requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. #### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index c3561a77e..25080f2fe 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -310,20 +310,9 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Jira Service Management Data Center enterprise-scale environment with "large" dataset #### EC2 CPU Limit -The installation of 4-nodes Jira Service Management requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. - -The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): - -| Parameter | Value | -|-----------------------|---------------------------------------------------------------------------------| -| Limit type | EC2 Instances | -| Severity | Urgent business impacting question | -| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | -| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | -| Limit | Instance Limit | -| New limit value | _The needed limit of CPU Cores_ | -| Case description | _Give a small description of your case_ | -Select the **Contact Option** and click **Submit** button. +The installation of 4-nodes Jira Service Management requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. 
**Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. #### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. From b4ae874155ca79b6f1a5dc8595338e311f43b54e Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 7 Nov 2023 12:17:46 +0200 Subject: [PATCH 022/152] test comment snapshot id --- app/util/k8s/dcapt-small.tfvars | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index aa4069387..3a62b6178 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -93,7 +93,7 @@ jira_version_tag = "9.4.10" # Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. # # Jira 9.4.10 DCAPT small dataset EBS snapshot - jira_shared_home_snapshot_id = "snap-019fd367ec397b1f3" +# jira_shared_home_snapshot_id = "snap-019fd367ec397b1f3" # Jira 8.20.26 DCAPT small dataset EBS snapshot # jira_shared_home_snapshot_id = "snap-0592bc60820536611" # JSM 5.4.10 DCAPT small dataset EBS snapshot @@ -106,7 +106,7 @@ jira_version_tag = "9.4.10" # Build number stored within the snapshot and Jira license are also required, so that Jira can be fully setup prior to start. # # Jira 9.4.10 DCAPT small dataset RDS snapshot - jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-10" +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-10" # Jira 8.20.26 DCAPT small dataset RDS snapshot # jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-26" # JSM 5.4.10 DCAPT small dataset RDS snapshot From a2524ead42cbb25db343750d59c507fc6f5c6ffd Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Wed, 8 Nov 2023 11:41:15 +0200 Subject: [PATCH 023/152] test comment snapshot id --- app/util/k8s/dcapt-small.tfvars | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 3a62b6178..3f5b676c9 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -12,13 +12,13 @@ # Unique name of your small-scale test cluster. # This value can not be altered after the configuration has been applied. # ! REQUIRED ! -environment_name = "dcapt-product-small" +environment_name = "dcapt-sm-smor-3" # Supported products: jira, confluence and bitbucket # e.g.: products = ["confluence"] # ! REQUIRED ! -products = ["product-to-deploy"] - +products = ["jira"] +jira_dataset_size = "small" # Default AWS region for DCAPT snapshots. region = "us-east-2" @@ -28,7 +28,13 @@ region = "us-east-2" whitelist_cidr = ["0.0.0.0/0"] # (optional) Custom tags for all resources to be created. Please add all tags you need to propagate among the resources. -resource_tags = {Name: "dcapt-testing-small"} +resource_tags = { + persist_days : "0.4", + business_unit : "Engineering-Enterprise DC", + creator : "ci", + resource_owner : "ometelytsia", + service_name : "dcapt" +} # Instance types that is preferred for EKS node group. 
# Confluence, Jira - use default value @@ -75,7 +81,7 @@ jira_image_repository = "atlassian/jira-software" # If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. # Please make sure valid confluence license is used without spaces and new line symbols. # ! REQUIRED ! -jira_license = "jira-license" +jira_license = "AAACCA0ODAoPeNqNk1Fv2jAQx9/zKSztZUNySIJaBlKk0cTrskKCksA0rXswyQGugh3ZDhvffiYBtR0dmpQX2/e/+93/Lu++QYlisUcDB7nu2LkZ37jofpYjz/E8ayMB+FbUNUh7ygrgCkjJNBPcJ3FO0nkaZcSKm90KZLJeKJDKx64VCK5poWO6A58qTdX2YD7+ieqKKsUotwuxs56YpPaFdt7IYksVhFSD7znuCDtD7H60TuXzQw1t3iCZzUgaRJPp+Yn8rpk8tLq56zlfzhhkRll1hSMDuQcZhf7dLFri1Ln3cDocTXAweFh2kLUUZVNo+3jASqz1LyrBNqnZHnwtG7gWZoBoAFyD7EKzZqUKyerWxfbmDZffarOt8e8p9Hq9OMnx5yTF8zQJF0EeJTFeZMQ8+IEE40uJVgekt4BOWRDhhShBIkP+BIVGP7Za14/jfn8j7Fcm9atOgaFT/LRRKBAXGpVMaclWjQaTmSmkBSoapcXOzNO2jPWmc055cTkewxWkZJKTEN99P0IajYbqoE3R80zNVKZRmJEYT93BaHB7O/SueX2xTW3sxQRI7P93ykxTeVSuaaXASuSGcqZoa/7k7JDV2muu/l7ak2tLQ3MUvP6hWqJaMnXaoRCe9+KrYUDZiQG9P3aAuhY+PI4R2dOqaQt25BebeGWLXhK81D3n7M5/AGEqbKMwLQIVAIGhKyPPM2ZK4yWWthGFszXWziMrAhQPrZpZ8MurfHmf42EG2hC3RgCAaQ==X02ok" # Number of Jira/JSM application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Jira is fully From 2a69d1bc103913b1079eea8bd88560537ed12205 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 9 Nov 2023 10:26:17 +0200 Subject: [PATCH 024/152] fix resources --- app/util/k8s/dcapt-small.tfvars | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 3f5b676c9..2ba0e1104 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -12,7 +12,7 @@ # Unique name of your small-scale test cluster. # This value can not be altered after the configuration has been applied. # ! REQUIRED ! -environment_name = "dcapt-sm-smor-3" +environment_name = "dcapt-testing-small" # Supported products: jira, confluence and bitbucket # e.g.: products = ["confluence"] @@ -28,13 +28,7 @@ region = "us-east-2" whitelist_cidr = ["0.0.0.0/0"] # (optional) Custom tags for all resources to be created. Please add all tags you need to propagate among the resources. -resource_tags = { - persist_days : "0.4", - business_unit : "Engineering-Enterprise DC", - creator : "ci", - resource_owner : "ometelytsia", - service_name : "dcapt" -} +resource_tags = {Name: "dcapt-testing-small"} # Instance types that is preferred for EKS node group. # Confluence, Jira - use default value From a38aaed1e75b9b1832f1ee4623d1b1f11a940348 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 9 Nov 2023 11:57:37 +0200 Subject: [PATCH 025/152] fix resources --- app/util/k8s/dcapt-small.tfvars | 1 + 1 file changed, 1 insertion(+) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 2ba0e1104..9da5d1c1b 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -18,6 +18,7 @@ environment_name = "dcapt-testing-small" # e.g.: products = ["confluence"] # ! REQUIRED ! products = ["jira"] +snapshots_json_file_path=dcapt-snapshots.json jira_dataset_size = "small" # Default AWS region for DCAPT snapshots. 
region = "us-east-2" From 56282fddac40819e66062ceee90618df4e7a8fd8 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 9 Nov 2023 11:57:55 +0200 Subject: [PATCH 026/152] fix resources --- app/util/k8s/dcapt-small.tfvars | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 9da5d1c1b..436afb3ac 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -18,7 +18,7 @@ environment_name = "dcapt-testing-small" # e.g.: products = ["confluence"] # ! REQUIRED ! products = ["jira"] -snapshots_json_file_path=dcapt-snapshots.json +snapshots_json_file_path="dcapt-snapshots.json" jira_dataset_size = "small" # Default AWS region for DCAPT snapshots. region = "us-east-2" From 9e047c053c144da6887d224908eb2b83be4cb8bb Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 10 Nov 2023 11:55:23 +0200 Subject: [PATCH 027/152] replace --- app/util/k8s/dcapt-small.tfvars | 46 ++++++++++++--------------------- 1 file changed, 16 insertions(+), 30 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 436afb3ac..b3ce8f93e 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -12,14 +12,13 @@ # Unique name of your small-scale test cluster. # This value can not be altered after the configuration has been applied. # ! REQUIRED ! -environment_name = "dcapt-testing-small" +environment_name = "dcapt-product-small" # Supported products: jira, confluence and bitbucket # e.g.: products = ["confluence"] # ! REQUIRED ! -products = ["jira"] -snapshots_json_file_path="dcapt-snapshots.json" -jira_dataset_size = "small" +products = ["product-to-deploy"] + # Default AWS region for DCAPT snapshots. region = "us-east-2" @@ -28,6 +27,9 @@ region = "us-east-2" # default value to your desired CIDR blocks. e.g. ["10.20.0.0/16" , "99.68.64.0/10"] whitelist_cidr = ["0.0.0.0/0"] +# Path to a JSON file with EBS and RDS snapshot IDs +snapshots_json_file_path = "dcapt-snapshots.json" + # (optional) Custom tags for all resources to be created. Please add all tags you need to propagate among the resources. resource_tags = {Name: "dcapt-testing-small"} @@ -76,7 +78,10 @@ jira_image_repository = "atlassian/jira-software" # If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. # Please make sure valid confluence license is used without spaces and new line symbols. # ! REQUIRED ! -jira_license = "AAACCA0ODAoPeNqNk1Fv2jAQx9/zKSztZUNySIJaBlKk0cTrskKCksA0rXswyQGugh3ZDhvffiYBtR0dmpQX2/e/+93/Lu++QYlisUcDB7nu2LkZ37jofpYjz/E8ayMB+FbUNUh7ygrgCkjJNBPcJ3FO0nkaZcSKm90KZLJeKJDKx64VCK5poWO6A58qTdX2YD7+ieqKKsUotwuxs56YpPaFdt7IYksVhFSD7znuCDtD7H60TuXzQw1t3iCZzUgaRJPp+Yn8rpk8tLq56zlfzhhkRll1hSMDuQcZhf7dLFri1Ln3cDocTXAweFh2kLUUZVNo+3jASqz1LyrBNqnZHnwtG7gWZoBoAFyD7EKzZqUKyerWxfbmDZffarOt8e8p9Hq9OMnx5yTF8zQJF0EeJTFeZMQ8+IEE40uJVgekt4BOWRDhhShBIkP+BIVGP7Za14/jfn8j7Fcm9atOgaFT/LRRKBAXGpVMaclWjQaTmSmkBSoapcXOzNO2jPWmc055cTkewxWkZJKTEN99P0IajYbqoE3R80zNVKZRmJEYT93BaHB7O/SueX2xTW3sxQRI7P93ykxTeVSuaaXASuSGcqZoa/7k7JDV2muu/l7ak2tLQ3MUvP6hWqJaMnXaoRCe9+KrYUDZiQG9P3aAuhY+PI4R2dOqaQt25BebeGWLXhK81D3n7M5/AGEqbKMwLQIVAIGhKyPPM2ZK4yWWthGFszXWziMrAhQPrZpZ8MurfHmf42EG2hC3RgCAaQ==X02ok" +jira_license = "jira-license" + +# Dataset size. Used only when snapshots_json_file_path is defined. 
Defaults to large +jira_dataset_size = "small" # Number of Jira/JSM application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Jira is fully @@ -90,31 +95,6 @@ jira_version_tag = "9.4.10" # JSM version # jira_version_tag = "5.4.10" -# Shared home restore configuration. -# Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. -# -# Jira 9.4.10 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-019fd367ec397b1f3" -# Jira 8.20.26 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-0592bc60820536611" -# JSM 5.4.10 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-0e340e980918e45f6" -# JSM 4.20.26 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-096d1185a5fee02ea" - -# Database restore configuration. -# Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. -# Build number stored within the snapshot and Jira license are also required, so that Jira can be fully setup prior to start. -# -# Jira 9.4.10 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-10" -# Jira 8.20.26 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-26" -# JSM 5.4.10 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-10" -# JSM 4.20.26 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-26" - # Helm chart version of Jira # jira_helm_chart_version = "" @@ -176,6 +156,9 @@ jira_db_master_password = "Password1!" # ! REQUIRED ! confluence_license = "confluence-license" +# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large +confluence_dataset_size = "small" + # Number of Confluence application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Confluence is fully # installed and configured. @@ -272,6 +255,9 @@ confluence_collaborative_editing_enabled = true # ! REQUIRED ! bitbucket_license = "bitbucket-license" +# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large +bitbucket_dataset_size = "small" + # Number of Bitbucket application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Bitbucket is fully # installed and configured. From 9c54e1a9968e99854a9634449c161107931c8a24 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 10 Nov 2023 11:58:22 +0200 Subject: [PATCH 028/152] remove snapshots id --- app/util/k8s/dcapt-small.tfvars | 36 --------------------------------- 1 file changed, 36 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index b3ce8f93e..759679105 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -167,26 +167,6 @@ confluence_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions confluence_version_tag = "8.5.1" -# Shared home restore configuration. -# 8.5.1 DCAPT small dataset EBS snapshot -confluence_shared_home_snapshot_id = "snap-008cc496f440198de" -# 7.19.14 DCAPT small dataset EBS snapshot -# confluence_shared_home_snapshot_id = "snap-0a175c4fd76039985" - -# Database restore configuration. 
-# Build number stored within the snapshot and Confluence license are also required, so that Confluence can be fully setup prior to start. -# 8.5.1 DCAPT small dataset RDS snapshot -confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-8-5-1" -# 7.19.14 DCAPT small dataset RDS snapshot -# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-14" - -# Build number for a specific Confluence version can be found in the link below: -# https://developer.atlassian.com/server/confluence/confluence-build-information -# 8.5.1 -confluence_db_snapshot_build_number = "9012" -# 7.19.14 -# confluence_db_snapshot_build_number = "8804" - # Helm chart version of Confluence #confluence_helm_chart_version = "" @@ -266,22 +246,6 @@ bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions bitbucket_version_tag = "7.21.16" -# Shared home restore configuration. -# Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. -# -# 7.21.16 DCAPT small dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-04351bd6779e3ee76" -# 8.9.5 DCAPT small dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-01806166c1afe8bd5" - -# Database restore configuration. -# Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. -# -# 7.21.16 DCAPT small dataset RDS snapshot -bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-16" -# 8.9.5 DCAPT small dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-5" - # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" From 18c92aaf9074b3b08ec7a334266356dd81a5dd9a Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 14 Nov 2023 11:19:04 +0200 Subject: [PATCH 029/152] move licenses --- app/util/k8s/dcapt-small.tfvars | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 759679105..416bef9b5 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -19,6 +19,15 @@ environment_name = "dcapt-product-small" # ! REQUIRED ! products = ["product-to-deploy"] +# License +# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_jira_license`) and keep the below line commented out +# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. +# Please make sure valid license is used without spaces and new line symbols. +# ! REQUIRED ! +jira_license = "jira-license" +confluence_license = "confluence-license" +bitbucket_license = "bitbucket-license" + # Default AWS region for DCAPT snapshots. region = "us-east-2" @@ -73,13 +82,6 @@ jira_image_repository = "atlassian/jira-software" # JSM # jira_image_repository = "atlassian/jira-servicemanagement" -# Jira/JSM license -# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_jira_license`) and keep the below line commented out -# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. 
-# Please make sure valid confluence license is used without spaces and new line symbols. -# ! REQUIRED ! -jira_license = "jira-license" - # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large jira_dataset_size = "small" @@ -149,13 +151,6 @@ jira_db_master_password = "Password1!" # Confluence Settings ################################################################################ -# Confluence license -# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_confluence_license`) and keep the below line commented out -# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. -# Please make sure valid confluence license is used without spaces and new line symbols. -# ! REQUIRED ! -confluence_license = "confluence-license" - # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large confluence_dataset_size = "small" @@ -228,13 +223,6 @@ confluence_collaborative_editing_enabled = true # Bitbucket Settings ################################################################################ -# Bitbucket license -# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_bitbucket_license`) and keep the below line commented out -# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here -# Please make sure valid bitbucket license is used without spaces and new line symbols. -# ! REQUIRED ! -bitbucket_license = "bitbucket-license" - # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large bitbucket_dataset_size = "small" From 13970c6c2e4acd9356aa8c88b6513a159b6a0ad2 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 14 Nov 2023 11:38:37 +0200 Subject: [PATCH 030/152] refactor main .tfvars --- app/util/k8s/dcapt.tfvars | 131 ++++---------------------------------- 1 file changed, 11 insertions(+), 120 deletions(-) diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 6ffe95ad0..6514dbb01 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -17,6 +17,17 @@ environment_name = "dcapt-product" # ! REQUIRED ! products = ["product-to-deploy"] +# License +# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_jira_license`) and keep the below line commented out +# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. +# Please make sure valid license is used without spaces and new line symbols. +# ! REQUIRED ! +jira_license = "jira-license" +confluence_license = "confluence-license" +bitbucket_license = "bitbucket-license" +crowd_license = "crowd-license" +bamboo_license = "bamboo-license" + # Default AWS region for DCAPT snapshots. region = "us-east-2" @@ -69,13 +80,6 @@ jira_image_repository = "atlassian/jira-software" # JSM # jira_image_repository = "atlassian/jira-servicemanagement" -# Jira/JSM license -# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. 
`TF_VAR_jira_license`) and keep the below line commented out -# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. -# Please make sure valid Jira/JSM license is used without spaces and new line symbols. -# ! REQUIRED ! -jira_license = "jira-license" - # Number of Jira/JSM application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Jira is fully # installed and configured. @@ -88,31 +92,6 @@ jira_version_tag = "9.4.10" # JSM version # jira_version_tag = "5.4.10" -# Shared home restore configuration. -# Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. -# -# Jira 9.4.10 DCAPT large dataset EBS snapshot -jira_shared_home_snapshot_id = "snap-084e99e384dcfbe31" -# Jira 8.20.26 DCAPT large dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-0c0c388d53cd4153b" -# JSM 5.4.10 DCAPT large dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-0381cc00e37231565" -# JSM 4.20.26 DCAPT large dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-0f7aa03eea37f3304" - -# Database restore configuration. -# Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. -# Build number stored within the snapshot and Jira license are also required, so that Jira can be fully setup prior to start. -# -# Jira 9.4.10 DCAPT large dataset RDS snapshot -jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-10" -# Jira 8.20.26 DCAPT large dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-26" -# JSM 5.4.10 DCAPT large dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-10" -# JSM 4.20.26 DCAPT large dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-26" - # Helm chart version of Jira # jira_helm_chart_version = "" @@ -161,13 +140,6 @@ jira_db_master_password = "Password1!" # Confluence Settings ################################################################################ -# Confluence license -# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_confluence_license`) and keep the below line commented out -# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. -# Please make sure valid Confluence license is used without spaces and new line symbols. -# ! REQUIRED ! -confluence_license = "confluence-license" - # Number of Confluence application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Confluence is fully # installed and configured. @@ -176,30 +148,6 @@ confluence_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions confluence_version_tag = "8.5.1" -# Shared home restore configuration. -# Make sure confluence version set in `confluence_version_tag` match the snapshot version. -# -# 8.5.1 DCAPT large dataset EBS snapshot -confluence_shared_home_snapshot_id = "snap-074a2fdca0497b6b6" -# 7.19.14 DCAPT large dataset EBS snapshot -# confluence_shared_home_snapshot_id = "snap-00f5e8147604a017e" - -# Database restore configuration. 
-# Make sure confluence version set in `confluence_version_tag` match the snapshot version. -# Build number stored within the snapshot and Confluence license are also required, so that Confluence can be fully setup prior to start. -# -# 8.5.1 DCAPT large dataset RDS snapshot -confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-8-5-1" -# 7.19.14 DCAPT large dataset RDS snapshot -# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-14" - -# Build number for a specific Confluence version can be found in the link below: -# https://developer.atlassian.com/server/confluence/confluence-build-information -# 8.5.1 -confluence_db_snapshot_build_number = "9012" -# 7.19.14 -# confluence_db_snapshot_build_number = "8804" - # Helm chart version of Confluence # confluence_helm_chart_version = "" @@ -259,13 +207,6 @@ confluence_collaborative_editing_enabled = true # Bitbucket Settings ################################################################################ -# Bitbucket license -# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_bitbucket_license`) and keep the below line commented out -# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here -# Please make sure valid Bitbucket license is used without spaces and new line symbols. -# ! REQUIRED ! -bitbucket_license = "bitbucket-license" - # Number of Bitbucket application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Bitbucket is fully # installed and configured. @@ -274,22 +215,6 @@ bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions bitbucket_version_tag = "7.21.16" -# Shared home restore configuration. -# Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. -# -# 7.21.16 DCAPT large dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-0d4bbe0cf3056c0ee" -# 8.9.5 DCAPT large dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-0261a9130a9fd7618" - -# Database restore configuration. -# Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. -# -# 7.21.16 DCAPT large dataset RDS snapshot -bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-16" -# 8.9.5 DCAPT large dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-5" - # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" @@ -367,13 +292,6 @@ bitbucket_db_master_password = "Password1!" # Crowd Settings ################################################################################ -# Crowd license -# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_crowd_license`) and keep the below line commented out -# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here -# Please make sure valid Crowd license is used without spaces and new line symbols. -# ! REQUIRED ! 
-crowd_license = "crowd-license" - # Number of Crowd application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Crowd is fully # installed and configured. @@ -382,26 +300,6 @@ crowd_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions crowd_version_tag = "5.1.4" -# Dataset Restore - -# Shared home restore configuration -# To restore shared home dataset, you can provide EBS snapshot ID that contains content of the shared home volume. -# This volume will be mounted to the NFS server and used when the product is started. -# Make sure the snapshot is available in the region you are deploying to and it follows all product requirements. -# -# Crowd 5.1.4 DCAPT large dataset EBS snapshot -crowd_shared_home_snapshot_id = "snap-0a8e229690be9ae30" - -# Database restore configuration -# If you want to restore the database from a snapshot, uncomment the following line and provide the snapshot identifier. -# This will restore the database from the snapshot and will not create a new database. -# The snapshot should be in the same AWS account and region as the environment to be deployed. -# Please also provide crowd_db_master_username and crowd_db_master_password that matches the ones in snapshot -# -# Crowd 5.1.4 DCAPT large dataset RDS snapshot -crowd_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-4" -crowd_db_snapshot_build_number = "1893" - # Helm chart version of Crowd and Crowd agent instances. By default the latest version is installed. # crowd_helm_chart_version = "" @@ -457,13 +355,6 @@ crowd_db_master_password = "Password1!" # Bamboo Settings ################################################################################ -# Bamboo license -# To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_bamboo_license`) and keep the below line commented out -# If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. -# Please make sure valid Bamboo license is used without spaces and new line symbols. -# ! REQUIRED ! -bamboo_license = "bamboo-license" - # By default, latest supported by DCAPT version is set. # https://hub.docker.com/r/atlassian/bamboo/tags # https://hub.docker.com/r/atlassian/bamboo-agent-base/tags From 0cb7533b4ecac2a31bf9b6ccb08d326fe52d06d4 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 14 Nov 2023 13:26:12 +0200 Subject: [PATCH 031/152] refactor main .tfvars --- app/util/k8s/dcapt.tfvars | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 6514dbb01..7bce2d1fb 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -77,6 +77,10 @@ max_cluster_capacity = 4 # # Jira jira_image_repository = "atlassian/jira-software" + +# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large +jira_dataset_size = "large" + # JSM # jira_image_repository = "atlassian/jira-servicemanagement" @@ -140,6 +144,9 @@ jira_db_master_password = "Password1!" # Confluence Settings ################################################################################ +# Dataset size. Used only when snapshots_json_file_path is defined. 
Defaults to large +confluence_dataset_size = "large" + # Number of Confluence application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Confluence is fully # installed and configured. @@ -207,6 +214,9 @@ confluence_collaborative_editing_enabled = true # Bitbucket Settings ################################################################################ +# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large +bitbucket_dataset_size = "large" + # Number of Bitbucket application nodes # Note: For initial installation this value needs to be set to 1 and it can be changed only after Bitbucket is fully # installed and configured. From bb9e34f193b39f462d919cce9776f94a74981176 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 14 Nov 2023 13:26:42 +0200 Subject: [PATCH 032/152] refactor main .tfvars --- app/util/k8s/dcapt.tfvars | 3 +++ 1 file changed, 3 insertions(+) diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 7bce2d1fb..bfabc4175 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -36,6 +36,9 @@ region = "us-east-2" # default value to your desired CIDR blocks. e.g. ["10.20.0.0/16" , "99.68.64.0/10"] whitelist_cidr = ["0.0.0.0/0"] +# Path to a JSON file with EBS and RDS snapshot IDs +snapshots_json_file_path = "dcapt-snapshots.json" + # (optional) Custom tags for all resources to be created. Please add all tags you need to propagate among the resources. resource_tags = {Name: "dcapt-testing"} From 347dd0e92cce08545b8e4f64b1513deac6985651 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Wed, 15 Nov 2023 14:48:17 +0200 Subject: [PATCH 033/152] remove snapshots from docs --- app/util/k8s/dcapt-small.tfvars | 5 +++-- app/util/k8s/dcapt.tfvars | 1 - docs/dc-apps-performance-toolkit-user-guide-bitbucket.md | 2 -- docs/dc-apps-performance-toolkit-user-guide-confluence.md | 2 -- docs/dc-apps-performance-toolkit-user-guide-jira.md | 2 -- docs/dc-apps-performance-toolkit-user-guide-jsm.md | 2 -- 6 files changed, 3 insertions(+), 11 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 416bef9b5..486c8d9c4 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -91,8 +91,7 @@ jira_dataset_size = "small" jira_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -# -# Jira version +# Jira version. jira_version_tag = "9.4.10" # JSM version # jira_version_tag = "5.4.10" @@ -202,6 +201,7 @@ confluence_db_major_engine_version = "14" confluence_db_instance_class = "db.t3.medium" confluence_db_allocated_storage = 200 confluence_db_iops = 1000 + # If you restore the database, make sure `confluence_db_name' is set to the db name from the snapshot. # Set `null` if the snapshot does not have a default db name. confluence_db_name = "confluence" @@ -296,6 +296,7 @@ bitbucket_db_major_engine_version = "14" bitbucket_db_instance_class = "db.t3.medium" bitbucket_db_allocated_storage = 100 bitbucket_db_iops = 1000 + # If you restore the database, make sure `bitbucket_db_name' is set to the db name from the snapshot. # Set `null` if the snapshot does not have a default db name. 
bitbucket_db_name = "bitbucket" diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index bfabc4175..dd8fe4818 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -93,7 +93,6 @@ jira_dataset_size = "large" jira_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -# # Jira version jira_version_tag = "9.4.10" # JSM version diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index e31b8f202..6ed1aa1d4 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -78,7 +78,6 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase 6. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - - Make sure that the Bitbucket version specified in **bitbucket_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): ``` bash docker run --env-file aws_envs \ @@ -229,7 +228,6 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large - `instance_types` - `["m5.4xlarge"]` 6. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - - Make sure that the Bitbucket version specified in **bitbucket_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 7. From local terminal (Git bash terminal for Windows) start the installation (~40min): ``` bash docker run --env-file aws_envs \ diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index eb98a43de..4878cfa4b 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -77,7 +77,6 @@ Below process describes how to install low-tier Confluence DC with "small" datas 6. Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - - Make sure that the Confluence version specified in **confluence_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ @@ -313,7 +312,6 @@ Below process describes how to install enterprise-scale Confluence DC with "larg 6. Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - - Make sure that the Confluence version specified in **confluence_version_tag** is consistent with the EBS and RDS snapshot versions. 
Additionally, ensure that corresponding version snapshot lines are uncommented. 7. From local terminal (Git bash terminal for Windows) start the installation (~40min): ``` bash docker run --pull=always --env-file aws_envs \ diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index 866e1534f..662ec7e49 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -88,7 +88,6 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc 6. Optional variables to override: - `jira_version_tag` - Jira version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - - Make sure that the Jira version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ @@ -344,7 +343,6 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat 6. Optional variables to override: - `jira_version_tag` - Jira version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - - Make sure that the Jira version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index 25080f2fe..6528bb5c4 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -90,7 +90,6 @@ Below process describes how to install low-tier Jira Service Management DC with 6. Optional variables to override: - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - - Make sure that the Jira Service Management version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ @@ -376,7 +375,6 @@ Below process describes how to install enterprise-scale Jira Service Management 6. Optional variables to override: - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - - Make sure that the Jira Service Management version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 7. 
From local terminal (Git bash terminal for Windows) start the installation (~40min): ``` bash docker run --pull=always --env-file aws_envs \ From 13fa2c32d8ab1d672016221aa8af86d150f07a06 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Wed, 15 Nov 2023 17:36:35 +0200 Subject: [PATCH 034/152] Added dcapt-snapshot file volume --- app/util/k8s/README.MD | 9 +++------ app/util/k8s/dcapt-small.tfvars | 2 +- app/util/k8s/dcapt.tfvars | 2 +- docs/dc-apps-performance-toolkit-user-guide-bamboo.md | 1 + docs/dc-apps-performance-toolkit-user-guide-bitbucket.md | 3 +++ .../dc-apps-performance-toolkit-user-guide-confluence.md | 3 +++ docs/dc-apps-performance-toolkit-user-guide-crowd.md | 2 ++ docs/dc-apps-performance-toolkit-user-guide-jira.md | 3 +++ docs/dc-apps-performance-toolkit-user-guide-jsm.md | 3 +++ 9 files changed, 20 insertions(+), 8 deletions(-) diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index a2b49cddb..06a172050 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -9,6 +9,7 @@ ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -31,6 +32,7 @@ docker run --pull=always --env-file aws_envs \ ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -68,9 +70,4 @@ docker run --pull=always --env-file aws_envs \ --entrypoint="python" \ -v "$PWD/terminate_cluster.py:/data-center-terraform/terminate_cluster.py" \ atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-cluster --aws_region $REGION -``` - -# Non default product version or aws region -File [dcapt-snapshots.json](./dcapt-snapshots.json) has all available RDS and EBS snapshots IDs for all supported product -versions and AWS regions. -Set `version_tag`, `shared_home_snapshot_id` and `db_snapshot_id` values correspondingly to product version and region. \ No newline at end of file +``` \ No newline at end of file diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 486c8d9c4..bc3ee2ecf 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -28,7 +28,7 @@ jira_license = "jira-license" confluence_license = "confluence-license" bitbucket_license = "bitbucket-license" -# Default AWS region for DCAPT snapshots. +# Default AWS region for DCAPT snapshots. Supported regions are us-east-1, us-east-2, us-west-1, us-west-2. region = "us-east-2" # List of IP ranges that are allowed to access the running applications over the World Wide Web. diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index dd8fe4818..ed695ea2d 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -28,7 +28,7 @@ bitbucket_license = "bitbucket-license" crowd_license = "crowd-license" bamboo_license = "bamboo-license" -# Default AWS region for DCAPT snapshots. +# Default AWS region for DCAPT snapshots. Supported regions are us-east-1, us-east-2, us-west-1, us-west-2. region = "us-east-2" # List of IP ranges that are allowed to access the running applications over the World Wide Web. 
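With this change, the EBS/RDS snapshot IDs that were previously pasted into the `.tfvars` files are looked up in the mounted `dcapt-snapshots.json`, keyed by product, version and AWS region. A minimal sketch of an optional pre-flight check with the AWS CLI — using placeholder snapshot IDs, since this patch does not pin any — can confirm that a chosen snapshot is actually available in the configured region:

``` bash
# Sketch only: verify that snapshot IDs looked up in dcapt-snapshots.json
# exist in the region set in dcapt.tfvars (us-east-2 by default).
# Both IDs below are placeholders, not values shipped with this change.
AWS_REGION="us-east-2"
EBS_SNAPSHOT_ID="snap-0123456789abcdef0"      # placeholder EBS snapshot ID
RDS_SNAPSHOT_ID="dcapt-product-rds-snapshot"  # placeholder RDS snapshot identifier

aws ec2 describe-snapshots --snapshot-ids "$EBS_SNAPSHOT_ID" --region "$AWS_REGION"
aws rds describe-db-snapshots --db-snapshot-identifier "$RDS_SNAPSHOT_ID" --region "$AWS_REGION"
```

Centralizing the snapshot IDs in one JSON file is also why the per-product documentation notes about keeping `*_version_tag` consistent with EBS/RDS snapshot versions are removed in the doc hunks that follow: the lookup now happens automatically during installation.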
diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index 9cc8f4bf0..ff652fff9 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -60,6 +60,7 @@ specifically for performance testing during the DC app review process. ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 6ed1aa1d4..d22189c82 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -82,6 +82,7 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase ``` bash docker run --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -232,6 +233,7 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large ``` bash docker run --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -428,6 +430,7 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 4878cfa4b..0d1f7ecd6 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -81,6 +81,7 @@ Below process describes how to install low-tier Confluence DC with "small" datas ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -316,6 +317,7 @@ Below process describes how to install enterprise-scale Confluence DC with "larg ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -503,6 +505,7 @@ To receive scalability benchmark results for two-node Confluence DC **with** app ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ 
-v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index c2372fcc7..b6de660aa 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -58,6 +58,7 @@ specifically for performance testing during the DC app review process. ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -284,6 +285,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index 662ec7e49..221a222f7 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -92,6 +92,7 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -347,6 +348,7 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -558,6 +560,7 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index 6528bb5c4..333e09fb9 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -94,6 +94,7 @@ Below process describes how to install low-tier Jira Service Management DC with ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -379,6 +380,7 @@ Below process describes how to install enterprise-scale Jira Service Management ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" 
\ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -596,6 +598,7 @@ To receive scalability benchmark results for two-node Jira Service Management DC ``` bash docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` From 6564bbd52fd69c46ffc5613a7f8739fb820c8d0e Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 23 Nov 2023 11:59:53 +0200 Subject: [PATCH 035/152] Delete hosted zone with cluster --- app/util/k8s/terminate_cluster.py | 72 +++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index e8a17cb60..4423c7906 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -48,6 +48,26 @@ def wait_for_node_group_delete(eks_client, cluster_name, node_group): logging.error(f"Node group {node_group} for cluster {cluster_name} was not deleted in {timeout} seconds.") +def wait_for_hosted_zone_delete(route53_client, hosted_zone_id): + timeout = 600 # 10 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + route53_client.get_hosted_zone(Id=hosted_zone_id) + except route53_client.exceptions.NoSuchHostedZone: + logging.info(f"Hosted zone {hosted_zone_id} was successfully deleted.") + break + logging.info(f"Hosted zone {hosted_zone_id} is still exists. " + f"Attempt {attempt}/{attempts}. Sleeping {sleep_time} seconds.") + sleep(sleep_time) + attempt += 1 + else: + logging.error(f"Hosted zone {hosted_zone_id} was not deleted in {timeout} seconds.") + + def wait_for_cluster_delete(eks_client, cluster_name): timeout = 600 # 10 min attempt = 0 @@ -89,6 +109,26 @@ def wait_for_rds_delete(rds_client, db_name): logging.error(f"RDS {db_name} was not deleted in {timeout} seconds.") +def delete_record_from_hosted_zone(route53_client, hosted_zone_id, record): + change_batch = { + 'Changes': [ + { + 'Action': 'DELETE', + 'ResourceRecordSet': record + } + ] + } + try: + route53_client.change_resource_record_sets( + HostedZoneId=hosted_zone_id, + ChangeBatch=change_batch + ) + logging.info(f"Record {record['Name']} was successfully deleted from hosted zone {hosted_zone_id}.") + except Exception as e: + logging.error(f'Unexpected error occurs, could not delete record from hosted zone {hosted_zone_id}: {e}') + + + def delete_nodegroup(aws_region, cluster_name): try: eks_client = boto3.client('eks', region_name=aws_region) @@ -133,6 +173,36 @@ def delete_cluster(aws_region, cluster_name): raise e +def delete_hosted_zone_record_if_exists(aws_region, cluster_name): + environment_name = cluster_name.replace('atlas-', '').replace('-cluster', '') + try: + route53_client = boto3.client('route53', region_name=aws_region) + existed_hosted_zones = route53_client.list_hosted_zones()["HostedZones"] + if not existed_hosted_zones: + return + for hosted_zone in existed_hosted_zones: + if environment_name in hosted_zone['Name']: + hosted_zone_to_delete = hosted_zone + records_hosted_zone_to_delete = route53_client.list_resource_record_sets( + HostedZoneId=hosted_zone['Id'])['ResourceRecordSets'] + for record in records_hosted_zone_to_delete: + if record['Type'] not in ['NS', 
'SOA']: + delete_record_from_hosted_zone(route53_client, hosted_zone['Id'], record) + route53_client.delete_hosted_zone(Id=hosted_zone_to_delete['Id']) + wait_for_hosted_zone_delete(route53_client, hosted_zone['Id']) + break + + existed_hosted_zones = route53_client.list_hosted_zones()["HostedZones"] + existed_hosted_zones_ids = [zone["Id"] for zone in existed_hosted_zones] + for hosted_zone_id in existed_hosted_zones_ids: + records_set = route53_client.list_resource_record_sets(HostedZoneId=hosted_zone_id)['ResourceRecordSets'] + for record in records_set: + if environment_name in record['Name']: + delete_record_from_hosted_zone(route53_client, hosted_zone_id, record) + except Exception as e: + logging.error(f"Unexpected error occurs: {e}") + + def delete_lb(aws_region, vpc_id): elb_client = boto3.client('elb', region_name=aws_region) try: @@ -373,6 +443,7 @@ def terminate_cluster(cluster_name, aws_region=None): # Delete the nodegroup and cluster in the specified region delete_nodegroup(aws_region, cluster_name) delete_cluster(aws_region, cluster_name) + delete_hosted_zone_record_if_exists(aws_region, cluster_name) def release_eip(aws_region, vpc_name): @@ -729,3 +800,4 @@ def main(): if __name__ == '__main__': logging.basicConfig(level=logging.INFO) main() + From f16c76abd6f6575b9959ccdb1fedfa19e285965e Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 23 Nov 2023 12:00:54 +0200 Subject: [PATCH 036/152] remove lines --- app/util/k8s/terminate_cluster.py | 1 - 1 file changed, 1 deletion(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 4423c7906..6f5434965 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -800,4 +800,3 @@ def main(): if __name__ == '__main__': logging.basicConfig(level=logging.INFO) main() - From a698b8a3021a3b7f8706cb9b2d9a57940602cf4a Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Mon, 27 Nov 2023 18:32:40 +0200 Subject: [PATCH 037/152] docs/update-jira-jsm-reindex-time --- docs/dc-apps-performance-toolkit-user-guide-jira.md | 3 +-- docs/dc-apps-performance-toolkit-user-guide-jsm.md | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index 221a222f7..1f60d4011 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -451,8 +451,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o If you are submitting a Jira app, you are required to conduct a Lucene Index timing test. This involves conducting a foreground re-index on a single-node Data Center deployment (with your app installed) and a dataset that has 1M issues. {{% note %}} -The re-index time for Jira 8.20.x is about ~30-50 minutes, while for Jira 9.4.x it can take significantly longer at around 110-130 minutes. -This increase in re-index time is due to a known issue which affects Jira 9.4.x, and you can find more information about it in this ticket: [Re-Index: Jira 9.4.x](https://jira.atlassian.com/browse/JRASERVER-74787). +The re-index time for Jira is about ~50-70 minutes. 
{{% /note %}} **Benchmark your re-index time with your app installed:** diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index 333e09fb9..356f57cd0 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -488,7 +488,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o If you are submitting a Jira Service Management app, you are required to conduct a Lucene Index timing test. This involves conducting a foreground re-index on a single-node Data Center deployment (with your app installed) and a dataset that has 1M issues. {{% note %}} -The re-index time for JSM 4.20.x is about ~30-50 minutes, while for JSM 5.4.x it can take significantly longer at around 110-130 minutes. This increase in re-index time is due to a known issue which affects JSM 5.4.x, and you can find more information about it in this ticket: [Re-Index: JSM 9.4.x](https://jira.atlassian.com/browse/JRASERVER-74787). +The re-index time for JSM is about ~35-45 minutes. {{% /note %}} From c351e11a5b653a0d84e2e06374f41fca78c8bdf0 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Mon, 27 Nov 2023 17:44:05 +0100 Subject: [PATCH 038/152] DCA-2139 resource allocation and documentation updates --- app/util/k8s/README.MD | 29 +++-- app/util/k8s/dcapt-small.tfvars | 52 ++++---- app/util/k8s/dcapt.tfvars | 114 ++++++++++-------- ...s-performance-toolkit-user-guide-bamboo.md | 9 +- ...erformance-toolkit-user-guide-bitbucket.md | 22 ++-- ...rformance-toolkit-user-guide-confluence.md | 20 +-- ...ps-performance-toolkit-user-guide-crowd.md | 14 +-- ...pps-performance-toolkit-user-guide-jira.md | 20 +-- ...apps-performance-toolkit-user-guide-jsm.md | 20 +-- 9 files changed, 164 insertions(+), 136 deletions(-) diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index 06a172050..d484fe1a4 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -1,4 +1,17 @@ # Development environment + +## Note for Windows users +For some Windows setups, Git Bash `$PWD` command does not return correct full path. +In this case $PWD needs to be changed to absolute path directory in Windows format. +E.g. 
+```bash +docker run --pull=always --env-file aws_envs \ +-v "//c//Users//user//dc-app-performance-toolkit//app//util//k8s//dcapt.tfvars:/data-center-terraform/conf.tfvars" \ +-v "//c//Users//user//dc-app-performance-toolkit//app//util//k8s//dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ +-v "//c//Users//user//dc-app-performance-toolkit//app//util//k8s///logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./install.sh -c conf.tfvars +``` + ## Create development environment * set AWS credential in [aws_envs](./aws_envs) file * set correct values in [dcapt-small.tfvars](./dcapt-small.tfvars) file: @@ -8,17 +21,17 @@ * run install development environment command: ``` bash docker run --pull=always --env-file aws_envs \ --v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./install.sh -c config.tfvars +-it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` ## Terminate development environment ``` bash docker run --pull=always --env-file aws_envs \ --v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./uninstall.sh -c config.tfvars +-it atlassianlabs/terraform ./uninstall.sh -c conf.tfvars ``` # Enterprise-scale environment @@ -31,10 +44,10 @@ docker run --pull=always --env-file aws_envs \ * run install enterprise-scale environment command: ``` bash docker run --pull=always --env-file aws_envs \ --v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./install.sh -c config.tfvars +-it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` ## Terminate enterprise-scale environment Option `-t` deletes Terraform state files for all installed environment in the same region using the same AWS account. @@ -42,9 +55,9 @@ Option `-t` deletes Terraform state files for all installed environment in the s If state files are needed, e.g. there are other running clusters for other product, do not use `-t` flag in below command. ``` bash docker run --pull=always --env-file aws_envs \ --v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars +-it atlassianlabs/terraform ./uninstall.sh -t -c conf.tfvars ``` # Collect detailed k8s logs diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index bc3ee2ecf..30fa95084 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -6,7 +6,7 @@ # See https://developer.atlassian.com/platform/marketplace/dc-apps-performance-and-scale-testing/ for more information. ################################################################################ -# Common Settings +# Configuration settings to change ################################################################################ # Unique name of your small-scale test cluster. @@ -14,20 +14,25 @@ # ! REQUIRED ! 
environment_name = "dcapt-product-small" -# Supported products: jira, confluence and bitbucket -# e.g.: products = ["confluence"] +# Supported products: jira, confluence and bitbucket. +# For JSM set product as jira. +# e.g.: products = ["jira"] # ! REQUIRED ! products = ["product-to-deploy"] # License # To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_jira_license`) and keep the below line commented out # If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. -# Please make sure valid license is used without spaces and new line symbols. +# ! IMPORTANT ! Please make sure valid license is used without spaces and new line symbols. # ! REQUIRED ! jira_license = "jira-license" confluence_license = "confluence-license" bitbucket_license = "bitbucket-license" +################################################################################ +# Common Settings +################################################################################ + # Default AWS region for DCAPT snapshots. Supported regions are us-east-1, us-east-2, us-west-1, us-west-2. region = "us-east-2" @@ -43,9 +48,6 @@ snapshots_json_file_path = "dcapt-snapshots.json" resource_tags = {Name: "dcapt-testing-small"} # Instance types that is preferred for EKS node group. -# Confluence, Jira - use default value -# Bitbucket - ["t3.2xlarge"] -# ! REQUIRED ! instance_types = ["t3.xlarge"] instance_disk_size = 100 @@ -54,7 +56,7 @@ instance_disk_size = 100 # and increase/decrease the number of nodes accordingly. This ensures there is always enough resources for the workloads # and removes the need to change this value. min_cluster_capacity = 1 -max_cluster_capacity = 1 +max_cluster_capacity = 2 # By default, Ingress controller listens on 443 and 80. You can enable only http port 80 by # uncommenting the below line, which will disable port 443. This results in fewer inbound rules in Nginx controller security group. @@ -80,8 +82,16 @@ max_cluster_capacity = 1 # Jira jira_image_repository = "atlassian/jira-software" # JSM +# ! REQUIRED for JSM ! # jira_image_repository = "atlassian/jira-servicemanagement" +# Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions +# Jira version. +jira_version_tag = "9.4.10" +# JSM version +# ! REQUIRED for JSM ! +# jira_version_tag = "5.4.10" + # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large jira_dataset_size = "small" @@ -90,12 +100,6 @@ jira_dataset_size = "small" # installed and configured. jira_replica_count = 1 -# Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -# Jira version. -jira_version_tag = "9.4.10" -# JSM version -# jira_version_tag = "5.4.10" - # Helm chart version of Jira # jira_helm_chart_version = "" @@ -119,8 +123,8 @@ jira_nfs_limits_memory = "1.5Gi" # Storage # initial volume size of local/shared home EBS. -jira_local_home_size = "10Gi" -jira_shared_home_size = "10Gi" +jira_local_home_size = "20Gi" +jira_shared_home_size = "20Gi" # RDS instance configurable attributes. Note that the allowed value of allocated storage and iops may vary based on instance type. # You may want to adjust these values according to your needs. @@ -150,6 +154,9 @@ jira_db_master_password = "Password1!" 
# Confluence Settings ################################################################################ +# Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions +confluence_version_tag = "8.5.1" + # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large confluence_dataset_size = "small" @@ -158,9 +165,6 @@ confluence_dataset_size = "small" # installed and configured. confluence_replica_count = 1 -# Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -confluence_version_tag = "8.5.1" - # Helm chart version of Confluence #confluence_helm_chart_version = "" @@ -184,7 +188,7 @@ synchrony_stack_size = "2048k" # Storage confluence_local_home_size = "20Gi" -confluence_shared_home_size = "10Gi" +confluence_shared_home_size = "20Gi" # Confluence NFS instance resource configuration confluence_nfs_requests_cpu = "500m" @@ -223,6 +227,9 @@ confluence_collaborative_editing_enabled = true # Bitbucket Settings ################################################################################ +# Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions +bitbucket_version_tag = "8.9.5" + # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large bitbucket_dataset_size = "small" @@ -231,9 +238,6 @@ bitbucket_dataset_size = "small" # installed and configured. bitbucket_replica_count = 1 -# Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "7.21.16" - # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" @@ -270,7 +274,7 @@ bitbucket_min_heap = "1024m" bitbucket_max_heap = "2048m" # Storage -bitbucket_local_home_size = "10Gi" +bitbucket_local_home_size = "20Gi" bitbucket_shared_home_size = "20Gi" # Bitbucket NFS instance resource configuration diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index ed695ea2d..35f6bd466 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -4,7 +4,7 @@ # See https://developer.atlassian.com/platform/marketplace/dc-apps-performance-and-scale-testing/ for more information. ################################################################################ -# Common Settings +# Configuration settings to change ################################################################################ # Unique name of your enterprise-scale test cluster. @@ -12,15 +12,16 @@ # ! REQUIRED ! environment_name = "dcapt-product" -# Supported products: jira, confluence, bitbucket and bamboo. -# e.g.: products = ["confluence"] +# Supported products: jira, confluence, bitbucket, crowd and bamboo. +# For JSM set product as jira. +# e.g.: products = ["jira"] # ! REQUIRED ! products = ["product-to-deploy"] # License # To avoid storing license in a plain text file, we recommend storing it in an environment variable prefixed with `TF_VAR_` (i.e. `TF_VAR_jira_license`) and keep the below line commented out # If storing license as plain-text is not a concern for this environment, feel free to uncomment the following line and supply the license here. -# Please make sure valid license is used without spaces and new line symbols. +# ! IMPORTANT ! Please make sure valid license is used without spaces and new line symbols. # ! REQUIRED ! 
jira_license = "jira-license" confluence_license = "confluence-license" @@ -28,6 +29,19 @@ bitbucket_license = "bitbucket-license" crowd_license = "crowd-license" bamboo_license = "bamboo-license" +# Replica count. +# Number of product application nodes. +# Note: For initial installation this value needs to be set to 1 and it can be changed only after product is fully +# installed and configured. +jira_replica_count = 1 +confluence_replica_count = 1 +bitbucket_replica_count = 1 +crowd_replica_count = 1 + +################################################################################ +# Common Settings +################################################################################ + # Default AWS region for DCAPT snapshots. Supported regions are us-east-1, us-east-2, us-west-1, us-west-2. region = "us-east-2" @@ -43,19 +57,15 @@ snapshots_json_file_path = "dcapt-snapshots.json" resource_tags = {Name: "dcapt-testing"} # Instance types that is preferred for EKS node group. -# Confluence, Bamboo, Jira - use default value -# Bitbucket - ["m5.4xlarge"] -# Crowd - ["m5.xlarge"] -# ! REQUIRED ! instance_types = ["m5.2xlarge"] -instance_disk_size = 100 +instance_disk_size = 200 # Minimum and maximum size of the EKS cluster. # Cluster-autoscaler is installed in the EKS cluster that will manage the requested capacity # and increase/decrease the number of nodes accordingly. This ensures there is always enough resources for the workloads # and removes the need to change this value. min_cluster_capacity = 1 -max_cluster_capacity = 4 +max_cluster_capacity = 6 # By default, Ingress controller listens on 443 and 80. You can enable only http port 80 by # uncommenting the below line, which will disable port 443. This results in fewer inbound rules in Nginx controller security group. @@ -69,6 +79,17 @@ max_cluster_capacity = 4 # #domain = "" +################################################################################ +# Execution Environment Settings +################################################################################ +# Create a docker-in-docker privileged container as execution environment pod + +start_test_deployment = "true" +test_deployment_cpu_request = "3" +test_deployment_cpu_limit = "4" +test_deployment_mem_request = "6Gi" +test_deployment_mem_limit = "6Gi" + ################################################################################ # Jira/JSM Settings ################################################################################ @@ -81,23 +102,21 @@ max_cluster_capacity = 4 # Jira jira_image_repository = "atlassian/jira-software" -# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large -jira_dataset_size = "large" - # JSM +# ! REQUIRED for JSM ! # jira_image_repository = "atlassian/jira-servicemanagement" -# Number of Jira/JSM application nodes -# Note: For initial installation this value needs to be set to 1 and it can be changed only after Jira is fully -# installed and configured. -jira_replica_count = 1 - # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions # Jira version jira_version_tag = "9.4.10" + # JSM version +# ! REQUIRED for JSM ! # jira_version_tag = "5.4.10" +# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large. 
+jira_dataset_size = "large" + # Helm chart version of Jira # jira_helm_chart_version = "" @@ -108,15 +127,15 @@ jira_installation_timeout = 25 # Jira/JSM instance resource configuration jira_cpu = "6" -jira_mem = "24Gi" +jira_mem = "20Gi" jira_min_heap = "12288m" jira_max_heap = "12288m" jira_reserved_code_cache = "2048m" # Storage # initial volume size of local/shared home EBS. -jira_local_home_size = "100Gi" -jira_shared_home_size = "100Gi" +jira_local_home_size = "200Gi" +jira_shared_home_size = "200Gi" # RDS instance configurable attributes. Note that the allowed value of allocated storage and iops may vary based on instance type. # You may want to adjust these values according to your needs. @@ -146,17 +165,12 @@ jira_db_master_password = "Password1!" # Confluence Settings ################################################################################ -# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large -confluence_dataset_size = "large" - -# Number of Confluence application nodes -# Note: For initial installation this value needs to be set to 1 and it can be changed only after Confluence is fully -# installed and configured. -confluence_replica_count = 1 - # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions confluence_version_tag = "8.5.1" +# Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large +confluence_dataset_size = "large" + # Helm chart version of Confluence # confluence_helm_chart_version = "" @@ -166,21 +180,21 @@ confluence_version_tag = "8.5.1" confluence_installation_timeout = 30 # Confluence instance resource configuration -confluence_cpu = "4" +confluence_cpu = "6" confluence_mem = "20Gi" confluence_min_heap = "12288m" confluence_max_heap = "12288m" # Synchrony instance resource configuration -synchrony_cpu = "2" -synchrony_mem = "2.5Gi" +synchrony_cpu = "1" +synchrony_mem = "3Gi" synchrony_min_heap = "1024m" synchrony_max_heap = "2048m" synchrony_stack_size = "2048k" # Storage confluence_local_home_size = "200Gi" -confluence_shared_home_size = "100Gi" +confluence_shared_home_size = "200Gi" # RDS instance configurable attributes. Note that the allowed value of allocated storage and iops may vary based on instance type. # You may want to adjust these values according to your needs. @@ -216,17 +230,12 @@ confluence_collaborative_editing_enabled = true # Bitbucket Settings ################################################################################ +# Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions +bitbucket_version_tag = "8.9.5" + # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large bitbucket_dataset_size = "large" -# Number of Bitbucket application nodes -# Note: For initial installation this value needs to be set to 1 and it can be changed only after Bitbucket is fully -# installed and configured. -bitbucket_replica_count = 1 - -# Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "7.21.16" - # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" @@ -304,11 +313,6 @@ bitbucket_db_master_password = "Password1!" 
# Crowd Settings ################################################################################ -# Number of Crowd application nodes -# Note: For initial installation this value needs to be set to 1 and it can be changed only after Crowd is fully -# installed and configured. -crowd_replica_count = 1 - # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions crowd_version_tag = "5.1.4" @@ -327,8 +331,8 @@ crowd_min_heap = "2048m" crowd_max_heap = "2048m" # Storage -crowd_local_home_size = "15Gi" -crowd_shared_home_size = "15Gi" +crowd_local_home_size = "20Gi" +crowd_shared_home_size = "20Gi" # Crowd NFS instance resource configuration crowd_nfs_requests_cpu = "1" @@ -414,8 +418,8 @@ bamboo_min_heap = "2048m" bamboo_max_heap = "4096m" # Bamboo Agent instance resource configuration -bamboo_agent_cpu = "200m" -bamboo_agent_mem = "700m" +bamboo_agent_cpu = "250m" +bamboo_agent_mem = "1000m" # Storage bamboo_local_home_size = "200Gi" @@ -456,10 +460,18 @@ bamboo_dataset_url = "https://centaurus-datasets.s3.amazonaws.com/bamboo/dcapt-b # to kube-monitoring namespace. Defaults to false. # monitoring_enabled = true -# Create Grafana service of LoadBalancer type. Defaults to false. To restric access to LB URL +# Create Grafana service of LoadBalancer type. Defaults to false. To restrict access to LB URL # the list of CIRDs from whitelist_cidr will be automatically applied. + # monitoring_grafana_expose_lb = true +# Command to select cluster: +# export ENVIRONMENT_NAME=your_environment_name +# aws eks update-kubeconfig --region us-east-2 --name atlas-$ENVIRONMENT_NAME-cluster + +# Command to get grafana ulr: kubectl get svc -n kube-monitoring | grep grafana +# Default grafana creds: admin/prom-operator + # Prometheus Persistent Volume Claim size. Defaults to 10Gi. # Out of the box EKS cluster is created with gp2 storage class which does not allow volume expansion, # i.e. if you expect a high volume of metrics or metrics with high cardinality it is recommended diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index ff652fff9..f4e2410e7 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-10-03" +date: "2023-11-27" --- # Data Center App Performance Toolkit User Guide For Bamboo @@ -56,13 +56,14 @@ specifically for performance testing during the DC app review process. Use `BX02-9YO1-IN86-LO5G` Server ID for generation. {{% /note %}} -6. From local terminal (Git bash terminal for Windows) start the installation (~40min): +6. From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash user) +7. ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bamboo`. 8. 
Wait for all remote agents to be started and connected. It can take up to 10 minutes. Agents can be checked in `Settings` > `Agents`. diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index d22189c82..75c07187f 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-10-03" +date: "2023-11-27" --- # Data Center App Performance Toolkit User Guide For Bitbucket @@ -69,7 +69,6 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase - `products` - `bitbucket` - `bitbucket_license` - one-liner of valid bitbucket license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** - - `instance_types` - `["t3.2xlarge"]` {{% note %}} New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). @@ -78,13 +77,13 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase 6. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): +7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --env-file aws_envs \ - -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. @@ -226,16 +225,15 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large - `products` - `bitbucket` - `bitbucket_license` - one-liner of valid bitbucket license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** - - `instance_types` - `["m5.4xlarge"]` 6. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal (Git bash terminal for Windows) start the installation (~40min): +7. 
From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. @@ -426,13 +424,13 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `bitbucket_replica_count` value to `2`. -3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): +3. From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 0d1f7ecd6..36ad0572a 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-10-03" +date: "2023-11-27" --- # Data Center App Performance Toolkit User Guide For Confluence @@ -77,13 +77,13 @@ Below process describes how to install low-tier Confluence DC with "small" datas 6. Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): +7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. 
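If the product URL printed by `install.sh` is lost, it can usually be recovered from the cluster itself. A minimal sketch, reusing the cluster-selection commands quoted elsewhere in this change set and assuming the default `atlassian` namespace of the Helm deployment (an assumption, not something this patch sets):

``` bash
# Sketch only: point kubectl at the newly created cluster and inspect
# the deployed pods/ingress that back the product URL from the console output.
export ENVIRONMENT_NAME=your_environment_name
aws eks update-kubeconfig --region us-east-2 --name atlas-$ENVIRONMENT_NAME-cluster

# "atlassian" is assumed to be the namespace the products are installed into.
kubectl get pods -n atlassian
kubectl get ingress -n atlassian
```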
@@ -313,13 +313,13 @@ Below process describes how to install enterprise-scale Confluence DC with "larg 6. Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal (Git bash terminal for Windows) start the installation (~40min): +7. From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. @@ -501,13 +501,13 @@ To receive scalability benchmark results for two-node Confluence DC **with** app 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `confluence_replica_count` value to `2`. -3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): +3. From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index b6de660aa..28b5c6da5 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-10-03" +date: "2023-11-27" --- # Data Center App Performance Toolkit User Guide For Crowd @@ -54,13 +54,13 @@ specifically for performance testing during the DC app review process. Use `BX02-9YO1-IN86-LO5G` Server ID for generation. {{% /note %}} -6. From local terminal (Git bash terminal for Windows) start the installation (~40min): +6. 
From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`. @@ -281,13 +281,13 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `crowd_replica_count` value to `2`. -3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): +3. From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index 221a222f7..32c40c14a 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-10-03" +date: "2023-11-27" --- # Data Center App Performance Toolkit User Guide For Jira @@ -88,13 +88,13 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc 6. Optional variables to override: - `jira_version_tag` - Jira version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): +7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -344,13 +344,13 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat 6. Optional variables to override: - `jira_version_tag` - Jira version to deploy. 
Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): +7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -556,13 +556,13 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. -3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): +3. From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index 333e09fb9..ed6d650f2 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-10-03" +date: "2023-11-27" --- # Data Center App Performance Toolkit User Guide For Jira Service Management @@ -90,13 +90,13 @@ Below process describes how to install low-tier Jira Service Management DC with 6. Optional variables to override: - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal (Git bash terminal for Windows) start the installation (~20 min): +7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. 
Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -376,13 +376,13 @@ Below process describes how to install enterprise-scale Jira Service Management 6. Optional variables to override: - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal (Git bash terminal for Windows) start the installation (~40min): +7. From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -594,13 +594,13 @@ To receive scalability benchmark results for two-node Jira Service Management DC 1. Navigate to `dc-app-perfrormance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. -3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): +3. From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c config.tfvars + -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. 5. 
Run toolkit with docker from the execution environment instance: From bc61415f7777c5894f41e4a4ba0b8e7c6642ef27 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Fri, 1 Dec 2023 13:15:43 +0100 Subject: [PATCH 039/152] DCA-2108 cleanup old sh scripts --- app/util/bitbucket/populate_db.sh | 287 --------------- app/util/bitbucket/upload_attachments.sh | 120 ------- app/util/confluence/index-snapshot.sh | 74 ---- app/util/confluence/index-sync.sh | 43 --- .../confluence/index-wait-till-finished.sh | 64 ---- app/util/confluence/populate_db.sh | 249 ------------- app/util/confluence/upload_attachments.sh | 124 ------- app/util/crowd/populate_db.sh | 204 ----------- app/util/jira/populate_db.sh | 327 ------------------ app/util/jira/upload_attachments.sh | 165 --------- app/util/jsm/README.md | 32 -- 11 files changed, 1689 deletions(-) delete mode 100644 app/util/bitbucket/populate_db.sh delete mode 100644 app/util/bitbucket/upload_attachments.sh delete mode 100644 app/util/confluence/index-snapshot.sh delete mode 100644 app/util/confluence/index-sync.sh delete mode 100644 app/util/confluence/index-wait-till-finished.sh delete mode 100644 app/util/confluence/populate_db.sh delete mode 100644 app/util/confluence/upload_attachments.sh delete mode 100644 app/util/crowd/populate_db.sh delete mode 100644 app/util/jira/populate_db.sh delete mode 100644 app/util/jira/upload_attachments.sh delete mode 100644 app/util/jsm/README.md diff --git a/app/util/bitbucket/populate_db.sh b/app/util/bitbucket/populate_db.sh deleted file mode 100644 index c16552e19..000000000 --- a/app/util/bitbucket/populate_db.sh +++ /dev/null @@ -1,287 +0,0 @@ -#!/bin/bash - -################### Check if NFS exists ################### -pgrep nfsd > /dev/null && echo "NFS found" || { echo NFS process was not found. This script is intended to run only on the Bitbucket NFS Server machine. && exit 1; } - -# Read command line arguments -while [[ "$#" -gt 0 ]]; do case $1 in - --small) small=1 ;; - --custom) custom=1 ;; - --force) - if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then - force=1 - version=${2} - shift - else - force=1 - fi - ;; - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -################### Variables section ################### -# Command to install psql client for Amazon Linux 2. -# In case of different distributive, please adjust accordingly or install manually. -INSTALL_PSQL_CMD="amazon-linux-extras install -y postgresql11" - -# DB config file location (dbconfig.xml) -DB_CONFIG="/media/atl/bitbucket/shared/bitbucket.properties" - -# Depending on BITBUCKET installation directory -BITBUCKET_VERSION_FILE="/media/atl/bitbucket/shared/bitbucket.version" - -# DB admin user name, password and DB name -BITBUCKET_DB_NAME="bitbucket" -BITBUCKET_DB_USER="postgres" -BITBUCKET_DB_PASS="Password1!" - -# Bitbucket DC has auto PRs decline feature enabled by default from 7.7.X version -BITBUCKET_AUTO_DECLINE_VERSION="7.7.0" - -# BITBUCKET version variables -SUPPORTED_BITBUCKET_VERSIONS=(7.17.15 7.21.10 8.8.2) - -BITBUCKET_VERSION=$(sudo su bitbucket -c "cat ${BITBUCKET_VERSION_FILE}") -if [[ -z "$BITBUCKET_VERSION" ]]; then - echo The $BITBUCKET_VERSION_FILE file does not exists or emtpy. Please check if BITBUCKET_VERSION_FILE variable \ - has a valid file path of the Bitbucket version file or set your Cluster BITBUCKET_VERSION explicitly. 
- exit 1 -fi -echo "Bitbucket version: ${BITBUCKET_VERSION}" - -# Datasets AWS bucket and db dump name - -DATASETS_SIZE="large" -if [[ ${small} == 1 ]]; then - DATASETS_SIZE="small" -fi -DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/bitbucket" -DB_DUMP_NAME="db.dump" -DB_DUMP_URL="${DATASETS_AWS_BUCKET}/${BITBUCKET_VERSION}/${DATASETS_SIZE}/${DB_DUMP_NAME}" - -################### End of variables section ################### - -# Custom version check -if [[ ${custom} == 1 ]]; then - DB_DUMP_URL="${DATASETS_AWS_BUCKET}/$BITBUCKET_VERSION/${DATASETS_SIZE}/${DB_DUMP_NAME}" - if curl --output /dev/null --silent --head --fail "$DB_DUMP_URL"; then - echo "Custom version $BITBUCKET_VERSION dataset URL found: ${DB_DUMP_URL}" - else - echo "Error: there is no dataset for version $BITBUCKET_VERSION" - exit 1 - fi -# Check if Bitbucket version is supported -elif [[ ! "${SUPPORTED_BITBUCKET_VERSIONS[*]}" =~ ${BITBUCKET_VERSION} ]]; then - echo "Bitbucket Version: ${BITBUCKET_VERSION} is not officially supported by Data Center App Performance Toolkit." - echo "Supported Bitbucket Versions: ${SUPPORTED_BITBUCKET_VERSIONS[*]}" - echo "If you want to force apply an existing datasets to your Bitbucket, use --force flag with version of dataset you want to apply:" - echo "e.g. ./populate_db.sh --force 6.10.0" - echo "!!! Warning !!! This may break your Bitbucket instance. Also, note that downgrade is not supported by Bitbucket." - # Check if --force flag is passed into command - if [[ ${force} == 1 ]]; then - # Check if version was specified after --force flag - if [[ -z ${version} ]]; then - echo "Error: --force flag requires version after it." - echo "Specify one of these versions: ${SUPPORTED_BITBUCKET_VERSIONS[*]}" - exit 1 - fi - # Check if passed Bitbucket version is in list of supported - if [[ " ${SUPPORTED_BITBUCKET_VERSIONS[@]} " =~ " ${version} " ]]; then - DB_DUMP_URL="${DATASETS_AWS_BUCKET}/${version}/${DATASETS_SIZE}/${DB_DUMP_NAME}" - echo "Force mode. Dataset URL: ${DB_DUMP_URL}" - else - LAST_DATASET_VERSION=${SUPPORTED_BITBUCKET_VERSIONS[${#SUPPORTED_BITBUCKET_VERSIONS[@]}-1]} - DB_DUMP_URL="${DATASETS_AWS_BUCKET}/$LAST_DATASET_VERSION/${DATASETS_SIZE}/${DB_DUMP_NAME}" - echo "Specific dataset version was not specified after --force flag, using the last available: ${LAST_DATASET_VERSION}" - echo "Dataset URL: ${DB_DUMP_URL}" - fi - else - # No force flag - exit 1 - fi -fi - -echo "!!! Warning !!!" -echo # move to a new line -echo "This script restores Postgres DB from SQL DB dump for Bitbucket DC created with AWS Quickstart defaults." -echo "You can review or modify default variables in 'Variables section' of this script." -echo # move to a new line -echo "Variables:" -echo "DB_CONFIG=${DB_CONFIG}" -echo "BITBUCKET_DB_NAME=${BITBUCKET_DB_NAME}" -echo "BITBUCKET_DB_USER=${BITBUCKET_DB_USER}" -echo "BITBUCKET_DB_PASS=${BITBUCKET_DB_PASS}" -echo "DB_DUMP_URL=${DB_DUMP_URL}" -echo # move to a new line -read -p "I confirm that variables are correct and want to proceed (y/n)? " -n 1 -r -echo # move to a new line -if [[ ! $REPLY =~ ^[Yy]$ ]]; then - echo "Script was canceled." - exit 1 -fi - -echo "Step1: Check Postgres Client" -if ! [[ -x "$(command -v psql)" ]]; then - echo "Install Postgres client" - sudo su -c "${INSTALL_PSQL_CMD}" - if [[ $? -ne 0 ]]; then - echo "Postgres Client was NOT installed." - echo "Check correctness of install command or install Postgres client manually." 
- echo "INSTALL_PSQL_CMD=${INSTALL_PSQL_CMD}" - exit 1 - fi -else - echo "Postgres client is already installed" -fi -echo "Current PostgreSQL version is $(psql -V)" - -echo "Step2: Get DB Host, check DB connection and permissions" -DB_HOST=$(sudo su -c "cat ${DB_CONFIG} | grep 'jdbc:postgresql' | cut -d'/' -f3 | cut -d':' -f1") -if [[ -z ${DB_HOST} ]]; then - echo "DataBase URL was not found in ${DB_CONFIG}" - exit 1 -fi -echo "DB_HOST=${DB_HOST}" - -echo "Check database permissions for user ${BITBUCKET_DB_USER}" -PGPASSWORD=${BITBUCKET_DB_PASS} createdb -U ${BITBUCKET_DB_USER} -h ${DB_HOST} -T template0 -E "UNICODE" -l "C" TEST -if [[ $? -ne 0 ]]; then - echo "User ${BITBUCKET_DB_USER} doesn't have permission to create database." - exit 1 -else - PGPASSWORD=${BITBUCKET_DB_PASS} dropdb -U ${BITBUCKET_DB_USER} -h ${DB_HOST} TEST -fi - -PGPASSWORD=${BITBUCKET_DB_PASS} pg_isready -U ${BITBUCKET_DB_USER} -h ${DB_HOST} -if [[ $? -ne 0 ]]; then - echo "Connection to DB failed. Please check correctness of following variables:" - echo "BITBUCKET_DB_NAME=${BITBUCKET_DB_NAME}" - echo "BITBUCKET_DB_USER=${BITBUCKET_DB_USER}" - echo "BITBUCKET_DB_PASS=${BITBUCKET_DB_PASS}" - echo "DB_HOST=${DB_HOST}" - exit 1 -fi - -echo "Step3: Write 'instance.url' property to file" -BITBUCKET_BASE_URL_FILE="base_url" -if [[ -s ${BITBUCKET_BASE_URL_FILE} ]]; then - echo "File ${BITBUCKET_BASE_URL_FILE} was found. Base url: $(cat ${BITBUCKET_BASE_URL_FILE})." -else - PGPASSWORD=${BITBUCKET_DB_PASS} psql -h ${DB_HOST} -d ${BITBUCKET_DB_NAME} -U ${BITBUCKET_DB_USER} -Atc \ - "select prop_value from app_property where prop_key='instance.url';" > ${BITBUCKET_BASE_URL_FILE} - if [[ ! -s ${BITBUCKET_BASE_URL_FILE} ]]; then - echo "Failed to get Base URL value from database. Check DB configuration variables." - exit 1 - fi - echo "$(cat ${BITBUCKET_BASE_URL_FILE}) was written to the ${BITBUCKET_BASE_URL_FILE} file." -fi - -echo "Step4: Write license to file" -BITBUCKET_LICENSE_FILE="license" -if [[ -s ${BITBUCKET_LICENSE_FILE} ]]; then - echo "File ${BITBUCKET_LICENSE_FILE} was found. License: $(cat ${BITBUCKET_LICENSE_FILE})." -else - PGPASSWORD=${BITBUCKET_DB_PASS} psql -h ${DB_HOST} -d ${BITBUCKET_DB_NAME} -U ${BITBUCKET_DB_USER} -tAc \ - "select prop_value from app_property where prop_key = 'license';" | sed "s/\r//g" > ${BITBUCKET_LICENSE_FILE} - if [[ ! -s ${BITBUCKET_LICENSE_FILE} ]]; then - echo "Failed to get bitbucket license from database. Check DB configuration variables." - exit 1 - fi - echo "$(cat ${BITBUCKET_LICENSE_FILE}) was written to the ${BITBUCKET_LICENSE_FILE} file." -fi - -echo "Step5: Download DB dump" -DUMP_DIR='/media/atl/bitbucket/shared' -if [[ $? -ne 0 ]]; then - echo "Directory ${DUMP_DIR} does not exist" - exit 1 -fi -sudo su -c "rm -rf ${DUMP_DIR}/${DB_DUMP_NAME}" -ARTIFACT_SIZE_BYTES=$(curl -sI ${DB_DUMP_URL} | grep "Content-Length" | awk {'print $2'} | tr -d '[:space:]') -ARTIFACT_SIZE_GB=$((${ARTIFACT_SIZE_BYTES}/1024/1024/1024)) -FREE_SPACE_KB=$(sudo su bitbucket -c "df -k --output=avail $DUMP_DIR | tail -n1") -FREE_SPACE_GB=$((${FREE_SPACE_KB}/1024/1024)) -REQUIRED_SPACE_GB=$((5 + ${ARTIFACT_SIZE_GB})) -if [[ ${FREE_SPACE_GB} -lt ${REQUIRED_SPACE_GB} ]]; then - echo "Not enough free space for download." - echo "Free space: ${FREE_SPACE_GB} GB" - echo "Required space: ${REQUIRED_SPACE_GB} GB" - exit 1 -fi; -# use computer style progress bar -sudo su bitbucket -c "time wget --progress=dot:giga ${DB_DUMP_URL} -P ${DUMP_DIR}" -if [[ $? -ne 0 ]]; then - echo "DB dump download failed! 
Pls check available disk space." - exit 1 -fi - -echo "Step6: SQL Restore" -echo "Check DB connection" -echo "Drop DB" -sudo su -c "PGPASSWORD=${BITBUCKET_DB_PASS} dropdb -U ${BITBUCKET_DB_USER} -h ${DB_HOST} ${BITBUCKET_DB_NAME}" -if [[ $? -ne 0 ]]; then - echo "Drop DB failed. Please make sure you stop Bitbucket." - exit 1 -fi -sleep 5 -echo "Create DB" -sudo su -c "PGPASSWORD=${BITBUCKET_DB_PASS} createdb -U ${BITBUCKET_DB_USER} -h ${DB_HOST} -T template0 ${BITBUCKET_DB_NAME}" -if [[ $? -ne 0 ]]; then - echo "Create DB failed." - exit 1 -fi -sleep 5 -echo "PG Restore" -sudo su -c "time PGPASSWORD=${BITBUCKET_DB_PASS} pg_restore --schema=public -v -j 8 -U ${BITBUCKET_DB_USER} -h ${DB_HOST} -d ${BITBUCKET_DB_NAME} ${DUMP_DIR}/${DB_DUMP_NAME}" -if [[ $? -ne 0 ]]; then - echo "SQL Restore failed!" - exit 1 -fi -sudo su -c "rm -rf ${DUMP_DIR}/${DB_DUMP_NAME}" - -echo "Step7: Update 'instance.url' property in database" -if [[ -s ${BITBUCKET_BASE_URL_FILE} ]]; then - BASE_URL=$(cat ${BITBUCKET_BASE_URL_FILE}) - if [[ $(PGPASSWORD=${BITBUCKET_DB_PASS} psql -h ${DB_HOST} -d ${BITBUCKET_DB_NAME} -U ${BITBUCKET_DB_USER} -c \ - "UPDATE app_property SET prop_value = '${BASE_URL}' WHERE prop_key = 'instance.url';") != "UPDATE 1" ]]; then - echo "Couldn't update database 'instance.url' property. Please check your database connection." - exit 1 - else - echo "The database 'instance.url' property was updated with ${BASE_URL}" - fi -else - echo "The ${BITBUCKET_BASE_URL_FILE} file doesn't exist or empty. Please check file existence or 'instance.url' property in the database." - exit 1 -fi - -echo "Step8: Update license property in database" -if [[ -s ${BITBUCKET_LICENSE_FILE} ]]; then - LICENSE=$(cat ${BITBUCKET_LICENSE_FILE}) - if [[ $(PGPASSWORD=${BITBUCKET_DB_PASS} psql -h ${DB_HOST} -d ${BITBUCKET_DB_NAME} -U ${BITBUCKET_DB_USER} -c \ - "update app_property set prop_value = '${LICENSE}' where prop_key = 'license';") != "UPDATE 1" ]]; then - echo "Couldn't update database bitbucket license property. Please check your database connection." - exit 1 - else - echo "The database bitbucket license property was updated with ${LICENSE}" - fi -else - echo "The ${BITBUCKET_LICENSE_FILE} file doesn't exist or empty. Please check file existence or 'bitbucket license' property in the database." - exit 1 -fi - -echo "Step9: Remove ${BITBUCKET_BASE_URL_FILE} file" -sudo rm ${BITBUCKET_BASE_URL_FILE} - -echo "Step10: Remove ${BITBUCKET_LICENSE_FILE} file" -sudo rm ${BITBUCKET_LICENSE_FILE} - -echo "DCAPT util script execution is finished successfully." -echo # move to a new line - -echo "Important: new admin user credentials are admin/admin" -echo "Important: do not start Bitbucket until attachments restore is finished" - -if [ "$(printf '%s\n' "$BITBUCKET_AUTO_DECLINE_VERSION" "$BITBUCKET_VERSION" | sort -V | head -n1)" = "$BITBUCKET_AUTO_DECLINE_VERSION" ]; then - echo "Bitbucket ${BITBUCKET_VERSION} version has auto PRs decline feature enabled and it will be disabled in bitbucket.properties file." - echo "feature.pull.request.auto.decline=false" | sudo tee -a ${DB_CONFIG} -fi diff --git a/app/util/bitbucket/upload_attachments.sh b/app/util/bitbucket/upload_attachments.sh deleted file mode 100644 index 0a905ac18..000000000 --- a/app/util/bitbucket/upload_attachments.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/bash - -################### Check if NFS exists ################### -pgrep nfsd > /dev/null && echo "NFS found" || { echo NFS process was not found. 
This script is intended to run only on the Bitbucket NFS Server machine. && exit 1; } - -# Read command line arguments -while [[ "$#" -gt 0 ]]; do case $1 in - --small) small=1 ;; - --custom) custom=1 ;; - --force) - if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then - force=1 - version=${2} - shift - else - force=1 - fi - ;; - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -################### Variables section ################### -# Bitbucket version variables -BITBUCKET_VERSION_FILE="/media/atl/bitbucket/shared/bitbucket.version" -SUPPORTED_BITBUCKET_VERSIONS=(7.17.15 7.21.10 8.8.2) - -BITBUCKET_VERSION=$(sudo su bitbucket -c "cat ${BITBUCKET_VERSION_FILE}") -if [[ -z "$BITBUCKET_VERSION" ]]; then - echo The $BITBUCKET_VERSION_FILE file does not exists or emtpy. Please check if BITBUCKET_VERSION_FILE variable \ - has a valid file path of the Bitbucket version file or set your Cluster BITBUCKET_VERSION explicitly. - exit 1 -fi -echo "Bitbucket Version: ${BITBUCKET_VERSION}" - -DATASETS_SIZE="large" -if [[ ${small} == 1 ]]; then - DATASETS_SIZE="small" -fi - -DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/bitbucket" -ATTACHMENTS_TAR="attachments.tar.gz" -ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/${BITBUCKET_VERSION}/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" -NFS_DIR="/media/atl/bitbucket/shared" -ATTACHMENT_DIR_DATA="data" -################### End of variables section ################### - -# Custom version check -if [[ ${custom} == 1 ]]; then - ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/$BITBUCKET_VERSION/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" - if curl --output /dev/null --silent --head --fail "$ATTACHMENTS_TAR_URL"; then - echo "Custom version $BITBUCKET_VERSION dataset URL found: ${ATTACHMENTS_TAR_URL}" - else - echo "Error: there is no dataset for version $BITBUCKET_VERSION" - exit 1 - fi -# Check if Bitbucket version is supported -elif [[ ! "${SUPPORTED_BITBUCKET_VERSIONS[*]}" =~ ${BITBUCKET_VERSION} ]]; then - echo "Bitbucket Version: ${BITBUCKET_VERSION} is not officially supported by Data Center App Peformance Toolkit." - echo "Supported Bitbucket Versions: ${SUPPORTED_BITBUCKET_VERSIONS[*]}" - echo "If you want to force apply an existing datasets to your BITBUCKET, use --force flag with version of dataset you want to apply:" - echo "e.g. ./upload_attachments --force 6.10.0" - echo "!!! Warning !!! This may broke your Bitbucket instance." - # Check if --force flag is passed into command - if [[ ${force} == 1 ]]; then - # Check if passed Bitbucket version is in list of supported - if [[ "${SUPPORTED_BITBUCKET_VERSIONS[*]}" =~ ${version} ]]; then - ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/${version}/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" - echo "Force mode. Dataset URL: ${ATTACHMENTS_TAR_URL}" - else - LAST_DATASET_VERSION=${SUPPORTED_BITBUCKET_VERSIONS[${#SUPPORTED_BITBUCKET_VERSIONS[@]}-1]} - ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/$LAST_DATASET_VERSION/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" - echo "Specific dataset version was not specified after --force flag, using the last available: ${LAST_DATASET_VERSION}" - echo "Dataset URL: ${ATTACHMENTS_TAR_URL}" - fi - else - # No force flag - exit 1 - fi -fi - -echo "!!! Warning !!!" -echo # move to a new line -echo "This script restores attachments into Bitbucket DC created with AWS Quickstart defaults." -echo "You can review or modify default variables in 'Variables section' of this script." 
-echo # move to a new line -echo "Variables:" -echo "NFS_DIR=${NFS_DIR}" -echo "ATTACHMENTS_TAR_URL=${ATTACHMENTS_TAR_URL}" -echo # move to a new line -read -p "I confirm that variables are correct and want to proceed (y/n)? " -n 1 -r -echo # move to a new line -if [[ ! $REPLY =~ ^[Yy]$ ]] -then - echo "Script was canceled." - exit 1 -fi - - -echo "Step1: Download and untar attachments" -sudo su -c "rm -rf ${ATTACHMENTS_TAR}" -ARTIFACT_SIZE_BYTES=$(curl -sI ${ATTACHMENTS_TAR_URL} | grep "Content-Length" | awk {'print $2'} | tr -d '[:space:]') -ARTIFACT_SIZE_GB=$((${ARTIFACT_SIZE_BYTES}/1024/1024/1024)) -FREE_SPACE_KB=$(sudo su bitbucket -c "df -k --output=avail $NFS_DIR | tail -n1") -FREE_SPACE_GB=$((${FREE_SPACE_KB}/1024/1024)) -REQUIRED_SPACE_GB=$((5 + ${ARTIFACT_SIZE_GB})) -if [[ ${FREE_SPACE_GB} -lt ${REQUIRED_SPACE_GB} ]]; then - echo "Not enough free space for download." - echo "Free space: ${FREE_SPACE_GB} GB" - echo "Required space: ${REQUIRED_SPACE_GB} GB" - exit 1 -fi; - -sudo su bitbucket -c "time wget -qO- ${ATTACHMENTS_TAR_URL} -P ${NFS_DIR} | tar -xz --checkpoint=.10000 -C ${NFS_DIR}/${ATTACHMENT_DIR_DATA} --strip-components 1" -if [[ $? -ne 0 ]]; then - echo "Untar failed!" - exit 1 -fi -echo "DCAPT util script execution is finished successfully." -echo "Important: do not forget to start Bitbucket" -echo # move to a new line \ No newline at end of file diff --git a/app/util/confluence/index-snapshot.sh b/app/util/confluence/index-snapshot.sh deleted file mode 100644 index b862fa98d..000000000 --- a/app/util/confluence/index-snapshot.sh +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/bash - -# Wait until index snapshot for Confluence DC is generated - -TEMP_DIR="/var/atlassian/application-data/confluence/temp" -TEMP_ZIP="/var/atlassian/application-data/confluence/index/*main_index*zip" -MIN_SNAPSHOT_SIZE=5242880 - -TIMEOUT=3600 # 1 hour -COUNTER=0 -SLEEP_TIME=30 -ATTEMPTS=$((TIMEOUT / SLEEP_TIME)) -FAIL_FAST_COUNTER=0 -FAIL_FAST_ATTEMPTS=20 -FAIL_FAST_FLAG=true - - -while [ ${COUNTER} -lt ${ATTEMPTS} ];do - # Get the latest snapshot from the index-snapshots folder - SNAPSHOT=$(sudo su -c "ls -tr /media/atl/confluence/shared-home/index-snapshots/IndexSnapshot_main_index_*zip" 2>/dev/null | tail -1) - if sudo su -c "test -z ${SNAPSHOT}"; then - echo "There is no snapshot file yet in /media/atl/confluence/shared-home/index-snapshots/ folder." - else - SNAPSHOT_SIZE=$(sudo su -c "du -s ${SNAPSHOT}" | cut -f1) - echo "Snapshot file found. Current size: ${SNAPSHOT_SIZE}" - if sudo su -c "test -f ${SNAPSHOT} && [ ${SNAPSHOT_SIZE} -gt ${MIN_SNAPSHOT_SIZE} ]"; then - echo # New line - echo "Snapshot was created successfully." - break - fi - fi - - if [ ${FAIL_FAST_COUNTER} -eq ${FAIL_FAST_ATTEMPTS} ]; then - echo # move to a new line - echo "Snapshot generation did not started." - echo "Try to create a new Confluence page in UI and run 'General configuration' > 'Scheduled Jobs' > 'Clean Journal Entries' job again." - exit 1 - fi - - if sudo su -c "test -d ${TEMP_DIR}"; then - TEMP_DIR_SIZE=$(sudo su -c "du -s ${TEMP_DIR}" | cut -f1) - if [[ ${TEMP_DIR_SIZE} -gt 0 ]]; then - echo "Temp dir size > 0. Current temp dir size: ${TEMP_DIR_SIZE}" - FAIL_FAST_FLAG=false - else - echo "Temp dir size is zero." - fi - fi - - if sudo su -c "test -f ${TEMP_ZIP}"; then - TEMP_ZIP_SIZE=$(sudo su -c "du -s ${TEMP_ZIP}" | cut -f1) - echo "Temp ZIP file found. 
Current temp ZIP file size: ${TEMP_ZIP_SIZE}" - fi - - if [ "$FAIL_FAST_FLAG" = true ]; then - echo "FAIL_FAST_COUNTER: $FAIL_FAST_COUNTER/$FAIL_FAST_ATTEMPTS" - (( FAIL_FAST_COUNTER++ )) || true - fi - - echo "Waiting for Snapshot generation, attempt ${COUNTER}/${ATTEMPTS} at waiting ${SLEEP_TIME} seconds." - echo # New line - echo # New line - sleep ${SLEEP_TIME} - (( COUNTER++ )) || true -done - -if [ ${COUNTER} -eq ${ATTEMPTS} ]; then - echo # move to a new line - echo "Snapshot generation fails." - echo "Try to create a new Confluence page in UI and run 'General configuration' > 'Scheduled Jobs' > 'Clean Journal Entries' job again." - exit 1 -fi - -echo "DCAPT util script execution is finished successfully." \ No newline at end of file diff --git a/app/util/confluence/index-sync.sh b/app/util/confluence/index-sync.sh deleted file mode 100644 index c39d5a876..000000000 --- a/app/util/confluence/index-sync.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash - -# Wait until index sync is finished on a new Confluence DC node - -SEARCH_LOG="/var/atlassian/application-data/confluence/logs/*.log" -TIMEOUT=1200 - -if [ "$(sudo su confluence -c "ls -l ""$SEARCH_LOG"" 2>/dev/null | wc -l")" -gt 0 ] -then - echo "Log files were found:" - sudo su confluence -c "ls $SEARCH_LOG" -else - echo "There are no log files found like $SEARCH_LOG" - exit 1 -fi - -function find_word_in_log() { - COUNTER=0 - SLEEP_TIME=10 - ATTEMPTS=$((TIMEOUT / SLEEP_TIME)) - while [ ${COUNTER} -lt ${ATTEMPTS} ];do - check_grep=`sudo su confluence -c "cat $SEARCH_LOG" | grep -o "$1"` - if [ -z "$check_grep" ];then - for i in {1..$COUNTER}; do echo -n .; done - sleep ${SLEEP_TIME} - let COUNTER=$COUNTER+1 - else - echo "$check_grep" - break - fi - - done - if [ ${COUNTER} -eq ${ATTEMPTS} ]; then - echo # move to a new line - echo "Failed to find $1 in $SEARCH_LOG in $TIMEOUT seconds" - exit 1 - fi -} - -find_word_in_log "Index recovery is required for main index, starting now" -find_word_in_log "main index recovered from shared home directory" - -echo "DCAPT util script execution is finished successfully." \ No newline at end of file diff --git a/app/util/confluence/index-wait-till-finished.sh b/app/util/confluence/index-wait-till-finished.sh deleted file mode 100644 index fac3ddb9c..000000000 --- a/app/util/confluence/index-wait-till-finished.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -# Wait for full re index finished - -SEARCH_LOG="/var/atlassian/application-data/confluence/logs/atlassian-confluence-index*" -CONFLUENCE_VERSION_FILE="/media/atl/confluence/shared-home/confluence.version" -PROGRESS="Re-index progress:.*" -FINISHED="Re-index progress: 100% complete" - -CONFLUENCE_VERSION=$(sudo su confluence -c "cat ${CONFLUENCE_VERSION_FILE}") -if [[ -z "$CONFLUENCE_VERSION" ]]; then - echo The $CONFLUENCE_VERSION_FILE file does not exists or emtpy. Please check if CONFLUENCE_VERSION_FILE variable \ - has a valid file path of the Confluence version file or set your Cluster CONFLUENCE_VERSION explicitly. - exit 1 -fi -echo "Confluence Version: ${CONFLUENCE_VERSION}" - -if [ "$(sudo su confluence -c "ls -l ""$SEARCH_LOG"" 2>/dev/null | wc -l")" -gt 0 ] -then - echo "Log files were found:" - # get all logs files as string without newline chars and sorted by last edit time oldest to newest - LOG_FILE_NAMES=$(sudo su confluence -c "ls -tr $SEARCH_LOG | tr '\n' ' '") - echo "$LOG_FILE_NAMES" -else - echo "ERROR: There are no log files found like $SEARCH_LOG" - echo "Make sure your Confluence version is 7.7.x or higher." 
- exit 1 -fi - -TIMEOUT=21600 # 6 hour -COUNTER=0 -SLEEP_TIME=60 -ATTEMPTS=$((TIMEOUT / SLEEP_TIME)) - -while [ ${COUNTER} -lt ${ATTEMPTS} ];do - grep_result=$(sudo su -c "grep -h -o \"$PROGRESS\" $LOG_FILE_NAMES" 2>/dev/null | tail -1) - echo "Status:" - echo "$grep_result" - if [ -z "$grep_result" ];then - echo "ERROR: $PROGRESS was not found in $SEARCH_LOG" - echo "Check if index process was started." - exit 1 - fi - finished=$(echo "$grep_result" | grep "$FINISHED") - if [ -z "$finished" ];then - echo "Waiting for index finished, attempt ${COUNTER}/${ATTEMPTS} at waiting ${SLEEP_TIME} seconds." - echo # New line - sleep ${SLEEP_TIME} - (( COUNTER++ )) || true - else - echo "Index finished successfully." - break - fi -done - -if [ "${COUNTER}" -eq ${ATTEMPTS} ]; then - echo # move to a new line - echo "ERROR: Wait for index finished failed" - echo "See logs for more details:" - sudo su -c "ls -a $SEARCH_LOG" - exit 1 -fi - -echo "DCAPT util script execution is finished successfully." \ No newline at end of file diff --git a/app/util/confluence/populate_db.sh b/app/util/confluence/populate_db.sh deleted file mode 100644 index ebb678d77..000000000 --- a/app/util/confluence/populate_db.sh +++ /dev/null @@ -1,249 +0,0 @@ -#!/bin/bash - -################### Variables section ################### -# Command to install psql client for Amazon Linux 2. -# In case of different distributive, please adjust accordingly or install manually. -INSTALL_PSQL_CMD="amazon-linux-extras install -y postgresql10" - -# DB config file location (dbconfig.xml) -DB_CONFIG="/var/atlassian/application-data/confluence/confluence.cfg.xml" - -# Depending on Confluence installation directory -CONFLUENCE_VERSION_FILE="/media/atl/confluence/shared-home/confluence.version" - -# DB admin user name, password and DB name -CONFLUENCE_DB_NAME="confluence" -CONFLUENCE_DB_USER="postgres" -CONFLUENCE_DB_PASS="Password1!" - -# Confluence DB requests -SELECT_CONFLUENCE_SETTING_SQL="select BANDANAVALUE from BANDANA where BANDANACONTEXT = '_GLOBAL' and BANDANAKEY = 'atlassian.confluence.settings';" - -# Confluence version variables -SUPPORTED_CONFLUENCE_VERSIONS=(7.19.2) - -if [[ ! $(systemctl status confluence) ]]; then - echo "The Confluence service was not found on this host." \ - "Please make sure you are running this script on a host that is running Confluence." - exit 1 -fi - -CONFLUENCE_VERSION=$(sudo su confluence -c "cat ${CONFLUENCE_VERSION_FILE}") -if [[ -z "$CONFLUENCE_VERSION" ]]; then - echo The $CONFLUENCE_VERSION_FILE file does not exists or emtpy. Please check if CONFLUENCE_VERSION_FILE variable \ - has a valid file path of the Confluence version file or set your Cluster CONFLUENCE_VERSION explicitly. 
- exit 1 -fi -echo "Confluence Version: ${CONFLUENCE_VERSION}" - -# Datasets AWS bucket and db dump name -DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/confluence" -DATASETS_SIZE="large" -DB_DUMP_NAME="db.dump" -DB_DUMP_URL="${DATASETS_AWS_BUCKET}/${CONFLUENCE_VERSION}/${DATASETS_SIZE}/${DB_DUMP_NAME}" - -################### End of variables section ################### - -# Custom version check -if [[ "$1" == "--custom" ]]; then - DB_DUMP_URL="${DATASETS_AWS_BUCKET}/$CONFLUENCE_VERSION/${DATASETS_SIZE}/${DB_DUMP_NAME}" - if curl --output /dev/null --silent --head --fail "$DB_DUMP_URL"; then - echo "Custom version $CONFLUENCE_VERSION dataset URL found: ${DB_DUMP_URL}" - else - echo "Error: there is no dataset for version $CONFLUENCE_VERSION" - exit 1 - fi -# Check if Confluence version is supported -elif [[ ! "${SUPPORTED_CONFLUENCE_VERSIONS[*]}" =~ ${CONFLUENCE_VERSION} ]]; then - echo "Confluence Version: ${CONFLUENCE_VERSION} is not officially supported by Data Center App Performance Toolkit." - echo "Supported Confluence Versions: ${SUPPORTED_CONFLUENCE_VERSIONS[*]}" - echo "If you want to force apply an existing datasets to your Confluence, use --force flag with version of dataset you want to apply:" - echo "e.g. ./populate_db.sh --force 7.4.5" - echo "!!! Warning !!! This may break your Confluence instance. Also, note that downgrade is not supported by Confluence." - # Check if --force flag is passed into command - if [[ "$1" == "--force" ]]; then - # Check if version was specified after --force flag - if [[ -z "$2" ]]; then - echo "Error: --force flag requires version after it." - echo "Specify one of these versions: ${SUPPORTED_CONFLUENCE_VERSIONS[*]}" - exit 1 - fi - # Check if passed Confluence version is in list of supported - if [[ " ${SUPPORTED_CONFLUENCE_VERSIONS[@]} " =~ " ${2} " ]]; then - DB_DUMP_URL="${DATASETS_AWS_BUCKET}/$2/${DATASETS_SIZE}/${DB_DUMP_NAME}" - echo "Force mode. Dataset URL: ${DB_DUMP_URL}" - else - LAST_DATASET_VERSION=${SUPPORTED_CONFLUENCE_VERSIONS[${#SUPPORTED_CONFLUENCE_VERSIONS[@]}-1]} - DB_DUMP_URL="${DATASETS_AWS_BUCKET}/$LAST_DATASET_VERSION/${DATASETS_SIZE}/${DB_DUMP_NAME}" - echo "Specific dataset version was not specified after --force flag, using the last available: ${LAST_DATASET_VERSION}" - echo "Dataset URL: ${DB_DUMP_URL}" - fi - else - # No force flag - exit 1 - fi -fi - -echo "!!! Warning !!!" -echo # move to a new line -echo "This script restores Postgres DB from SQL DB dump for Confluence DC created with AWS Quickstart defaults." -echo "You can review or modify default variables in 'Variables section' of this script." -echo # move to a new line -echo "Variables:" -echo "DB_CONFIG=${DB_CONFIG}" -echo "CONFLUENCE_DB_NAME=${CONFLUENCE_DB_NAME}" -echo "CONFLUENCE_DB_USER=${CONFLUENCE_DB_USER}" -echo "CONFLUENCE_DB_PASS=${CONFLUENCE_DB_PASS}" -echo "DB_DUMP_URL=${DB_DUMP_URL}" -echo # move to a new line -read -p "I confirm that variables are correct and want to proceed (y/n)? " -n 1 -r -echo # move to a new line -if [[ ! $REPLY =~ ^[Yy]$ ]]; then - echo "Script was canceled." - exit 1 -fi - -echo "Step1: Check Postgres Client" -if ! [[ -x "$(command -v psql)" ]]; then - echo "Install Postgres client" - sudo su -c "${INSTALL_PSQL_CMD}" - if [[ $? -ne 0 ]]; then - echo "Postgres Client was NOT installed." - echo "Check correctness of install command or install Postgres client manually." 
- echo "INSTALL_PSQL_CMD=${INSTALL_PSQL_CMD}" - exit 1 - fi -else - echo "Postgres client is already installed" -fi -echo "Current PostgreSQL version is $(psql -V)" - -echo "Step2: Get DB Host, check DB connection and user permissions" -DB_HOST=$(sudo su -c "cat ${DB_CONFIG} | grep 'jdbc:postgresql' | cut -d'/' -f3 | cut -d':' -f1") -if [[ -z ${DB_HOST} ]]; then - echo "DataBase URL was not found in ${DB_CONFIG}" - exit 1 -fi -echo "DB_HOST=${DB_HOST}" - -echo "Check DB connection" -PGPASSWORD=${CONFLUENCE_DB_PASS} pg_isready -U ${CONFLUENCE_DB_USER} -h ${DB_HOST} -if [[ $? -ne 0 ]]; then - echo "Connection to DB failed. Please check correctness of following variables:" - echo "CONFLUENCE_DB_NAME=${CONFLUENCE_DB_NAME}" - echo "CONFLUENCE_DB_USER=${CONFLUENCE_DB_USER}" - echo "CONFLUENCE_DB_PASS=${CONFLUENCE_DB_PASS}" - echo "DB_HOST=${DB_HOST}" - exit 1 -fi - -echo "Check database permissions for user ${CONFLUENCE_DB_USER}" -PGPASSWORD=${CONFLUENCE_DB_PASS} createdb -U ${CONFLUENCE_DB_USER} -h ${DB_HOST} -T template0 -E "UNICODE" -l "C" TEST -if [[ $? -ne 0 ]]; then - echo "User ${CONFLUENCE_DB_USER} doesn't have permission to create database." - exit 1 -else - PGPASSWORD=${CONFLUENCE_DB_PASS} dropdb -U ${CONFLUENCE_DB_USER} -h ${DB_HOST} TEST -fi - -echo "Step3: Write confluence baseUrl to file" -CONFLUENCE_BASE_URL_FILE="base_url" -if [[ -s ${CONFLUENCE_BASE_URL_FILE} ]];then - echo "File ${CONFLUENCE_BASE_URL_FILE} was found. Base url: $(cat ${CONFLUENCE_BASE_URL_FILE})." -else - PGPASSWORD=${CONFLUENCE_DB_PASS} psql -h ${DB_HOST} -d ${CONFLUENCE_DB_NAME} -U ${CONFLUENCE_DB_USER} -Atc "${SELECT_CONFLUENCE_SETTING_SQL}" \ - | grep -i "" > ${CONFLUENCE_BASE_URL_FILE} - if [[ ! -s ${CONFLUENCE_BASE_URL_FILE} ]]; then - echo "Failed to get Base URL value from database. Check DB configuration variables." - exit 1 - fi - echo "$(cat ${CONFLUENCE_BASE_URL_FILE}) was written to the ${CONFLUENCE_BASE_URL_FILE} file." -fi - -echo "Step4: Stop Confluence" -sudo systemctl stop confluence -if [[ $? -ne 0 ]]; then - echo "Confluence did not stop. Please try to rerun script." - exit 1 -fi - -echo "Step5: Download DB dump" -rm -rf ${DB_DUMP_NAME} -ARTIFACT_SIZE_BYTES=$(curl -sI ${DB_DUMP_URL} | grep "Content-Length" | awk {'print $2'} | tr -d '[:space:]') -ARTIFACT_SIZE_GB=$((${ARTIFACT_SIZE_BYTES}/1024/1024/1024)) -FREE_SPACE_KB=$(df -k --output=avail "$PWD" | tail -n1) -FREE_SPACE_GB=$((${FREE_SPACE_KB}/1024/1024)) -REQUIRED_SPACE_GB=$((5 + ${ARTIFACT_SIZE_GB})) -if [[ ${FREE_SPACE_GB} -lt ${REQUIRED_SPACE_GB} ]]; then - echo "Not enough free space for download." - echo "Free space: ${FREE_SPACE_GB} GB" - echo "Required space: ${REQUIRED_SPACE_GB} GB" - exit 1 -fi -# use computer style progress bar -time wget --progress=dot:giga ${DB_DUMP_URL} -if [[ $? -ne 0 ]]; then - echo "DB dump download failed! Pls check available disk space." - exit 1 -fi - -echo "Step6: SQL Restore" -echo "Drop DB" -PGPASSWORD=${CONFLUENCE_DB_PASS} dropdb -U ${CONFLUENCE_DB_USER} -h ${DB_HOST} ${CONFLUENCE_DB_NAME} -if [[ $? -ne 0 ]]; then - echo "Drop DB failed." - exit 1 -fi -sleep 5 -echo "Create DB" -PGPASSWORD=${CONFLUENCE_DB_PASS} createdb -U ${CONFLUENCE_DB_USER} -h ${DB_HOST} -T template0 ${CONFLUENCE_DB_NAME} -if [[ $? -ne 0 ]]; then - echo "Create DB failed." - exit 1 -fi -sleep 5 -echo "PG Restore" -time PGPASSWORD=${CONFLUENCE_DB_PASS} pg_restore --schema=public -v -j 8 -U ${CONFLUENCE_DB_USER} -h ${DB_HOST} -d ${CONFLUENCE_DB_NAME} ${DB_DUMP_NAME} -if [[ $? -ne 0 ]]; then - echo "SQL Restore failed!" 
- exit 1 -fi - -echo "Step7: Update confluence baseUrl value in database" -BASE_URL_TO_REPLACE=$(PGPASSWORD=${CONFLUENCE_DB_PASS} psql -h ${DB_HOST} -d ${CONFLUENCE_DB_NAME} -U ${CONFLUENCE_DB_USER} -Atc \ -"${SELECT_CONFLUENCE_SETTING_SQL}" | grep -i "") - -if [[ -z "${BASE_URL_TO_REPLACE}" ]]; then - echo "The BASE_URL_TO_REPLACE variable is empty. Please check that the confluence baseUrl value is exist in the database." - exit 1 -fi - -if [[ -s ${CONFLUENCE_BASE_URL_FILE} ]]; then - BASE_URL=$(cat ${CONFLUENCE_BASE_URL_FILE}) - if [[ $(PGPASSWORD=${CONFLUENCE_DB_PASS} psql -h ${DB_HOST} -d ${CONFLUENCE_DB_NAME} -U ${CONFLUENCE_DB_USER} -c \ - "update BANDANA - set BANDANAVALUE = replace(BANDANAVALUE, '${BASE_URL_TO_REPLACE}', '${BASE_URL}') - where BANDANACONTEXT = '_GLOBAL' - and BANDANAKEY = 'atlassian.confluence.settings';") != "UPDATE 1" ]]; then - echo "Couldn't update database baseUrl value. Please check your DB configuration variables." - exit 1 - else - echo "The database baseUrl value was updated with ${BASE_URL}" - fi -else - echo "The ${CONFLUENCE_BASE_URL_FILE} file doesn't exist or empty. Check DB configuration variables." - exit 1 -fi - -echo "Step8: Start Confluence" -sudo systemctl start confluence -rm -rf ${DB_DUMP_NAME} - -echo "Step9: Remove ${CONFLUENCE_BASE_URL_FILE} file" -sudo rm ${CONFLUENCE_BASE_URL_FILE} - -echo "DCAPT util script execution is finished successfully." -echo # move to a new line - -echo "Important: new admin user credentials are admin/admin" -echo "Wait a couple of minutes until Confluence is started." diff --git a/app/util/confluence/upload_attachments.sh b/app/util/confluence/upload_attachments.sh deleted file mode 100644 index 47fd0b199..000000000 --- a/app/util/confluence/upload_attachments.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/bash - - -################### Variables section ################### -# Confluence version variables -CONFLUENCE_VERSION_FILE="/media/atl/confluence/shared-home/confluence.version" -SUPPORTED_CONFLUENCE_VERSIONS=(7.19.2) -CONFLUENCE_VERSION=$(sudo su confluence -c "cat ${CONFLUENCE_VERSION_FILE}") -if [[ -z "$CONFLUENCE_VERSION" ]]; then - echo The $CONFLUENCE_VERSION_FILE file does not exists or emtpy. Please check if CONFLUENCE_VERSION_FILE variable \ - has a valid file path of the Confluence version file or set your Cluster CONFLUENCE_VERSION explicitly. - exit 1 -fi -echo "Confluence Version: ${CONFLUENCE_VERSION}" - -DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/confluence" -ATTACHMENTS_TAR="attachments.tar.gz" -ATTACHMENTS_DIR="attachments" -DATASETS_SIZE="large" -ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/${CONFLUENCE_VERSION}/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" -TMP_DIR="/tmp" -EFS_DIR="/media/atl/confluence/shared-home" -################### End of variables section ################### - -if [[ ! $(systemctl status confluence) ]]; then - echo "The Confluence service was not found on this host." \ - "Please make sure you are running this script on a host that is running Confluence." - exit 1 -fi - -# Custom version check -if [[ "$1" == "--custom" ]]; then - ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/$CONFLUENCE_VERSION/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" - if curl --output /dev/null --silent --head --fail "$ATTACHMENTS_TAR_URL"; then - echo "Custom version $CONFLUENCE_VERSION dataset URL found: ${ATTACHMENTS_TAR_URL}" - else - echo "Error: there is no dataset for version $CONFLUENCE_VERSION" - exit 1 - fi -# Check if Confluence version is supported -elif [[ ! 
"${SUPPORTED_CONFLUENCE_VERSIONS[*]}" =~ ${CONFLUENCE_VERSION} ]]; then - echo "Confluence Version: ${CONFLUENCE_VERSION} is not officially supported by Data Center App Peformance Toolkit." - echo "Supported Confluence Versions: ${SUPPORTED_CONFLUENCE_VERSIONS[*]}" - echo "If you want to force apply an existing datasets to your CONFLUENCE, use --force flag with version of dataset you want to apply:" - echo "e.g. ./upload_attachments --force 7.4.5" - echo "!!! Warning !!! This may broke your Confluence instance." - # Check if --force flag is passed into command - if [[ "$1" == "--force" ]]; then - # Check if passed Confluence version is in list of supported - if [[ "${SUPPORTED_CONFLUENCE_VERSIONS[*]}" =~ ${2} ]]; then - ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/$2/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" - echo "Force mode. Dataset URL: ${ATTACHMENTS_TAR_URL}" - else - LAST_ATTACHMENTS_VERSION=${SUPPORTED_CONFLUENCE_VERSIONS[${#SUPPORTED_CONFLUENCE_VERSIONS[@]}-1]} - ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/$LAST_ATTACHMENTS_VERSION/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" - echo "Specific dataset version was not specified after --force flag, using the last available: ${LAST_ATTACHMENTS_VERSION}" - echo "Dataset URL: ${ATTACHMENTS_TAR_URL}" - fi - else - # No force flag - exit 1 - fi -fi - -echo "!!! Warning !!!" -echo # move to a new line -echo "This script restores attachments into Confluence DC created with AWS Quickstart defaults." -echo "You can review or modify default variables in 'Variables section' of this script." -echo # move to a new line -echo "Variables:" -echo "EFS_DIR=${EFS_DIR}" -echo "ATTACHMENTS_TAR_URL=${ATTACHMENTS_TAR_URL}" -echo # move to a new line -read -p "I confirm that variables are correct and want to proceed (y/n)? " -n 1 -r -echo # move to a new line -if [[ ! $REPLY =~ ^[Yy]$ ]] -then - echo "Script was canceled." - exit 1 -fi - - -echo "Step1: Download msrcync" -# https://github.com/jbd/msrsync -cd ${TMP_DIR} || exit 1 -if [[ -s msrsync ]]; then - echo "msrsync already downloaded" -else - sudo su confluence -c "wget https://raw.githubusercontent.com/jbd/msrsync/master/msrsync && chmod +x msrsync" -fi - -echo "Step2: Download attachments" -sudo su -c "rm -rf ${ATTACHMENTS_TAR}" -ARTIFACT_SIZE_BYTES=$(curl -sI ${ATTACHMENTS_TAR_URL} | grep "Content-Length" | awk {'print $2'} | tr -d '[:space:]') -ARTIFACT_SIZE_GB=$((${ARTIFACT_SIZE_BYTES}/1024/1024/1024)) -FREE_SPACE_KB=$(df -k --output=avail "$PWD" | tail -n1) -FREE_SPACE_GB=$((${FREE_SPACE_KB}/1024/1024)) -REQUIRED_SPACE_GB=$((5 + ${ARTIFACT_SIZE_GB})) -if [[ ${FREE_SPACE_GB} -lt ${REQUIRED_SPACE_GB} ]]; then - echo "Not enough free space for download." - echo "Free space: ${FREE_SPACE_GB} GB" - echo "Required space: ${REQUIRED_SPACE_GB} GB" - exit 1 -fi; -sudo su confluence -c "time wget --progress=dot:giga ${ATTACHMENTS_TAR_URL}" - -echo "Step3: Untar attachments to tmp folder" -sudo su -c "rm -rf ${ATTACHMENTS_DIR}" -sudo su confluence -c "time tar -xzf ${ATTACHMENTS_TAR} --checkpoint=.10000" -if [[ $? -ne 0 ]]; then - echo "Untar failed!" - exit 1 -fi -echo "Counting total files number:" -sudo su confluence -c "find ${ATTACHMENTS_DIR} -type f -print | wc -l" -echo "Deleting ${ATTACHMENTS_TAR}" -sudo su -c "rm -rf ${ATTACHMENTS_TAR}" - -echo "Step4: Copy attachments to EFS" -sudo su confluence -c "time ./msrsync -P -p 100 -f 3000 ${ATTACHMENTS_DIR} ${EFS_DIR}" -sudo su -c "rm -rf ${ATTACHMENTS_DIR}" - -echo "DCAPT util script execution is finished successfully." 
-echo # move to a new line diff --git a/app/util/crowd/populate_db.sh b/app/util/crowd/populate_db.sh deleted file mode 100644 index 55e466728..000000000 --- a/app/util/crowd/populate_db.sh +++ /dev/null @@ -1,204 +0,0 @@ -#!/bin/bash - -################### Variables section ################### -# Command to install psql client for Amazon Linux 2. -# In case of different distributive, please adjust accordingly or install manually. -INSTALL_PSQL_CMD="amazon-linux-extras install -y postgresql11" - -# DB config file location (dbconfig.xml) -DB_CONFIG="/usr/lib/systemd/system/crowd.service" - -# Depending on Crowd installation directory -CROWD_VERSION_FILE="/media/atl/crowd/shared/crowd.version" - -# DB admin user name, password and DB name -CROWD_DB_NAME="crowd" -CROWD_DB_USER="postgres" -CROWD_DB_PASS="Password1!" - -# Crowd version variables -BASE_CROWD_VERSION=4.3.0 -SUPPORTED_CROWD_VERSIONS=(5.0.5) - -if [[ ! $(systemctl status crowd) ]]; then - echo "The Crowd service was not found on this host." \ - "Please make sure you are running this script on a host that is running Crowd." - exit 1 -fi - -CROWD_VERSION=$(sudo su crowd -c "cat ${CROWD_VERSION_FILE}") -if [[ -z "$CROWD_VERSION" ]]; then - echo The $CROWD_VERSION_FILE file does not exists or emtpy. Please check if CROWD_VERSION_FILE variable \ - has a valid file path of the Crowd version file or set your Cluster CROWD_VERSION explicitly. - exit 1 -fi -echo "Crowd Version: ${CROWD_VERSION}" - -# Datasets AWS bucket and db dump name -DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/crowd" -DATASETS_SIZE="large" -DB_DUMP_NAME="db.dump" - -################### End of variables section ################### - -# Check if Crowd version is supported -if [[ ! "${SUPPORTED_CROWD_VERSIONS[*]}" =~ ${CROWD_VERSION} ]]; then - echo "Crowd Version: ${CROWD_VERSION} is not officially supported by Data Center App Performance Toolkit." - echo "Supported Crowd Versions: ${SUPPORTED_CROWD_VERSIONS[*]}" - echo "!!! Warning !!! Dump from version $BASE_CROWD_VERSION would be used" -fi - -DB_DUMP_URL="${DATASETS_AWS_BUCKET}/$BASE_CROWD_VERSION/${DATASETS_SIZE}/${DB_DUMP_NAME}" - -echo "!!! Warning !!!" -echo # move to a new line -echo "This script restores Postgres DB from SQL DB dump for Сrowd DC created with AWS Quickstart defaults." -echo "You can review or modify default variables in 'Variables section' of this script." -echo # move to a new line -echo "Variables:" -echo "DB_CONFIG=${DB_CONFIG}" -echo "CROWD_DB_NAME=${CROWD_DB_NAME}" -echo "CROWD_DB_USER=${CROWD_DB_USER}" -echo "CROWD_DB_PASS=${CROWD_DB_PASS}" -echo "DB_DUMP_URL=${DB_DUMP_URL}" -echo # move to a new line -read -p "I confirm that variables are correct and want to proceed (y/n)? " -n 1 -r -echo # move to a new line -if [[ ! $REPLY =~ ^[Yy]$ ]]; then - echo "Script was canceled." - exit 1 -fi - -echo "Step1: Check Postgres Client" -if ! [[ -x "$(command -v psql)" ]]; then - echo "Install Postgres client" - sudo su -c "${INSTALL_PSQL_CMD}" - if [[ $? -ne 0 ]]; then - echo "Postgres Client was NOT installed." - echo "Check correctness of install command or install Postgres client manually." 
- echo "INSTALL_PSQL_CMD=${INSTALL_PSQL_CMD}" - exit 1 - fi -else - echo "Postgres client is already installed" -fi -echo "Current PostgreSQL version is $(psql -V)" - -echo "Step2: Get DB Host, check DB connection and user permissions" -DB_HOST=$(sudo su -c "cat ${DB_CONFIG} | grep 'jdbc:postgresql' | cut -d'/' -f3 | cut -d':' -f1") -if [[ -z ${DB_HOST} ]]; then - echo "DataBase URL was not found in ${DB_CONFIG}" - exit 1 -fi -echo "DB_HOST=${DB_HOST}" - -echo "Check database permissions for user ${CROWD_DB_USER}" -PGPASSWORD=${CROWD_DB_PASS} createdb -U ${CROWD_DB_USER} -h ${DB_HOST} -T template0 -E "UNICODE" -l "C" TEST -if [[ $? -ne 0 ]]; then - echo "User ${CROWD_DB_USER} doesn't have permission to create database." - exit 1 -else - PGPASSWORD=${CROWD_DB_PASS} dropdb -U ${CROWD_DB_USER} -h ${DB_HOST} TEST -fi - -echo "Check DB connection" -PGPASSWORD=${CROWD_DB_PASS} pg_isready -U ${CROWD_DB_USER} -h ${DB_HOST} -if [[ $? -ne 0 ]]; then - echo "Connection to DB failed. Please check correctness of following variables:" - echo "CROWD_DB_NAME=${CROWD_DB_NAME}" - echo "CROWD_DB_USER=${CROWD_DB_USER}" - echo "CROWD_DB_PASS=${CROWD_DB_PASS}" - echo "DB_HOST=${DB_HOST}" - exit 1 -fi - -echo "Step3: Stop Crowd" -sudo systemctl stop crowd -if [[ $? -ne 0 ]]; then - echo "Crowd did not stop. Please try to rerun script." - exit 1 -fi - -echo "Step4: Write 'base.url' property to file" -CROWD_BASE_URL_FILE="base_url" -if [[ -s ${CROWD_BASE_URL_FILE} ]]; then - echo "File ${CROWD_BASE_URL_FILE} was found. Base url: $(cat ${CROWD_BASE_URL_FILE})." -else - PGPASSWORD=${CROWD_DB_PASS} psql -h ${DB_HOST} -d ${CROWD_DB_NAME} -U ${CROWD_DB_USER} -Atc \ - "select property_value from cwd_property where property_name='base.url';" > ${CROWD_BASE_URL_FILE} - if [[ ! -s ${CROWD_BASE_URL_FILE} ]]; then - echo "Failed to get Base URL value from database. Check DB configuration variables." - exit 1 - fi - echo "$(cat ${CROWD_BASE_URL_FILE}) was written to the ${CROWD_BASE_URL_FILE} file." -fi - -echo "Step5: Download DB dump" -rm -rf ${DB_DUMP_NAME} -ARTIFACT_SIZE_BYTES=$(curl -sI ${DB_DUMP_URL} | grep "Content-Length" | awk {'print $2'} | tr -d '[:space:]') -ARTIFACT_SIZE_GB=$((${ARTIFACT_SIZE_BYTES}/1024/1024/1024)) -FREE_SPACE_KB=$(df -k --output=avail "$PWD" | tail -n1) -FREE_SPACE_GB=$((${FREE_SPACE_KB}/1024/1024)) -REQUIRED_SPACE_GB=$((5 + ${ARTIFACT_SIZE_GB})) -if [[ ${FREE_SPACE_GB} -lt ${REQUIRED_SPACE_GB} ]]; then - echo "Not enough free space for download." - echo "Free space: ${FREE_SPACE_GB} GB" - echo "Required space: ${REQUIRED_SPACE_GB} GB" - exit 1 -fi -# use computer style progress bar -time wget --progress=dot:giga ${DB_DUMP_URL} -if [[ $? -ne 0 ]]; then - echo "DB dump download failed! Pls check available disk space." - exit 1 -fi - -echo "Step6: SQL Restore" -echo "Drop DB" -PGPASSWORD=${CROWD_DB_PASS} dropdb -U ${CROWD_DB_USER} -h ${DB_HOST} ${CROWD_DB_NAME} -if [[ $? -ne 0 ]]; then - echo "Drop DB failed." - exit 1 -fi -sleep 5 -echo "Create DB" -PGPASSWORD=${CROWD_DB_PASS} createdb -U ${CROWD_DB_USER} -h ${DB_HOST} -T template0 ${CROWD_DB_NAME} -if [[ $? -ne 0 ]]; then - echo "Create DB failed." - exit 1 -fi -sleep 5 -echo "PG Restore" -time PGPASSWORD=${CROWD_DB_PASS} pg_restore --schema=public -v -j 8 -U ${CROWD_DB_USER} -h ${DB_HOST} -d ${CROWD_DB_NAME} ${DB_DUMP_NAME} -if [[ $? -ne 0 ]]; then - echo "SQL Restore failed!" 
- exit 1 -fi - -echo "Step7: Update 'base.url' property in database" -if [[ -s ${CROWD_BASE_URL_FILE} ]]; then - BASE_URL=$(cat ${CROWD_BASE_URL_FILE}) - if [[ $(PGPASSWORD=${CROWD_DB_PASS} psql -h ${DB_HOST} -d ${CROWD_DB_NAME} -U ${CROWD_DB_USER} -c \ - "UPDATE cwd_property SET property_value = '${BASE_URL}' WHERE property_name = 'base.url';") != "UPDATE 1" ]]; then - echo "Couldn't update database 'base.url' property. Please check your database connection." - exit 1 - else - echo "The database 'base.url' property was updated with ${BASE_URL}" - fi -else - echo "The ${CROWD_BASE_URL_FILE} file doesn't exist or empty. Please check file existence or 'base.url' property in the database." - exit 1 -fi - -echo "Step8: Start Crowd" -sudo systemctl start crowd -rm -rf ${DB_DUMP_NAME} - -echo "Step9: Remove ${CROWD_BASE_URL_FILE} file" -sudo rm ${CROWD_BASE_URL_FILE} - -echo "DCAPT util script execution is finished successfully." -echo # move to a new line - -echo "Important: new admin user credentials are admin/admin" -echo "Wait a couple of minutes until Crowd is started." diff --git a/app/util/jira/populate_db.sh b/app/util/jira/populate_db.sh deleted file mode 100644 index e61ef3716..000000000 --- a/app/util/jira/populate_db.sh +++ /dev/null @@ -1,327 +0,0 @@ -#!/bin/bash - -# Read command line arguments -while [[ "$#" -gt 0 ]]; do case $1 in - --jsm) jsm=1 ;; - --small) small=1 ;; - --custom) custom=1 ;; - --force) - if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then - force=1 - version=${2} - shift - else - force=1 - fi - ;; - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -if [[ ! $(systemctl status jira) ]]; then - echo "The Jira service was not found on this host." \ - "Please make sure you are running this script on a host that is running Jira." - exit 1 -fi - -################### Variables section ################### -# Command to install psql client for Amazon Linux 2. -# In case of different distributive, please adjust accordingly or install manually. -INSTALL_PSQL_CMD="amazon-linux-extras install -y postgresql11" - -# DB config file location (dbconfig.xml) -DB_CONFIG="/var/atlassian/application-data/jira/dbconfig.xml" - -# Depending on Jira installation directory -JIRA_CURRENT_DIR="/opt/atlassian/jira-software/current" -JIRA_SETENV_FILE="${JIRA_CURRENT_DIR}/bin/setenv.sh" -JIRA_VERSION_FILE="/media/atl/jira/shared/jira-software.version" - -# DB admin user name, password and DB name -JIRA_DB_NAME="jira" -JIRA_DB_USER="postgres" -JIRA_DB_PASS="Password1!" - -# Jira/JSM supported versions - -SUPPORTED_JIRA_VERSIONS=(8.20.26 9.4.10) -SUPPORTED_JSM_VERSIONS=(4.20.26 5.4.10) - -SUPPORTED_VERSIONS=("${SUPPORTED_JIRA_VERSIONS[@]}") -# JSM section -if [[ ${jsm} == 1 ]]; then - JIRA_CURRENT_DIR="/opt/atlassian/jira-servicedesk/current" - JIRA_SETENV_FILE="${JIRA_CURRENT_DIR}/bin/setenv.sh" - JIRA_VERSION_FILE="/media/atl/jira/shared/jira-servicedesk.version" - SUPPORTED_VERSIONS=("${SUPPORTED_JSM_VERSIONS[@]}") -fi - -JIRA_VERSION=$(sudo su jira -c "cat ${JIRA_VERSION_FILE}") -if [[ -z "$JIRA_VERSION" ]]; then - echo "ERROR: Failed to get Jira version. If your application type is JSM use flag '--jsm'." \ - "Otherwise check if JIRA_VERSION_FILE variable (${JIRA_VERSION_FILE})" \ - "has a valid file path of the Jira version file or set your Cluster JIRA_VERSION explicitly." 
- exit 1 -fi -echo "Jira Version: ${JIRA_VERSION}" - -# Datasets AWS bucket and db dump name -DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/jira" -if [[ ${jsm} == 1 ]]; then - DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/jsm" -fi -DATASETS_SIZE="large" -if [[ ${jsm} == 1 && ${small} == 1 ]]; then - # Only JSM supports "small" dataset - DATASETS_SIZE="small" -fi -DB_DUMP_NAME="db.dump" -DB_DUMP_URL="${DATASETS_AWS_BUCKET}/${JIRA_VERSION}/${DATASETS_SIZE}/${DB_DUMP_NAME}" - -################### End of variables section ################### - -# Custom version check -if [[ ${custom} == 1 ]]; then - DB_DUMP_URL="${DATASETS_AWS_BUCKET}/$JIRA_VERSION/${DATASETS_SIZE}/${DB_DUMP_NAME}" - if curl --output /dev/null --silent --head --fail "$DB_DUMP_URL"; then - echo "Custom version $JIRA_VERSION dataset URL found: ${DB_DUMP_URL}" - else - echo "Error: there is no dataset for version $JIRA_VERSION" - exit 1 - fi -# Check if Jira version is supported -elif [[ ! "${SUPPORTED_VERSIONS[*]}" =~ ${JIRA_VERSION} ]]; then - echo "Jira Version: ${JIRA_VERSION} is not officially supported by Data Center App Performance Toolkit." - echo "Supported Jira Versions: ${SUPPORTED_VERSIONS[*]}" - echo "If you want to force apply an existing datasets to your Jira, use --force flag with version of dataset you want to apply:" - echo "e.g. ./populate_db.sh --force 8.5.0" - echo "!!! Warning !!! This may break your Jira instance." - # Check if --force flag is passed into command - if [[ ${force} == 1 ]]; then - # Check if version was specified after --force flag - if [[ -z ${version} ]]; then - echo "Error: --force flag requires version after it." - echo "Specify one of these versions: ${SUPPORTED_VERSIONS[*]}" - exit 1 - fi - # Check if passed Jira version is in list of supported - if [[ " ${SUPPORTED_VERSIONS[@]} " =~ " ${version} " ]]; then - DB_DUMP_URL="${DATASETS_AWS_BUCKET}/${version}/${DATASETS_SIZE}/${DB_DUMP_NAME}" - echo "Force mode. Dataset URL: ${DB_DUMP_URL}" - # If there is no DOWNGRADE_OPT - set it - DOWNGRADE_OPT="Djira.downgrade.allowed=true" - if sudo su jira -c "! grep -q ${DOWNGRADE_OPT} $JIRA_SETENV_FILE"; then - sudo sed -i "s/JVM_SUPPORT_RECOMMENDED_ARGS=\"/&-${DOWNGRADE_OPT} /" "${JIRA_SETENV_FILE}" - echo "Flag -${DOWNGRADE_OPT} was set in ${JIRA_SETENV_FILE}" - fi - else - LAST_DATASET_VERSION=${SUPPORTED_VERSIONS[${#SUPPORTED_VERSIONS[@]}-1]} - DB_DUMP_URL="${DATASETS_AWS_BUCKET}/$LAST_DATASET_VERSION/${DATASETS_SIZE}/${DB_DUMP_NAME}" - echo "Specific dataset version was not specified after --force flag, using the last available: ${LAST_DATASET_VERSION}" - echo "Dataset URL: ${DB_DUMP_URL}" - fi - else - # No force flag - exit 1 - fi -fi - -echo "!!! Warning !!!" -echo # move to a new line -echo "This script restores Postgres DB from SQL DB dump for Jira DC created with AWS Quickstart defaults." -echo "You can review or modify default variables in 'Variables section' of this script." -echo # move to a new line -echo "Variables:" -echo "JIRA_CURRENT_DIR=${JIRA_CURRENT_DIR}" -echo "DB_CONFIG=${DB_CONFIG}" -echo "JIRA_DB_NAME=${JIRA_DB_NAME}" -echo "JIRA_DB_USER=${JIRA_DB_USER}" -echo "JIRA_DB_PASS=${JIRA_DB_PASS}" -echo "DB_DUMP_URL=${DB_DUMP_URL}" -echo # move to a new line -read -p "I confirm that variables are correct and want to proceed (y/n)? " -n 1 -r -echo # move to a new line -if [[ ! $REPLY =~ ^[Yy]$ ]]; then - echo "Script was canceled." - exit 1 -fi - -echo "Step1: Check Postgres Client" -if ! 
[[ -x "$(command -v psql)" ]]; then - echo "Install Postgres client" - sudo su -c "${INSTALL_PSQL_CMD}" - if [[ $? -ne 0 ]]; then - echo "Postgres Client was NOT installed." - echo "Check correctness of install command or install Postgres client manually." - echo "INSTALL_PSQL_CMD=${INSTALL_PSQL_CMD}" - exit 1 - fi -else - echo "Postgres client is already installed" -fi -echo "Current PostgreSQL version is $(psql -V)" - -echo "Step2: Get DB Host, check DB connection and user permissions" -DB_HOST=$(sudo su -c "cat ${DB_CONFIG} | grep 'jdbc:postgresql' | cut -d'/' -f3 | cut -d':' -f1") -if [[ -z ${DB_HOST} ]]; then - echo "DataBase URL was not found in ${DB_CONFIG}" - exit 1 -fi -echo "DB_HOST=${DB_HOST}" - -echo "Check database connection" -PGPASSWORD=${JIRA_DB_PASS} pg_isready -U ${JIRA_DB_USER} -h ${DB_HOST} -if [[ $? -ne 0 ]]; then - echo "Connection to database failed. Please check correctness of following variables:" - echo "JIRA_DB_NAME=${JIRA_DB_NAME}" - echo "JIRA_DB_USER=${JIRA_DB_USER}" - echo "JIRA_DB_PASS=${JIRA_DB_PASS}" - echo "DB_HOST=${DB_HOST}" - exit 1 -fi - -echo "Check database permissions for user ${JIRA_DB_USER}" -PGPASSWORD=${JIRA_DB_PASS} createdb -U ${JIRA_DB_USER} -h ${DB_HOST} -T template0 -E "UNICODE" -l "C" TEST -if [[ $? -ne 0 ]]; then - echo "User ${JIRA_DB_USER} doesn't have permission to create database." - exit 1 -else - PGPASSWORD=${JIRA_DB_PASS} dropdb -U ${JIRA_DB_USER} -h ${DB_HOST} TEST -fi - -echo "Step3: Write jira.baseurl property to file" -JIRA_BASE_URL_FILE="base_url" -if [[ -s ${JIRA_BASE_URL_FILE} ]]; then - echo "File ${JIRA_BASE_URL_FILE} was found. Base url: $(cat ${JIRA_BASE_URL_FILE})." -else - PGPASSWORD=${JIRA_DB_PASS} psql -h ${DB_HOST} -d ${JIRA_DB_NAME} -U ${JIRA_DB_USER} -Atc \ - "select propertyvalue from propertyentry PE - join propertystring PS on PE.id=PS.id - where PE.property_key = 'jira.baseurl';" > ${JIRA_BASE_URL_FILE} - if [[ ! -s ${JIRA_BASE_URL_FILE} ]]; then - echo "Failed to get Base URL value from database." - exit 1 - fi - echo "$(cat ${JIRA_BASE_URL_FILE}) was written to the ${JIRA_BASE_URL_FILE} file." -fi - -echo "Step4: Write jira license to file" -JIRA_LICENSE_FILE="license" -if [[ -s ${JIRA_LICENSE_FILE} ]]; then - echo "File ${JIRA_LICENSE_FILE} was found. License: $(cat ${JIRA_LICENSE_FILE})." - else - PGPASSWORD=${JIRA_DB_PASS} psql -h ${DB_HOST} -d ${JIRA_DB_NAME} -U ${JIRA_DB_USER} -Atc \ - "select license from productlicense;" > ${JIRA_LICENSE_FILE} - if [[ ! -s ${JIRA_LICENSE_FILE} ]]; then - echo "Failed to get jira license from database. Check DB configuration variables." - exit 1 - fi - echo "$(cat ${JIRA_LICENSE_FILE}) was written to the ${JIRA_LICENSE_FILE} file." -fi - -echo "Step5: Stop Jira" -sudo systemctl stop jira -if [[ $? -ne 0 ]]; then - echo "Jira did not stop. Please try to rerun script." - exit 1 -fi - -echo "Step6: Download database dump" -rm -rf ${DB_DUMP_NAME} -ARTIFACT_SIZE_BYTES=$(curl -sI ${DB_DUMP_URL} | grep "Content-Length" | awk {'print $2'} | tr -d '[:space:]') -ARTIFACT_SIZE_GB=$((${ARTIFACT_SIZE_BYTES}/1024/1024/1024)) -FREE_SPACE_KB=$(df -k --output=avail "$PWD" | tail -n1) -FREE_SPACE_GB=$((${FREE_SPACE_KB}/1024/1024)) -REQUIRED_SPACE_GB=$((5 + ${ARTIFACT_SIZE_GB})) -if [[ ${FREE_SPACE_GB} -lt ${REQUIRED_SPACE_GB} ]]; then - echo "Not enough free space for download." - echo "Free space: ${FREE_SPACE_GB} GB" - echo "Required space: ${REQUIRED_SPACE_GB} GB" - exit 1 -fi -# use computer style progress bar -time wget --progress=dot:giga "${DB_DUMP_URL}" -if [[ $? 
-ne 0 ]]; then - echo "Database dump download failed! Pls check available disk space." - exit 1 -fi - -echo "Step7: SQL Restore" -echo "Drop database" -PGPASSWORD=${JIRA_DB_PASS} dropdb -U ${JIRA_DB_USER} -h ${DB_HOST} ${JIRA_DB_NAME} -if [[ $? -ne 0 ]]; then - echo "Drop DB failed." - exit 1 -fi -sleep 5 -echo "Create database" -PGPASSWORD=${JIRA_DB_PASS} createdb -U ${JIRA_DB_USER} -h ${DB_HOST} -T template0 -E "UNICODE" -l "C" ${JIRA_DB_NAME} -if [[ $? -ne 0 ]]; then - echo "Create database failed." - exit 1 -fi -sleep 5 -echo "PG Restore" -time PGPASSWORD=${JIRA_DB_PASS} pg_restore --schema=public -v -U ${JIRA_DB_USER} -h ${DB_HOST} -d ${JIRA_DB_NAME} ${DB_DUMP_NAME} -if [[ $? -ne 0 ]]; then - echo "SQL Restore failed!" - exit 1 -fi - -echo "Step8: Update jira.baseurl property in database" -if [[ -s ${JIRA_BASE_URL_FILE} ]]; then - BASE_URL=$(cat $JIRA_BASE_URL_FILE) - if [[ $(PGPASSWORD=${JIRA_DB_PASS} psql -h ${DB_HOST} -d ${JIRA_DB_NAME} -U ${JIRA_DB_USER} -c \ - "update propertystring - set propertyvalue = '${BASE_URL}' - from propertyentry PE - where PE.id=propertystring.id - and PE.property_key = 'jira.baseurl';") != "UPDATE 1" ]]; then - echo "Couldn't update database jira.baseurl property. Please check your database connection." - exit 1 - else - echo "The database jira.baseurl property was updated with ${BASE_URL}" - fi -else - echo "The ${JIRA_BASE_URL_FILE} file doesn't exist or empty. Please check file existence or 'jira.baseurl' property in the database." - exit 1 -fi - -echo "Step9: Update jira license in database" -if [[ -s ${JIRA_LICENSE_FILE} ]]; then - LICENSE=$(cat ${JIRA_LICENSE_FILE}) - LICENSE_ID=$(PGPASSWORD=${JIRA_DB_PASS} psql -h ${DB_HOST} -d ${JIRA_DB_NAME} -U ${JIRA_DB_USER} -Atc \ - "select id from productlicense;") - if [[ -z "${LICENSE_ID}" ]]; then - echo "License update failed. License id value in the database is empty." - exit 1 - fi - if [[ $(PGPASSWORD=${JIRA_DB_PASS} psql -h ${DB_HOST} -d ${JIRA_DB_NAME} -U ${JIRA_DB_USER} -c \ - "update productlicense - set license = '${LICENSE}' - where id = '${LICENSE_ID}';") != "UPDATE 1" ]]; then - echo "Couldn't update database jira license. Please check your database connection." - exit 1 - else - echo "The database jira license was updated with ${LICENSE}" - fi -else - echo "The ${JIRA_LICENSE_FILE} file doesn't exist or empty. Please check file existence or jira license in the database." - exit 1 -fi - -echo "Step10: Start Jira" - sudo systemctl start jira - -rm -rf ${DB_DUMP_NAME} - -echo "Step11: Remove ${JIRA_BASE_URL_FILE} file" -sudo rm ${JIRA_BASE_URL_FILE} - -echo "Step12: Remove ${JIRA_LICENSE_FILE} file" -sudo rm ${JIRA_LICENSE_FILE} - -echo "DCAPT util script execution is finished successfully." -echo # move to a new line - -echo "Important: new admin user credentials are admin/admin" -echo "Wait a couple of minutes until Jira is started." diff --git a/app/util/jira/upload_attachments.sh b/app/util/jira/upload_attachments.sh deleted file mode 100644 index 6bf03ce87..000000000 --- a/app/util/jira/upload_attachments.sh +++ /dev/null @@ -1,165 +0,0 @@ -#!/bin/bash - -# Read command line arguments -while [[ "$#" -gt 0 ]]; do case $1 in - --jsm) jsm=1 ;; - --small) small=1 ;; - --custom) custom=1 ;; - --force) - if [ -n "$2" ] && [ "${2:0:1}" != "-" ]; then - force=1 - version=${2} - shift - else - force=1 - fi - ;; - *) echo "Unknown parameter passed: $1"; exit 1;; -esac; shift; done - -if [[ ! $(systemctl status jira) ]]; then - echo "The Jira service was not found on this host." 
\ - "Please make sure you are running this script on a host that is running Jira." - exit 1 -fi - -################### Variables section ################### -# Jira version variables -JIRA_VERSION_FILE="/media/atl/jira/shared/jira-software.version" - -# Jira/JSM supported versions - -SUPPORTED_JIRA_VERSIONS=(8.20.26 9.4.10) -SUPPORTED_JSM_VERSIONS=(4.20.26 5.4.10) - -SUPPORTED_VERSIONS=("${SUPPORTED_JIRA_VERSIONS[@]}") -if [[ ${jsm} == 1 ]]; then - JIRA_VERSION_FILE="/media/atl/jira/shared/jira-servicedesk.version" - SUPPORTED_VERSIONS=("${SUPPORTED_JSM_VERSIONS[@]}") -fi -JIRA_VERSION=$(sudo su jira -c "cat ${JIRA_VERSION_FILE}") -if [[ -z "$JIRA_VERSION" ]]; then - echo "ERROR: Failed to get Jira version. If your application type is JSM use flag '--jsm'." \ - "Otherwise check if JIRA_VERSION_FILE variable (${JIRA_VERSION_FILE})" \ - "has a valid file path of the Jira version file or set your Cluster JIRA_VERSION explicitly." - exit 1 -fi -echo "Jira Version: ${JIRA_VERSION}" - -DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/jira" -if [[ ${jsm} == 1 ]]; then - DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/jsm" -fi -ATTACHMENTS_TAR="attachments.tar.gz" -ATTACHMENTS_DIR="attachments" -AVATARS_DIR="avatars" -DATASETS_SIZE="large" -if [[ ${jsm} == 1 && ${small} == 1 ]]; then - # Only JSM supports "small" dataset - DATASETS_SIZE="small" -fi -ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/${JIRA_VERSION}/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" -TMP_DIR="/tmp" -EFS_DIR="/media/atl/jira/shared/data" -################### End of variables section ################### - -# Custom version check -if [[ ${custom} == 1 ]]; then - ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/$JIRA_VERSION/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" - if curl --output /dev/null --silent --head --fail "$ATTACHMENTS_TAR_URL"; then - echo "Custom version $JIRA_VERSION dataset URL found: ${ATTACHMENTS_TAR_URL}" - else - echo "Error: there is no dataset for version $JIRA_VERSION" - exit 1 - fi -# Check if Jira version is supported -elif [[ ! "${SUPPORTED_VERSIONS[*]}" =~ ${JIRA_VERSION} ]]; then - echo "Jira Version: ${JIRA_VERSION} is not officially supported by Data Center App Performance Toolkit." - echo "Supported Jira Versions: ${SUPPORTED_VERSIONS[*]}" - echo "If you want to force apply an existing datasets to your Jira, use --force flag with version of dataset you want to apply:" - echo "e.g. ./upload_attachments --force 8.5.0" - echo "!!! Warning !!! This may broke your Jira instance." - # Check if --force flag is passed into command - if [[ ${force} == 1 ]]; then - # Check if passed Jira version is in list of supported - if [[ "${SUPPORTED_VERSIONS[*]}" =~ ${version} ]]; then - ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/${version}/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" - echo "Force mode. Dataset URL: ${ATTACHMENTS_TAR_URL}" - else - LAST_DATASET_VERSION=${SUPPORTED_VERSIONS[${#SUPPORTED_VERSIONS[@]}-1]} - ATTACHMENTS_TAR_URL="${DATASETS_AWS_BUCKET}/$LAST_DATASET_VERSION/${DATASETS_SIZE}/${ATTACHMENTS_TAR}" - echo "Specific dataset version was not specified after --force flag, using the last available: ${LAST_DATASET_VERSION}" - echo "Dataset URL: ${ATTACHMENTS_TAR_URL}" - fi - else - # No force flag - exit 1 - fi -fi - -echo "!!! Warning !!!" -echo # move to a new line -echo "This script restores attachments into Jira DC created with AWS Quickstart defaults." -echo "You can review or modify default variables in 'Variables section' of this script." 
-echo # move to a new line -echo "Variables:" -echo "EFS_DIR=${EFS_DIR}" -echo "ATTACHMENTS_TAR_URL=${ATTACHMENTS_TAR_URL}" -echo # move to a new line -read -p "I confirm that variables are correct and want to proceed (y/n)? " -n 1 -r -echo # move to a new line -if [[ ! $REPLY =~ ^[Yy]$ ]] -then - echo "Script was canceled." - exit 1 -fi - - -echo "Step1: Download msrsync" -# https://github.com/jbd/msrsync -cd ${TMP_DIR} || exit 1 -if [[ -s msrsync ]]; then - echo "msrsync already downloaded" -else - sudo su jira -c "wget https://raw.githubusercontent.com/jbd/msrsync/master/msrsync && chmod +x msrsync" -fi - -echo "Step2: Download attachments" -sudo su -c "rm -rf ${ATTACHMENTS_TAR}" -ARTIFACT_SIZE_BYTES=$(curl -sI ${ATTACHMENTS_TAR_URL} | grep "Content-Length" | awk {'print $2'} | tr -d '[:space:]') -ARTIFACT_SIZE_GB=$((${ARTIFACT_SIZE_BYTES}/1024/1024/1024)) -FREE_SPACE_KB=$(df -k --output=avail "$PWD" | tail -n1) -FREE_SPACE_GB=$((${FREE_SPACE_KB}/1024/1024)) -REQUIRED_SPACE_GB=$((5 + ${ARTIFACT_SIZE_GB})) -if [[ ${FREE_SPACE_GB} -lt ${REQUIRED_SPACE_GB} ]]; then - echo "Not enough free space for download." - echo "Free space: ${FREE_SPACE_GB} GB" - echo "Required space: ${REQUIRED_SPACE_GB} GB" - exit 1 -fi; -sudo su jira -c "time wget --progress=dot:giga ${ATTACHMENTS_TAR_URL}" - -echo "Step3: Untar attachments to tmp folder" -sudo su -c "rm -rf ${ATTACHMENTS_DIR}" -sudo su jira -c "tar -xzf ${ATTACHMENTS_TAR} --checkpoint=.10000" -if [[ $? -ne 0 ]]; then - echo "Untar failed!" - exit 1 -fi -echo "Counting total files number:" -sudo su jira -c "find ${ATTACHMENTS_DIR} -type f -print | wc -l" -echo "Deleting ${ATTACHMENTS_TAR}" -sudo su -c "rm -rf ${ATTACHMENTS_TAR}" - -echo "Step4: Copy attachments to EFS" -sudo su jira -c "time ./msrsync -P -p 100 -f 3000 ${ATTACHMENTS_DIR} ${EFS_DIR}" -sudo su -c "rm -rf ${ATTACHMENTS_DIR}" - -if [[ ${jsm} == 1 ]]; then - echo "Step5: Copy avatars to EFS" - sudo su jira -c "time ./msrsync -P -p 100 -f 3000 ${AVATARS_DIR} ${EFS_DIR}" - sudo su -c "rm -rf ${AVATARS_DIR}" -fi - -echo "DCAPT util script execution is finished successfully." -echo # move to a new line \ No newline at end of file diff --git a/app/util/jsm/README.md b/app/util/jsm/README.md deleted file mode 100644 index f5c042d91..000000000 --- a/app/util/jsm/README.md +++ /dev/null @@ -1,32 +0,0 @@ -# JSM "large" dataset -Use following command to upload enterprise-scale "large" dataset to the Jira Service Management Data Center. -This dataset is suitable for DC apps approval process performance test results generation. - -#### JSM populate DB "large" -Populate DB from a postgres db dump: - -`./app/util/jira/populate_db.sh --jsm` - -#### JSM upload attachments "large" -Copy attachments: - -`./app/util/jira/upload_attachments.sh --jsm` - -#### JSM index sync -To check if index successfully replicated to a new node after scaling event execute command on a new node: - -`./app/util/jira/index-sync.sh` - -# JSM "small" dataset -There is also a `small` dataset available for JSM. This dataset is suitable for local -Data Center Apps Performance Toolkit setup, testing and app-specific actions development. 
- -#### JSM populate DB "small" -Populate DB from a postgres db dump: - -`./app/util/jira/populate_db.sh --jsm --small` - -#### JSM upload attachments "small" -Copy attachments: - -`./app/util/jira/upload_attachments.sh --jsm --small` \ No newline at end of file From dcc524b9c5bb856d0cb1073284399015f7c6dbec Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 6 Dec 2023 06:01:02 +0100 Subject: [PATCH 040/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/bamboo.yml --- app/bamboo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 3cda58273..0ea9db586 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -125,7 +125,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "119.0.6045.105" # Supports Chrome version 119. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.62" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 0ce13f47211bee5ad51e586fbe35f63b82a3262d Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 6 Dec 2023 06:01:02 +0100 Subject: [PATCH 041/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/confluence.yml --- app/confluence.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/confluence.yml b/app/confluence.yml index fc2666a92..fbd10721e 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -118,7 +118,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "119.0.6045.105" # Supports Chrome version 119. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.62" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 24455b7bcd93b532517d4305d17c0f79fc403ac4 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 6 Dec 2023 06:01:03 +0100 Subject: [PATCH 042/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/bitbucket.yml --- app/bitbucket.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 63a4ac5a2..3636f780f 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -91,7 +91,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "119.0.6045.105" # Supports Chrome version 119. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.62" # Supports Chrome version 120. 
You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 83730bd60563a5d77b973e6df7bd926062b41bdf Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 6 Dec 2023 06:01:03 +0100 Subject: [PATCH 043/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/jira.yml --- app/jira.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jira.yml b/app/jira.yml index 7e8c60540..3703a6e17 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -119,7 +119,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "119.0.6045.105" # Supports Chrome version 119. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.62" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 63d8d6fbfcc4710deb9196fa4260e704cc9204a9 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 6 Dec 2023 06:01:04 +0100 Subject: [PATCH 044/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/jsm.yml --- app/jsm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jsm.yml b/app/jsm.yml index a54bf0ffe..5152e5a23 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -171,7 +171,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "119.0.6045.105" # Supports Chrome version 119. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.62" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 67774089078dccb2efb2cd293689fbc3780dd85f Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Wed, 6 Dec 2023 14:08:52 +0200 Subject: [PATCH 045/152] fix delete network interface --- app/util/k8s/terminate_cluster.py | 151 +++++++++++++++++++++++++----- 1 file changed, 125 insertions(+), 26 deletions(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 6f5434965..aa8d653d0 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -109,6 +109,29 @@ def wait_for_rds_delete(rds_client, db_name): logging.error(f"RDS {db_name} was not deleted in {timeout} seconds.") +def wait_for_network_interface_to_be_detached(ec2_client, network_interface_id): + timeout = 600 # 10 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + status = ec2_client.describe_network_interfaces( + NetworkInterfaceIds=[network_interface_id])['NetworkInterfaces'][0]['Attachment']['Status'] + if status != 'attached': + return + except Exception as e: + logging.info(f"Unexpected error occurs during detaching the network interface {network_interface_id}.") + break + logging.info(f"Network interface {network_interface_id} is in status {status}. " + f"Attempt {attempt}/{attempts}. 
Sleeping {sleep_time} seconds.") + sleep(sleep_time) + attempt += 1 + else: + logging.error(f"Network interface {network_interface_id} is not detached in {timeout} seconds.") + + def delete_record_from_hosted_zone(route53_client, hosted_zone_id, record): change_batch = { 'Changes': [ @@ -175,32 +198,62 @@ def delete_cluster(aws_region, cluster_name): def delete_hosted_zone_record_if_exists(aws_region, cluster_name): environment_name = cluster_name.replace('atlas-', '').replace('-cluster', '') + eks_client = boto3.client('eks', region_name=aws_region) + elb_client = boto3.client('elb', region_name=aws_region) + acm_client = boto3.client('acm', region_name=aws_region) + domain_name = None try: - route53_client = boto3.client('route53', region_name=aws_region) - existed_hosted_zones = route53_client.list_hosted_zones()["HostedZones"] - if not existed_hosted_zones: - return - for hosted_zone in existed_hosted_zones: - if environment_name in hosted_zone['Name']: - hosted_zone_to_delete = hosted_zone - records_hosted_zone_to_delete = route53_client.list_resource_record_sets( - HostedZoneId=hosted_zone['Id'])['ResourceRecordSets'] - for record in records_hosted_zone_to_delete: - if record['Type'] not in ['NS', 'SOA']: - delete_record_from_hosted_zone(route53_client, hosted_zone['Id'], record) - route53_client.delete_hosted_zone(Id=hosted_zone_to_delete['Id']) - wait_for_hosted_zone_delete(route53_client, hosted_zone['Id']) - break - - existed_hosted_zones = route53_client.list_hosted_zones()["HostedZones"] - existed_hosted_zones_ids = [zone["Id"] for zone in existed_hosted_zones] - for hosted_zone_id in existed_hosted_zones_ids: - records_set = route53_client.list_resource_record_sets(HostedZoneId=hosted_zone_id)['ResourceRecordSets'] - for record in records_set: - if environment_name in record['Name']: - delete_record_from_hosted_zone(route53_client, hosted_zone_id, record) - except Exception as e: - logging.error(f"Unexpected error occurs: {e}") + cluster_info = eks_client.describe_cluster(name=cluster_name)['cluster'] + cluster_vpc_config = cluster_info['resourcesVpcConfig'] + cluster_vpc_id = cluster_vpc_config['vpcId'] + cluster_elb = [lb + for lb in elb_client.describe_load_balancers()['LoadBalancerDescriptions'] + if lb['VPCId'] == cluster_vpc_id] + if cluster_elb: + cluster_elb_listeners = cluster_elb[0]['ListenerDescriptions'] + for listener in cluster_elb_listeners: + if listener['Listener']['Protocol'] == 'HTTPS': + if 'SSLCertificateId' in listener['Listener']: + certificate_arn = listener['Listener']['SSLCertificateId'] + certificate_info = acm_client.describe_certificate( + CertificateArn=certificate_arn)['Certificate'] + certificate_domain_name = certificate_info['DomainName'] + domain_name = certificate_domain_name.replace('*.', '').replace(environment_name, '') + break + else: + return + + except Exception: + logging.info(f'No hosted zone found for cluster: {cluster_name}') + return + + if domain_name: + try: + route53_client = boto3.client('route53', region_name=aws_region) + existed_hosted_zones = route53_client.list_hosted_zones()["HostedZones"] + if not existed_hosted_zones: + return + for hosted_zone in existed_hosted_zones: + if f'{environment_name}{domain_name}.' 
== hosted_zone['Name']: + hosted_zone_to_delete = hosted_zone + records_hosted_zone_to_delete = route53_client.list_resource_record_sets( + HostedZoneId=hosted_zone['Id'])['ResourceRecordSets'] + for record in records_hosted_zone_to_delete: + if record['Type'] not in ['NS', 'SOA']: + delete_record_from_hosted_zone(route53_client, hosted_zone['Id'], record) + route53_client.delete_hosted_zone(Id=hosted_zone_to_delete['Id']) + wait_for_hosted_zone_delete(route53_client, hosted_zone['Id']) + break + + existed_hosted_zones = route53_client.list_hosted_zones()["HostedZones"] + existed_hosted_zones_ids = [zone["Id"] for zone in existed_hosted_zones] + for hosted_zone_id in existed_hosted_zones_ids: + records_set = route53_client.list_resource_record_sets(HostedZoneId=hosted_zone_id)['ResourceRecordSets'] + for record in records_set: + if environment_name in record['Name']: + delete_record_from_hosted_zone(route53_client, hosted_zone_id, record) + except Exception as e: + logging.error(f"Unexpected error occurs: {e}") def delete_lb(aws_region, vpc_id): @@ -291,7 +344,6 @@ def delete_subnets(ec2_resource, vpc_id): vpc_resource = ec2_resource.Vpc(vpc_id) subnets_all = vpc_resource.subnets.all() subnets = [ec2_resource.Subnet(subnet.id) for subnet in subnets_all] - if subnets: try: for sub in subnets: @@ -356,6 +408,9 @@ def get_vpc_region_by_name(vpc_name): def delete_rds(aws_region, vpc_id): rds_client = boto3.client('rds', region_name=aws_region) + ec2_client = boto3.client('ec2', region_name=aws_region) + network_interface_id = None + try: db_instances = rds_client.describe_db_instances()['DBInstances'] except exceptions.EndpointConnectionError as e: @@ -364,6 +419,31 @@ def delete_rds(aws_region, vpc_id): db_names_and_subnets = [(db_instance['DBInstanceIdentifier'], db_instance['DBSubnetGroup']['DBSubnetGroupName']) for db_instance in db_instances if vpc_id == db_instance['DBSubnetGroup']['VpcId']] + if db_names_and_subnets: + db_name = db_names_and_subnets[0][0] + try: + response = rds_client.describe_db_instances(DBInstanceIdentifier=db_name)['DBInstances'][0] + if 'VpcSecurityGroups' in response: + db_security_groups = response['VpcSecurityGroups'] + if db_security_groups: + db_security_group_id = db_security_groups[0]['VpcSecurityGroupId'] + response = ec2_client.describe_network_interfaces( + Filters=[ + { + 'Name': 'group-id', + 'Values': [db_security_group_id] + } + ] + ) + if response['NetworkInterfaces']: + network_interface_id = response['NetworkInterfaces'][0]['NetworkInterfaceId'] + else: + return + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'DBInstanceNotFound': + logging.error(f'Could not found the RDS, name: {db_name}') + return + for db_name, subnet_name in db_names_and_subnets: try: logging.info(f"Deleting RDS {db_name} for VPC id: {vpc_id}.") @@ -375,6 +455,24 @@ def delete_rds(aws_region, vpc_id): except Boto3Error as e: logging.error(f"Delete RDS {db_name} failed with error: {e}") + if network_interface_id: + try: + network_interface_info = ec2_client.describe_network_interfaces(NetworkInterfaceIds=[network_interface_id]) + if 'NetworkInterfaces' in network_interface_info: + if network_interface_info['NetworkInterfaces']: + if network_interface_info['NetworkInterfaces'][0]['Attachment']['Status'] == 'attached': + network_interface_attach_id = network_interface_info['NetworkInterfaces'][0] \ + ['Attachment']['AttachmentId'] + ec2_client.detach_network_interface( + AttachmentId=network_interface_attach_id, Force=True + ) + 
wait_for_network_interface_to_be_detached(ec2_client, network_interface_id) + ec2_client.delete_network_interface(NetworkInterfaceId=network_interface_id) + logging.info(f'Network interface {network_interface_id} is deleted.') + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'InvalidNetworkInterfaceID.NotFound': + return + def terminate_vpc(vpc_name, aws_region=None): if not aws_region: @@ -800,3 +898,4 @@ def main(): if __name__ == '__main__': logging.basicConfig(level=logging.INFO) main() + From 3da46f3f03ec1a6cdb30807a0f3be3c3fc71d93a Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Wed, 6 Dec 2023 14:10:31 +0200 Subject: [PATCH 046/152] fix delete network interface --- app/util/k8s/terminate_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index aa8d653d0..d398e018c 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -60,7 +60,7 @@ def wait_for_hosted_zone_delete(route53_client, hosted_zone_id): except route53_client.exceptions.NoSuchHostedZone: logging.info(f"Hosted zone {hosted_zone_id} was successfully deleted.") break - logging.info(f"Hosted zone {hosted_zone_id} is still exists. " + logging.info(f"Hosted zone {hosted_zone_id} still exists. " f"Attempt {attempt}/{attempts}. Sleeping {sleep_time} seconds.") sleep(sleep_time) attempt += 1 From 21290ea5d81e1c8f3d4df23fce0b61a6d9bb367f Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Wed, 6 Dec 2023 22:01:10 +0200 Subject: [PATCH 047/152] fix delete network interface --- app/util/k8s/terminate_cluster.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index d398e018c..b42fcecc2 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -122,7 +122,7 @@ def wait_for_network_interface_to_be_detached(ec2_client, network_interface_id): if status != 'attached': return except Exception as e: - logging.info(f"Unexpected error occurs during detaching the network interface {network_interface_id}.") + logging.info(f"Unexpected error occurs during detaching the network interface {network_interface_id}, {e}") break logging.info(f"Network interface {network_interface_id} is in status {status}. " f"Attempt {attempt}/{attempts}. 
Sleeping {sleep_time} seconds.") @@ -151,7 +151,6 @@ def delete_record_from_hosted_zone(route53_client, hosted_zone_id, record): logging.error(f'Unexpected error occurs, could not delete record from hosted zone {hosted_zone_id}: {e}') - def delete_nodegroup(aws_region, cluster_name): try: eks_client = boto3.client('eks', region_name=aws_region) @@ -206,9 +205,8 @@ def delete_hosted_zone_record_if_exists(aws_region, cluster_name): cluster_info = eks_client.describe_cluster(name=cluster_name)['cluster'] cluster_vpc_config = cluster_info['resourcesVpcConfig'] cluster_vpc_id = cluster_vpc_config['vpcId'] - cluster_elb = [lb - for lb in elb_client.describe_load_balancers()['LoadBalancerDescriptions'] - if lb['VPCId'] == cluster_vpc_id] + cluster_elb = [lb for lb in elb_client.describe_load_balancers()['LoadBalancerDescriptions'] \ + if lb['VPCId'] == cluster_vpc_id] if cluster_elb: cluster_elb_listeners = cluster_elb[0]['ListenerDescriptions'] for listener in cluster_elb_listeners: @@ -248,7 +246,8 @@ def delete_hosted_zone_record_if_exists(aws_region, cluster_name): existed_hosted_zones = route53_client.list_hosted_zones()["HostedZones"] existed_hosted_zones_ids = [zone["Id"] for zone in existed_hosted_zones] for hosted_zone_id in existed_hosted_zones_ids: - records_set = route53_client.list_resource_record_sets(HostedZoneId=hosted_zone_id)['ResourceRecordSets'] + records_set = route53_client.list_resource_record_sets(HostedZoneId=hosted_zone_id) \ + ['ResourceRecordSets'] for record in records_set: if environment_name in record['Name']: delete_record_from_hosted_zone(route53_client, hosted_zone_id, record) @@ -461,8 +460,8 @@ def delete_rds(aws_region, vpc_id): if 'NetworkInterfaces' in network_interface_info: if network_interface_info['NetworkInterfaces']: if network_interface_info['NetworkInterfaces'][0]['Attachment']['Status'] == 'attached': - network_interface_attach_id = network_interface_info['NetworkInterfaces'][0] \ - ['Attachment']['AttachmentId'] + network_interface_attach_id = \ + network_interface_info['NetworkInterfaces'][0]['Attachment']['AttachmentId'] ec2_client.detach_network_interface( AttachmentId=network_interface_attach_id, Force=True ) @@ -471,7 +470,7 @@ def delete_rds(aws_region, vpc_id): logging.info(f'Network interface {network_interface_id} is deleted.') except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == 'InvalidNetworkInterfaceID.NotFound': - return + return def terminate_vpc(vpc_name, aws_region=None): @@ -898,4 +897,3 @@ def main(): if __name__ == '__main__': logging.basicConfig(level=logging.INFO) main() - From af385f4df732d3a00c5eaffbf56325a344ac5a3e Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Wed, 6 Dec 2023 22:07:04 +0200 Subject: [PATCH 048/152] fix delete network interface --- app/util/k8s/terminate_cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index b42fcecc2..0ba61191b 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -205,7 +205,7 @@ def delete_hosted_zone_record_if_exists(aws_region, cluster_name): cluster_info = eks_client.describe_cluster(name=cluster_name)['cluster'] cluster_vpc_config = cluster_info['resourcesVpcConfig'] cluster_vpc_id = cluster_vpc_config['vpcId'] - cluster_elb = [lb for lb in elb_client.describe_load_balancers()['LoadBalancerDescriptions'] \ + cluster_elb = [lb for lb in elb_client.describe_load_balancers()['LoadBalancerDescriptions'] if lb['VPCId'] == 
cluster_vpc_id] if cluster_elb: cluster_elb_listeners = cluster_elb[0]['ListenerDescriptions'] @@ -247,7 +247,7 @@ def delete_hosted_zone_record_if_exists(aws_region, cluster_name): existed_hosted_zones_ids = [zone["Id"] for zone in existed_hosted_zones] for hosted_zone_id in existed_hosted_zones_ids: records_set = route53_client.list_resource_record_sets(HostedZoneId=hosted_zone_id) \ - ['ResourceRecordSets'] + ['ResourceRecordSets'] for record in records_set: if environment_name in record['Name']: delete_record_from_hosted_zone(route53_client, hosted_zone_id, record) From fee9520df80a1440c503a0e6ff6af1bd9e4ea599 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Thu, 7 Dec 2023 06:02:13 +0100 Subject: [PATCH 049/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/bamboo.yml --- app/bamboo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 0ea9db586..3d9aded22 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -125,7 +125,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.62" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.71" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From e4bb8cc4ad451389b6a5556756f5e03ffb3b6544 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Thu, 7 Dec 2023 06:02:14 +0100 Subject: [PATCH 050/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/confluence.yml --- app/confluence.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/confluence.yml b/app/confluence.yml index fbd10721e..54407a1cd 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -118,7 +118,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.62" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.71" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 1e80bcb94fe98172ca64f2acbe3cb886ae177579 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Thu, 7 Dec 2023 06:02:14 +0100 Subject: [PATCH 051/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/bitbucket.yml --- app/bitbucket.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 3636f780f..d8d11b14d 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -91,7 +91,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.62" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.71" # Supports Chrome version 120. 
You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 098edbbe4988e6a7813aa957acc3f20b69a22901 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Thu, 7 Dec 2023 06:02:15 +0100 Subject: [PATCH 052/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/jira.yml --- app/jira.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jira.yml b/app/jira.yml index 3703a6e17..333a31455 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -119,7 +119,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.62" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.71" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 497297d3751dfd6e8a18b32737167f252f51bc51 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Thu, 7 Dec 2023 06:02:15 +0100 Subject: [PATCH 053/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/jsm.yml --- app/jsm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jsm.yml b/app/jsm.yml index 5152e5a23..f1c9fe1e3 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -171,7 +171,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.62" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.71" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 1abafc857b4d028f3fbab3250b18d5f4cfc5d78f Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 7 Dec 2023 10:51:24 +0200 Subject: [PATCH 054/152] fix delete network interface --- app/util/k8s/terminate_cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 0ba61191b..1ea86da8d 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -246,8 +246,8 @@ def delete_hosted_zone_record_if_exists(aws_region, cluster_name): existed_hosted_zones = route53_client.list_hosted_zones()["HostedZones"] existed_hosted_zones_ids = [zone["Id"] for zone in existed_hosted_zones] for hosted_zone_id in existed_hosted_zones_ids: - records_set = route53_client.list_resource_record_sets(HostedZoneId=hosted_zone_id) \ - ['ResourceRecordSets'] + records_set = route53_client.list_resource_record_sets( + HostedZoneId=hosted_zone_id)['ResourceRecordSets'] for record in records_set: if environment_name in record['Name']: delete_record_from_hosted_zone(route53_client, hosted_zone_id, record) From aeb45bf7ff3159bba1cd6a2d38ccad86877bb23a Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Thu, 7 Dec 2023 14:10:26 +0100 Subject: [PATCH 055/152] DCA-2150 update windows docs --- app/util/k8s/README.MD | 38 +- ...s-performance-toolkit-user-guide-bamboo.md | 10 +- ...erformance-toolkit-user-guide-bitbucket.md | 24 +- ...rformance-toolkit-user-guide-confluence.md | 24 +- ...ps-performance-toolkit-user-guide-crowd.md | 16 +- ...-performance-toolkit-user-guide-jira-cf.md | 1010 -------------- ...pps-performance-toolkit-user-guide-jira.md | 26 +- 
...s-performance-toolkit-user-guide-jsm-cf.md | 1167 ----------------- ...apps-performance-toolkit-user-guide-jsm.md | 26 +- 9 files changed, 76 insertions(+), 2265 deletions(-) delete mode 100644 docs/dc-apps-performance-toolkit-user-guide-jira-cf.md delete mode 100644 docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index d484fe1a4..03141257d 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -1,17 +1,5 @@ # Development environment -## Note for Windows users -For some Windows setups, Git Bash `$PWD` command does not return correct full path. -In this case $PWD needs to be changed to absolute path directory in Windows format. -E.g. -```bash -docker run --pull=always --env-file aws_envs \ --v "//c//Users//user//dc-app-performance-toolkit//app//util//k8s//dcapt.tfvars:/data-center-terraform/conf.tfvars" \ --v "//c//Users//user//dc-app-performance-toolkit//app//util//k8s//dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ --v "//c//Users//user//dc-app-performance-toolkit//app//util//k8s///logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./install.sh -c conf.tfvars -``` - ## Create development environment * set AWS credential in [aws_envs](./aws_envs) file * set correct values in [dcapt-small.tfvars](./dcapt-small.tfvars) file: @@ -21,16 +9,16 @@ docker run --pull=always --env-file aws_envs \ * run install development environment command: ``` bash docker run --pull=always --env-file aws_envs \ --v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ --v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ --v "$PWD/logs:/data-center-terraform/logs" \ +-v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ +-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ +-v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` ## Terminate development environment ``` bash docker run --pull=always --env-file aws_envs \ --v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ --v "$PWD/logs:/data-center-terraform/logs" \ +-v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ +-v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./uninstall.sh -c conf.tfvars ``` @@ -44,9 +32,9 @@ docker run --pull=always --env-file aws_envs \ * run install enterprise-scale environment command: ``` bash docker run --pull=always --env-file aws_envs \ --v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ --v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ --v "$PWD/logs:/data-center-terraform/logs" \ +-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ +-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ +-v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` ## Terminate enterprise-scale environment @@ -55,8 +43,8 @@ Option `-t` deletes Terraform state files for all installed environment in the s If state files are needed, e.g. there are other running clusters for other product, do not use `-t` flag in below command. 
``` bash docker run --pull=always --env-file aws_envs \ --v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ --v "$PWD/logs:/data-center-terraform/logs" \ +-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ +-v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./uninstall.sh -t -c conf.tfvars ``` @@ -67,8 +55,8 @@ export ENVIRONMENT_NAME=your_environment_name export REGION=us-east-2 docker run --pull=always --env-file aws_envs \ --v "$PWD/k8s_logs:/data-center-terraform/k8s_logs" \ --v "$PWD/logs:/data-center-terraform/logs" \ +-v "/$PWD/k8s_logs:/data-center-terraform/k8s_logs" \ +-v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs ``` @@ -81,6 +69,6 @@ export REGION=us-east-2 docker run --pull=always --env-file aws_envs \ --workdir="/data-center-terraform" \ --entrypoint="python" \ --v "$PWD/terminate_cluster.py:/data-center-terraform/terminate_cluster.py" \ +-v "/$PWD/terminate_cluster.py:/data-center-terraform/terminate_cluster.py" \ atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-cluster --aws_region $REGION ``` \ No newline at end of file diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index f4e2410e7..5e1193d68 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -56,13 +56,13 @@ specifically for performance testing during the DC app review process. Use `BX02-9YO1-IN86-LO5G` Server ID for generation. {{% /note %}} -6. From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash user) -7. +6. From local terminal (Git Bash for Windows users) start the installation: + ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bamboo`. diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 75c07187f..194c783fb 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -77,12 +77,12 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase 6. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +7. 
From local terminal (Git Bash for Windows users) start the installation (~20 min): ``` bash docker run --env-file aws_envs \ - -v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. @@ -227,12 +227,12 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** 6. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +7. From local terminal (Git Bash for Windows users) start the installation (~40min): ``` bash docker run --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. @@ -424,12 +424,12 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `bitbucket_replica_count` value to `2`. -3. From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +3. From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 36ad0572a..97d0c2ba2 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -77,12 +77,12 @@ Below process describes how to install low-tier Confluence DC with "small" datas 6. 
Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +7. From local terminal (Git Bash for Windows users) start the installation (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. @@ -313,12 +313,12 @@ Below process describes how to install enterprise-scale Confluence DC with "larg 6. Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +7. From local terminal (Git Bash for Windows users) start the installation (~40min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. @@ -501,12 +501,12 @@ To receive scalability benchmark results for two-node Confluence DC **with** app 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `confluence_replica_count` value to `2`. -3. From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +3. From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. 
diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index 28b5c6da5..63efe8aa0 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -54,12 +54,12 @@ specifically for performance testing during the DC app review process. Use `BX02-9YO1-IN86-LO5G` Server ID for generation. {{% /note %}} -6. From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +6. From local terminal (Git Bash for Windows users) start the installation (~40min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`. @@ -281,12 +281,12 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `crowd_replica_count` value to `2`. -3. From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +3. From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md b/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md deleted file mode 100644 index 698c5750f..000000000 --- a/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md +++ /dev/null @@ -1,1010 +0,0 @@ ---- -title: "Data Center App Performance Toolkit User Guide For Jira (CloudFormation deployment)" -platform: platform -product: marketplace -category: devguide -subcategory: build -date: "2023-09-20" ---- -# Data Center App Performance Toolkit User Guide For Jira (CloudFormation deployment) - -{{% warning %}} -CloudFormation deployment option will be no longer supported starting from January 2024. -It is recommended to use TerraForm deployment. More details could be found in [User Guide](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/). -{{% /warning %}} - -This document walks you through the process of testing your app on Jira using the Data Center App Performance Toolkit. 
These instructions focus on producing the required [performance and scale benchmarks for your Data Center app](/platform/marketplace/dc-apps-performance-and-scale-testing/). - -{{% note %}} -Data Center App Performance Toolkit is focused on applications performance testing for Marketplace approval process. -For Jira DataCenter functional testing consider [JPT](http://go.atlassian.com/jpt). -{{% /note %}} - -In this document, we cover the use of the Data Center App Performance Toolkit on two types of environments: - -**[Development environment](#mainenvironmentdev)**: Jira Data Center environment for a test run of Data Center App Performance Toolkit and development of [app-specific actions](#appspecificactions). We recommend you use the [AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) with the parameters prescribed here. - -1. [Set up a development environment Jira Data Center on AWS](#devinstancesetup). -2. [Create a dataset for the development environment](#devdataset). -3. [Run toolkit on the development environment locally](#devtestscenario). -4. [Develop and test app-specific actions locally](#devappaction). - -**[Enterprise-scale environment](#mainenvironmententerprise)**: Jira Data Center environment used to generate Data Center App Performance Toolkit test results for the Marketplace approval process. Preferably, use the [AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) with the parameters prescribed below. These parameters provision larger, more powerful infrastructure for your Jira Data Center. - -5. [Set up an enterprise-scale environment Jira Data Center on AWS](#instancesetup). -6. [Load an enterprise-scale dataset on your Jira Data Center deployment](#preloading). -7. [Set up an execution environment for the toolkit](#executionhost). -8. [Running the test scenarios from execution environment against enterprise-scale Jira Data Center](#testscenario). - ---- - -## Development environment - -Running the tests in a development environment helps familiarize you with the toolkit. It'll also provide you with a lightweight and less expensive environment for developing. Once you're ready to generate test results for the Marketplace Data Center Apps Approval process, run the toolkit in an **enterprise-scale environment**. - -### 1. Setting up Jira Data Center development environment - -We recommend that you set up development environment using the [AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) (**How to deploy** tab). All the instructions on this page are optimized for AWS. If you already have an existing Jira Data Center environment, you can also use that too (if so, skip to [Create a dataset for the development environment](#devdataset)). - - -#### Using the AWS Quick Start for Jira - -If you are a new user, perform an end-to-end deployment. This involves deploying Jira into a _new_ ASI: - -Navigate to **[AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) > How to deploy** tab **> Deploy into a new ASI** link. 
- -If you have already deployed the ASI separately by using the [ASI Quick Start](https://aws.amazon.com/quickstart/architecture/atlassian-standard-infrastructure/)ASI Quick Start or by deploying another Atlassian product (Jira, Bitbucket, or Confluence Data Center development environment) with ASI, deploy Jira into your existing ASI: - -Navigate to **[AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) > How to deploy** tab **> Deploy into your existing ASI** link. - -{{% note %}} -You are responsible for the cost of AWS services used while running this Quick Start reference deployment. This Quick Start doesn't have any additional prices. See [Amazon EC2 pricing](https://aws.amazon.com/ec2/pricing/) for more detail. -{{% /note %}} - -To reduce costs, we recommend you to keep your deployment up and running only during the performance runs. - -#### AWS cost estimation for the development environment - -AWS Jira Data Center development environment infrastructure costs about 20 - 40$ per working week depending on such factors like region, instance type, deployment type of DB, and other. - -#### Quick Start parameters for development environment - -All important parameters are listed and described in this section. For all other remaining parameters, we recommend using the Quick Start defaults. - -**Jira setup** - -| Parameter | Recommended value | -| --------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Jira Product | Software | -| Version | The Data Center App Performance Toolkit officially supports `8.20.26`, `9.4.10` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | - -**Cluster nodes** - -| Parameter | Recommended value | -| --------- | ----------------- | -| Cluster node instance type | [t3.medium](https://aws.amazon.com/ec2/instance-types/t3/) (we recommend this instance type for its good balance between price and performance in testing environments) | -| Maximum number of cluster nodes | 1 | -| Minimum number of cluster nodes | 1 | -| Cluster node instance volume size | 50 | - - -**Database** - -| Parameter | Recommended value | -| --------- | ----------------- | -| The database engine to deploy with | PostgresSQL | -| The database engine version to use | 11 | -| Database instance class | [db.t3.medium](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Summary) | -| RDS Provisioned IOPS | 1000 | -| Master (admin) password | Password1! | -| Enable RDS Multi-AZ deployment | false | -| Application user database password | Password1! | -| Database storage | 200 | - - -**Networking (for new ASI)** - -| Parameter | Recommended value | -| --------- | ----------------- | -| Trusted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Availability Zones | _Select two availability zones in your region_ | -| Permitted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Make instance internet facing | True | -| Key Name | _The EC2 Key Pair to allow SSH access. 
See [Amazon EC2 Key Pairs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for more info._ | - -**Networking (for existing ASI)** - -| Parameter | Recommended value | -| --------- | ----------------- | -| Make instance internet facing | True | -| Permitted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Key Name | _The EC2 Key Pair to allow SSH access. See [Amazon EC2 Key Pairs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for more info._ | - -#### Running the setup wizard - -After successfully deploying the Jira Data Center on AWS, configure it as follows: - -1. In the AWS console, go to **Services** > **CloudFormation** > **Stack** > **Stack details** > **Select your stack**. -1. On the **Outputs** tab, copy the value of the **LoadBalancerURL** key. -1. Open **LoadBalancerURL** in your browser. This will take you to the Jira setup wizard. -1. On the **Set up application properties** page, fill in the following fields: - - **Application Title**: any name for your Jira Data Center deployment - - **Mode**: private - - **Base URL**: your stack's Elastic LoadBalancer URL - - Then select **Next**. -1. On the next page, fill in the **Your License Key** field in one of the following ways: - - Using your existing license - - Generating a Jira trial license - - Contacting Atlassian to be provided two time-bomb licenses for testing. Ask for the licenses in your ECOHELP ticket. - - Then select **Next**. -1. On the **Set up administrator account** page, fill in the following fields: - - **Full name**: a full name of the admin user - - **Email Address**: email address of the admin user - - **Username**: admin _(recommended)_ - - **Password**: admin _(recommended)_ - - **Confirm Password**: admin _(recommended)_ - - Then select **Next**. - -1. On the **Set up email notifications** page, configure your email notifications, and then select **Finish**. -1. On the first page of the welcome setup select **English (United States)** language. Other languages are not supported by the toolkit. -1. After going through the welcome setup, select **Create new project** to create a new project. - ---- - -### 2. Generate dataset for development environment - -After creating the development environment Jira Data Center, generate test dataset to run Data Center App Performance Toolkit: -- 1 Scrum software development project with 1-5 issues -- 1 Kanban software development project with 1-5 issues - ---- - -### 3. Run toolkit on the development environment locally - -{{% warning %}} -Make sure **English (United States)** language is selected as a default language on the **![cog icon](/platform/marketplace/images/cog.png) > System > General configuration** page. Other languages are **not supported** by the toolkit. -{{% /warning %}} - -1. Clone [Data Center App Performance Toolkit](https://github.com/atlassian/dc-app-performance-toolkit) locally. -1. Follow the [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md) instructions to set up toolkit locally. -1. Navigate to `dc-app-performance-toolkit/app` folder. -1. Open the `jira.yml` file and fill in the following variables: - - `application_hostname`: your_dc_jira_instance_hostname without protocol. - - `application_protocol`: http or https. - - `application_port`: for HTTP - 80, for HTTPS - 443, 8080, 2990 or your instance-specific port. - - `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. 
when using self-signed SSL certificate.
-   - `application_postfix`: set to empty for CloudFormation deployment; e.g., /jira for a URL like http://localhost:2990/jira.
-   - `admin_login`: admin user username.
-   - `admin_password`: admin user password.
-   - `load_executor`: executor for load tests. Valid options are [jmeter](https://jmeter.apache.org/) (default) or [locust](https://locust.io/).
-   - `concurrency`: `2` - number of concurrent JMeter/Locust users.
-   - `test_duration`: `5m` - duration of the performance run.
-   - `ramp-up`: `3s` - amount of time it will take JMeter or Locust to add all test users to test execution.
-   - `total_actions_per_hour`: `5450` - number of total JMeter/Locust actions per hour.
-   - `WEBDRIVER_VISIBLE`: visibility of Chrome browser during selenium execution (False by default).
-
-1. Run bzt.
-
-    ``` bash
-    bzt jira.yml
-    ```
-
-1. Review the resulting table in the console log. All JMeter/Locust and Selenium actions should have a 95+% success rate.
-If some actions do not have a 95+% success rate, refer to the following logs in the `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder:
-
-    - `results_summary.log`: detailed run summary
-    - `results.csv`: aggregated .csv file with all actions and timings
-    - `bzt.log`: logs of the Taurus tool execution
-    - `jmeter.*`: logs of the JMeter tool execution
-    - `locust.*`: logs of the Locust tool execution (in case you use Locust as load_executor in jira.yml)
-    - `pytest.*`: logs of Pytest-Selenium execution
-
-{{% warning %}}
-Do not proceed with the next step until all actions have a 95+% success rate. Ask [support](#support) if the logs analysis above did not help.
-{{% /warning %}}
-
----
-
-### 4. Develop and test app-specific action locally
-Data Center App Performance Toolkit has its own set of default test actions for Jira Data Center: JMeter/Locust and Selenium for load and UI tests respectively.
-
-**App-specific action** - an action (performance test) you have to develop to cover the main use cases of your application. The performance test should focus on the common usage of your application, not on covering all possible functionality of your app. For example, the application setup screen or other one-time use cases are out of scope of performance testing.
-
-1. Define the main use case of your app. Usually it is one or two main app use cases.
-1. Your app adds new UI elements in Jira Data Center - a Selenium app-specific action has to be developed.
-1. Your app introduces a new endpoint or extensively calls the existing Jira Data Center API - JMeter/Locust app-specific actions have to be developed.
-JMeter and Locust actions are interchangeable, so you could select the tool you prefer:
-
-- JMeter - UI-based [performance tool](https://jmeter.apache.org/).
-- Locust - code-based (Python requests library) [performance tool](https://locust.io/).
-
-
-{{% note %}}
-We strongly recommend developing your app-specific actions on the development environment to reduce AWS infrastructure costs.
-{{% /note %}}
-
-
-#### Custom dataset
-You can filter your own app-specific issues for your app-specific actions.
-
-1. Create app-specific issues that have a specific anchor in the summary, e.g. an *AppIssue* anchor and issue summaries like *AppIssue1*, *AppIssue2*, *AppIssue3*.
-1. Go to the search page of your Jira Data Center - `JIRA_URL/issues/?jql=` and select `Advanced`.
-1. Write [JQL](https://www.atlassian.com/blog/jira-software/jql-the-most-flexible-way-to-search-jira-14) that filters just your issues from step 1, e.g. `summary ~ 'AppIssue*'`.
-1. Edit the Jira configuration file `dc-app-performance-toolkit/app/jira.yml`:
-    - `custom_dataset_query:` JQL from step 3.
-
-The next time you run the toolkit, custom dataset issues will be stored in `dc-app-performance-toolkit/app/datasets/jira/custom-issues.csv` with columns: `issue_key`, `issue_id`, `project_key`.
-
-#### Example of app-specific Selenium action development with custom dataset
-You develop an app that adds some additional fields to specific types of Jira issues. In this case, you should develop a Selenium app-specific action:
-
-1. Create app-specific Jira issues with the *AppIssue* anchor in the summary: *AppIssue1*, *AppIssue2*, etc.
-2. Go to the search page of your Jira Data Center - `JIRA_URL/issues/?jql=` and check that the JQL is correct: `summary ~ 'AppIssue*'`.
-3. Edit the `dc-app-performance-toolkit/app/jira.yml` configuration file and set `custom_dataset_query: summary ~ 'AppIssue*'`.
-4. Extend the example of an app-specific action in `dc-app-performance-toolkit/app/extension/jira/extension_ui.py`.
-[Code example.](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/extension/jira/extension_ui.py)
-So, our test has to open the app-specific issues and measure the time it takes to load these app-specific issues.
-5. If you need to run `app_specific_action` as a specific user, uncomment the `app_specific_user_login` function in the [code example](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/extension/jira/extension_ui.py). Note that in this case `test_1_selenium_custom_action` should run just before the `test_2_selenium_z_log_out` action.
-6. In `dc-app-performance-toolkit/app/selenium_ui/jira_ui.py`, review and uncomment the following block of code so that the newly created app-specific actions are executed:
-``` python
-# def test_1_selenium_custom_action(webdriver, datasets, screen_shots):
-#     app_specific_action(webdriver, datasets)
-```
-
-7. Run the toolkit with the `bzt jira.yml` command to ensure that all Selenium actions including `app_specific_action` are successful.
-
-#### Example of app-specific Locust/JMeter action development
-
-You develop an app that introduces new GET and POST endpoints in Jira Data Center. In this case, you should develop a Locust or JMeter app-specific action.
-
-**Locust app-specific action development example**
-
-1. Extend the example of an app-specific action in `dc-app-performance-toolkit/app/extension/jira/extension_locust.py`, so that the test calls the endpoint with a GET request, parses the response, uses these data to call another endpoint with a POST request, and measures the response time.
-[Code example.](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/extension/jira/extension_locust.py)
-1. In `dc-app-performance-toolkit/app/jira.yml` set `load_executor: locust` to make `locust` the load executor.
-1. Set the desired execution percentage for `standalone_extension`. The default value is `0`, which means that the `standalone_extension` action will not be executed. Locust uses action percentages as relative [weights](https://docs.locust.io/en/stable/writing-a-locustfile.html#weight-attribute), so if `some_action: 10` and `standalone_extension: 20`, then `standalone_extension` will be called twice as often.
-Set the `standalone_extension` weight in accordance with the expected frequency of your app use case compared with other base actions.
-1. App-specific tests can be run (if needed) as a specific user. Use the `@run_as_specific_user(username='specific_user_username', password='specific_user_password')` decorator for that.
-1. Run the toolkit with the `bzt jira.yml` command to ensure that all Locust actions including `app_specific_action` are successful.
-
-**JMeter app-specific action development example**
-
-1. Check that the `jira.yml` file has correct settings for `application_hostname`, `application_protocol`, `application_port`, `application_postfix`, etc.
-1. Set the desired execution percentage for `standalone_extension`. The default value is `0`, which means that the `standalone_extension` action will not be executed.
-For example, for app-specific action development you could set the percentage of `standalone_extension` to 100 and all other actions to 0 - this way only the `login_and_view_dashboard` and `standalone_extension` actions would be executed.
-1. Navigate to the `dc-app-performance-toolkit/app` folder and run from virtualenv (as described in `dc-app-performance-toolkit/README.md`):
-
-    ```python util/jmeter/start_jmeter_ui.py --app jira```
-
-1. Open the `Jira` thread group > `actions per login` and navigate to `standalone_extension`
-![Jira JMeter standalone extension](/platform/marketplace/images/jira-standalone-extension.png)
-1. Add a GET `HTTP Request`: right-click on `standalone_extension` > `Add` > `Sampler` `HTTP Request`, choose method GET and set the endpoint in Path.
-![Jira JMeter standalone GET](/platform/marketplace/images/jira-standalone-get-request.png)
-1. Add a `Regular Expression Extractor`: right-click on the newly created `HTTP Request` > `Add` > `Post processor` > `Regular Expression Extractor`
-![Jira JMeter standalone regexp](/platform/marketplace/images/jira-standalone-regexp.png)
-1. Add a `Response Assertion`: right-click on the newly created `HTTP Request` > `Add` > `Assertions` > `Response Assertion` and add an assertion with `Contains`, `Matches`, `Equals`, etc. types.
-![Jira JMeter standalone assertions](/platform/marketplace/images/jira-standalone-assertions.png)
-1. Add a POST `HTTP Request`: right-click on `standalone_extension` > `Add` > `Sampler` `HTTP Request`, choose method POST, set the endpoint in Path and add Parameters or Body Data if needed.
-1. Right-click on `View Results Tree` and enable this controller.
-1. Click the **Start** button and make sure that `login_and_view_dashboard` and `standalone_extension` are successful.
-1. Right-click on `View Results Tree` and disable this controller. It is important to disable the `View Results Tree` controller before full-scale results generation.
-1. Click the **Save** button.
-1. To make `standalone_extension` executable during the toolkit run, edit `dc-app-performance-toolkit/app/jira.yml` and set the execution percentage of `standalone_extension` according to your use case frequency.
-1. App-specific tests can be run (if needed) as a specific user. In `standalone_extension`, uncomment the `login_as_specific_user` controller. Navigate to the `username:password` config element and update the values of the `app_specific_username` and `app_specific_password` names with your specific user credentials. Also make sure that your app-specific tests are located between the `login_as_specific_user` and `login_as_default_user_if_specific_user_was_loggedin` controllers.
-1. Run the toolkit to ensure that all JMeter actions including `standalone_extension` are successful.
-
-
-##### Using JMeter variables from the base script
-
-Use or access the following variables in your `standalone_extension` script if needed.
-
-- `${issue_key}` - issue key being viewed or modified (e.g. ABC-123)
-- `${issue_id}` - issue id being viewed or modified (e.g.
693484) -- `${project_key}` - project key being viewed or modified (e.g. ABC) -- `${project_id}` - project id being viewed or modified (e.g. 3423) -- `${scrum_board_id}` - scrum board id being viewed (e.g. 328) -- `${kanban_board_id}` - kanban board id being viewed (e.g. 100) -- `${jql}` - jql query being used (e.g. text ~ "qrk*" order by key) -- `${username}` - the logged in username (e.g. admin) - -{{% warning %}} -App-specific actions are required. Do not proceed with the next step until you have completed app-specific actions development and got successful results from toolkit run. -{{% /warning %}} - ---- -## Enterprise-scale environment - -After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**. - -### 5. Set up an enterprise-scale environment Jira Data Center on AWS - -We recommend that you use the [AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) (**How to deploy** tab) to deploy a Jira Data Center enterprise-scale environment. This Quick Start will allow you to deploy Jira Data Center with a new [Atlassian Standard Infrastructure](https://aws.amazon.com/quickstart/architecture/atlassian-standard-infrastructure/) (ASI) or into an existing one. - -The ASI is a Virtual Private Cloud (VPC) consisting of subnets, NAT gateways, security groups, bastion hosts, and other infrastructure components required by all Atlassian applications, and then deploys Jira into this new VPC. Deploying Jira with a new ASI takes around 50 minutes. With an existing one, it'll take around 30 minutes. - -#### Using the AWS Quick Start for Jira - -If you are a new user, perform an end-to-end deployment. This involves deploying Jira into a _new_ ASI: - -Navigate to **[AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) > How to deploy** tab **> Deploy into a new ASI** link. - -If you have already deployed the ASI separately by using the [ASI Quick Start](https://aws.amazon.com/quickstart/architecture/atlassian-standard-infrastructure/)ASI Quick Start or by deploying another Atlassian product (Jira, Bitbucket, or Confluence Data Center development environment) with ASI, deploy Jira into your existing ASI: - -Navigate to **[AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) > How to deploy** tab **> Deploy into your existing ASI** link. - -{{% note %}} -You are responsible for the cost of the AWS services used while running this Quick Start reference deployment. There is no additional price for using this Quick Start. For more information, go to [aws.amazon.com/pricing](https://aws.amazon.com/ec2/pricing/). -{{% /note %}} - -To reduce costs, we recommend you to keep your deployment up and running only during the performance runs. - -#### AWS cost estimation -[AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. -Monthly charges will be based on your actual usage of AWS services and may vary from the estimates the Calculator has provided. - -*The prices below are approximate and may vary depending on such factors like region, instance type, deployment type of DB, and other. 
- - -| Stack | Estimated hourly cost ($) | -| ----- | ------------------------- | -| One Node Jira DC | 0.8 - 1.1 -| Two Nodes Jira DC | 1.2 - 1.7 -| Four Nodes Jira DC | 2.0 - 3.0 - -#### Stop cluster nodes - -To reduce AWS infrastructure costs you could stop cluster nodes when the cluster is standing idle. -Cluster node might be stopped by using [Suspending and Resuming Scaling Processes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html). - -To stop one node within the cluster, follow the instructions below: - -1. In the AWS console, go to **Services** > **EC2** > **Auto Scaling Groups** and open the necessary group to which belongs the node you want to stop. -1. Click **Edit** (in case you have New EC2 experience UI mode enabled, press `Edit` on `Advanced configuration`) and add `HealthCheck` to the `Suspended Processes`. Amazon EC2 Auto Scaling stops marking instances unhealthy as a result of EC2 and Elastic Load Balancing health checks. -1. Go to EC2 **Instances**, select instance, click **Instance state** > **Stop instance**. - -To return node into a working state follow the instructions: - -1. Go to EC2 **Instances**, select instance, click **Instance state** > **Start instance**, wait a few minutes for node to become available. -1. Go to EC2 **Auto Scaling Groups** and open the necessary group to which belongs the node you want to start. -1. Press **Edit** (in case you have New EC2 experience UI mode enabled, press `Edit` on `Advanced configuration`) and remove `HealthCheck` from `Suspended Processes` of Auto Scaling Group. - -#### Stop database - -To reduce AWS infrastructure costs database could be stopped when the cluster is standing idle. -Keep in mind that database would be **automatically started** in **7** days. - -To stop database: - -1. In the AWS console, go to **Services** > **RDS** > **Databases**. -1. Select cluster database. -1. Click on **Actions** > **Stop**. - -To start database: - -1. In the AWS console, go to **Services** > **RDS** > **Databases**. -1. Select cluster database. -1. Click on **Actions** > **Start**. - -#### Quick Start parameters - -All important parameters are listed and described in this section. For all other remaining parameters, we recommend using the Quick Start defaults. - -**Jira setup** - -| Parameter | Recommended Value | -| --------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Jira Product | Software | -| Version | The Data Center App Performance Toolkit officially supports `8.20.26`, `9.4.10` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | - -**Cluster nodes** - -| Parameter | Recommended Value | -| --------- | ----------------- | -| Cluster node instance type | [m5.2xlarge](https://aws.amazon.com/ec2/instance-types/m5/) (This differs from our [public recommendation on c4.8xlarge](https://confluence.atlassian.com/enterprise/infrastructure-recommendations-for-enterprise-jira-instances-on-aws-969532459.html) for production instances but is representative for a lot of our Jira Data Center customers. The Data Center App Performance Toolkit framework is set up for concurrency we expect on this instance size. 
As such, underprovisioning will likely show a larger performance impact than expected.)| -| Maximum number of cluster nodes | 1 | -| Minimum number of cluster nodes | 1 | -| Cluster node instance volume size | 100 | - -**Database** - -| Parameter | Recommended Value | -| --------- | ----------------- | -| The database engine to deploy with | PostgresSQL | -| The database engine version to use | 11 | -| Database instance class | [db.m5.xlarge](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Summary) | -| RDS Provisioned IOPS | 1000 | -| Master (admin) password | Password1! | -| Enable RDS Multi-AZ deployment | false | -| Application user database password | Password1! | -| Database storage | 200 | - -{{% note %}} -The **Master (admin) password** will be used later when restoring the SQL database dataset. If password value is not set to default, you'll need to change `DB_PASS` value manually in the restore database dump script (later in [Preloading your Jira deployment with an enterprise-scale dataset](#preloading)). -{{% /note %}} - -**Networking (for new ASI)** - -| Parameter | Recommended Value | -| --------- | ----------------- | -| Trusted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Availability Zones | _Select two availability zones in your region_ | -| Permitted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Make instance internet facing | true | -| Key Name | _The EC2 Key Pair to allow SSH access. See [Amazon EC2 Key Pairs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for more info._ | - -**Networking (for existing ASI)** - -| Parameter | Recommended Value | -| --------- | ----------------- | -| Make instance internet facing | true | -| Permitted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Key Name | _The EC2 Key Pair to allow SSH access. See [Amazon EC2 Key Pairs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for more info._ | - -#### Running the setup wizard - -After successfully deploying Jira Data Center in AWS, you'll need to configure it: - -1. In the AWS console, go to **Services** > **CloudFormation** > **Stack** > **Stack details** > **Select your stack**. -1. On the **Outputs** tab, copy the value of the **LoadBalancerURL** key. -1. Open **LoadBalancerURL** in your browser. This will take you to the Jira setup wizard. -1. On the **Set up application properties** page, populate the following fields: - - **Application Title**: any name for your Jira Data Center deployment - - **Mode**: Private - - **Base URL**: your stack's Elastic LoadBalancer URL - Click **Next**. -1. On the next page, populate the **Your License Key** field by either: - - Using your existing license, or - - Generating a Jira trial license, or - - Contacting Atlassian to be provided two time-bomb licenses for testing. Ask for it in your ECOHELP ticket. - Click **Next**. -1. On the **Set up administrator account** page, populate the following fields: - - **Full name**: any full name of the admin user - - **Email Address**: email address of the admin user - - **Username**: admin _(recommended)_ - - **Password**: admin _(recommended)_ - - **Confirm Password**: admin _(recommended)_ - Click **Next**. -1. On the **Set up email notifications** page, configure your email notifications, and then click **Finish**. -1. On the first page of the welcome setup select **English (United States)** language. 
Other languages are not supported by the toolkit. -1. After going through the welcome setup, click **Create new project** to create a new project. - ---- - -### 6. Preloading your Jira deployment with an enterprise-scale dataset - -Data dimensions and values for an enterprise-scale dataset are listed and described in the following table. - -| Data dimensions | Value for an enterprise-scale dataset | -| --------------- | ------------------------------------- | -| Attachments | ~2 000 000 | -| Comments | ~6 000 000 | -| Components | ~2 500 | -| Custom fields | ~800 | -| Groups | ~1 000 | -| Issue security levels | 10 | -| Issue types | ~300 | -| Issues | ~1 000 000 | -| Priorities | 5 | -| Projects | 500 | -| Resolutions | 34 | -| Screen schemes | ~200 | -| Screens | ~200 | -| Statuses | ~400 | -| Users | ~21 000 | -| Versions | ~20 000 | -| Workflows | 50 | - -{{% note %}} -All the datasets use the standard `admin`/`admin` credentials. -{{% /note %}} - -#### Pre-loading the dataset is a three-step process: - -1. [Importing the main dataset](#importingdataset). To help you out, we provide an enterprise-scale dataset you can import either via the [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script or restore from xml backup file. -1. [Restoring attachments](#copyingattachments). We also provide attachments, which you can pre-load via an [upload_attachments.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/upload_attachments.sh) script. -1. [Re-indexing Jira Data Center](#reindexing). For more information, go to [Re-indexing Jira](https://confluence.atlassian.com/adminjiraserver/search-indexing-938847710.html). - -The following subsections explain each step in greater detail. - -#### Importing the main dataset - -You can load this dataset directly into the database (via a [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script), or import it via XML. - -##### Option 1 (recommended): Loading the dataset via populate_db.sh script (~1 hour) - - -To populate the database with SQL: - -1. In the AWS console, go to **Services** > **EC2** > **Instances**. -1. On the **Description** tab, do the following: - - Copy the _Public IP_ of the Bastion instance. - - Copy the _Private IP_ of the Jira node instance. -1. Using SSH, connect to the Jira node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ```bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` - For more information, go to [Connecting your nodes over SSH](https://confluence.atlassian.com/adminjiraserver/administering-jira-data-center-on-aws-938846969.html#AdministeringJiraDataCenteronAWS-ConnectingtoyournodesoverSSH). -1. Download the [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script and make it executable: - - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/populate_db.sh && chmod +x populate_db.sh - ``` -1. 
Review the following `Variables section` of the script: - - ``` bash - DB_CONFIG="/var/atlassian/application-data/jira/dbconfig.xml" - JIRA_CURRENT_DIR="/opt/atlassian/jira-software/current" - CATALINA_PID_FILE="${JIRA_CURRENT_DIR}/work/catalina.pid" - JIRA_DB_NAME="jira" - JIRA_DB_USER="postgres" - JIRA_DB_PASS="Password1!" - JIRA_SETENV_FILE="${JIRA_CURRENT_DIR}/bin/setenv.sh" - JIRA_VERSION_FILE="/media/atl/jira/shared/jira-software.version" - DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/jira" - ``` -1. Run the script: - - ``` bash - ./populate_db.sh 2>&1 | tee -a populate_db.log - ``` - -{{% note %}} -Do not close or interrupt the session. It will take about an hour to restore SQL database. When SQL restoring is finished, an admin user will have `admin`/`admin` credentials. - -In case of a failure, check the `Variables` section and run the script one more time. -{{% /note %}} - -##### Option 2: Loading the dataset through XML import (~4 hours) - -We recommend that you only use this method if you are having problems with the [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script. - -1. In the AWS console, go to **Services** > **EC2** > **Instances**. -1. On the **Description** tab, do the following: - - Copy the _Public IP_ of the Bastion instance. - - Copy the _Private IP_ of the Jira node instance. -1. Using SSH, connect to the Jira node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ```bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` - For more information, go to [Connecting your nodes over SSH](https://confluence.atlassian.com/adminjiraserver/administering-jira-data-center-on-aws-938846969.html#AdministeringJiraDataCenteronAWS-ConnectingtoyournodesoverSSH). -1. Download the xml_backup.zip file corresponding to your Jira version. - - ``` bash - JIRA_VERSION=$(sudo su jira -c "cat /media/atl/jira/shared/jira-software.version") - sudo su jira -c "wget https://centaurus-datasets.s3.amazonaws.com/jira/${JIRA_VERSION}/large/xml_backup.zip -O /media/atl/jira/shared/import/xml_backup.zip" - ``` -1. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html). -1. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Restore System.** from the menu. -1. Populate the **File name** field with `xml_backup.zip`. -1. Click **Restore** and wait until the import is completed. - -#### Restoring attachments (~2 hours) - -After [Importing the main dataset](#importingdataset), you'll now have to pre-load an enterprise-scale set of attachments. - -{{% note %}} -Populate DB and restore attachments scripts could be run in parallel in separate terminal sessions to save time. -{{% /note %}} - -1. 
Using SSH, connect to the Jira node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ```bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` - For more information, go to [Connecting your nodes over SSH](https://confluence.atlassian.com/adminjiraserver/administering-jira-data-center-on-aws-938846969.html#AdministeringJiraDataCenteronAWS-ConnectingtoyournodesoverSSH). -1. Download the [upload_attachments.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/upload_attachments.sh) script and make it executable: - - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/upload_attachments.sh && chmod +x upload_attachments.sh - ``` -1. Review the following `Variables section` of the script: - - ``` bash - DATASETS_AWS_BUCKET="https://centaurus-datasets.s3.amazonaws.com/jira" - ATTACHMENTS_TAR="attachments.tar.gz" - ATTACHMENTS_DIR="attachments" - TMP_DIR="/tmp" - EFS_DIR="/media/atl/jira/shared/data" - ``` -1. Run the script: - - ``` bash - ./upload_attachments.sh 2>&1 | tee -a upload_attachments.log - ``` - -{{% note %}} -Do not close or interrupt the session. It will take about two hours to upload attachments to Elastic File Storage (EFS). -{{% /note %}} - -#### Re-indexing Jira Data Center - -For more information, go to [Re-indexing Jira](https://confluence.atlassian.com/adminjiraserver/search-indexing-938847710.html). - -{{% note %}} -The re-index time for Jira 8.20.x is about ~30-50 minutes, while for Jira 9.4.x it can take significantly longer at around 110-130 minutes. This increase in re-index time is due to a known issue which affects Jira 9.4.x, and you can find more information about it in this ticket: [Re-Index: Jira 9.4.x](https://jira.atlassian.com/browse/JRASERVER-74787). -{{% /note %}} - -1. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html). -2. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. -3. Select the **Full re-index** option. -4. Click **Re-Index** and wait until re-indexing is completed. -5. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. -6. Attach the screenshot to your ECOHELP ticket. - -Jira will be unavailable for some time during the re-indexing process. When finished, the **Acknowledge** button will be available on the re-indexing page. - ---- - -#### Index Recovery (~15 min, only for Jira versions 9.0.x and below. For Jira 9.1.0+ skip this step.) - -1. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html). -2. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. -3. In the **Index Recovery** click **Edit Settings**. -4. Set the recovery index schedule to 5min ahead of the current server time. -5. Wait ~10min until the index snapshot is created. - -Jira will be unavailable for some time during the index recovery process. - -6. 
Using SSH, connect to the Jira node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ```bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` -7. Once you're in the node, run command corresponding to your Jira version: - - - **Jira 9** - ```bash - sudo su -c "du -sh /media/atl/jira/shared/caches/indexesV2/snapshots/IndexSnapshot*" | tail -1 - ``` - **Jira 8** - ```bash - sudo su -c "du -sh /media/atl/jira/shared/export/indexsnapshots/IndexSnapshot*" | tail -1 - ``` - -8. The snapshot size and name will be shown in the console output. - -{{% note %}} -Please note that the snapshot size must be around 6GB or larger. -{{% /note %}} - ---- -{{% note %}} -After [Preloading your Jira deployment with an enterprise-scale dataset](#preloading), the admin user will have `admin`/`admin` credentials. -It's recommended to change default password from UI account page for security reasons. -{{% /note %}} ---- - -### 7. Setting up an execution environment - -For generating performance results suitable for Marketplace approval process use dedicated execution environment. This is a separate AWS EC2 instance to run the toolkit from. Running the toolkit from a dedicated instance but not from a local machine eliminates network fluctuations and guarantees stable CPU and memory performance. - -1. Go to GitHub and create a fork of [dc-app-performance-toolkit](https://github.com/atlassian/dc-app-performance-toolkit). -1. Clone the fork locally, then edit the `jira.yml` configuration file. Set enterprise-scale Jira Data Center parameters: - -{{% warning %}} -Do not push to the fork real `application_hostname`, `admin_login` and `admin_password` values for security reasons. -Instead, set those values directly in `.yml` file on execution environment instance. -{{% /warning %}} - - ``` yaml - application_hostname: test_jira_instance.atlassian.com # Jira DC hostname without protocol and port e.g. test-jira.atlassian.com or localhost - application_protocol: http # http or https - application_port: 80 # 80, 443, 8080, 2990, etc - secure: True # set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # set to empty for CloudFromation deployment. e.g. /jira in case of url like http://localhost:2990/jira - admin_login: admin - admin_password: admin - load_executor: jmeter # jmeter and locust are supported. jmeter by default. - concurrency: 200 # number of concurrent virtual users for jmeter or locust scenario - test_duration: 45m - ramp-up: 3m # time to spin all concurrent users - total_actions_per_hour: 54500 # number of total JMeter/Locust actions per hour - ``` - -1. Push your changes to the forked repository. -1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. - * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) - * Storage size: `30` GiB -1. 
Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). - - ```bash - ssh -i path_to_pem_file ubuntu@INSTANCE_PUBLIC_IP - ``` - -1. Install [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Setup manage Docker as a [non-root user](https://docs.docker.com/engine/install/linux-postinstall). -1. Clone forked repository. - -{{% note %}} -At this stage app-specific actions are not needed yet. Use code from `master` branch with your `jira.yml` changes. -{{% /note %}} - -You'll need to run the toolkit for each [test scenario](#testscenario) in the next section. - ---- - -### 8. Running the test scenarios from execution environment against enterprise-scale Jira Data Center - -Using the Data Center App Performance Toolkit for [Performance and scale testing your Data Center app](/platform/marketplace/developing-apps-for-atlassian-data-center-products/) involves two test scenarios: - -- [Performance regression](#testscenario1) -- [Scalability testing](#testscenario2) - -Each scenario will involve multiple test runs. The following subsections explain both in greater detail. - -#### Scenario 1: Performance regression - -This scenario helps to identify basic performance issues without a need to spin up a multi-node Jira DC. Make sure the app does not have any performance impact when it is not exercised. - -##### Run 1 (~50 min) - -To receive performance baseline results **without** an app installed: - -1. Use SSH to connect to execution environment. -1. Run toolkit with docker from the execution environment instance: - - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml - ``` - -1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder: - - `results_summary.log`: detailed run summary - - `results.csv`: aggregated .csv file with all actions and timings - - `bzt.log`: logs of the Taurus tool execution - - `jmeter.*`: logs of the JMeter tool execution - - `pytest.*`: logs of Pytest-Selenium execution - -{{% note %}} -Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. -{{% /note %}} - -##### Run 2 (~50 min + Lucene Index timing test) - -If you are submitting a Jira app, you are required to conduct a Lucene Index timing test. This involves conducting a foreground re-index on a single-node Data Center deployment (with your app installed) and a dataset that has 1M issues. - -{{% note %}} -Jira 8 index time is about ~30 min. -{{% /note %}} - -{{% note %}} -If your Amazon RDS DB instance class is lower than `db.m5.xlarge` it is required to wait ~2 hours after previous reindex finish before starting a new one. -{{% /note %}} - -**Benchmark your re-index time with your app installed:** - -1. Install the app you want to test. -1. Setup app license. -1. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. -1. Select the **Full re-index** option. -1. Click **Re-Index** and wait until re-indexing is completed. -1. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. -1. 
Attach the screenshot to your ECOHELP ticket. - -**Performance results generation with the app installed:** - -1. Run toolkit with docker from the execution environment instance: - - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml - ``` - -{{% note %}} -Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. -{{% /note %}} - - -##### Generating a performance regression report - -To generate a performance regression report: - -1. Use SSH to connect to execution environment. -1. Install and activate the `virtualenv` as described in `dc-app-performance-toolkit/README.md` -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `performance_profile.yml` file: - - Under `runName: "without app"`, in the `fullPath` key, insert the full path to results directory of [Run 1](#regressionrun1). - - Under `runName: "with app"`, in the `fullPath` key, insert the full path to results directory of [Run 2](#regressionrun2). -1. Run the following command: - ``` bash - python csv_chart_generator.py performance_profile.yml - ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local machine terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder you will be able to review the action timings with and without your app to see its impact on the performance of the instance. If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. - - -#### Scenario 2: Scalability testing - -The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. For this, you have to run scale testing on your app. - -For many apps and extensions to Atlassian products, there should not be a significant performance difference between operating on a single node or across many nodes in Jira DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Jira DC app in a cluster. - - -###### Run 3 (~50 min) - -To receive scalability benchmark results for one-node Jira DC **with** app-specific actions: - -1. Apply app-specific code changes to a new branch of forked repo. -1. Use SSH to connect to execution environment. -1. Pull cloned fork repo branch with app-specific actions. -1. 
Run toolkit with docker from the execution environment instance:
-
-    ``` bash
-    cd dc-app-performance-toolkit
-    docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml
-    ```
-
-{{% note %}}
-Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above.
-{{% /note %}}
-
-
-##### Run 4 (~50 min)
-{{% note %}}
-Before scaling your DC, make sure that the AWS vCPU limit is not lower than the needed number.
-Use the [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see the current limit.
-The same article has instructions on how to increase the limit if needed.
-{{% /note %}}
-
-To receive scalability benchmark results for two-node Jira DC **with** app-specific actions:
-
-1. In the AWS console, go to **CloudFormation** > **Stack details** > **Select your stack**.
-2. On the **Update** tab, select **Use current template**, and then click **Next**.
-3. Enter `2` in the **Maximum number of cluster nodes** and the **Minimum number of cluster nodes** fields.
-4. Click **Next** > **Next** > **Update stack** and wait until the stack is updated.
-
-{{% warning %}}
-In case you get an error during the update - `BastionPrivIp cannot be updated` - use the following steps as a workaround:
-1. In the AWS console, go to **EC2** > **Auto Scaling** > **Auto Scaling Groups**.
-2. On the **Auto Scaling Groups** page, select **your stack ASG** and click **Edit**.
-3. Enter `2` in the **Desired capacity**, **Minimum capacity** and **Maximum capacity** fields.
-4. Scroll down, click the **Update** button and wait until the stack is updated.
-{{% /warning %}}
-
-5. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html).
-6. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Clustering** and check that there is the expected number of nodes with node status `ACTIVE` and application status `RUNNING`. This is to make sure that the Jira index has successfully synchronized to the second node.
-
-{{% warning %}}
-In case index synchronization fails for some reason (e.g. application status is `MAINTENANCE`), follow these steps:
- 1. Go back and go through the **[Index Recovery steps](#indexrecovery)**.
- 2. Proceed to the AWS console, go to EC2 > Instances > Select the problematic node > Instance state > Terminate instance.
- 3. Wait until a new node is recreated by the ASG; the index should be picked up by the new node automatically.
-{{% /warning %}}
-
-7. Run toolkit with docker from the execution environment instance:
-
-    ``` bash
-    cd dc-app-performance-toolkit
-    docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml
-    ```
-
-
-{{% note %}}
-Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above.
-{{% /note %}}
-
-
-##### Run 5 (~50 min)
-{{% note %}}
-Before scaling your DC, make sure that the AWS vCPU limit is not lower than the needed number.
-Use the [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see the current limit.
-The same article has instructions on how to increase the limit if needed.
-{{% /note %}} - -To receive scalability benchmark results for four-node Jira DC with app-specific actions: - -1. Scale your Jira Data Center deployment to 3 nodes as described in [Run 4](#run4). -1. Check Index is synchronized to the new node #3 the same way as in [Run 4](#run4). -1. Scale your Jira Data Center deployment to 4 nodes as described in [Run 4](#run4). -1. Check Index is synchronized to the new node #4 the same way as in [Run 4](#run4). -1. Run toolkit with docker from the execution environment instance: - - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml - ``` - -{{% note %}} -Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. -{{% /note %}} - - -#### Generating a report for scalability scenario - -To generate a scalability report: - -1. Use SSH to connect to execution environment. -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `scale_profile.yml` file: - - For `runName: "1 Node"`, in the `fullPath` key, insert the full path to results directory of [Run 3](#run3). - - For `runName: "2 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 4](#run4). - - For `runName: "4 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 5](#run5). -1. Run the following command from the activated `virtualenv` (as described in `dc-app-performance-toolkit/README.md`): - ``` bash - python csv_chart_generator.py scale_profile.yml - ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and summary report. - - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder you will be able to review action timings on Jira Data Center with different numbers of nodes. If you see a significant variation in any action timings between configurations, we recommend taking a look into the app implementation to understand the root cause of this delta. - -{{% warning %}} -After completing all your tests, delete your Jira Data Center stacks. -{{% /warning %}} - -#### Attaching testing results to ECOHELP ticket - -{{% warning %}} -Do not forget to attach performance testing results to your ECOHELP ticket. -{{% /warning %}} - -1. Make sure you have two reports folders: one with performance profile and second with scale profile results. - Each folder should have `profile.csv`, `profile.png`, `profile_summary.log` and profile run result archives. Archives - should contain all raw data created during the run: `bzt.log`, selenium/jmeter/locust logs, .csv and .yml files, etc. -2. 
Attach two reports folders to your ECOHELP ticket. - -## Support -In case of technical questions, issues or problems with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. - diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index dbf011055..73cee2a21 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -41,7 +41,7 @@ run the toolkit in an **enterprise-scale environment**. --- {{% note %}} -DCAPT has fully transitioned to Terraform deployment. CloudFormation deployment option will be no longer supported starting from January 2024. +DCAPT has fully transitioned to Terraform deployment. CloudFormation deployment option is no longer supported. {{% /note %}} ### 1. Setting up Jira Data Center development environment @@ -88,12 +88,12 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc 6. Optional variables to override: - `jira_version_tag` - Jira version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +7. From local terminal (Git Bash for Windows users) start the installation (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` @@ -344,12 +344,12 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat 6. Optional variables to override: - `jira_version_tag` - Jira version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +7. From local terminal (Git Bash for Windows users) start the installation (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -555,12 +555,12 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. -3. 
From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +3. From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md b/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md deleted file mode 100644 index f04322a03..000000000 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md +++ /dev/null @@ -1,1167 +0,0 @@ ---- -title: "Data Center App Performance Toolkit User Guide For Jira Service Management (CloudFormation deployment)" -platform: platform -product: marketplace -category: devguide -subcategory: build -date: "2023-09-20" ---- -# Data Center App Performance Toolkit User Guide For Jira Service Management (CloudFormation deployment) - -{{% warning %}} -CloudFormation deployment option will be no longer supported starting from January 2024. -It is recommended to use TerraForm deployment. More details could be found in [User Guide](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jsm/). -{{% /warning %}} - -This document walks you through the process of testing your app on Jira Service Management using the Data Center App Performance Toolkit. These instructions focus on producing the required [performance and scale benchmarks for your Data Center app](/platform/marketplace/dc-apps-performance-and-scale-testing/): - -If your application relays or extends the functionality of **Insight** ([What is Insight?](https://confluence.atlassian.com/servicemanagementserver/what-is-insight-1044784313.html)): - -Please, make sure you have enabled Insight-specific tests in the `jsm.yml` file, by setting `True` value next to the `insight` variable. - - -In this document, we cover the use of the Data Center App Performance Toolkit on two types of environments: - -**[Development environment](#mainenvironmentdev)**: Jira Service Management Data Center environment for a test run of Data Center App Performance Toolkit and development of [app-specific actions](#appspecificactions). We recommend you use the [AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) with the parameters prescribed here. - -1. [Set up a development environment Jira Service Management Data Center on AWS](#devinstancesetup). -2. [Load a "small" dataset for the development environment](#devdataset). -3. [Run toolkit on the development environment locally](#devtestscenario). -4. [Develop and test app-specific actions locally](#devappaction). - -**[Enterprise-scale environment](#mainenvironmententerprise)**: Jira Service Management Data Center environment used to generate Data Center App Performance Toolkit test results for the Marketplace approval process. Preferably, use the [AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) with the parameters prescribed below. 
These parameters provision larger, more powerful infrastructure for your Jira Service Management Data Center.
-
-5. [Set up an enterprise-scale environment Jira Service Management Data Center on AWS](#instancesetup).
-6. [Load an enterprise-scale dataset on your Jira Service Management Data Center deployment](#preloading).
-7. [Set up an execution environment for the toolkit](#executionhost).
-8. [Running the test scenarios from execution environment against enterprise-scale Jira Service Management Data Center](#testscenario).
-
----
-
-## Development environment
-
-Running the tests in a development environment helps familiarize you with the toolkit. It'll also provide you with a lightweight and less expensive environment for developing. Once you're ready to generate test results for the Marketplace Data Center Apps Approval process, run the toolkit in an **enterprise-scale environment**.
-
-### 1. Setting up Jira Service Management Data Center development environment
-
-We recommend that you set up the development environment using the [AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) (**How to deploy** tab). All the instructions on this page are optimized for AWS.
-
-
-#### Using the AWS Quick Start for Jira Service Management
-
-If you are a new user, perform an end-to-end deployment. This involves deploying Jira Service Management into a _new_ ASI:
-
-Navigate to **[AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) > How to deploy** tab **> Deploy into a new ASI** link.
-
-If you have already deployed the ASI separately by using the [ASI Quick Start](https://aws.amazon.com/quickstart/architecture/atlassian-standard-infrastructure/) or by deploying another Atlassian product (Jira, Bitbucket, or Confluence Data Center development environment) with ASI, deploy Jira Service Management into your existing ASI:
-
-Navigate to **[AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) > How to deploy** tab **> Deploy into your existing ASI** link.
-
-{{% note %}}
-You are responsible for the cost of AWS services used while running this Quick Start reference deployment. There is no additional cost for using the Quick Start itself. See [Amazon EC2 pricing](https://aws.amazon.com/ec2/pricing/) for more detail.
-{{% /note %}}
-
-To reduce costs, we recommend keeping your deployment up and running only during the performance runs.
-
-#### AWS cost estimation for the development environment
-
-An AWS Jira Service Management Data Center development environment costs about $25-50 per working week, depending on factors such as region, instance type, DB deployment type, and others.
-
-#### Quick Start parameters for development environment
-
-All important parameters are listed and described in this section. For all other remaining parameters, we recommend using the Quick Start defaults.
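-
-After the stack is created, the setup wizard steps later in this section ask you to copy the **LoadBalancerURL** value from the stack outputs. If you prefer a terminal over the CloudFormation console, a small boto3 sketch can fetch it as well (the region and stack name below are placeholders - substitute your own):
-
-``` python
-import boto3
-
-# Placeholders - replace with your own region and stack name.
-cloudformation = boto3.client("cloudformation", region_name="us-east-1")
-stack = cloudformation.describe_stacks(StackName="your-jsm-dev-stack")["Stacks"][0]
-outputs = {o["OutputKey"]: o["OutputValue"] for o in stack["Outputs"]}
-print(outputs["LoadBalancerURL"])
-```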
- -**Jira setup** - -| Parameter | Recommended value | -| --------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Jira Product | ServiceManagement | -| Version | The Data Center App Performance Toolkit officially supports `4.20.26`, `5.4.10` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | - -**Cluster nodes** - -| Parameter | Recommended value | -| --------- | ----------------- | -| Cluster node instance type | [t3.large](https://aws.amazon.com/ec2/instance-types/t3/) (we recommend this instance type for its good balance between price and performance in testing environments) | -| Maximum number of cluster nodes | 1 | -| Minimum number of cluster nodes | 1 | -| Cluster node instance volume size | 50 | - - -**Database** - -| Parameter | Recommended value | -| --------- | ----------------- | -| The database engine to deploy with | PostgresSQL | -| The database engine version to use | 11 | -| Database instance class | [db.t3.medium](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Summary) | -| RDS Provisioned IOPS | 1000 | -| Master (admin) password | Password1! | -| Enable RDS Multi-AZ deployment | false | -| Application user database password | Password1! | -| Database storage | 200 | - - -**Networking (for new ASI)** - -| Parameter | Recommended value | -| --------- | ----------------- | -| Trusted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Availability Zones | _Select two availability zones in your region_ | -| Permitted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Make instance internet facing | True | -| Key Name | _The EC2 Key Pair to allow SSH access. See [Amazon EC2 Key Pairs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for more info._ | - -**Networking (for existing ASI)** - -| Parameter | Recommended value | -| --------- | ----------------- | -| Make instance internet facing | True | -| Permitted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Key Name | _The EC2 Key Pair to allow SSH access. See [Amazon EC2 Key Pairs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for more info._ | - -#### Running the setup wizard - -After successfully deploying the Jira Service Management Data Center on AWS, configure it as follows: - -1. In the AWS console, go to **Services** > **CloudFormation** > **Stack** > **Stack details** > **Select your stack**. -1. On the **Outputs** tab, copy the value of the **LoadBalancerURL** key. -1. Open **LoadBalancerURL** in your browser. This will take you to the Jira Service Management setup wizard. -1. On the **Set up application properties** page, fill in the following fields: - - **Application Title**: any name for your Jira Service Management Data Center deployment - - **Mode**: private - - **Base URL**: your stack's Elastic LoadBalancer URL - - Then select **Next**. -1. On the next page, fill in the **Your License Key** field in one of the following ways: - - Using your existing license - - Generating a Jira Service Management trial license - - Contacting Atlassian to be provided two time-bomb licenses for testing. Ask for the licenses in your ECOHELP ticket. - - Then select **Next**. -1. 
On the **Set up administrator account** page, fill in the following fields: - - **Full name**: a full name of the admin user - - **Email Address**: email address of the admin user - - **Username**: admin _(recommended)_ - - **Password**: admin _(recommended)_ - - **Confirm Password**: admin _(recommended)_ - - Then select **Next**. - -1. On the **Set up email notifications** page, configure your email notifications, and then select **Finish**. -1. On the first page of the welcome setup select **English (United States)** language. Other languages are not supported by the toolkit. -1. After going through the welcome setup, select **Create new project** to create a new project. - ---- - -### 2. Upload "small" dataset for development environment - -You can load this dataset directly into the database (via a [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script), or import it via XML. - -##### Option 1 (recommended): Loading the "small" dataset via populate_db.sh script - -To populate the database with SQL: - -1. In the AWS console, go to **Services** > **EC2** > **Instances**. -1. On the **Description** tab, do the following: - - Copy the _Public IP_ of the Bastion instance. - - Copy the _Private IP_ of the Jira Service Management node instance. -1. Using SSH, connect to the Jira Service Management node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ```bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` - For more information, go to [Connecting your nodes over SSH](https://confluence.atlassian.com/adminjiraserver/administering-jira-data-center-on-aws-938846969.html#AdministeringJiraDataCenteronAWS-ConnectingtoyournodesoverSSH). -1. Download the [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script and make it executable: - - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/populate_db.sh && chmod +x populate_db.sh - ``` -1. Review the following `Variables section` of the script: - - ``` bash - DB_CONFIG="/var/atlassian/application-data/jira/dbconfig.xml" - JIRA_DB_NAME="jira" - JIRA_DB_USER="postgres" - JIRA_DB_PASS="Password1!" - # JSM section - JIRA_CURRENT_DIR="/opt/atlassian/jira-servicedesk/current" - JIRA_SETENV_FILE="${JIRA_CURRENT_DIR}/bin/setenv.sh" - JIRA_VERSION_FILE="/media/atl/jira/shared/jira-servicedesk.version" - ``` -1. Run the script: - - ``` bash - ./populate_db.sh --jsm --small 2>&1 | tee -a populate_db.log - ``` - -{{% note %}} -In case of a failure, check the `Variables` section and run the script one more time. -{{% /note %}} - -##### Option 2: Loading the "small" dataset through XML import - -We recommend that you only use this method if you are having problems with the [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script. - -1. In the AWS console, go to **Services** > **EC2** > **Instances**. -1. On the **Description** tab, do the following: - - Copy the _Public IP_ of the Bastion instance. 
- - Copy the _Private IP_ of the Jira Service Management node instance. -1. Using SSH, connect to the Jira Service Management node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ``` bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` - For more information, go to [Connecting your nodes over SSH](https://confluence.atlassian.com/adminjiraserver/administering-jira-data-center-on-aws-938846969.html#AdministeringJiraDataCenteronAWS-ConnectingtoyournodesoverSSH). -1. Download the xml_backup.zip file corresponding to your Jira Service Management version. - - ``` bash - JSM_VERSION=$(sudo su jira -c "cat /media/atl/jira/shared/jira-servicedesk.version") - sudo su jira -c "wget https://centaurus-datasets.s3.amazonaws.com/jsm/${JSM_VERSION}/small/xml_backup.zip -O /media/atl/jira/shared/import/xml_backup.zip" - ``` -1. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html). -1. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Restore System.** from the menu. -1. Populate the **File name** field with `xml_backup.zip`. -1. Click **Restore** and wait until the import is completed. - -#### Restoring "small" dataset attachments - -1. Using SSH, connect to the Jira Service Management node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ```bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` - For more information, go to [Connecting your nodes over SSH](https://confluence.atlassian.com/adminjiraserver/administering-jira-data-center-on-aws-938846969.html#AdministeringJiraDataCenteronAWS-ConnectingtoyournodesoverSSH). -1. Download the [upload_attachments.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/upload_attachments.sh) script and make it executable: - - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/upload_attachments.sh && chmod +x upload_attachments.sh - ``` -1. Review the following `Variables section` of the script: - - ``` bash - # JSM version file location - JIRA_VERSION_FILE="/media/atl/jira/shared/jira-servicedesk.version" - ``` -1. Run the script: - - ``` bash - ./upload_attachments.sh --jsm --small 2>&1 | tee -a upload_attachments.log - ``` - -#### Re-indexing development environment Jira Service Management Data Center - -For more information, go to [Re-indexing Jira](https://confluence.atlassian.com/adminjiraserver/search-indexing-938847710.html). -{{% note %}} -The re-index time for JSM 4.20.x is about ~30-50 minutes, while for JSM 5.4.x it can take significantly longer at around 110-130 minutes. 
This increase in re-index time is due to a known issue which affects Jira 5.4.x, and you can find more information about it in this ticket: [Re-Index: JSM 5.4.x](https://jira.atlassian.com/browse/JRASERVER-74787). -{{% /note %}} -1. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html). -1. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. -1. Select the **Full re-index** option. -1. Click **Re-Index** and wait until re-indexing is completed. - -When finished, the **Acknowledge** button will be available on the re-indexing page. - ---- - -### 3. Run toolkit on the development environment locally - -{{% warning %}} -Make sure **English (United States)** language is selected as a default language on the **![cog icon](/platform/marketplace/images/cog.png) > System > General configuration** page. Other languages are **not supported** by the toolkit. -{{% /warning %}} - -1. Clone [Data Center App Performance Toolkit](https://github.com/atlassian/dc-app-performance-toolkit) locally. -2. Follow the [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md) instructions to set up toolkit locally. -3. Navigate to `dc-app-performance-toolkit/app` folder. -4. Open the `jsm.yml` file and fill in the following variables: - - `application_hostname`: your_dc_jsm_instance_hostname without protocol. - - `application_protocol`: http or https. - - `application_port`: for HTTP - 80, for HTTPS - 443, 8080, 2990 or your instance-specific port. - - `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. when using self-signed SSL certificate. - - `application_postfix`: set to empty for CloudFormation deployment; e.g., /jira for url like this http://localhost:2990/jira. - - `admin_login`: admin user username. - - `admin_password`: admin user password. - - `load_executor`: executor for load tests. Valid options are [jmeter](https://jmeter.apache.org/) (default) or [locust](https://locust.io/). - - `concurrency_agents`: `1` - number of concurrent JMeter/Locust agents. - - `concurrency_customers`: `1` - number of concurrent JMeter/Locust customers. - - `test_duration`: `5m` - duration of the performance run. - - `ramp-up`: `3s` - amount of time it will take JMeter or Locust to add all test users to test execution. - - `total_actions_per_hour_agents`: `500` - number of total JMeter/Locust actions per hour for agents scenario. - - `total_actions_per_hour_customers`: `1500` - number of total JMeter/Locust actions per hour customers scenario. - - `WEBDRIVER_VISIBLE`: visibility of Chrome browser during selenium execution (False is by default). - - `insight`: True or False. Default value is False. Set True to enable Insight specific tests. - - -5. In case your application relays or extends the functionality of **Insight**. Make sure to set `True` value next to `insight` variable. - -6. Run bzt. - - ``` bash - bzt jsm.yml - ``` - -7. Review the resulting table in the console log. All JMeter/Locust and Selenium actions should have 95+% success rate. 
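-
-    To scan the aggregated numbers quickly from a terminal, a few lines of Python over `results.csv` can help (a sketch only; the exact column set depends on the toolkit version, so adjust it to the actual header):
-
-    ``` python
-    import csv
-    from pathlib import Path
-
-    # Hypothetical path - point this at your results location.
-    run_dir = sorted(Path("results/jsm").iterdir())[-1]
-    with open(run_dir / "results.csv", newline="") as csv_file:
-        for row in csv.DictReader(csv_file):
-            print(row)
-    ```
-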
-
-In case some actions do not have a 95+% success rate, refer to the following logs in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder:
-
-    - `results_summary.log`: detailed run summary
-    - `results.csv`: aggregated .csv file with all actions and timings
-    - `bzt.log`: logs of the Taurus tool execution
-    - `jmeter.*`: logs of the JMeter tool execution
-    - `locust.*`: logs of the Locust tool execution (in case you use Locust as load_executor in jsm.yml)
-    - `pytest.*`: logs of Pytest-Selenium execution
-
-{{% warning %}}
-Do not proceed with the next step until all actions have a 95+% success rate. Ask [support](#support) if the above log analysis did not help.
-{{% /warning %}}
-
----
-
-### 4. Develop and test app-specific action locally
-Data Center App Performance Toolkit has its own set of default test actions for Jira Service Management Data Center: JMeter/Locust and Selenium for load and UI tests respectively.
-
-**App-specific action** - an action (performance test) you have to develop to cover the main use cases of your application. The performance test should focus on the common usage of your application and should not cover all possible functionality of your app. For example, the application setup screen or other one-time use cases are out of scope of performance testing.
-
-1. Define the main use case of your app. Usually it is one or two main app use cases.
-1. Your app adds new UI elements in Jira Service Management Data Center - a Selenium app-specific action has to be developed.
-1. Your app introduces a new endpoint or extensively calls the existing Jira Service Management Data Center API - JMeter/Locust app-specific actions have to be developed.
-JMeter and Locust actions are interchangeable, so you can select the tool you prefer:
-
-- JMeter - UI-based [performance tool](https://jmeter.apache.org/).
-- Locust - code-based (Python requests library) [performance tool](https://locust.io/).
-
-
-{{% note %}}
-We strongly recommend developing your app-specific actions on the development environment to reduce AWS infrastructure costs.
-{{% /note %}}
-
-
-#### Custom dataset
-You can filter your own app-specific issues for your app-specific actions.
-
-1. Create app-specific service desk requests that have a specific anchor in the summary, e.g. an *AppRequest* anchor and issue summaries like *AppRequest1*, *AppRequest2*, *AppRequest3*.
-1. Go to the search page of your Jira Service Management Data Center - `JSM_URL/issues/?jql=` and select `Advanced`.
-1. Write [JQL](https://www.atlassian.com/blog/jira-software/jql-the-most-flexible-way-to-search-jira-14) that filters just your requests from step 1, e.g. `summary ~ 'AppRequest*'`.
-1. Edit the JSM configuration file `dc-app-performance-toolkit/app/jsm.yml`:
-    - `custom_dataset_query:` JQL from step 3.
-
-The next time you run the toolkit, custom dataset issues will be stored in the `dc-app-performance-toolkit/app/datasets/jsm/custom-requests.csv` file with the columns: `request_id`, `request_key`, `service_desk_id`, `project_id`, `project_key`.
-
-#### Example of app-specific Selenium action development with custom dataset
-You develop an app that adds some additional fields to specific types of Jira Service Management requests. In this case, you should develop a Selenium app-specific action:
-1. Create app-specific service desk requests with the *AppRequest* anchor in the summary: *AppRequest1*, *AppRequest2*, etc.
-1. Go to the search page of your Jira Service Management Data Center - `JSM_URL/issues/?jql=` and check that the JQL is correct: `summary ~ 'AppRequest*'`.
-1. Edit the `dc-app-performance-toolkit/app/jsm.yml` configuration file and set `custom_dataset_query: summary ~ 'AppRequest*'`.
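-
-    Optionally, the same JQL can be sanity-checked from a terminal through Jira's search REST API before a full toolkit run (a minimal sketch, assuming the default `admin`/`admin` credentials and a local development instance URL; adjust both to your environment):
-
-    ``` python
-    import requests
-
-    # Hypothetical base URL - replace with your development instance URL.
-    BASE_URL = "http://localhost:2990/jira"
-    resp = requests.get(
-        f"{BASE_URL}/rest/api/2/search",
-        params={"jql": "summary ~ 'AppRequest*'", "maxResults": 10},
-        auth=("admin", "admin"),
-    )
-    resp.raise_for_status()
-    print(resp.json()["total"], "matching requests found")
-    ```
-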
-1. Extend the example of an app-specific action for agents in `dc-app-performance-toolkit/app/extension/jsm/extension_ui_agents.py`.
-[Code example.](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/extension/jsm/extension_ui_agents.py)
-So, our test has to open an app-specific request in the agent view and measure the time to load this app-specific request.
-1. Extend the example of an app-specific action for customers in `dc-app-performance-toolkit/app/extension/jsm/extension_ui_customers.py`.
-[Code example.](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/extension/jsm/extension_ui_customers.py)
-So, our test has to open an app-specific request in the portal view and measure the time to load this app-specific request.
-1. If you need to run `app_specific_action` as a specific user, uncomment the `app_specific_user_login` function in the [code example](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/extension/jsm/extension_ui_agents.py). Note that in this case `test_1_selenium_custom_action` should follow just before the `test_2_selenium_agent_z_logout` or `test_2_selenium_customer_z_log_out` action.
-1. In `dc-app-performance-toolkit/app/selenium_ui/jsm_ui_agents.py`, review and uncomment the following block of code so that the newly created app-specific action is executed:
-``` python
-# def test_1_selenium_agent_custom_action(jsm_webdriver, jsm_datasets, jsm_screen_shots):
-#     extension_ui_agents.app_specific_action(jsm_webdriver, jsm_datasets)
-```
-
-1. In `dc-app-performance-toolkit/app/selenium_ui/jsm_ui_customers.py`, review and uncomment the following block of code so that the newly created app-specific action is executed:
-``` python
-# def test_1_selenium_customer_custom_action(jsm_webdriver, jsm_datasets, jsm_screen_shots):
-#     extension_ui_customers.app_specific_action(jsm_webdriver, jsm_datasets)
-```
-
-1. Run the toolkit with the `bzt jsm.yml` command to ensure that all Selenium actions, including `app_specific_action`, are successful.
-
-#### Example of app-specific Locust/JMeter action development
-
-You develop an app that introduces new GET and POST endpoints in Jira Service Management Data Center. In this case, you should develop a Locust or JMeter app-specific action.
-
-**Locust app-specific action development example**
-
-1. Extend the example of an app-specific action for agents in `dc-app-performance-toolkit/app/extension/jsm/extension_locust_agents.py`, so that the test calls the endpoint with a GET request, parses the response, uses this data to call another endpoint with a POST request, and measures the response time (a minimal skeleton of this flow is sketched below).
-[Code example.](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/extension/jsm/extension_locust_agents.py)
-1. Extend the example of an app-specific action for customers in `dc-app-performance-toolkit/app/extension/jsm/extension_locust_customers.py`, so that the test calls the endpoint with a GET request, parses the response, uses this data to call another endpoint with a POST request, and measures the response time.
-[Code example.](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/extension/jsm/extension_locust_customers.py)
-1. In `dc-app-performance-toolkit/app/jsm.yml` set `load_executor: locust` to use `locust` as the load executor.
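-
-    The GET-parse-POST flow described in the first two steps, reduced to a plain `requests` skeleton (the endpoint paths and field names below are hypothetical placeholders for your app's own API; in the real extension files the same calls are wrapped with the toolkit's measurement decorators shown in the linked code examples):
-
-    ``` python
-    import requests
-
-    # Placeholders - replace with your instance URL and your app's endpoints.
-    BASE_URL = "http://localhost:2990/jira"
-    session = requests.Session()
-    session.auth = ("admin", "admin")
-
-    # 1. Call an app-specific endpoint with a GET request and parse the response.
-    get_resp = session.get(f"{BASE_URL}/rest/your-app/1.0/items")
-    get_resp.raise_for_status()
-    item_id = get_resp.json()[0]["id"]
-
-    # 2. Use the parsed data to call another app-specific endpoint with a POST request.
-    post_resp = session.post(f"{BASE_URL}/rest/your-app/1.0/items/{item_id}/action",
-                             json={"comment": "triggered by load test"})
-    post_resp.raise_for_status()
-    ```
-
-1. Set desired execution percentage for `agent_standalone_extension`/`customer_standalone_extension`. 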
Default value is `0`, which means that `agent_standalone_extension`/`customer_standalone_extension` action will not be executed. Locust uses actions percentage as relative [weights](https://docs.locust.io/en/stable/writing-a-locustfile.html#weight-attribute), so if `some_action: 10` and `standalone_extension: 20` that means that `standalone_extension` will be called twice more. -Set `agent_standalone_extension`/`customer_standalone_extension` weight in accordance with the expected frequency of your app use case compared with other base actions. -1. App-specific tests could be run (if needed) as a specific user. Use `@run_as_specific_user(username='specific_user_username', password='specific_user_password')` decorator for that. -1. Run toolkit with `bzt jsm.yml` command to ensure that all Locust actions including `app_specific_action` are successful. - -**JMeter app-specific action development example** - -1. Check that `jsm.yml` file has correct settings of `application_hostname`, `application_protocol`, `application_port`, `application_postfix`, etc. -1. Set desired execution percentage for `agent_standalone_extension` and/or `customer_standalone_extension`. Default values are `0`, which means that `agent_standalone_extension` and `customer_standalone_extension` actions will not be executed. -For example, for app-specific action development you could set percentage of `agent_standalone_extension` and/or `customer_standalone_extension` to 100 and for all other actions to 0 - this way only `jmeter_agent_login_and_view_dashboard` and `agent_standalone_extension` or `jmeter_customer_login_and_view_dashboard` and `customer_standalone_extension` actions would be executed. -1. Navigate to `dc-app-performance-toolkit/app` folder and run from virtualenv(as described in `dc-app-performance-toolkit/README.md`): - - ``` bash - python util/jmeter/start_jmeter_ui.py --app jsm --type agents - # or - python util/jmeter/start_jmeter_ui.py --app jsm --type customers - ``` - -1. Open `Agents`/`Customers` thread group > `actions per login` and navigate to `agent_standalone_extension`/`customer_standalone_extension` -![Jira Service Management JMeter standalone extension](/platform/marketplace/images/jsm-standalone-extension.png) -1. Add GET `HTTP Request`: right-click to `agent_standalone_extension`/`customer_standalone_extension`` > `Add` > `Sampler` `HTTP Request`, chose method GET and set endpoint in Path. -![Jira Service Management JMeter standalone GET](/platform/marketplace/images/jsm-standalone-get-request.png) -1. Add `Regular Expression Extractor`: right-click to to newly created `HTTP Request` > `Add` > `Post processor` > `Regular Expression Extractor` -![Jira Service Management JMeter standalone regexp](/platform/marketplace/images/jsm-standalone-regexp.png) -1. Add `Response Assertion`: right-click to newly created `HTTP Request` > `Add` > `Assertions` > `Response Assertion` and add assertion with `Contains`, `Matches`, `Equals`, etc types. -![Jira Service Management JMeter standalone assertions](/platform/marketplace/images/jsm-standalone-assertions.png) -1. Add POST `HTTP Request`: right-click to `agent_standalone_extension`/`customer_standalone_extension` > `Add` > `Sampler` `HTTP Request`, chose method POST, set endpoint in Path and add Parameters or Body Data if needed. -1. Right-click on `View Results Tree` and enable this controller. -1. Click **Start** button and make sure that `login_and_view_dashboard` and `agent_standalone_extension`/`customer_standalone_extension` are successful. -1. 
Right-click on `View Results Tree` and disable this controller. It is important to disable `View Results Tree` controller before full-scale results generation. -1. Click **Save** button. -1. To make `agent_standalone_extension`/`customer_standalone_extension` executable during toolkit run edit `dc-app-performance-toolkit/app/jsm.yml` and set execution percentage of `agent_standalone_extension`/`customer_standalone_extension` accordingly to your use case frequency. -1. App-specific tests could be run (if needed) as a specific user. In the `agent_standalone_extension`/`customer_standalone_extension` uncomment `login_as_specific_user` controller. Navigate to the `username:password` config element and update values for `app_specific_username` and `app_specific_password` names with your specific user credentials. Also make sure that you located your app-specific tests between `login_as_specific_user` and `login_as_default_user_if_specific_user_was_loggedin` controllers. -1. Run toolkit to ensure that all JMeter actions including `agent_standalone_extension` and/or `customer_standalone_extension` are successful. - - -##### Using JMeter variables from the base script - -Use or access the following variables in your `agent_standalone_extension` action if needed: - -- `${request_id}` - request id being viewed or modified (e.g. 693484) -- `${request_key}` - request key being viewed or modified (e.g. ABC-123) -- `${request_project_id}` - project id being viewed or modified (e.g. 3423) -- `${request_project_key}` - project key being viewed or modified (e.g. ABC) -- `${request_service_desk_id}` - service_desk_id being viewed or modified (e.g. 86) -- `${s_prj_key}` - "small" project (<10k requests per project) key being viewed or modified (e.g. ABC) -- `${s_prj_id}` - "small" project id being viewed or modified (e.g. 123) -- `${s_service_desk_id}` - "small" project service_desk_id being viewed or modified (e.g. 12) -- `${s_prj_total_req}` - "small" project total requests (e.g. 444) -- `${s_prj_all_open_queue_id}` - "small" project "all open" queue id (e.g. 44) -- `${s_created_vs_resolved_id}` - "small" project "created vs resolved" report id (e.g. 45) -- `${s_time_to_resolution_id}` - "small" project "time to resolution" report id (e.g. 46) -- `${m_prj_key}` - "medium" project (>10k and <100k requests per project) key being viewed or modified (e.g. ABC) -- `${m_prj_id}` - "medium" project id being viewed or modified (e.g. 123) -- `${m_service_desk_id}` - "medium" project service_desk_id being viewed or modified (e.g. 12) -- `${m_prj_total_req}` - "medium" project total requests (e.g. 444) -- `${m_prj_all_open_queue_id}` - "medium" project "all open" queue id (e.g. 44) -- `${m_created_vs_resolved_id}` - "medium" project "created vs resolved" report id (e.g. 45) -- `${m_time_to_resolution_id}` - "medium" project "time to resolution" report id (e.g. 46) -- `${username}` - the logged in username (e.g. admin) - -Use or access the following variables in your `customer_standalone_extension` action if needed: -- `${s_service_desk_id}` - "small" project (<10k requests per project) service_desk_id being viewed or modified (e.g. 12) -- `${rt_project_id}` - project id (e.g. 12) -- `${rt_service_desk_id}` - service_desk_id (e.g. 12) -- `${rt_id}` - request type id for project with project id `${rt_project_id}` and service_desk_id `${rt_service_desk_id}` (e.g. 123) -- `${username}` - the logged in username (e.g. admin) - -{{% warning %}} -App-specific actions are required. 
Do not proceed with the next step until you have completed the app-specific actions development and obtained successful results from the toolkit run.
-{{% /warning %}}
-
----
-## Enterprise-scale environment
-
-After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**.
-
-### 5. Set up an enterprise-scale environment Jira Service Management Data Center on AWS
-
-We recommend that you use the [AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) (**How to deploy** tab) to deploy a Jira Service Management Data Center enterprise-scale environment. This Quick Start will allow you to deploy Jira Service Management Data Center with a new [Atlassian Standard Infrastructure](https://aws.amazon.com/quickstart/architecture/atlassian-standard-infrastructure/) (ASI) or into an existing one.
-
-The ASI is a Virtual Private Cloud (VPC) consisting of subnets, NAT gateways, security groups, bastion hosts, and other infrastructure components required by all Atlassian applications; the Quick Start then deploys Jira Service Management into this VPC. Deploying Jira Service Management with a new ASI takes around 50 minutes. With an existing one, it'll take around 30 minutes.
-
-#### Using the AWS Quick Start for Jira Service Management
-
-If you are a new user, perform an end-to-end deployment. This involves deploying Jira Service Management into a _new_ ASI:
-
-Navigate to **[AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) > How to deploy** tab **> Deploy into a new ASI** link.
-
-If you have already deployed the ASI separately by using the [ASI Quick Start](https://aws.amazon.com/quickstart/architecture/atlassian-standard-infrastructure/) or by deploying another Atlassian product (Jira, Bitbucket, or Confluence Data Center development environment) with ASI, deploy Jira Service Management into your existing ASI:
-
-Navigate to **[AWS Quick Start for Jira Data Center](https://aws.amazon.com/quickstart/architecture/jira/) > How to deploy** tab **> Deploy into your existing ASI** link.
-
-{{% note %}}
-You are responsible for the cost of the AWS services used while running this Quick Start reference deployment. There is no additional cost for using this Quick Start. For more information, go to [aws.amazon.com/pricing](https://aws.amazon.com/ec2/pricing/).
-{{% /note %}}
-
-To reduce costs, we recommend keeping your deployment up and running only during the performance runs.
-
-#### AWS cost estimation
-[AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide.
-Monthly charges will be based on your actual usage of AWS services and may vary from the estimates the Calculator has provided.
-
-*The prices below are approximate and may vary depending on factors such as region, instance type, DB deployment type, and others.
-
-| Stack | Estimated hourly cost ($) |
-| ----- | ------------------------- |
-| One Node Jira Service Management DC | 0.8 - 1.1 |
-| Two Nodes Jira Service Management DC | 1.2 - 1.7 |
-| Four Nodes Jira Service Management DC | 2.0 - 3.0 |
-
-#### Stop cluster nodes
-
-To reduce AWS infrastructure costs, you could stop cluster nodes when the cluster is standing idle.
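-
-The console steps below walk through suspending health checks and stopping the nodes and database by hand. If you prefer to script it, the same operations are available through boto3 (a sketch only; the group, instance, and database identifiers are placeholders for your own resources):
-
-``` python
-import boto3
-
-# Placeholders - replace with your own resource identifiers.
-ASG_NAME = "your-jsm-node-auto-scaling-group"
-INSTANCE_ID = "i-0123456789abcdef0"
-DB_INSTANCE_ID = "your-jsm-rds-instance"
-
-autoscaling = boto3.client("autoscaling")
-ec2 = boto3.client("ec2")
-rds = boto3.client("rds")
-
-# Stop a node: suspend health checks first, then stop the EC2 instance.
-autoscaling.suspend_processes(AutoScalingGroupName=ASG_NAME, ScalingProcesses=["HealthCheck"])
-ec2.stop_instances(InstanceIds=[INSTANCE_ID])
-
-# Stop the RDS database while the cluster stands idle (assuming a non-Aurora RDS instance).
-rds.stop_db_instance(DBInstanceIdentifier=DB_INSTANCE_ID)
-```
-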
-Cluster node might be stopped by using [Suspending and Resuming Scaling Processes](https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-suspend-resume-processes.html). - -To stop one node within the cluster, follow the instructions below: - -1. In the AWS console, go to **Services** > **EC2** > **Auto Scaling Groups** and open the necessary group to which belongs the node you want to stop. -1. Click **Edit** (in case you have New EC2 experience UI mode enabled, press `Edit` on `Advanced configuration`) and add `HealthCheck` to the `Suspended Processes`. Amazon EC2 Auto Scaling stops marking instances unhealthy as a result of EC2 and Elastic Load Balancing health checks. -1. Go to EC2 **Instances**, select instance, click **Instance state** > **Stop instance**. - -To return node into a working state follow the instructions: - -1. Go to EC2 **Instances**, select instance, click **Instance state** > **Start instance**, wait a few minutes for node to become available. -1. Go to EC2 **Auto Scaling Groups** and open the necessary group to which belongs the node you want to start. -1. Press **Edit** (in case you have New EC2 experience UI mode enabled, press `Edit` on `Advanced configuration`) and remove `HealthCheck` from `Suspended Processes` of Auto Scaling Group. - -#### Stop database - -To reduce AWS infrastructure costs database could be stopped when the cluster is standing idle. -Keep in mind that database would be **automatically started** in **7** days. - -To stop database: - -1. In the AWS console, go to **Services** > **RDS** > **Databases**. -1. Select cluster database. -1. Click on **Actions** > **Stop**. - -To start database: - -1. In the AWS console, go to **Services** > **RDS** > **Databases**. -1. Select cluster database. -1. Click on **Actions** > **Start**. - -#### Quick Start parameters - -All important parameters are listed and described in this section. For all other remaining parameters, we recommend using the Quick Start defaults. - -**Jira setup** - -| Parameter | Recommended Value | -| --------- |---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| Jira Product | ServiceManagement | -| Version | The Data Center App Performance Toolkit officially supports `4.20.26`, `5.4.10` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | - -**Cluster nodes** - -| Parameter | Recommended Value | -| --------- | ----------------- | -| Cluster node instance type | [m5.2xlarge](https://aws.amazon.com/ec2/instance-types/m5/) (This differs from our [public recommendation on c4.8xlarge](https://confluence.atlassian.com/enterprise/infrastructure-recommendations-for-enterprise-jira-instances-on-aws-969532459.html) for production instances but is representative for a lot of our Jira Service Management Data Center customers. The Data Center App Performance Toolkit framework is set up for concurrency we expect on this instance size. 
As such, underprovisioning will likely show a larger performance impact than expected.)| -| Maximum number of cluster nodes | 1 | -| Minimum number of cluster nodes | 1 | -| Cluster node instance volume size | 100 | - -**Database** - -| Parameter | Recommended Value | -| --------- | ----------------- | -| The database engine to deploy with | PostgresSQL | -| The database engine version to use | 11 | -| Database instance class | [db.m5.xlarge](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html#Concepts.DBInstanceClass.Summary) | -| RDS Provisioned IOPS | 1000 | -| Master (admin) password | Password1! | -| Enable RDS Multi-AZ deployment | false | -| Application user database password | Password1! | -| Database storage | 200 | - -{{% note %}} -The **Master (admin) password** will be used later when restoring the SQL database dataset. If password value is not set to default, you'll need to change `DB_PASS` value manually in the restore database dump script (later in [Preloading your Jira Service Management deployment with an enterprise-scale dataset](#preloading)). -{{% /note %}} - -**Networking (for new ASI)** - -| Parameter | Recommended Value | -| --------- | ----------------- | -| Trusted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Availability Zones | _Select two availability zones in your region_ | -| Permitted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Make instance internet facing | true | -| Key Name | _The EC2 Key Pair to allow SSH access. See [Amazon EC2 Key Pairs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for more info._ | - -**Networking (for existing ASI)** - -| Parameter | Recommended Value | -| --------- | ----------------- | -| Make instance internet facing | true | -| Permitted IP range | 0.0.0.0/0 _(for public access) or your own trusted IP range_ | -| Key Name | _The EC2 Key Pair to allow SSH access. See [Amazon EC2 Key Pairs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) for more info._ | - -#### Running the setup wizard - -After successfully deploying Jira Service Management Data Center in AWS, you'll need to configure it: - -1. In the AWS console, go to **Services** > **CloudFormation** > **Stack** > **Stack details** > **Select your stack**. -1. On the **Outputs** tab, copy the value of the **LoadBalancerURL** key. -1. Open **LoadBalancerURL** in your browser. This will take you to the Jira Service Management setup wizard. -1. On the **Set up application properties** page, populate the following fields: - - **Application Title**: any name for your Jira Service Management Data Center deployment - - **Mode**: Private - - **Base URL**: your stack's Elastic LoadBalancer URL - Click **Next**. -1. On the next page, populate the **Your License Key** field by either: - - Using your existing license, or - - Generating a Jira Service Management trial license, or - - Contacting Atlassian to be provided two time-bomb licenses for testing. Ask for it in your ECOHELP ticket. - Click **Next**. -1. On the **Set up administrator account** page, populate the following fields: - - **Full name**: any full name of the admin user - - **Email Address**: email address of the admin user - - **Username**: admin _(recommended)_ - - **Password**: admin _(recommended)_ - - **Confirm Password**: admin _(recommended)_ - Click **Next**. -1. On the **Set up email notifications** page, configure your email notifications, and then click **Finish**. 
-1. On the first page of the welcome setup select **English (United States)** language. Other languages are not supported by the toolkit. -1. After going through the welcome setup, click **Create new project** to create a new project. - ---- - -### 6. Preloading your Jira Service Management deployment with an enterprise-scale dataset - -Data dimensions and values for an enterprise-scale dataset are listed and described in the following table. - -| Data dimensions | Value for an enterprise-scale dataset | -| --------------- | ------------------------------------- | -| Attachments | ~2 000 000 | -| Comments | ~2 000 000 | -| Components | ~1 500 | -| Custom fields | ~400 | -| Organizations | ~300 | -| Requests | ~1 000 000 | -| Projects | 200 | -| Screen schemes | ~500 | -| Screens | ~3000 | -| Users | ~21 000 | -| Workflows | ~700 | -| Insight Schemas | ~ 6 | -| Insight Object types | ~ 50 | -| Insight Schema objects | ~ 1 000 000 | - -{{% note %}} -All the datasets use the standard `admin`/`admin` credentials. -{{% /note %}} - -#### Pre-loading the dataset is a three-step process: - -1. [Importing the main dataset](#importingdataset). To help you out, we provide an enterprise-scale dataset you can import either via the [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script or restore from xml backup file. -1. [Restoring attachments](#copyingattachments). We also provide attachments, which you can pre-load via an [upload_attachments.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/upload_attachments.sh) script. -1. [Re-indexing Jira Data Center](#reindexing). For more information, go to [Re-indexing Jira](https://confluence.atlassian.com/adminjiraserver/search-indexing-938847710.html). - -The following subsections explain each step in greater detail. - -#### Importing the main dataset - -You can load this dataset directly into the database (via a [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script), or import it via XML. - -##### Option 1 (recommended): Loading the dataset via populate_db.sh script (~40 min) - - -To populate the database with SQL: - -1. In the AWS console, go to **Services** > **EC2** > **Instances**. -1. On the **Description** tab, do the following: - - Copy the _Public IP_ of the Bastion instance. - - Copy the _Private IP_ of the Jira Service Management node instance. -1. Using SSH, connect to the Jira Service Management node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ```bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` - For more information, go to [Connecting your nodes over SSH](https://confluence.atlassian.com/adminjiraserver/administering-jira-data-center-on-aws-938846969.html#AdministeringJiraDataCenteronAWS-ConnectingtoyournodesoverSSH). -1. 
Download the [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script and make it executable: - - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/populate_db.sh && chmod +x populate_db.sh - ``` -1. Review the following `Variables section` of the script: - - ``` bash - DB_CONFIG="/var/atlassian/application-data/jira/dbconfig.xml" - JIRA_DB_NAME="jira" - JIRA_DB_USER="postgres" - JIRA_DB_PASS="Password1!" - # JSM section - JIRA_CURRENT_DIR="/opt/atlassian/jira-servicedesk/current" - JIRA_SETENV_FILE="${JIRA_CURRENT_DIR}/bin/setenv.sh" - JIRA_VERSION_FILE="/media/atl/jira/shared/jira-servicedesk.version" - ``` -1. Run the script: - - ``` bash - ./populate_db.sh --jsm 2>&1 | tee -a populate_db.log - ``` - -{{% note %}} -Do not close or interrupt the session. It will take about 40 min to restore SQL database. When SQL restoring is finished, an admin user will have `admin`/`admin` credentials. - -In case of a failure, check the `Variables` section and run the script one more time. -{{% /note %}} - -##### Option 2: Loading the dataset through XML import (~4 hours) - -We recommend that you only use this method if you are having problems with the [populate_db.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/populate_db.sh) script. - -1. In the AWS console, go to **Services** > **EC2** > **Instances**. -1. On the **Description** tab, do the following: - - Copy the _Public IP_ of the Bastion instance. - - Copy the _Private IP_ of the Jira Service Management node instance. -1. Using SSH, connect to the Jira Service Management node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ```bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` - For more information, go to [Connecting your nodes over SSH](https://confluence.atlassian.com/adminjiraserver/administering-jira-data-center-on-aws-938846969.html#AdministeringJiraDataCenteronAWS-ConnectingtoyournodesoverSSH). -1. Download the xml_backup.zip file corresponding to your Jira Service Management version. - - ``` bash - JSM_VERSION=$(sudo su jira -c "cat /media/atl/jira/shared/jira-servicedesk.version") - sudo su jira -c "wget https://centaurus-datasets.s3.amazonaws.com/jsm/${JSM_VERSION}/large/xml_backup.zip -O /media/atl/jira/shared/import/xml_backup.zip" - ``` -1. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html). -1. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Restore System.** from the menu. -1. Populate the **File name** field with `xml_backup.zip`. -1. Click **Restore** and wait until the import is completed. - -#### Restoring attachments (~2 hours) - -After [Importing the main dataset](#importingdataset), you'll now have to pre-load an enterprise-scale set of attachments. - -{{% note %}} -Populate DB and restore attachments scripts could be run in parallel in separate terminal sessions to save time. -{{% /note %}} - -1. 
Using SSH, connect to the Jira Service Management node via the Bastion instance: - - For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal): - - ```bash - ssh-add path_to_your_private_key_pem - export BASTION_IP=bastion_instance_public_ip - export NODE_IP=node_private_ip - export SSH_OPTS1='-o ServerAliveInterval=60' - export SSH_OPTS2='-o ServerAliveCountMax=30' - ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} - ``` - For more information, go to [Connecting your nodes over SSH](https://confluence.atlassian.com/adminjiraserver/administering-jira-data-center-on-aws-938846969.html#AdministeringJiraDataCenteronAWS-ConnectingtoyournodesoverSSH). -1. Download the [upload_attachments.sh](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/jira/upload_attachments.sh) script and make it executable: - - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/upload_attachments.sh && chmod +x upload_attachments.sh - ``` -1. Review the following `Variables section` of the script: - - ``` bash - # JSM version file location - JIRA_VERSION_FILE="/media/atl/jira/shared/jira-servicedesk.version" - ``` -1. Run the script: - - ``` bash - ./upload_attachments.sh --jsm 2>&1 | tee -a upload_attachments.log - ``` - -{{% note %}} -Do not close or interrupt the session. It will take about two hours to upload attachments to Elastic File Storage (EFS). -{{% /note %}} - -#### Re-indexing Jira Service Management Data Center - -For more information, go to [Re-indexing Jira](https://confluence.atlassian.com/adminjiraserver/search-indexing-938847710.html). -{{% note %}} -The re-index time for JSM 4.20.x is about ~30-50 minutes, while for JSM 5.4.x it can take significantly longer at around 110-130 minutes. This increase in re-index time is due to a known issue which affects Jira 5.4.x, and you can find more information about it in this ticket: [Re-Index: JSM 5.4.x](https://jira.atlassian.com/browse/JRASERVER-74787). -{{% /note %}} - - -1. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html). -1. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. -1. Select the **Full re-index** option. -1. Click **Re-Index** and wait until re-indexing is completed. -1. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. -1. Attach the screenshot to your ECOHELP ticket. - -Jira Service Management will be unavailable for some time during the re-indexing process. When finished, the **Acknowledge** button will be available on the re-indexing page. - ---- - -#### Index Recovery (~15 min, only for JSM versions 5.0.x and below. For JSM 5.1.0+ skip this step.) - -1. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html). -2. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. -3. In the **Index Recovery** click **Edit Settings**. -4. Set the recovery index schedule to 5min ahead of the current server time. -5. Wait ~10min until the index snapshot is created. - -Jira Service Management will be unavailable for some time during the index recovery process. - -6. 
Using SSH, connect to the Jira Service Management node via the Bastion instance:
-
-   For Linux or MacOS run following commands in terminal (for Windows use [Git Bash](https://git-scm.com/downloads) terminal):
-
-    ```bash
-    ssh-add path_to_your_private_key_pem
-    export BASTION_IP=bastion_instance_public_ip
-    export NODE_IP=node_private_ip
-    export SSH_OPTS1='-o ServerAliveInterval=60'
-    export SSH_OPTS2='-o ServerAliveCountMax=30'
-    ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP}
-    ```
-7. Once you're in the node, run the command corresponding to your Jira Service Management version:
-
-    - **JSM 5**
-    ```bash
-    sudo su -c "du -sh /media/atl/jira/shared/caches/indexesV2/snapshots/IndexSnapshot*" | tail -1
-    ```
-    **JSM 4**
-    ```bash
-    sudo su -c "du -sh /media/atl/jira/shared/export/indexsnapshots/IndexSnapshot*" | tail -1
-    ```
-
-8. The snapshot size and name will be shown in the console output.
-
-{{% note %}}
-Please note that the snapshot size must be around 2GB or larger.
-{{% /note %}}
-
----
-{{% note %}}
-After [Preloading your Jira Service Management deployment with an enterprise-scale dataset](#preloading), the admin user will have `admin`/`admin` credentials.
-It's recommended to change the default password from the UI account page for security reasons.
-{{% /note %}}
----
-
-### 7. Setting up an execution environment
-
-To generate performance results suitable for the Marketplace approval process, use a dedicated execution environment. This is a separate AWS EC2 instance to run the toolkit from. Running the toolkit from a dedicated instance rather than from a local machine eliminates network fluctuations and guarantees stable CPU and memory performance.
-
-1. Go to GitHub and create a fork of [dc-app-performance-toolkit](https://github.com/atlassian/dc-app-performance-toolkit).
-2. Clone the fork locally, then edit the `jsm.yml` configuration file. Set the enterprise-scale Jira Service Management Data Center parameters.
-3. If your application relies on or extends the functionality of **Insight**, make sure to set the `insight` variable to `True`.
-
-{{% warning %}}
-Do not push the real `application_hostname`, `admin_login` and `admin_password` values to the fork for security reasons.
-Instead, set those values directly in the `.yml` file on the execution environment instance.
-{{% /warning %}}
-
-   ``` yaml
-    application_hostname: test_jsm_instance.atlassian.com   # Jira Service Management DC hostname without protocol and port e.g. test-jsm.atlassian.com or localhost
-    application_protocol: http      # http or https
-    application_port: 80            # 80, 443, 8080, 2990, etc
-    secure: True                    # set False to allow insecure connections, e.g. when using self-signed SSL certificate
-    application_postfix:            # set to empty for CloudFormation deployment; e.g. /jira in case of url like http://localhost:2990/jira
-    admin_login: admin
-    admin_password: admin
-    load_executor: jmeter           # jmeter and locust are supported. jmeter by default.
-    concurrency_agents: 50          # number of concurrent virtual agents for jmeter or locust scenario
-    concurrency_customers: 150      # number of concurrent virtual customers for jmeter or locust scenario
-    test_duration: 45m
-    ramp-up: 3m                     # time to spin all concurrent users
-    total_actions_per_hour_agents: 5000         # number of total JMeter/Locust actions per hour
-    total_actions_per_hour_customers: 15000     # number of total JMeter/Locust actions per hour
-    insight: False                  # Set True to enable Insight specific tests
-
-  ```
-
-1. 
1. Push your changes to the forked repository.
1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/).
   * OS: select from Quick Start `Ubuntu Server 22.04 LTS`.
   * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/)
   * Storage size: `30` GiB
1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Session Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html).

   ```bash
   ssh -i path_to_pem_file ubuntu@INSTANCE_PUBLIC_IP
   ```

1. Install [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Set up Docker to be managed as a [non-root user](https://docs.docker.com/engine/install/linux-postinstall).
1. Connect to the AWS EC2 instance and clone the forked repository.

{{% note %}}
At this stage, app-specific actions are not needed yet. Use code from the `master` branch with your `jsm.yml` changes.
{{% /note %}}

You'll need to run the toolkit for each [test scenario](#testscenario) in the next section.

---

### 8. Running the test scenarios from execution environment against enterprise-scale Jira Service Management Data Center

Using the Data Center App Performance Toolkit for [Performance and scale testing your Data Center app](/platform/marketplace/developing-apps-for-atlassian-data-center-products/) involves two test scenarios:

- [Performance regression](#testscenario1)
- [Scalability testing](#testscenario2)

Each scenario will involve multiple test runs. The following subsections explain both in greater detail.

#### Scenario 1: Performance regression

This scenario helps to identify basic performance issues without the need to spin up a multi-node Jira Service Management DC. Make sure the app does not have any performance impact when it is not exercised.

##### Run 1 (~50 min)

To receive performance baseline results **without** an app installed:

1. Use SSH to connect to the execution environment.
1. Run the toolkit with Docker from the execution environment instance:

   ``` bash
   cd dc-app-performance-toolkit
   docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml
   ```

1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder:
   - `results_summary.log`: detailed run summary
   - `results.csv`: aggregated .csv file with all actions and timings
   - `bzt.log`: logs of the Taurus tool execution
   - `jmeter.*`: logs of the JMeter tool execution
   - `pytest.*`: logs of Pytest-Selenium execution

{{% note %}}
Review the `results_summary.log` file under the artifacts directory location. Make sure that the overall status is `OK` before moving to the next steps (a quick way to check this from the shell is sketched below). For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above.
{{% /note %}}
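If you prefer to verify the run status from the command line before moving on, here is a small sketch; the results path follows the folder layout described above, and the grep pattern is an assumption about the summary wording:

``` bash
# Print the status-related lines of the most recent JSM run summary
cd dc-app-performance-toolkit/app/results/jsm
LATEST_RUN=$(ls -td -- */ | head -n 1)          # newest results folder, e.g. 2024-01-15_10-00-00/
grep -iE 'status|success rate' "${LATEST_RUN}results_summary.log" | tail -n 20
```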
##### Run 2 (~50 min + Lucene Index timing test)

If you are submitting a Jira Service Management app, you are required to conduct a Lucene Index timing test. This involves conducting a foreground re-index on a single-node Data Center deployment (with your app installed) with a dataset that has 1M issues.

{{% note %}}
The re-index time for JSM 4.20.x is ~30-50 minutes, while for JSM 5.4.x it can take significantly longer at around 110-130 minutes. This increase in re-index time is due to a known issue which affects JSM 5.4.x; you can find more information about it in this ticket: [Re-Index: JSM 5.4.x](https://jira.atlassian.com/browse/JRASERVER-74787).
{{% /note %}}

{{% note %}}
If your Amazon RDS DB instance class is lower than `db.m5.xlarge`, you must wait ~2 hours after the previous re-index finishes before starting a new one.
{{% /note %}}

**Benchmark your re-index time with your app installed:**

1. Install the app you want to test.
1. Set up the app license.
1. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**.
1. Select the **Full re-index** option.
1. Click **Re-Index** and wait until re-indexing is completed.
1. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing.
1. Attach the screenshot to your ECOHELP ticket.

**Performance results generation with the app installed:**

1. Run the toolkit with Docker from the execution environment instance:

   ``` bash
   cd dc-app-performance-toolkit
   docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml
   ```

{{% note %}}
Review the `results_summary.log` file under the artifacts directory location. Make sure that the overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above.
{{% /note %}}


##### Generating a performance regression report

To generate a performance regression report:

1. Use SSH to connect to the execution environment.
1. Allow the current user (the default user for the execution environment is `ubuntu`) to access the Docker-generated reports:
   ``` bash
   sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results
   ```
1. Install and activate the `virtualenv` as described in `dc-app-performance-toolkit/README.md`.
1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder.
1. Edit the `performance_profile.yml` file (a sketch of these edits is shown after this list):
   - Under `runName: "without app"`, in the `fullPath` key, insert the full path to the results directory of [Run 1](#regressionrun1).
   - Under `runName: "with app"`, in the `fullPath` key, insert the full path to the results directory of [Run 2](#regressionrun2).
1. Run the following command:

   ``` bash
   python csv_chart_generator.py performance_profile.yml
   ```
1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file, and the performance scenario summary report.
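As referenced in the list above, a minimal sketch of locating and filling in the `fullPath` keys; the result folder timestamps are placeholders, and the exact key layout may differ between toolkit versions:

``` bash
cd dc-app-performance-toolkit/app/reports_generation
grep -nE 'runName|fullPath' performance_profile.yml   # show the lines to edit
# Example of the intended end state (placeholder timestamps):
#   runName: "without app"
#     fullPath: "/home/ubuntu/dc-app-performance-toolkit/app/results/jsm/2024-01-15_10-00-00"
#   runName: "with app"
#     fullPath: "/home/ubuntu/dc-app-performance-toolkit/app/results/jsm/2024-01-15_13-00-00"
```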
#### Analyzing report

Use the [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from the execution environment to a local drive:

1. From the local machine terminal (Git Bash terminal for Windows), run the command:
   ``` bash
   export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip
   scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports
   ```
1. Once completed, in the `./reports` folder you will be able to review the action timings with and without your app to see its impact on the performance of the instance. If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta.

#### Scenario 2: Scalability testing

The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. For this, you have to run scale testing on your app.

For many apps and extensions to Atlassian products, there should not be a significant performance difference between operating on a single node and operating across many nodes in a Jira Service Management DC deployment. To demonstrate the performance impacts of operating your app at scale, we recommend testing your Jira Service Management DC app in a cluster.


##### Run 3 (~50 min)

To receive scalability benchmark results for one-node Jira Service Management DC **with** app-specific actions:

1. Apply app-specific code changes to a new branch of the forked repo.
1. Use SSH to connect to the execution environment.
1. Pull the cloned fork repo branch with app-specific actions.
1. Run the toolkit with Docker from the execution environment instance:

   ``` bash
   cd dc-app-performance-toolkit
   docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml
   ```

{{% note %}}
Review the `results_summary.log` file under the artifacts directory location. Make sure that the overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above.
{{% /note %}}


##### Run 4 (~50 min)
{{% note %}}
Before scaling your DC, make sure that the AWS vCPU limit is not lower than the needed number.
Use the [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see the current limit.
The same article has instructions on how to increase the limit if needed.
{{% /note %}}

To receive scalability benchmark results for two-node Jira Service Management DC **with** app-specific actions:

1. In the AWS console, go to **CloudFormation** > **Stack details** > **Select your stack**.
2. On the **Update** tab, select **Use current template**, and then click **Next**.
3. Enter `2` in the **Maximum number of cluster nodes** and the **Minimum number of cluster nodes** fields.
4. Click **Next** > **Next** > **Update stack** and wait until the stack is updated.

{{% warning %}}
If you get the `BastionPrivIp cannot be updated` error during the update, use these steps as a workaround:
1. In the AWS console, go to **EC2** > **Auto Scaling** > **Auto Scaling Groups**.
2. On the **Auto Scaling Groups** page, select **your stack ASG** and click **Edit**.
3. Enter `2` in the **Desired capacity**, **Minimum capacity** and **Maximum capacity** fields.
4. Scroll down, click the **Update** button and wait until the stack is updated.
{{% /warning %}}

5. Log in as a user with the **Jira System Administrators** [global permission](https://confluence.atlassian.com/adminjiraserver/managing-global-permissions-938847142.html).
6. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Clustering** and check that there is the expected number of nodes with node status `ACTIVE` and application status `RUNNING`, and make sure that the Jira Service Management index has successfully synchronized to the second node.

{{% warning %}}
If index synchronization fails for some reason (e.g. the application status is `MAINTENANCE`), follow these steps:
 1. Get back and go through the **[Index Recovery steps](#indexrecovery)**.
 2. Proceed to the AWS console, go to **EC2** > **Instances**, select the problematic node, then **Instance state** > **Terminate instance**.
 3. Wait until the new node is recreated by the ASG (a quick way to list the stack's running instances is sketched below); the index should be picked up by the new node automatically.
{{% /warning %}}
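A minimal sketch for checking from the command line which instances of your stack are currently running; the stack name is a placeholder, and the availability of the AWS CLI and the CloudFormation tag filter on your instances are assumptions:

``` bash
aws ec2 describe-instances \
  --filters "Name=tag:aws:cloudformation:stack-name,Values=your-stack-name" \
            "Name=instance-state-name,Values=running" \
  --query "Reservations[].Instances[].{Id:InstanceId,Type:InstanceType,AZ:Placement.AvailabilityZone,Launched:LaunchTime}" \
  --output table
```

7. 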
Run toolkit with docker from the execution environment instance: - - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml - ``` - -{{% note %}} -Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. -{{% /note %}} - - -##### Run 5 (~50 min) -{{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. -{{% /note %}} - -To receive scalability benchmark results for four-node Jira Service Management DC with app-specific actions: - -1. Scale your Jira Service Management Data Center deployment to 3 nodes as described in [Run 4](#run4). -1. Check Index is synchronized to the new node #3 the same way as in [Run 4](#run4). -1. Scale your Jira Service Management Data Center deployment to 4 nodes as described in [Run 4](#run4). -1. Check Index is synchronized to the new node #4 the same way as in [Run 4](#run4). -1. Run toolkit with docker from the execution environment instance: - - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml - ``` - -{{% note %}} -Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. -{{% /note %}} - - -#### Generating a report for scalability scenario - -To generate a scalability report: - -1. Use SSH to connect to execution environment. -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `scale_profile.yml` file: - - For `runName: "1 Node"`, in the `fullPath` key, insert the full path to results directory of [Run 3](#run3). - - For `runName: "2 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 4](#run4). - - For `runName: "4 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 5](#run5). -1. Run the following command from the activated `virtualenv` (as described in `dc-app-performance-toolkit/README.md`): - ``` bash - python csv_chart_generator.py scale_profile.yml - ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and summary report. - - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. 
Once completed, in the `./reports` folder, you will be able to review action timings on Jira Service Management Data Center with different numbers of nodes. If you see a significant variation in any action timings between configurations, we recommend taking a look into the app implementation to understand the root cause of this delta. - -{{% warning %}} -After completing all your tests, delete your Jira Service Management Data Center stacks. -{{% /warning %}} - -#### Attaching testing results to ECOHELP ticket - -{{% warning %}} -Do not forget to attach performance testing results to your ECOHELP ticket. -{{% /warning %}} - -1. Make sure you have two reports folders: one with performance profile and second with scale profile results. - Each folder should have `profile.csv`, `profile.png`, `profile_summary.log` and profile run result archives. Archives - should contain all raw data created during the run: `bzt.log`, selenium/jmeter/locust logs, .csv and .yml files, etc. -2. Attach two reports folders to your ECOHELP ticket. - -## Support -In case of technical questions, issues or problems with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index 42e8b2a07..f63693e00 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -42,7 +42,7 @@ run the toolkit in an **enterprise-scale environment**. --- {{% note %}} -DCAPT has fully transitioned to Terraform deployment. CloudFormation deployment option will be no longer supported starting from January 2024. +DCAPT has fully transitioned to Terraform deployment. CloudFormation deployment option is no longer supported. {{% /note %}} ### 1. Setting up Jira Service Management Data Center development environment @@ -90,12 +90,12 @@ Below process describes how to install low-tier Jira Service Management DC with 6. Optional variables to override: - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. From local terminal start the installation (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +7. From local terminal (Git Bash for Windows users) start the installation (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -376,12 +376,12 @@ Below process describes how to install enterprise-scale Jira Service Management 6. Optional variables to override: - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). -7. 
From local terminal start the installation (~40min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +7. From local terminal (Git Bash for Windows users) start the installation (~40min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -594,12 +594,12 @@ To receive scalability benchmark results for two-node Jira Service Management DC 1. Navigate to `dc-app-perfrormance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. -3. From local terminal start scaling (~20 min) (see [note](https://github.com/atlassian/dc-app-performance-toolkit/tree/master/app/util/k8s#note-for-windows-users) for Windows git bash users) +3. From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ - -v "$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ - -v "$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ - -v "$PWD/logs:/data-center-terraform/logs" \ + -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ + -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ + -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` 4. Use SSH to connect to execution environment. 
From 71e22b1d90d8c83ff8ceeb4d8ae3bd17f2075503 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 8 Dec 2023 12:10:18 +0200 Subject: [PATCH 056/152] added debug for subnet delete --- app/util/k8s/terminate_cluster.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 1ea86da8d..a5733c03a 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -339,7 +339,7 @@ def delete_igw(ec2_resource, vpc_id): logging.error(f"Deleting igw failed with error: {e}") -def delete_subnets(ec2_resource, vpc_id): +def delete_subnets(ec2_resource, vpc_id, aws_region): vpc_resource = ec2_resource.Vpc(vpc_id) subnets_all = vpc_resource.subnets.all() subnets = [ec2_resource.Subnet(subnet.id) for subnet in subnets_all] @@ -347,7 +347,18 @@ def delete_subnets(ec2_resource, vpc_id): try: for sub in subnets: logging.info(f"Removing subnet with id: {sub.id}") - sub.delete() + try: + sub.delete() + except botocore.exceptions.ClientError as e: + error_code = e.response['Error']['Code'] + if error_code == 'DependencyViolation': + ec2_client = boto3.client('ec2', region_name=aws_region) + subnet_network_interfaces = ec2_client.describe_network_interfaces( + Filters=[{'Name': 'subnet-id', 'Values': [sub.id]}]) + subnet_network_interface = subnet_network_interfaces.get('NetworkInterfaces', []) + logging.info(subnet_network_interface) + raise SystemExit(f'Could not delete subnet {sub.id}, {e}') + except Boto3Error as e: logging.error(f"Delete of subnet failed with error: {e}") @@ -486,6 +497,7 @@ def terminate_vpc(vpc_name, aws_region=None): return vpc_id = vpc[0].id logging.info(f"Checking RDS for VPC {vpc_name}.") + delete_rds(aws_region, vpc_id) logging.info(f"Checking load balancers for VPC {vpc_name}.") @@ -498,7 +510,7 @@ def terminate_vpc(vpc_name, aws_region=None): delete_igw(ec2_resource, vpc_id) logging.info(f"Checking subnets for VPC {vpc_name}.") - delete_subnets(ec2_resource, vpc_id) + delete_subnets(ec2_resource, vpc_id, aws_region) logging.info(f"Checking route tables for VPC {vpc_name}.") delete_route_tables(ec2_resource, vpc_id) From 0a0764ea7df957e642279b2963eecfe52355c0c3 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 8 Dec 2023 21:49:16 +0200 Subject: [PATCH 057/152] handle load tests assetion for jmeter --- app/jmeter/jira.jmx | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/app/jmeter/jira.jmx b/app/jmeter/jira.jmx index 501247ab1..b2aab5a2d 100644 --- a/app/jmeter/jira.jmx +++ b/app/jmeter/jira.jmx @@ -1,5 +1,5 @@ - + @@ -3061,12 +3061,12 @@ if ( sleep_time > 0 ) { - ["project-key"]="\"${project_key}\""; + .*project-key.*${project_key} Assertion.response_data false - 16 + 2 @@ -6247,12 +6247,12 @@ if ( sleep_time > 0 ) { - "{\"currentViewConfig\":{\"id\":${kanban_board_id},\" + .*currentViewConfig.*id.*${kanban_board_id} Assertion.response_data false - 16 + 2 @@ -7107,12 +7107,12 @@ if ( sleep_time > 0 ) { - "{\"currentViewConfig\":{\"id\":${scrum_board_id},\" + .*currentViewConfig.*id.*${scrum_board_id} Assertion.response_data false - 16 + 2 @@ -7967,12 +7967,12 @@ if ( sleep_time > 0 ) { - "{\"currentViewConfig\":{\"id\":${scrum_board_id},\" + .*currentViewConfig.*id.*${scrum_board_id} Assertion.response_data false - 16 + 2 @@ -9602,7 +9602,7 @@ if ( sleep_time > 0 ) { - + false saveConfig From 65123c383fa9382576cde5c44fdafd81ff230f6c Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 8 Dec 2023 22:00:03 
+0200 Subject: [PATCH 058/152] Fix locust script assertion for jira --- app/locustio/jira/http_actions.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/app/locustio/jira/http_actions.py b/app/locustio/jira/http_actions.py index 080ccf112..b485878ef 100644 --- a/app/locustio/jira/http_actions.py +++ b/app/locustio/jira/http_actions.py @@ -350,10 +350,7 @@ def view_project_summary(locust): content = r.content.decode('utf-8') logger.locust_info(f"{params.action_name}. View project {project_key}: {content}") - assert_string = f'["project-key"]="\\"{project_key}\\"' - if not (assert_string in content): - logger.error(f'{params.err_message} {project_key}') - assert assert_string in content, params.err_message + assert re.compile(f'.*project-key.*{project_key}').search(content), params.err_message # 505 /rest/webResources/1.0/resources locust.post('/rest/webResources/1.0/resources', @@ -794,7 +791,8 @@ def kanban_board(locust, board_id): if project_plan: project_plan = project_plan.replace('\\', '') logger.locust_info(f"{params.action_name}: key = {project_key}, id = {project_id}, plan = {project_plan}") - assert f'currentViewConfig\"{{\"id\":{board_id}', 'Could not open board' + + assert re.compile(f'currentViewConfig.*id.*{board_id}').search(content), f'Could not open board with id {board_id}' # 1005 /rest/webResources/1.0/resources locust.post('/rest/webResources/1.0/resources', @@ -874,7 +872,7 @@ def scrum_board(locust, board_id): if project_plan: project_plan = project_plan.replace('\\', '') logger.locust_info(f"{params.action_name}: key = {project_key}, id = {project_id}, plan = {project_plan}") - assert f'currentViewConfig\"{{\"id\":{board_id}', 'Could not open board' + assert re.compile(f'currentViewConfig.*id.*{board_id}').search(content), f'Could not open board with id {board_id}' # 1110 /rest/webResources/1.0/resources locust.post('/rest/webResources/1.0/resources', @@ -967,7 +965,8 @@ def backlog_board(locust, board_id): if project_plan: project_plan = project_plan.replace('\\', '') logger.locust_info(f"{params.action_name}: key = {project_key}, id = {project_id}, plan = {project_plan}") - assert f'currentViewConfig\"{{\"id\":{board_id}', 'Could not open board' + + assert re.compile(f'currentViewConfig.*id.*{board_id}').search(content), f'Could not open board with id {board_id}' # 1210 /rest/webResources/1.0/resources locust.post('/rest/webResources/1.0/resources', From 352d2946bcd622e5b32c5a7b010c4f723df5aa9e Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Sat, 9 Dec 2023 23:50:44 +0100 Subject: [PATCH 059/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 3c3bec259..8ee9042f9 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -106,6 +106,35 @@ ] } ] + }, + { + "version": "9.12.0", + "data": [ + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-0994fc7e2430e3e96", + "us-west-2": "snap-07aef4838130717c3", + "us-west-1": "snap-0fd621f93382cf26c", + "us-east-1": "snap-0d3ebf63dade0af4d" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-12-0", + "us-west-2": 
"arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-12-0", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-12-0", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-12-0" + } + ] + } + ] } ] }, @@ -475,4 +504,4 @@ } ] } -} +} \ No newline at end of file From bd6d9437583ddb51965d95cbbca9a47673e3d2e5 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Mon, 11 Dec 2023 11:34:42 +0100 Subject: [PATCH 060/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 8ee9042f9..b73f3614d 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -133,6 +133,30 @@ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-12-0" } ] + }, + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-03c8a6b3ce1fe313e", + "us-west-2": "snap-0277faeaf7a08baa8", + "us-west-1": "snap-08cd3c15ea0b782a6", + "us-east-1": "snap-0a1eee5018811941a" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-12-0", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-12-0", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-12-0", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-12-0" + } + ] } ] } From a8fab7ff05e84ca3ce6898be24e472feb8376af6 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Mon, 11 Dec 2023 22:05:22 +0200 Subject: [PATCH 061/152] disable result tree --- app/jmeter/jira.jmx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jmeter/jira.jmx b/app/jmeter/jira.jmx index b2aab5a2d..f04943348 100644 --- a/app/jmeter/jira.jmx +++ b/app/jmeter/jira.jmx @@ -9602,7 +9602,7 @@ if ( sleep_time > 0 ) { - + false saveConfig From b3a2c52017e510487df7b64985382436bbca02a1 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Mon, 11 Dec 2023 23:56:51 +0100 Subject: [PATCH 062/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index b73f3614d..112609789 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -269,6 +269,35 @@ ] } ] + }, + { + "version": "5.12.0", + "data": [ + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-04c3a6b9c4b2bf64c", + "us-west-2": "snap-00f9f5eae07afa172", + "us-west-1": "snap-0e5788fc1422bee45", + "us-east-1": "snap-01e1bb89b157d01de" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-12-0", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-12-0", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-12-0", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-12-0" + } + ] + } + ] } ] }, From 7fa99675a50268272d99ef9d8dc550958080199d Mon Sep 17 00:00:00 2001 From: bot-dcapt 
<56587558+bot-dcapt@users.noreply.github.com> Date: Tue, 12 Dec 2023 22:25:43 +0100 Subject: [PATCH 063/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 112609789..ec5f79af2 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -296,6 +296,30 @@ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-12-0" } ] + }, + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-0b2cf3b7f74da3fa8", + "us-west-2": "snap-01143a8a415ce534d", + "us-west-1": "snap-06eec09639ee66634", + "us-east-1": "snap-0045671a003a875bf" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-12-0", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-12-0", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-12-0", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-12-0" + } + ] } ] } From b637f1449f73dc4276fbcbcb7c05f19e3317d5bd Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Mon, 18 Dec 2023 12:42:32 +0200 Subject: [PATCH 064/152] fix subnet delete --- app/util/k8s/terminate_cluster.py | 37 ++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 8 deletions(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index a5733c03a..820930107 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -348,17 +348,18 @@ def delete_subnets(ec2_resource, vpc_id, aws_region): for sub in subnets: logging.info(f"Removing subnet with id: {sub.id}") try: + ec2_client = boto3.client('ec2', region_name=aws_region) + subnet_network_interfaces = ec2_client.describe_network_interfaces( + Filters=[{'Name': 'subnet-id', 'Values': [sub.id]}]) + subnet_network_interfaces = subnet_network_interfaces.get('NetworkInterfaces', []) + if subnet_network_interfaces: + logging.info(f'VPC {sub.id} has dependency - network interfaces: {subnet_network_interfaces}') + for subnet_network_interface in subnet_network_interfaces: + delete_network_interface(ec2_client, + subnet_network_interface['NetworkInterfaceId']) sub.delete() except botocore.exceptions.ClientError as e: - error_code = e.response['Error']['Code'] - if error_code == 'DependencyViolation': - ec2_client = boto3.client('ec2', region_name=aws_region) - subnet_network_interfaces = ec2_client.describe_network_interfaces( - Filters=[{'Name': 'subnet-id', 'Values': [sub.id]}]) - subnet_network_interface = subnet_network_interfaces.get('NetworkInterfaces', []) - logging.info(subnet_network_interface) raise SystemExit(f'Could not delete subnet {sub.id}, {e}') - except Boto3Error as e: logging.error(f"Delete of subnet failed with error: {e}") @@ -404,6 +405,26 @@ def delete_security_groups(ec2_resource, vpc_id): logging.error(f"Delete of security group failed with error: {e}") +def delete_network_interface(ec2_client, network_interface_id): + timeout = 180 # 3 min + sleep_time = 10 + attempts = timeout // sleep_time + + for attempt in range(1, attempts): + try: + # Attempt to delete the network interface + ec2_client.delete_network_interface(NetworkInterfaceId=network_interface_id) + logging.info(f"Network interface {network_interface_id} 
deleted successfully.") + return + + except botocore.exceptions.ClientError as e: + if attempt == attempts: + raise e + else: + print(f"Attempt {attempt}: {e}") + sleep(sleep_time) + + def get_vpc_region_by_name(vpc_name): for rgn in REGIONS: ec2_resource = boto3.resource('ec2', region_name=rgn) From f9642fd4585262f52acf4e5683f0c456f5ec03cb Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Mon, 18 Dec 2023 19:22:52 +0200 Subject: [PATCH 065/152] Add new Jsm/Jira version --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index da2d449fe..d5bb5f030 100644 --- a/README.md +++ b/README.md @@ -5,10 +5,10 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat ## Supported versions * Supported Jira versions: - * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.4.10` and `8.20.26` + * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.4.10` and `9.12.0` * Supported Jira Service Management versions: - * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.4.10` and `4.20.26` + * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.4.10` and `5.12.0` * Supported Confluence versions: * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.14` and `8.5.1` From e0d9e3e39d1fbdbdea123b4a9e235fe0c413e5dd Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Tue, 19 Dec 2023 06:01:06 +0100 Subject: [PATCH 066/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/bamboo.yml --- app/bamboo.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 3d9aded22..22864dae5 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -125,7 +125,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.71" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.109" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From 2a0300cfb6828f9f2f9605a977078d1f4e3549e1 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Tue, 19 Dec 2023 06:01:07 +0100 Subject: [PATCH 067/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/confluence.yml --- app/confluence.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/confluence.yml b/app/confluence.yml index 54407a1cd..8dc6cba0d 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -118,7 +118,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.71" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.109" # Supports Chrome version 120. 
You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From d61dc55a5149b9d98a50568cd38f122490d50628 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Tue, 19 Dec 2023 06:01:07 +0100 Subject: [PATCH 068/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/bitbucket.yml --- app/bitbucket.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/bitbucket.yml b/app/bitbucket.yml index d8d11b14d..215271e10 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -91,7 +91,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.71" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.109" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From be8b666317f1be0415f6873f632fd42792ce0aab Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Tue, 19 Dec 2023 06:01:08 +0100 Subject: [PATCH 069/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/jira.yml --- app/jira.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jira.yml b/app/jira.yml index 333a31455..08dd8bc2a 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -119,7 +119,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.71" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.109" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From dd2333b6a3b84d8c1b7086145f3866cc73c7d941 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Tue, 19 Dec 2023 06:01:08 +0100 Subject: [PATCH 070/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/jsm.yml --- app/jsm.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jsm.yml b/app/jsm.yml index f1c9fe1e3..4eaea5eb2 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -171,7 +171,7 @@ modules: httpsampler.ignore_failed_embedded_resources: "true" selenium: chromedriver: - version: "120.0.6099.71" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing + version: "120.0.6099.109" # Supports Chrome version 120. 
You can refer to https://googlechromelabs.github.io/chrome-for-testing reporting: - data-source: sample-labels module: junit-xml From a0d44218d425d9402ea25b1f1e7a1116b0d4fbab Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 19 Dec 2023 10:56:07 +0200 Subject: [PATCH 071/152] remove snapshots from .json --- app/util/k8s/dcapt-snapshots.json | 53 ------------------------------- 1 file changed, 53 deletions(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index ec5f79af2..9dd6e9d78 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -1,59 +1,6 @@ { "jira": { "versions": [ - { - "version": "8.20.26", - "data": [ - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-2": "snap-0c0c388d53cd4153b", - "us-west-2": "snap-04c97a366b279c0c8", - "us-west-1": "snap-0c4828c55fb868c85", - "us-east-1": "snap-0ed5739736a819039" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-2": "snap-0592bc60820536611", - "us-west-2": "snap-09c303e2256c39750", - "us-west-1": "snap-009f8618111b0df8a", - "us-east-1": "snap-0e54d81f7913b7519" - } - ] - }, - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-26", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-8-20-26", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-8-20-26", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-8-20-26" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-26", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-8-20-26", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-8-20-26", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-8-20-26" - } - ] - } - ] - }, { "version": "9.4.10", "data": [ From 02df990343e4d177dc428d93a012ea2fa30bcd86 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 19 Dec 2023 10:57:11 +0200 Subject: [PATCH 072/152] remove snapshots from .json --- app/util/k8s/dcapt-snapshots.json | 53 ------------------------------- 1 file changed, 53 deletions(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 9dd6e9d78..5f751dd4e 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -111,59 +111,6 @@ }, "jsm": { "versions": [ - { - "version": "4.20.26", - "data": [ - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-2": "snap-0f7aa03eea37f3304", - "us-west-2": "snap-0db2080204c6af65b", - "us-west-1": "snap-0e59b22bdf8afc48a", - "us-east-1": "snap-0039e3761f9a41435" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-2": "snap-096d1185a5fee02ea", - "us-west-2": "snap-0e8f9a8451a8a5471", - "us-west-1": "snap-0ef9883afd1b1c4c8", - "us-east-1": "snap-0b53ebf533e7497fc" - } - ] - }, - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-26", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-4-20-26", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-4-20-26", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-4-20-26" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-2": 
"arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-26", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-4-20-26", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-4-20-26", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-4-20-26" - } - ] - } - ] - }, { "version": "5.4.10", "data": [ From 3780df0e93dfd8fd562f342bc4bb8ea888998e47 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 19 Dec 2023 11:21:16 +0200 Subject: [PATCH 073/152] another order --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index d5bb5f030..bd54fe6fe 100644 --- a/README.md +++ b/README.md @@ -5,10 +5,10 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat ## Supported versions * Supported Jira versions: - * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.4.10` and `9.12.0` + * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.0` and `9.4.10` * Supported Jira Service Management versions: - * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.4.10` and `5.12.0` + * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.0` and `5.4.10` * Supported Confluence versions: * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.14` and `8.5.1` From 30f627dd3323c721cc664dc315073470ead5da1e Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 19 Dec 2023 11:41:55 +0200 Subject: [PATCH 074/152] fix flake8 --- app/util/k8s/terminate_cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 820930107..798eeb0f7 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -359,7 +359,7 @@ def delete_subnets(ec2_resource, vpc_id, aws_region): subnet_network_interface['NetworkInterfaceId']) sub.delete() except botocore.exceptions.ClientError as e: - raise SystemExit(f'Could not delete subnet {sub.id}, {e}') + raise SystemExit(f'Could not delete subnet {sub.id}, {e}') except Boto3Error as e: logging.error(f"Delete of subnet failed with error: {e}") From d657c48cd38584664a5cfed5256558a7d35496d0 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Wed, 20 Dec 2023 12:13:52 +0200 Subject: [PATCH 075/152] reporting/add-rep-type-to-rep-name --- app/reports_generation/csv_chart_generator.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/app/reports_generation/csv_chart_generator.py b/app/reports_generation/csv_chart_generator.py index 9319ef736..f61a93594 100644 --- a/app/reports_generation/csv_chart_generator.py +++ b/app/reports_generation/csv_chart_generator.py @@ -6,9 +6,9 @@ def main(): - results_dir = __get_results_dir() - config = config_provider.get_config() + results_dir = __get_results_dir(config) + agg_csv = csv_aggregator.aggregate(config, results_dir) agg, scenario_status = summary_aggregator.aggregate(config, results_dir) chart_generator_config = config_provider.get_chart_generator_config(config, agg_csv) @@ -21,9 +21,9 @@ def main(): judgement.judge(**judgement_kwargs) 
-def __get_results_dir() -> Path: +def __get_results_dir(config) -> Path: path = (Path(__file__).absolute().parents[1] / "results" / "reports" / - datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) + f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{config['profile']}") path.mkdir(parents=True, exist_ok=True) return path From 7fa831b6a0e4d6b8540c90f38fecb9cfc8481ac6 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Thu, 21 Dec 2023 11:11:03 +0200 Subject: [PATCH 076/152] add product name to report --- app/reports_generation/csv_chart_generator.py | 7 ++++--- .../scripts/summary_aggregator.py | 14 ++++++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/app/reports_generation/csv_chart_generator.py b/app/reports_generation/csv_chart_generator.py index f61a93594..3f5b024e3 100644 --- a/app/reports_generation/csv_chart_generator.py +++ b/app/reports_generation/csv_chart_generator.py @@ -7,7 +7,8 @@ def main(): config = config_provider.get_config() - results_dir = __get_results_dir(config) + product_name = summary_aggregator.__get_product_name(config) + results_dir = __get_results_dir(config, product_name) agg_csv = csv_aggregator.aggregate(config, results_dir) agg, scenario_status = summary_aggregator.aggregate(config, results_dir) @@ -21,9 +22,9 @@ def main(): judgement.judge(**judgement_kwargs) -def __get_results_dir(config) -> Path: +def __get_results_dir(config, product_name) -> Path: path = (Path(__file__).absolute().parents[1] / "results" / "reports" / - f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{config['profile']}") + f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{product_name}_{config['profile']}") path.mkdir(parents=True, exist_ok=True) return path diff --git a/app/reports_generation/scripts/summary_aggregator.py b/app/reports_generation/scripts/summary_aggregator.py index 32f9e1722..3eebd3926 100644 --- a/app/reports_generation/scripts/summary_aggregator.py +++ b/app/reports_generation/scripts/summary_aggregator.py @@ -1,6 +1,7 @@ from pathlib import Path from typing import List +from constants import SUPPORTED_TEST_ATLASSIAN_PRODUCTS from scripts.utils import validate_file_exists, resolve_path, validate_config SUMMARY_FILE_NAME = "results_summary.log" @@ -17,6 +18,19 @@ def __get_summary_files(config: dict) -> List[Path]: return summary_files +def __get_product_name(config): + summary_files = __get_summary_files(config) + for file in summary_files: + with file.open('r') as f: + for line in f: + if "Application" in line: + file_content = line + for product in SUPPORTED_TEST_ATLASSIAN_PRODUCTS: + if product in file_content: + return product + print("WARNING: No product name found in log files.") + + def __get_run_names(config: dict) -> list: run_names = [] for run in config['runs']: From d17189400582e122bee482cfeb3bda78439efe85 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 22 Dec 2023 11:22:05 +0200 Subject: [PATCH 077/152] Fix locust tests for Jira --- app/locustio/common_utils.py | 1 - app/locustio/jira/http_actions.py | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/app/locustio/common_utils.py b/app/locustio/common_utils.py index 27227d718..5034d361b 100644 --- a/app/locustio/common_utils.py +++ b/app/locustio/common_utils.py @@ -42,7 +42,6 @@ 'Connection': 'keep-alive', 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 'User-Agent': 'xx', - 'Authorization': 'Basic' } NO_TOKEN_HEADERS = { "Accept-Language": "en-US,en;q=0.5", diff --git 
a/app/locustio/jira/http_actions.py b/app/locustio/jira/http_actions.py index b485878ef..b55e404f6 100644 --- a/app/locustio/jira/http_actions.py +++ b/app/locustio/jira/http_actions.py @@ -165,10 +165,10 @@ def create_issue(locust): @jira_measure('locust_create_issue:open_quick_create') def create_issue_open_quick_create(): raise_if_login_failed(locust) - + print(f'storage: {locust.session_data_storage}') # 200 /secure/QuickCreateIssue!default.jspa?decorator=none - r = locust.post(f'/secure/QuickCreateIssue!default.jspa?', - json={'atl_token': locust.session_data_storage["token"]}, + r = locust.post(f'/secure/QuickCreateIssue!default.jspa', + json={'atlassian.xsrf.token': locust.session_data_storage["token"]}, headers=ADMIN_HEADERS, catch_response=True) content = r.content.decode('utf-8') From fe456671fda394ffadf937e20fa1d2427b95a03d Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 22 Dec 2023 11:23:39 +0200 Subject: [PATCH 078/152] Fix locust tests for Jira --- app/locustio/jira/http_actions.py | 1 - 1 file changed, 1 deletion(-) diff --git a/app/locustio/jira/http_actions.py b/app/locustio/jira/http_actions.py index b55e404f6..9cbd18c50 100644 --- a/app/locustio/jira/http_actions.py +++ b/app/locustio/jira/http_actions.py @@ -165,7 +165,6 @@ def create_issue(locust): @jira_measure('locust_create_issue:open_quick_create') def create_issue_open_quick_create(): raise_if_login_failed(locust) - print(f'storage: {locust.session_data_storage}') # 200 /secure/QuickCreateIssue!default.jspa?decorator=none r = locust.post(f'/secure/QuickCreateIssue!default.jspa', json={'atlassian.xsrf.token': locust.session_data_storage["token"]}, From 17de4d7a61315c2cbdde1def9bfe9b0aed57205e Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 22 Dec 2023 11:28:48 +0200 Subject: [PATCH 079/152] Fix locust tests for Jira --- app/locustio/jira/http_actions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/locustio/jira/http_actions.py b/app/locustio/jira/http_actions.py index 9cbd18c50..9d588b241 100644 --- a/app/locustio/jira/http_actions.py +++ b/app/locustio/jira/http_actions.py @@ -166,7 +166,7 @@ def create_issue(locust): def create_issue_open_quick_create(): raise_if_login_failed(locust) # 200 /secure/QuickCreateIssue!default.jspa?decorator=none - r = locust.post(f'/secure/QuickCreateIssue!default.jspa', + r = locust.post('/secure/QuickCreateIssue!default.jspa', json={'atlassian.xsrf.token': locust.session_data_storage["token"]}, headers=ADMIN_HEADERS, catch_response=True) From ae89fcda8a5bc7fb0e4fcbf0d52f0ecee716aa81 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 24 Dec 2023 06:25:35 +0000 Subject: [PATCH 080/152] Update dependency com.fasterxml.jackson.core:jackson-core to v2.16.1 --- app/util/bamboo/bamboo_dataset_generator/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml index 766cd6679..70f477a55 100644 --- a/app/util/bamboo/bamboo_dataset_generator/pom.xml +++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml @@ -67,7 +67,7 @@ com.fasterxml.jackson.core jackson-core - 2.15.2 + 2.16.1 com.fasterxml.jackson.core From d3ef17e4efe0bf89a62d100ec9208c412dd3d3dd Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Sun, 24 Dec 2023 06:25:39 +0000 Subject: [PATCH 081/152] Update dependency com.fasterxml.jackson.core:jackson-databind to v2.16.1 --- 
app/util/bamboo/bamboo_dataset_generator/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml index 766cd6679..de809807e 100644 --- a/app/util/bamboo/bamboo_dataset_generator/pom.xml +++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml @@ -72,7 +72,7 @@ com.fasterxml.jackson.core jackson-databind - 2.15.2 + 2.16.1 com.jayway.jsonpath From 7cdbd4dd10e7ebfbb9e5e31a9cbe11cfe2fff30b Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 26 Dec 2023 11:20:52 +0200 Subject: [PATCH 082/152] make general selector for both versions --- app/selenium_ui/jsm/pages/customer_selectors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/selenium_ui/jsm/pages/customer_selectors.py b/app/selenium_ui/jsm/pages/customer_selectors.py index d09273023..f2f5142a8 100644 --- a/app/selenium_ui/jsm/pages/customer_selectors.py +++ b/app/selenium_ui/jsm/pages/customer_selectors.py @@ -79,7 +79,7 @@ class RequestSelectors: comment_request_field = OrderedDict({"4.13.0": (By.CSS_SELECTOR, 'textarea#comment-on-request'), "4.15.0": (By.CLASS_NAME, 'ProseMirror')}) add_comment_button = (By.XPATH, "//button[contains(text(),'Add')]") - share_request_button = (By.CSS_SELECTOR, 'a.js-share-request') + share_request_button = (By.XPATH, "//*[self::a or self::span][normalize-space(text())='Share']") share_request_search_field = (By.ID, 's2id_participants') share_request_dropdown = (By.ID, 'select2-drop') share_request_dropdown_results = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li') From 9c2b3c98cbb4fa1f9f85f3aae502bdc2ff6fb929 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Tue, 26 Dec 2023 12:16:50 +0100 Subject: [PATCH 083/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 5f751dd4e..2862b583a 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -438,6 +438,35 @@ ] } ] + }, + { + "version": "8.9.8", + "data": [ + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-04b1409ae2afa2d65", + "us-west-2": "snap-0d84a0253f8c204a6", + "us-west-1": "snap-0c4d6529a7fd04179", + "us-east-1": "snap-02a3125029b85438b" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-8", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-8" + } + ] + } + ] } ] }, From 59935c4e30d997b4afa92ee27b4fc0a006c5edc0 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Tue, 26 Dec 2023 13:52:02 +0100 Subject: [PATCH 084/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 2862b583a..d35eff105 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ 
b/app/util/k8s/dcapt-snapshots.json @@ -465,6 +465,30 @@ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-8" } ] + }, + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-06d634d448d684fba", + "us-west-2": "snap-059f151fb3be40498", + "us-west-1": "snap-011826a57a02a31e2", + "us-east-1": "snap-0759f03d54c2138cc" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-8-9-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-8-9-8", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-8-9-8" + } + ] } ] } From 32d6ff03a7e7bcc8633cc7f0d4a1b2b67bf88065 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 27 Dec 2023 12:52:45 +0100 Subject: [PATCH 085/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index d35eff105..8a9fcff6c 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -491,6 +491,35 @@ ] } ] + }, + { + "version": "7.21.20", + "data": [ + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-01e565f1a0c5d3f2c", + "us-west-2": "snap-0ed9ca01d9c27755c", + "us-west-1": "snap-05fcaa3d397e7e507", + "us-east-1": "snap-046e472e93ae1ad2b" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-20", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-20", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-20", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-20" + } + ] + } + ] } ] }, From 7d302fb8b08ba4d78b044bc7b0e39c2fb4e7c76a Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 27 Dec 2023 14:33:25 +0100 Subject: [PATCH 086/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 8a9fcff6c..186e8b941 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -518,6 +518,30 @@ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-20" } ] + }, + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-0de936ce723f9582c", + "us-west-2": "snap-0380b8972fb9f088e", + "us-west-1": "snap-0178b8609cb5396ab", + "us-east-1": "snap-02f3a73aef1b80ffe" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-20", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-7-21-20", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-7-21-20", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-21-20" + } + ] } ] } From 
f4db04b8d11eba58e18d545925826824fab9e0e8 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Wed, 27 Dec 2023 17:54:33 +0100 Subject: [PATCH 087/152] DCA-2142 remove old snapshots --- README.md | 2 +- app/util/k8s/dcapt-snapshots.json | 106 ------------------------------ 2 files changed, 1 insertion(+), 107 deletions(-) diff --git a/README.md b/README.md index bd54fe6fe..628fa0136 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.14` and `8.5.1` * Supported Bitbucket Server versions: - * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.21.16` and `8.9.5` + * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.8` and `7.21.20` * Supported Crowd versions: * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.1.4` diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 186e8b941..9091de3f7 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -333,112 +333,6 @@ }, "bitbucket": { "versions": [ - { - "version": "7.21.16", - "data": [ - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-2": "snap-0d4bbe0cf3056c0ee", - "us-west-2": "snap-011517fb12a96d7f6", - "us-west-1": "snap-0a7ef1f68ad32c5a0", - "us-east-1": "snap-0f86be2be1f2f4ddf" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-2": "snap-04351bd6779e3ee76", - "us-west-2": "snap-0bebe8f1c94590199", - "us-west-1": "snap-0ce49affbc7cc1b07", - "us-east-1": "snap-06eb9677c8467e578" - } - ] - }, - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-16", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-7-21-16", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-7-21-16", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-21-16" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-16", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-16", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-16", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-16" - } - ] - } - ] - }, - { - "version": "8.9.5", - "data": [ - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-2": "snap-0261a9130a9fd7618", - "us-west-2": "snap-0562a2bce19b8da31", - "us-west-1": "snap-0af7263fa69303a6c", - "us-east-1": "snap-0acd7f844242a09ee" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-2": "snap-01806166c1afe8bd5", - "us-west-2": "snap-0b274a971cccb6a27", - "us-west-1": "snap-07a647d51a0b5028f", - "us-east-1": "snap-035b01bc7520af6fb" - } - ] - }, - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-5", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-8-9-5", - 
"us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-8-9-5", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-8-9-5" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-5", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-5", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-5", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-5" - } - ] - } - ] - }, { "version": "8.9.8", "data": [ From a9dfed2cda624df09e5d802607a2df9b3c6f8200 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 28 Dec 2023 15:17:39 +0200 Subject: [PATCH 088/152] added tests for jsm 5.12 --- app/selenium_ui/jsm/modules_customers.py | 10 ++++++-- app/selenium_ui/jsm/pages/customer_pages.py | 24 +++++++++++++++++++ .../jsm/pages/customer_selectors.py | 10 +++++++- 3 files changed, 41 insertions(+), 3 deletions(-) diff --git a/app/selenium_ui/jsm/modules_customers.py b/app/selenium_ui/jsm/modules_customers.py index 3efea7a8d..feadc04ab 100644 --- a/app/selenium_ui/jsm/modules_customers.py +++ b/app/selenium_ui/jsm/modules_customers.py @@ -157,12 +157,18 @@ def measure(): @print_timing("selenium_customer_share_request_with_customer:search_for_customer_to_share_with") def sub_measure(): - customer_request.search_for_customer_to_share_with(customer_name='performance_customer') + if webdriver.app_version.base_version.startswith('5.12'): + customer_request.search_for_customer_to_share_with_react_ui(customer_name='performance_customer') + else: + customer_request.search_for_customer_to_share_with(customer_name='performance_customer') sub_measure() @print_timing("selenium_customer_share_request:share_request_with_customer") def sub_measure(): - customer_request.share_request() + if webdriver.app_version.base_version.startswith('5.12'): + customer_request.share_request_react() + else: + customer_request.share_request() sub_measure() measure() diff --git a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py index f0e4b5b1a..49dfbfb3f 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ b/app/selenium_ui/jsm/pages/customer_pages.py @@ -1,4 +1,5 @@ import random +import time from datetime import datetime from packaging import version @@ -118,6 +119,26 @@ def comment_request(self): self.get_element(RequestSelectors.add_comment_button).click() self.wait_until_invisible(RequestSelectors.add_comment_button) + def search_for_customer_to_share_with_react_ui(self, customer_name): + self.wait_until_visible(RequestSelectors.share_request_button).click() + self.wait_until_visible(RequestSelectors.share_request_search_field_react) + self.action_chains().move_to_element(self.get_element(RequestSelectors.share_request_search_field_react)).\ + send_keys(customer_name).perform() + self.wait_until_visible(RequestSelectors.share_request_dropdown_react) + # Chose random customer to share with + self.wait_until_visible(RequestSelectors.share_request_dropdown_one_elem_react) + + random_customer_name = random.choice([i.text for i in + self.get_elements(RequestSelectors.share_request_dropdown_one_elem_react)]) + + self.action_chains().move_to_element( + self.get_element(RequestSelectors.share_request_search_field_arrow_react)).click().perform() + self.action_chains().move_to_element(self.get_element( + 
RequestSelectors.share_request_search_field_react)).send_keys( + random_customer_name).perform() + self.wait_until_visible(RequestSelectors.share_request_dropdown_one_elem_react).click() + + def search_for_customer_to_share_with(self, customer_name): if not self.element_exists(RequestSelectors.share_request_button): print(f'Request {self.page_url} does not have Share button') @@ -153,6 +174,9 @@ def share_request(self): self.wait_until_visible(RequestSelectors.share_request_modal_button).click() self.wait_until_invisible(RequestSelectors.share_request_modal_button) + def share_request_react(self): + self.wait_until_invisible(RequestSelectors.share_request_dropdown_one_elem_react) + self.wait_until_clickable(RequestSelectors.share_request_button_request_widget).click() class Requests(BasePage): diff --git a/app/selenium_ui/jsm/pages/customer_selectors.py b/app/selenium_ui/jsm/pages/customer_selectors.py index f2f5142a8..07189dd18 100644 --- a/app/selenium_ui/jsm/pages/customer_selectors.py +++ b/app/selenium_ui/jsm/pages/customer_selectors.py @@ -80,15 +80,23 @@ class RequestSelectors: "4.15.0": (By.CLASS_NAME, 'ProseMirror')}) add_comment_button = (By.XPATH, "//button[contains(text(),'Add')]") share_request_button = (By.XPATH, "//*[self::a or self::span][normalize-space(text())='Share']") + share_request_button_request_widget = (By.XPATH, "//button[contains(@data-testid, 'popup-share')]") share_request_search_field = (By.ID, 's2id_participants') share_request_dropdown = (By.ID, 'select2-drop') share_request_dropdown_results = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li') share_request_dropdown_one_elem = (By.CSS_SELECTOR, '#select2-drop>ul.select2-results>li>div>span.user-picker-display-name') - share_request_modal_button = (By.XPATH, "//button[contains(text(),'Share')]") list_of_requests_types = (By.ID, "cv-request-content") + # JSM 5.12 Changes in UI + share_request_search_field_react = (By.XPATH, "//div[starts-with(@id, 'react-select-') and contains(@id, '-placeholder')]") # JSM 5.12+ + share_request_search_field_arrow_react = (By.XPATH, "//div[contains(@class, 'indicatorContainer')]//span[contains(@role, 'img')]") + share_request_dropdown_react = (By.XPATH, "//div[starts-with(@id, 'react-select-') and contains(@id, '-listbox')]") + share_request_dropdown_one_elem_react = (By.XPATH, + "//div[starts-with(@id, 'react-select-') and contains(@id, 'option')]/div/div[2]/div[1]/span") + share_request_widget_react = (By.ID, 'cp-request-share-root') + class InsightSelectors: insight_field_icon = (By.CLASS_NAME, "js-rlabs-sd-customfield-object-picker") From cb92d44dbbf7149b6c933b452af0f64d3f5d9c67 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 28 Dec 2023 23:43:52 +0200 Subject: [PATCH 089/152] debug --- app/selenium_ui/jsm/modules_customers.py | 1 + app/selenium_ui/jsm/pages/customer_pages.py | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/app/selenium_ui/jsm/modules_customers.py b/app/selenium_ui/jsm/modules_customers.py index feadc04ab..2580ac5a7 100644 --- a/app/selenium_ui/jsm/modules_customers.py +++ b/app/selenium_ui/jsm/modules_customers.py @@ -150,6 +150,7 @@ def share_request_with_customer(webdriver, datasets): customer_request = CustomerRequest(webdriver, portal_id=datasets['customer_service_desk_id'], request_key=datasets['customer_request_key']) customer_request.go_to() + customer_request.if_error_message(datasets) customer_request.wait_for_page_loaded() @print_timing("selenium_customer_share_request_with_customer") diff --git 
a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py index 49dfbfb3f..850c753f2 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ b/app/selenium_ui/jsm/pages/customer_pages.py @@ -178,6 +178,12 @@ def share_request_react(self): self.wait_until_invisible(RequestSelectors.share_request_dropdown_one_elem_react) self.wait_until_clickable(RequestSelectors.share_request_button_request_widget).click() + def if_error_message(self, dataset): + from selenium.webdriver.common.by import By + if self.element_exists((By.CSS_SELECTOR, "p.cp-error-panel-message")): + print(self.get_element((By.CSS_SELECTOR, "p.cp-error-panel-message")).text) + print(f'Customer {dataset["customer_username"]} does not have access to {dataset["customer_service_desk_id"]}, {dataset["customer_service_key"]}') + class Requests(BasePage): def __init__(self, driver, all_requests=False): From 37d7575751e615124efd1544275fc2dac8cbe1c5 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 28 Dec 2023 23:55:07 +0200 Subject: [PATCH 090/152] added wait --- app/selenium_ui/jsm/pages/customer_pages.py | 1 + 1 file changed, 1 insertion(+) diff --git a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py index 850c753f2..d9fafaab5 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ b/app/selenium_ui/jsm/pages/customer_pages.py @@ -133,6 +133,7 @@ def search_for_customer_to_share_with_react_ui(self, customer_name): self.action_chains().move_to_element( self.get_element(RequestSelectors.share_request_search_field_arrow_react)).click().perform() + self.wait_until_invisible(RequestSelectors.share_request_dropdown_react) self.action_chains().move_to_element(self.get_element( RequestSelectors.share_request_search_field_react)).send_keys( random_customer_name).perform() From a6f7464eb8c72f23175a63da2c2c2425a99175bd Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Thu, 28 Dec 2023 23:52:49 +0100 Subject: [PATCH 091/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 9091de3f7..54f828705 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -328,6 +328,36 @@ } ], "build_number": "8804" + }, + { + "version": "8.5.4", + "data": [ + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-021574360a781464f", + "us-west-2": "snap-0684fba1bfe25da2e", + "us-west-1": "snap-0cfc6520219671dd1", + "us-east-1": "snap-00d1f3a18d176ceca" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-8-5-4", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-8-5-4", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-8-5-4", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-8-5-4" + } + ] + } + ], + "build_number": "9012" } ] }, From df4b6b1619b5f53c5653eeecc01c5d1f63bfcf09 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Fri, 29 Dec 2023 04:13:26 +0100 Subject: [PATCH 092/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- 
app/util/k8s/dcapt-snapshots.json | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 9091de3f7..9fdf00d55 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -328,6 +328,36 @@ } ], "build_number": "8804" + }, + { + "version": "8.5.4", + "data": [ + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-09802dd4106f2686a", + "us-west-2": "snap-0c063b8fc243497b8", + "us-west-1": "snap-05513a2b127a63d6c", + "us-east-1": "snap-08d42b48214eaf3bf" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-8-5-4", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-8-5-4", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-8-5-4", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-8-5-4" + } + ] + } + ], + "build_number": "9012" } ] }, From db2eeb340deb4f731df0cb4fe73797aa5e36da0d Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 29 Dec 2023 11:30:33 +0200 Subject: [PATCH 093/152] added locator to error message --- app/selenium_ui/conftest.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/app/selenium_ui/conftest.py b/app/selenium_ui/conftest.py index 60839ce1a..6a66c02dd 100644 --- a/app/selenium_ui/conftest.py +++ b/app/selenium_ui/conftest.py @@ -145,7 +145,10 @@ def wrapper(*args, **kwargs): success = False # https://docs.python.org/2/library/sys.html#sys.exc_info exc_type, full_exception = sys.exc_info()[:2] - error_msg = f"Failed measure: {interaction} - {exc_type.__name__}" + locator_debug_message = "" + if 'Locator' in full_exception.msg: + locator_debug_message = f" - {full_exception.msg.split('Locator:')[-1].strip()}" + error_msg = f"Failed measure: {interaction} - {exc_type.__name__}{locator_debug_message}" end = time() timing = str(int((end - start) * 1000)) From cb759427c30ca2e1a41ccf2d8e6f79d97a376dfd Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 29 Dec 2023 13:03:32 +0200 Subject: [PATCH 094/152] fix msg --- app/selenium_ui/conftest.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/app/selenium_ui/conftest.py b/app/selenium_ui/conftest.py index 6a66c02dd..39c0c1a55 100644 --- a/app/selenium_ui/conftest.py +++ b/app/selenium_ui/conftest.py @@ -146,8 +146,9 @@ def wrapper(*args, **kwargs): # https://docs.python.org/2/library/sys.html#sys.exc_info exc_type, full_exception = sys.exc_info()[:2] locator_debug_message = "" - if 'Locator' in full_exception.msg: - locator_debug_message = f" - {full_exception.msg.split('Locator:')[-1].strip()}" + if 'msg' in dir(full_exception): + if 'Locator' in full_exception.msg: + locator_debug_message = f" - {full_exception.msg.split('Locator:')[-1].strip()}" error_msg = f"Failed measure: {interaction} - {exc_type.__name__}{locator_debug_message}" end = time() timing = str(int((end - start) * 1000)) From 60363e06c8d5f1048d05d91d8d1eba29a4af4af9 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Fri, 29 Dec 2023 12:12:03 +0100 Subject: [PATCH 095/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json 
b/app/util/k8s/dcapt-snapshots.json index 634ab765f..520803907 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -106,6 +106,35 @@ ] } ] + }, + { + "version": "9.4.14", + "data": [ + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-084abf5dfca234b9d", + "us-west-2": "snap-0dab6fba99c0995b9", + "us-west-1": "snap-0061ace6d46497f56", + "us-east-1": "snap-0934c1aa5c62be5dc" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-14", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-4-14", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-4-14", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-14" + } + ] + } + ] } ] }, From aa8130d0ae84ae5ac4f7ab475fabec80d3f84794 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 29 Dec 2023 14:29:37 +0200 Subject: [PATCH 096/152] fix comma --- app/selenium_ui/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/selenium_ui/conftest.py b/app/selenium_ui/conftest.py index 39c0c1a55..e709fa6a5 100644 --- a/app/selenium_ui/conftest.py +++ b/app/selenium_ui/conftest.py @@ -148,7 +148,7 @@ def wrapper(*args, **kwargs): locator_debug_message = "" if 'msg' in dir(full_exception): if 'Locator' in full_exception.msg: - locator_debug_message = f" - {full_exception.msg.split('Locator:')[-1].strip()}" + locator_debug_message = f" - {full_exception.msg.split('Locator:')[-1].strip().replace(',','')}" error_msg = f"Failed measure: {interaction} - {exc_type.__name__}{locator_debug_message}" end = time() timing = str(int((end - start) * 1000)) From 3dce7c61fb43901fce7f523073336f3291a23286 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Fri, 29 Dec 2023 13:43:31 +0100 Subject: [PATCH 097/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 520803907..cb77d5202 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -133,6 +133,30 @@ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-14" } ] + }, + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-00af725c87690569d", + "us-west-2": "snap-06e76435bf1cba625", + "us-west-1": "snap-098b6daa64c6f0a28", + "us-east-1": "snap-04417460cb27d17cb" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-14", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-4-14", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-4-14", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-4-14" + } + ] } ] } From 1134de8531944ade4f96a858eb4fdc3878c2aa5d Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 29 Dec 2023 17:56:15 +0200 Subject: [PATCH 098/152] added 7197 --- app/util/k8s/dcapt-snapshots.json | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index cb77d5202..6b7c762d9 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ 
b/app/util/k8s/dcapt-snapshots.json @@ -435,6 +435,36 @@ } ], "build_number": "9012" + }, + { + "version": "7.19.17", + "data": [ + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-0b8723cc5a8f8becc", + "us-west-2": "snap-05f02fd97ba755034", + "us-west-1": "snap-065efd48cb9072b1d", + "us-east-1": "snap-030f0bb7870b60c73" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-17", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-19-17", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-19-17", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-17" + } + ] + } + ], + "build_number": "8804" } ] }, From eee9fd15d2bdf8f09c250953f0ab2e7243968653 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 29 Dec 2023 18:10:08 +0200 Subject: [PATCH 099/152] remove 8.5.1 Confluence --- README.md | 2 +- app/util/k8s/dcapt-small.tfvars | 2 +- app/util/k8s/dcapt-snapshots.json | 54 ------------------------------- app/util/k8s/dcapt.tfvars | 2 +- 4 files changed, 3 insertions(+), 57 deletions(-) diff --git a/README.md b/README.md index 628fa0136..865f30af5 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.0` and `5.4.10` * Supported Confluence versions: - * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.14` and `8.5.1` + * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.14` and `8.5.4` * Supported Bitbucket Server versions: * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.8` and `7.21.20` diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 30fa95084..87ae0c6dd 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -155,7 +155,7 @@ jira_db_master_password = "Password1!" ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -confluence_version_tag = "8.5.1" +confluence_version_tag = "8.5.4" # Dataset size. Used only when snapshots_json_file_path is defined. 
Defaults to large confluence_dataset_size = "small" diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 6b7c762d9..e5b57d26b 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -274,60 +274,6 @@ }, "confluence": { "versions": [ - { - "version": "8.5.1", - "build_number": "9012", - "data": [ - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-8-5-1", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-8-5-1", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-8-5-1", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-8-5-1" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-8-5-1", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-8-5-1", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-8-5-1", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-8-5-1" - } - ] - }, - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-1": "snap-099a9ce9e9c902815", - "us-east-2": "snap-074a2fdca0497b6b6", - "us-west-1": "snap-01b07710d49b113b2", - "us-west-2": "snap-031dad82fa7367921" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-1": "snap-09d97f19261bd463e", - "us-east-2": "snap-008cc496f440198de", - "us-west-1": "snap-02b3a229b530c8a87", - "us-west-2": "snap-01d8ebf9701613c4c" - } - ] - } - ] - }, { "version": "7.19.14", "data": [ diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 35f6bd466..82d3ef909 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -166,7 +166,7 @@ jira_db_master_password = "Password1!" ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -confluence_version_tag = "8.5.1" +confluence_version_tag = "8.5.4" # Dataset size. Used only when snapshots_json_file_path is defined. 
Defaults to large confluence_dataset_size = "large" From 2c3af972d4e09f5a3dc9642ef59d05a08d3c7506 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 29 Dec 2023 22:10:23 +0200 Subject: [PATCH 100/152] new confluence 7.19.17 small dataset --- app/util/k8s/dcapt-snapshots.json | 78 ++++++++++--------------------- 1 file changed, 24 insertions(+), 54 deletions(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index e5b57d26b..e3a2ca03e 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -274,60 +274,6 @@ }, "confluence": { "versions": [ - { - "version": "7.19.14", - "data": [ - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-2": "snap-00f5e8147604a017e", - "us-west-2": "snap-0e8cda8b822c13483", - "us-west-1": "snap-00ec3df2af108617f", - "us-east-1": "snap-02132c2d6b67b6a87" - } - ] - }, - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-14", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-19-14", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-19-14", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-14" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-2": "snap-0a175c4fd76039985", - "us-west-2": "snap-09b197387bd1b013f", - "us-west-1": "snap-0f50de55b7ebc1a2f", - "us-east-1": "snap-002beb3b098f557f2" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-14", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-19-14", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-19-14", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-19-14" - } - ] - } - ], - "build_number": "8804" - }, { "version": "8.5.4", "data": [ @@ -408,6 +354,30 @@ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-17" } ] + }, + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-062952d964320477f", + "us-west-2": "snap-0dec73af33bec28f5", + "us-west-1": "snap-07d8242990b92dd0c", + "us-east-1": "snap-0720b5df2ed27b435" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-17", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-19-17", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-19-17", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-19-17" + } + ] } ], "build_number": "8804" From 5a8d39319bd6677e8efe0c2168605047acce7a55 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 29 Dec 2023 22:19:29 +0200 Subject: [PATCH 101/152] remove 9.4.10 jira --- app/util/k8s/dcapt-snapshots.json | 53 ------------------------------- 1 file changed, 53 deletions(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index e3a2ca03e..873dc8c4f 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -1,59 +1,6 @@ { "jira": { "versions": [ - { - "version": "9.4.10", - "data": [ - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-2": "snap-084e99e384dcfbe31", - 
"us-west-2": "snap-08202454ed728b840", - "us-west-1": "snap-05ec9d6216ead357c", - "us-east-1": "snap-00b18b7028faaefb5" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-2": "snap-019fd367ec397b1f3", - "us-west-2": "snap-04b680cf28fd7a8c2", - "us-west-1": "snap-00d008f66bb5e7f35", - "us-east-1": "snap-0d9855f9597b68de9" - } - ] - }, - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-10", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-4-10", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-4-10", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-10" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-10", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-4-10", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-4-10", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-4-10" - } - ] - } - ] - }, { "version": "9.12.0", "data": [ From a307a56357c90eca0653d9a3f19f54acd88a41a3 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Fri, 29 Dec 2023 22:28:39 +0100 Subject: [PATCH 102/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 873dc8c4f..9e6d1fca4 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -106,6 +106,35 @@ ] } ] + }, + { + "version": "9.12.1", + "data": [ + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-0e413a90c64812130", + "us-west-2": "snap-0149ccaaeb547726a", + "us-west-1": "snap-02c808bcecdac15b3", + "us-east-1": "snap-0dedc16a22652e0f1" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-12-1", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-12-1", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-12-1", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-12-1" + } + ] + } + ] } ] }, From 8f0d6b1c7753ab842b462695c2e05992bc7052ec Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Sat, 30 Dec 2023 14:25:30 +0200 Subject: [PATCH 103/152] bump jira version 9.12.1 --- README.md | 2 +- app/util/k8s/dcapt-snapshots.json | 77 ++++++++++--------------------- 2 files changed, 25 insertions(+), 54 deletions(-) diff --git a/README.md b/README.md index 865f30af5..1025cd740 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat ## Supported versions * Supported Jira versions: - * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.0` and `9.4.10` + * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.1` and `9.4.10` * Supported Jira Service Management versions: * Jira Service Management [Long Term Support 
release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.0` and `5.4.10` diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 9e6d1fca4..19aca2b79 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -1,59 +1,6 @@ { "jira": { "versions": [ - { - "version": "9.12.0", - "data": [ - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-2": "snap-0994fc7e2430e3e96", - "us-west-2": "snap-07aef4838130717c3", - "us-west-1": "snap-0fd621f93382cf26c", - "us-east-1": "snap-0d3ebf63dade0af4d" - } - ] - }, - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-12-0", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-12-0", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-12-0", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-12-0" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-2": "snap-03c8a6b3ce1fe313e", - "us-west-2": "snap-0277faeaf7a08baa8", - "us-west-1": "snap-08cd3c15ea0b782a6", - "us-east-1": "snap-0a1eee5018811941a" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-12-0", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-12-0", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-12-0", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-12-0" - } - ] - } - ] - }, { "version": "9.4.14", "data": [ @@ -133,6 +80,30 @@ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-12-1" } ] + }, + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-0a7b4d27c09013274", + "us-west-2": "snap-0cd172e21b743dae6", + "us-west-1": "snap-0d6437e1830772993", + "us-east-1": "snap-07d85f52da6a564ed" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-12-1", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-12-1", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-12-1", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-12-1" + } + ] } ] } From a2bcaf913f19c6b811f2082f9e7710058e2d49dd Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Sat, 30 Dec 2023 14:36:28 +0100 Subject: [PATCH 104/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 19aca2b79..6f09fd78c 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -216,6 +216,35 @@ ] } ] + }, + { + "version": "5.4.14", + "data": [ + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-02757b69de7aeb3f8", + "us-west-2": "snap-00886aef7ae2c209b", + "us-west-1": "snap-0a120110a512b97fb", + "us-east-1": "snap-0be0df5470e3a312d" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-14", + "us-west-2": 
"arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-4-14", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-4-14", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-14" + } + ] + } + ] } ] }, From e61898017b4076d166bbf92e54e4376d84b5c06d Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Sat, 30 Dec 2023 17:28:57 +0200 Subject: [PATCH 105/152] added EC for debug --- app/selenium_ui/base_page.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/selenium_ui/base_page.py b/app/selenium_ui/base_page.py index 6392eabcc..4bb1210fd 100644 --- a/app/selenium_ui/base_page.py +++ b/app/selenium_ui/base_page.py @@ -119,15 +119,15 @@ def __wait_until(self, expected_condition, locator, time_out=timeout): elif ec_type == ec.invisibility_of_element_located: message += (f"Timed out after {time_out} sec waiting for {str(expected_condition)}. \n" - f"Locator: {locator}") + f"Locator: {locator}{str(expected_condition)}") elif ec_type == ec.frame_to_be_available_and_switch_to_it: message += (f"Timed out after {time_out} sec waiting for {str(expected_condition)}. \n" - f"Locator: {locator}") + f"Locator: {locator}{str(expected_condition)}") else: message += (f"Timed out after {time_out} sec waiting for {str(expected_condition)}. \n" - f"Locator: {locator}") + f"Locator: {locator}{str(expected_condition)}") return WebDriverWait(self.driver, time_out).until(expected_condition, message=message) From 4a0d54a3666f6b36b9c2c8f6f2e9a1a2725423a2 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Sat, 30 Dec 2023 17:19:51 +0100 Subject: [PATCH 106/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 6f09fd78c..2ce58fff2 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -243,6 +243,30 @@ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-14" } ] + }, + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-007de06d38fcd95c6", + "us-west-2": "snap-0edcc47a82eccdbd5", + "us-west-1": "snap-0cb31741d2aa37fde", + "us-east-1": "snap-075e397f427e6d6c8" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-14", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-4-14", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-4-14", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-4-14" + } + ] } ] } From b825a4421d21306813885a8aee4e8866206ba82e Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Sat, 30 Dec 2023 22:01:10 +0200 Subject: [PATCH 107/152] check debug --- app/selenium_ui/jsm/pages/customer_pages.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py index d9fafaab5..d0866d9b2 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ b/app/selenium_ui/jsm/pages/customer_pages.py @@ -124,8 +124,7 @@ def search_for_customer_to_share_with_react_ui(self, customer_name): self.wait_until_visible(RequestSelectors.share_request_search_field_react) 
self.action_chains().move_to_element(self.get_element(RequestSelectors.share_request_search_field_react)).\ send_keys(customer_name).perform() - self.wait_until_visible(RequestSelectors.share_request_dropdown_react) - # Chose random customer to share with + #self.wait_until_visible(RequestSelectors.share_request_dropdown_react) self.wait_until_visible(RequestSelectors.share_request_dropdown_one_elem_react) random_customer_name = random.choice([i.text for i in From 5cbc2821ec18c33af57917552815da1e667fb9d1 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Sun, 31 Dec 2023 14:23:58 +0200 Subject: [PATCH 108/152] fix actions chains --- app/selenium_ui/jsm/pages/customer_pages.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py index d0866d9b2..51a3f30ea 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ b/app/selenium_ui/jsm/pages/customer_pages.py @@ -122,6 +122,8 @@ def comment_request(self): def search_for_customer_to_share_with_react_ui(self, customer_name): self.wait_until_visible(RequestSelectors.share_request_button).click() self.wait_until_visible(RequestSelectors.share_request_search_field_react) + self.action_chains().move_to_element( + self.get_element(RequestSelectors.share_request_search_field_react)).click().perform() self.action_chains().move_to_element(self.get_element(RequestSelectors.share_request_search_field_react)).\ send_keys(customer_name).perform() #self.wait_until_visible(RequestSelectors.share_request_dropdown_react) From 6c2c844f988c30d64495a3114dca86ee1586ad2f Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Sun, 31 Dec 2023 14:26:07 +0200 Subject: [PATCH 109/152] remove 5410 jsm --- app/util/k8s/dcapt-snapshots.json | 53 ------------------------------- 1 file changed, 53 deletions(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 2ce58fff2..ad7039375 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -111,59 +111,6 @@ }, "jsm": { "versions": [ - { - "version": "5.4.10", - "data": [ - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-2": "snap-0381cc00e37231565", - "us-west-2": "snap-0b1fd6df7f4edf2cb", - "us-west-1": "snap-0d50a4c16236384a8", - "us-east-1": "snap-08622b061a708cca0" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-2": "snap-0e340e980918e45f6", - "us-west-2": "snap-0f717ad41f9234b17", - "us-west-1": "snap-0040e1af99841ae15", - "us-east-1": "snap-0052087e82b1cfe82" - } - ] - }, - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-10", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-4-10", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-4-10", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-10" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-10", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-4-10", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-4-10", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-4-10" - } - ] - } - ] - }, { "version": "5.12.0", "data": [ From 6014ad00c0204519828fecb49099d1bb5ea0821c Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Sun, 31 Dec 2023 
22:11:03 +0200 Subject: [PATCH 110/152] remove time import and debug functions --- app/selenium_ui/jsm/modules_customers.py | 1 - app/selenium_ui/jsm/pages/customer_pages.py | 6 ------ 2 files changed, 7 deletions(-) diff --git a/app/selenium_ui/jsm/modules_customers.py b/app/selenium_ui/jsm/modules_customers.py index 2580ac5a7..feadc04ab 100644 --- a/app/selenium_ui/jsm/modules_customers.py +++ b/app/selenium_ui/jsm/modules_customers.py @@ -150,7 +150,6 @@ def share_request_with_customer(webdriver, datasets): customer_request = CustomerRequest(webdriver, portal_id=datasets['customer_service_desk_id'], request_key=datasets['customer_request_key']) customer_request.go_to() - customer_request.if_error_message(datasets) customer_request.wait_for_page_loaded() @print_timing("selenium_customer_share_request_with_customer") diff --git a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py index 51a3f30ea..3776c8dbb 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ b/app/selenium_ui/jsm/pages/customer_pages.py @@ -1,5 +1,4 @@ import random -import time from datetime import datetime from packaging import version @@ -180,11 +179,6 @@ def share_request_react(self): self.wait_until_invisible(RequestSelectors.share_request_dropdown_one_elem_react) self.wait_until_clickable(RequestSelectors.share_request_button_request_widget).click() - def if_error_message(self, dataset): - from selenium.webdriver.common.by import By - if self.element_exists((By.CSS_SELECTOR, "p.cp-error-panel-message")): - print(self.get_element((By.CSS_SELECTOR, "p.cp-error-panel-message")).text) - print(f'Customer {dataset["customer_username"]} does not have access to {dataset["customer_service_desk_id"]}, {dataset["customer_service_key"]}') class Requests(BasePage): From 5e63a302ca1383f38c0abcc8a807967c2908bfbf Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Sun, 31 Dec 2023 22:17:03 +0200 Subject: [PATCH 111/152] fix comments --- app/selenium_ui/jsm/pages/customer_pages.py | 5 ++--- app/selenium_ui/jsm/pages/customer_selectors.py | 6 ++++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py index 3776c8dbb..faf9806dc 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ b/app/selenium_ui/jsm/pages/customer_pages.py @@ -125,11 +125,10 @@ def search_for_customer_to_share_with_react_ui(self, customer_name): self.get_element(RequestSelectors.share_request_search_field_react)).click().perform() self.action_chains().move_to_element(self.get_element(RequestSelectors.share_request_search_field_react)).\ send_keys(customer_name).perform() - #self.wait_until_visible(RequestSelectors.share_request_dropdown_react) self.wait_until_visible(RequestSelectors.share_request_dropdown_one_elem_react) - random_customer_name = random.choice([i.text for i in - self.get_elements(RequestSelectors.share_request_dropdown_one_elem_react)]) + random_customer_name = random.choice( + [i.text for i in self.get_elements(RequestSelectors.share_request_dropdown_one_elem_react)]) self.action_chains().move_to_element( self.get_element(RequestSelectors.share_request_search_field_arrow_react)).click().perform() diff --git a/app/selenium_ui/jsm/pages/customer_selectors.py b/app/selenium_ui/jsm/pages/customer_selectors.py index 07189dd18..ae94d67d1 100644 --- a/app/selenium_ui/jsm/pages/customer_selectors.py +++ b/app/selenium_ui/jsm/pages/customer_selectors.py @@ -90,8 +90,10 @@ class RequestSelectors: 
list_of_requests_types = (By.ID, "cv-request-content") # JSM 5.12 Changes in UI - share_request_search_field_react = (By.XPATH, "//div[starts-with(@id, 'react-select-') and contains(@id, '-placeholder')]") # JSM 5.12+ - share_request_search_field_arrow_react = (By.XPATH, "//div[contains(@class, 'indicatorContainer')]//span[contains(@role, 'img')]") + share_request_search_field_react = (By.XPATH, + "//div[starts-with(@id, 'react-select-') and contains(@id, '-placeholder')]") + share_request_search_field_arrow_react = (By.XPATH, + "//div[contains(@class, 'indicatorContainer')]//span[contains(@role, 'img')]") share_request_dropdown_react = (By.XPATH, "//div[starts-with(@id, 'react-select-') and contains(@id, '-listbox')]") share_request_dropdown_one_elem_react = (By.XPATH, "//div[starts-with(@id, 'react-select-') and contains(@id, 'option')]/div/div[2]/div[1]/span") From 80e8f6d85cf172775ec206fb7c1f65b8eb8abc0b Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 2 Jan 2024 15:18:07 +0200 Subject: [PATCH 112/152] fix version comparison --- app/selenium_ui/jsm/modules_customers.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/app/selenium_ui/jsm/modules_customers.py b/app/selenium_ui/jsm/modules_customers.py index feadc04ab..756e6d422 100644 --- a/app/selenium_ui/jsm/modules_customers.py +++ b/app/selenium_ui/jsm/modules_customers.py @@ -2,6 +2,8 @@ from selenium_ui.jsm.pages.customer_pages import Login, TopPanel, CustomerPortals, CustomerPortal, CustomerRequest, \ Requests, ViewRequestWithInsight import random +from packaging import version + REQUESTS = "requests" CUSTOMERS = "customers" @@ -157,7 +159,7 @@ def measure(): @print_timing("selenium_customer_share_request_with_customer:search_for_customer_to_share_with") def sub_measure(): - if webdriver.app_version.base_version.startswith('5.12'): + if webdriver.app_version >= version.parse('5.12'): customer_request.search_for_customer_to_share_with_react_ui(customer_name='performance_customer') else: customer_request.search_for_customer_to_share_with(customer_name='performance_customer') @@ -165,7 +167,7 @@ def sub_measure(): @print_timing("selenium_customer_share_request:share_request_with_customer") def sub_measure(): - if webdriver.app_version.base_version.startswith('5.12'): + if webdriver.app_version >= version.parse('5.12'): customer_request.share_request_react() else: customer_request.share_request() From b6efcac3fb1509eb0a1835afd9d8a757af007e9b Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Tue, 2 Jan 2024 22:27:41 +0200 Subject: [PATCH 113/152] fix flake8 --- app/util/k8s/terminate_cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 798eeb0f7..559fa796b 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -668,7 +668,7 @@ def terminate_open_id_providers(cluster_name=None): iam_client.delete_open_id_connect_provider(OpenIDConnectProviderArn=provider['Arn']) return if name == 'Alfred': - logging.info(f"Skipping Alfred Open ID provider") + logging.info("Skipping Alfred Open ID provider") continue persist_days = next((tag["Value"] for tag in tags if tag["Key"] == "persist_days"), None) if persist_days: @@ -899,7 +899,7 @@ def main(): delete_ebs_volumes_by_id(aws_region=args.aws_region, volumes=volumes) return - logging.info(f"--cluster_name parameter was not specified.") + logging.info("--cluster_name parameter was not specified.") logging.info("Searching for clusters 
to remove.") clusters = get_clusters_to_terminate() for cluster_name in clusters: From 398894bc5ceb703c8d9eec4122a5455c492aba6d Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Wed, 3 Jan 2024 11:34:14 +0100 Subject: [PATCH 114/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index ad7039375..76dee0830 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -216,6 +216,35 @@ ] } ] + }, + { + "version": "5.12.1", + "data": [ + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-011d04a19c6b93529", + "us-west-2": "snap-0beb90b0d0f478697", + "us-west-1": "snap-0ff5fc36c9666dfbe", + "us-east-1": "snap-0edfff503a2605803" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-12-1", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-12-1", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-12-1", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-12-1" + } + ] + } + ] } ] }, From 87ec512fc7c3f5987c4321f002c50f8ed0ceec53 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Wed, 3 Jan 2024 14:46:49 +0200 Subject: [PATCH 115/152] bamboo/bump-9-2-9 --- README.md | 2 +- app/util/bamboo/bamboo_dataset_generator/pom.xml | 2 +- app/util/k8s/dcapt.tfvars | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 1025cd740..c27c3688f 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.1.4` * Supported Bamboo versions: - * Bamboo [Long Term Support release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.5` + * Bamboo [Long Term Support release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.9` ## Support In case of technical questions, issues or problems with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml index 766cd6679..46b9342c4 100644 --- a/app/util/bamboo/bamboo_dataset_generator/pom.xml +++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml @@ -5,7 +5,7 @@ com.atlassian.bamboo bamboo-specs-parent - 9.2.5 + 9.2.9 diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 82d3ef909..da05f7679 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -374,8 +374,8 @@ crowd_db_master_password = "Password1!" # By default, latest supported by DCAPT version is set. 
# https://hub.docker.com/r/atlassian/bamboo/tags # https://hub.docker.com/r/atlassian/bamboo-agent-base/tags -bamboo_version_tag = "9.2.5" -bamboo_agent_version_tag = "9.2.5" +bamboo_version_tag = "9.2.9" +bamboo_agent_version_tag = "9.2.9" # Helm chart version of Bamboo and Bamboo agent instances # bamboo_helm_chart_version = "" From 345068db772cebf7d96a4546d4dc9dab6ebe0dae Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Wed, 3 Jan 2024 15:02:37 +0200 Subject: [PATCH 116/152] crowd/bump-5-2-2 --- README.md | 2 +- app/util/k8s/dcapt.tfvars | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 1025cd740..e2c564afe 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.8` and `7.21.20` * Supported Crowd versions: - * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.1.4` + * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.2.2` * Supported Bamboo versions: * Bamboo [Long Term Support release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.5` diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 82d3ef909..5410fec75 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -314,7 +314,7 @@ bitbucket_db_master_password = "Password1!" ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -crowd_version_tag = "5.1.4" +crowd_version_tag = "5.2.2" # Helm chart version of Crowd and Crowd agent instances. By default the latest version is installed. 
# crowd_helm_chart_version = "" From 6c49ae086bfbb9e31a193b639fd6b7beeb82731d Mon Sep 17 00:00:00 2001 From: Yevhen Ivantsov Date: Fri, 5 Jan 2024 10:09:28 +1100 Subject: [PATCH 117/152] Use retry lib in cleanup script --- app/util/k8s/terminate_cluster.py | 16 ++++++++++++---- requirements.txt | 1 + 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 559fa796b..554c7a822 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -8,6 +8,11 @@ from boto3.exceptions import Boto3Error from botocore import exceptions +from retry import retry + +DEFAULT_RETRY_COUNT = 3 +DEFAULT_RETRY_DELAY = 10 + US_EAST_2 = "us-east-2" US_EAST_1 = "us-east-1" REGIONS = [US_EAST_2, US_EAST_1] @@ -254,7 +259,7 @@ def delete_hosted_zone_record_if_exists(aws_region, cluster_name): except Exception as e: logging.error(f"Unexpected error occurs: {e}") - +@retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_lb(aws_region, vpc_id): elb_client = boto3.client('elb', region_name=aws_region) try: @@ -299,6 +304,8 @@ def wait_for_nat_gateway_delete(ec2, nat_gateway_id): logging.error(f"NAT gateway with id {nat_gateway_id} was not deleted in {timeout} seconds.") + +@retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_nat_gateway(aws_region, vpc_id): ec2_client = boto3.client('ec2', region_name=aws_region) filters = [{'Name': 'vpc-id', 'Values': [f'{vpc_id}', ]}, ] @@ -317,7 +324,7 @@ def delete_nat_gateway(aws_region, vpc_id): except Boto3Error as e: logging.error(f"Deleting NAT gateway with id {nat_gateway_id} failed with error: {e}") - +@retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_igw(ec2_resource, vpc_id): vpc_resource = ec2_resource.Vpc(vpc_id) igws = vpc_resource.internet_gateways.all() @@ -339,6 +346,7 @@ def delete_igw(ec2_resource, vpc_id): logging.error(f"Deleting igw failed with error: {e}") +@retry(Exception, tries=12, delay=DEFAULT_RETRY_DELAY) def delete_subnets(ec2_resource, vpc_id, aws_region): vpc_resource = ec2_resource.Vpc(vpc_id) subnets_all = vpc_resource.subnets.all() @@ -363,7 +371,7 @@ def delete_subnets(ec2_resource, vpc_id, aws_region): except Boto3Error as e: logging.error(f"Delete of subnet failed with error: {e}") - +@retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_route_tables(ec2_resource, vpc_id): vpc_resource = ec2_resource.Vpc(vpc_id) rtbs = vpc_resource.route_tables.all() @@ -379,7 +387,7 @@ def delete_route_tables(ec2_resource, vpc_id): except Boto3Error as e: logging.error(f"Delete of route table failed with error: {e}") - +@retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_security_groups(ec2_resource, vpc_id): vpc_resource = ec2_resource.Vpc(vpc_id) sgps = vpc_resource.security_groups.all() diff --git a/requirements.txt b/requirements.txt index 137b2d299..e77195061 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,3 +10,4 @@ packaging==23.1 prettytable==3.9.0 bzt==1.16.26 boto3==1.28.56 +retry==0.9.2 From 15fc1e96470fd708b813e6b1614548cd1332dc79 Mon Sep 17 00:00:00 2001 From: Yevhen Ivantsov Date: Fri, 5 Jan 2024 10:13:53 +1100 Subject: [PATCH 118/152] Fix linting --- app/util/k8s/terminate_cluster.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 554c7a822..ce6160a67 100644 --- 
a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -259,6 +259,7 @@ def delete_hosted_zone_record_if_exists(aws_region, cluster_name): except Exception as e: logging.error(f"Unexpected error occurs: {e}") + @retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_lb(aws_region, vpc_id): elb_client = boto3.client('elb', region_name=aws_region) @@ -304,7 +305,6 @@ def wait_for_nat_gateway_delete(ec2, nat_gateway_id): logging.error(f"NAT gateway with id {nat_gateway_id} was not deleted in {timeout} seconds.") - @retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_nat_gateway(aws_region, vpc_id): ec2_client = boto3.client('ec2', region_name=aws_region) @@ -324,6 +324,7 @@ def delete_nat_gateway(aws_region, vpc_id): except Boto3Error as e: logging.error(f"Deleting NAT gateway with id {nat_gateway_id} failed with error: {e}") + @retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_igw(ec2_resource, vpc_id): vpc_resource = ec2_resource.Vpc(vpc_id) @@ -371,6 +372,7 @@ def delete_subnets(ec2_resource, vpc_id, aws_region): except Boto3Error as e: logging.error(f"Delete of subnet failed with error: {e}") + @retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_route_tables(ec2_resource, vpc_id): vpc_resource = ec2_resource.Vpc(vpc_id) @@ -387,6 +389,7 @@ def delete_route_tables(ec2_resource, vpc_id): except Boto3Error as e: logging.error(f"Delete of route table failed with error: {e}") + @retry(Exception, tries=DEFAULT_RETRY_COUNT, delay=DEFAULT_RETRY_DELAY) def delete_security_groups(ec2_resource, vpc_id): vpc_resource = ec2_resource.Vpc(vpc_id) From 05243a590e68309b162c71feb5f9000178d214b5 Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Fri, 5 Jan 2024 11:00:09 +0100 Subject: [PATCH 119/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 76dee0830..e7f0bcb09 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -243,6 +243,30 @@ "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-12-1" } ] + }, + { + "type": "ebs", + "size": "small", + "snapshots": [ + { + "us-east-2": "snap-053193245de30778c", + "us-west-2": "snap-090ac1a5a1738bcca", + "us-west-1": "snap-044bbf0d8785d88a6", + "us-east-1": "snap-0c18a374ecc344221" + } + ] + }, + { + "type": "rds", + "size": "small", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-12-1", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-12-1", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-12-1", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-12-1" + } + ] } ] } From b35f5ea7ed68953b2576caa9cd133e2971c9b913 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 5 Jan 2024 12:41:39 +0200 Subject: [PATCH 120/152] bump 5.12.1 jsm --- README.md | 2 +- app/util/k8s/dcapt-snapshots.json | 53 ------------------------------- 2 files changed, 1 insertion(+), 54 deletions(-) diff --git a/README.md b/README.md index 1025cd740..73969f137 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ This repository contains Taurus scripts for performance testing of Atlassian 
Dat * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.1` and `9.4.10` * Supported Jira Service Management versions: - * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.0` and `5.4.10` + * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.1` and `5.4.10` * Supported Confluence versions: * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.14` and `8.5.4` diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index e7f0bcb09..0e2008e11 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -111,59 +111,6 @@ }, "jsm": { "versions": [ - { - "version": "5.12.0", - "data": [ - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-2": "snap-04c3a6b9c4b2bf64c", - "us-west-2": "snap-00f9f5eae07afa172", - "us-west-1": "snap-0e5788fc1422bee45", - "us-east-1": "snap-01e1bb89b157d01de" - } - ] - }, - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-12-0", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-12-0", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-12-0", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-12-0" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-2": "snap-0b2cf3b7f74da3fa8", - "us-west-2": "snap-01143a8a415ce534d", - "us-west-1": "snap-06eec09639ee66634", - "us-east-1": "snap-0045671a003a875bf" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-12-0", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-12-0", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-12-0", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-12-0" - } - ] - } - ] - }, { "version": "5.4.14", "data": [ From c75bac99d66f5bb1b5a24dd5d288d769f9d4f972 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Mon, 8 Jan 2024 11:46:46 +0100 Subject: [PATCH 121/152] Docs: TF execution environment --- app/reports_generation/README.md | 4 +- app/reports_generation/bamboo_profile.yml | 10 +- app/reports_generation/csv_chart_generator.py | 1 + .../performance_profile.yml | 7 +- app/reports_generation/scale_profile.yml | 9 +- .../scripts/csv_aggregator.py | 4 +- app/reports_generation/scripts/judgement.py | 4 +- .../scripts/results_archivator.py | 4 +- .../scripts/summary_aggregator.py | 6 +- app/reports_generation/scripts/utils.py | 15 +- app/selenium_ui/jsm_ui_customers.py | 4 +- app/util/k8s/README.MD | 29 +- app/util/k8s/bzt_on_pod.sh | 46 +++ ...s-performance-toolkit-user-guide-bamboo.md | 179 +++++------ ...erformance-toolkit-user-guide-bitbucket.md | 288 +++++++++--------- ...rformance-toolkit-user-guide-confluence.md | 261 ++++++++-------- ...ps-performance-toolkit-user-guide-crowd.md | 284 +++++++++-------- ...pps-performance-toolkit-user-guide-jira.md | 275 +++++++++-------- ...apps-performance-toolkit-user-guide-jsm.md | 286 ++++++++--------- 19 files changed, 913 insertions(+), 803 deletions(-) create mode 
100644 app/util/k8s/bzt_on_pod.sh diff --git a/app/reports_generation/README.md b/app/reports_generation/README.md index 19b24515d..d1beca92f 100644 --- a/app/reports_generation/README.md +++ b/app/reports_generation/README.md @@ -7,13 +7,13 @@ To create reports, run the
`python csv_chart_generator.py [performance_profile.yml or scale_profile.yml]` command from the `reports_generation` folder. The aggregated .csv files, charts and summary report are stored in the `results/reports` directory. -Before run, you should edit `performance_profile.yml` or `scale_profile.yml` and set appropriate `fullPath` values. +Before run, you should edit `performance_profile.yml` or `scale_profile.yml` and set appropriate `relativePath` values. **Configuration** - `column_name` - column name from results.csv used for aggregation - `runName` - label for specific run - `runType` - label for run type -- `fullPath` - the full path to result folder of specific run +- `relativePath` - the relative path to result folder of specific run starting from dc-app-performance-toolkit folder - `index_col` - index column - `title` - chart title (also this value is used to generate file name) - `image_height_px` - chart image height in pixels diff --git a/app/reports_generation/bamboo_profile.yml b/app/reports_generation/bamboo_profile.yml index 405bef746..288080238 100644 --- a/app/reports_generation/bamboo_profile.yml +++ b/app/reports_generation/bamboo_profile.yml @@ -1,17 +1,17 @@ # Defines which column from test runs is used for aggregated report. Default is "90% Line" column_name: "90% Line" runs: - # fullPath should contain a full path to the directory with run results. - # E.g. /home/$USER/dc-app-performance-toolkit/app/results/bamboo/2021-11-00_17-41-08 + # relativePath should contain a relative path to the directory with run results starting from dc-app-performance-toolkit folder. + # E.g. relativePath: "./app/results/bamboo/2024-01-01_10-10-10" - runName: "without app" runType: "baseline" - fullPath: "" + relativePath: "./app/results/bamboo/{TIMESTAMP}" - runName: "with app" runType: "experiment" - fullPath: "" + relativePath: "./app/results/bamboo/{TIMESTAMP}" - runName: "with app and app-specific actions" runType: "experiment" - fullPath: "" + relativePath: "./app/results/bamboo/{TIMESTAMP}" # Chart generation config index_col: "Action" diff --git a/app/reports_generation/csv_chart_generator.py b/app/reports_generation/csv_chart_generator.py index 3f5b024e3..be6ad8c46 100644 --- a/app/reports_generation/csv_chart_generator.py +++ b/app/reports_generation/csv_chart_generator.py @@ -25,6 +25,7 @@ def main(): def __get_results_dir(config, product_name) -> Path: path = (Path(__file__).absolute().parents[1] / "results" / "reports" / f"{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{product_name}_{config['profile']}") + print(f"Results dir: {path}") path.mkdir(parents=True, exist_ok=True) return path diff --git a/app/reports_generation/performance_profile.yml b/app/reports_generation/performance_profile.yml index 15b3dc34b..d050e58ca 100644 --- a/app/reports_generation/performance_profile.yml +++ b/app/reports_generation/performance_profile.yml @@ -1,13 +1,14 @@ # Defines which column from test runs is used for aggregated report. Default is "90% Line" column_name: "90% Line" runs: - # fullPath should contain a full path to the directory with run results. E.g. /home/$USER/dc-app-performance-toolkit/app/results/jira/2019-08-06_17-41-08 + # relativePath should contain a relative path to the directory with run results starting from dc-app-performance-toolkit folder. + # E.g. 
relativePath: "./app/results/jira/2024-01-01_10-10-10" - runName: "without app" runType: "baseline" - fullPath: "" + relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}" - runName: "with app" runType: "experiment" - fullPath: "" + relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}" # Chart generation config index_col: "Action" diff --git a/app/reports_generation/scale_profile.yml b/app/reports_generation/scale_profile.yml index 616793e57..5f7ab7477 100644 --- a/app/reports_generation/scale_profile.yml +++ b/app/reports_generation/scale_profile.yml @@ -1,16 +1,17 @@ # Defines which column from test runs is used for aggregated report. Default is "90% Line" column_name: "90% Line" runs: - # fullPath should contain a full path to the directory with run results. E.g. /home/$USER/dc-app-performance-toolkit/app/results/jira/2019-08-06_18-41-08 + # relativePath should contain a relative path to the directory with run results starting from dc-app-performance-toolkit folder. + # E.g. relativePath: "./app/results/jira/2024-01-01_10-10-10" - runName: "1 Node" runType: "baseline" - fullPath: "" + relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}" - runName: "2 Nodes" runType: "experiment" - fullPath: "" + relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}" - runName: "4 Nodes" runType: "experiment" - fullPath: "" + relativePath: "./app/results/{PRODUCT}/{TIMESTAMP}" # Chart generation configs index_col: "Action" diff --git a/app/reports_generation/scripts/csv_aggregator.py b/app/reports_generation/scripts/csv_aggregator.py index 9caa68e17..2312c2eeb 100644 --- a/app/reports_generation/scripts/csv_aggregator.py +++ b/app/reports_generation/scripts/csv_aggregator.py @@ -2,7 +2,7 @@ from pathlib import Path from typing import List -from scripts.utils import validate_file_exists, resolve_path, validate_config +from scripts.utils import validate_file_exists, resolve_relative_path, validate_config RESULTS_CSV_FILE_NAME = "results.csv" @@ -36,7 +36,7 @@ def __get_tests_results(config: dict) -> List[ResultsCSV]: column_name = config['column_name'] for run in config['runs']: value_by_action = {} - absolute_file_path = resolve_path(run['fullPath']) / RESULTS_CSV_FILE_NAME + absolute_file_path = resolve_relative_path(run['relativePath']) / RESULTS_CSV_FILE_NAME with absolute_file_path.open(mode='r') as fs: for row in csv.DictReader(fs): value_by_action[row['Label']] = {column_name: row[column_name], 'App-specific': row['App specific']} diff --git a/app/reports_generation/scripts/judgement.py b/app/reports_generation/scripts/judgement.py index 356637470..ae99f1a34 100644 --- a/app/reports_generation/scripts/judgement.py +++ b/app/reports_generation/scripts/judgement.py @@ -181,8 +181,8 @@ def judge(baseline_dir, tested_dirs, output_dir): def __get_judgement_kwargs(config): baseline_result_dir = next((run for run in config['runs'] - if run['runType'] == constants.DCAPTRunType.baseline))['fullPath'] - tested_result_dirs = [run['fullPath'] for run in config['runs'] + if run['runType'] == constants.DCAPTRunType.baseline))['relativePath'] + tested_result_dirs = [run['relativePath'] for run in config['runs'] if run['runType'] == constants.DCAPTRunType.experiment] return { diff --git a/app/reports_generation/scripts/results_archivator.py b/app/reports_generation/scripts/results_archivator.py index 25fe20b24..290fefb92 100644 --- a/app/reports_generation/scripts/results_archivator.py +++ b/app/reports_generation/scripts/results_archivator.py @@ -1,7 +1,7 @@ from pathlib import Path from shutil import make_archive -from 
scripts.utils import validate_config, clean_str +from scripts.utils import validate_config, clean_str, resolve_relative_path def __zip_folder(folder_path: Path, destination_path: Path) -> Path: @@ -12,7 +12,7 @@ def __zip_folder(folder_path: Path, destination_path: Path) -> Path: def archive_results(config: dict, results_dir: Path): validate_config(config) for run in config['runs']: - results_folder_path = Path(run["fullPath"]) + results_folder_path = resolve_relative_path(run['relativePath']) destination_name = f"{config['profile']}_run_{clean_str(run['runName'])}_{results_folder_path.name}" destination_path = results_dir / destination_name archive_path = __zip_folder(results_folder_path, destination_path) diff --git a/app/reports_generation/scripts/summary_aggregator.py b/app/reports_generation/scripts/summary_aggregator.py index 3eebd3926..a47265bc7 100644 --- a/app/reports_generation/scripts/summary_aggregator.py +++ b/app/reports_generation/scripts/summary_aggregator.py @@ -2,7 +2,7 @@ from typing import List from constants import SUPPORTED_TEST_ATLASSIAN_PRODUCTS -from scripts.utils import validate_file_exists, resolve_path, validate_config +from scripts.utils import validate_file_exists, resolve_relative_path, validate_config SUMMARY_FILE_NAME = "results_summary.log" DELIMITER = ('\n================================================================================' @@ -12,9 +12,9 @@ def __get_summary_files(config: dict) -> List[Path]: summary_files = [] for run in config['runs']: - file_path = resolve_path(run['fullPath']) / SUMMARY_FILE_NAME + file_path = resolve_relative_path(run['relativePath']) / SUMMARY_FILE_NAME validate_file_exists(file_path, f"File {file_path} does not exists") - summary_files.append(resolve_path(run['fullPath']) / SUMMARY_FILE_NAME) + summary_files.append(resolve_relative_path(run['relativePath']) / SUMMARY_FILE_NAME) return summary_files diff --git a/app/reports_generation/scripts/utils.py b/app/reports_generation/scripts/utils.py index e21b49692..c6cf9ccab 100644 --- a/app/reports_generation/scripts/utils.py +++ b/app/reports_generation/scripts/utils.py @@ -8,8 +8,17 @@ import yaml -def resolve_path(str_path: str) -> Path: - return Path(str_path).resolve().expanduser() +def resolve_relative_path(str_path: str) -> Path: + """ + Resolve relative path from .yml scenario configuration file. + Expected working dir for csv_chart_generator.py: ./dc-app-performance-toolkit/app/reports_generation + Expected relative path starting from ./dc-app-performance-toolkit folder. 
+ """ + expected_working_dir_name = 'reports_generation' + working_dir = Path().resolve().expanduser() + if working_dir.name != expected_working_dir_name: + raise SystemExit(f"ERROR: expected working dir name: {expected_working_dir_name}, actual: {working_dir.name}") + return Path().resolve().expanduser().parents[1] / str_path def validate_str_is_not_blank(config: dict, key: str): @@ -63,7 +72,7 @@ def validate_config(config: dict): raise SystemExit('Config key "run" should be a dictionary') validate_str_is_not_blank(run, 'runName') - validate_str_is_not_blank(run, 'fullPath') + validate_str_is_not_blank(run, 'relativePath') def clean_str(string: str): diff --git a/app/selenium_ui/jsm_ui_customers.py b/app/selenium_ui/jsm_ui_customers.py index 4e806e496..71b9fbe0d 100644 --- a/app/selenium_ui/jsm_ui_customers.py +++ b/app/selenium_ui/jsm_ui_customers.py @@ -40,8 +40,8 @@ def test_1_selenium_customer_add_comment(jsm_webdriver, jsm_datasets, jsm_screen """ -# # def test_1_selenium_customer_custom_action(jsm_webdriver, jsm_datasets, jsm_screen_shots): -# # extension_ui_customers.app_specific_action(jsm_webdriver, jsm_datasets) +# def test_1_selenium_customer_custom_action(jsm_webdriver, jsm_datasets, jsm_screen_shots): +# extension_ui_customers.app_specific_action(jsm_webdriver, jsm_datasets) """ To enable specific test for Insight below, set 'True' next to `insight` variable (False by default) in `app/jsm.yml` diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index 03141257d..270b8d600 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -18,6 +18,7 @@ docker run --pull=always --env-file aws_envs \ ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ +-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./uninstall.sh -c conf.tfvars ``` @@ -44,6 +45,7 @@ If state files are needed, e.g. 
there are other running clusters for other produ ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ +-v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./uninstall.sh -t -c conf.tfvars ``` @@ -53,7 +55,9 @@ Set AWS credential in [aws_envs](./aws_envs) file and run command: ``` bash export ENVIRONMENT_NAME=your_environment_name export REGION=us-east-2 +``` +``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/k8s_logs:/data-center-terraform/k8s_logs" \ -v "/$PWD/logs:/data-center-terraform/logs" \ @@ -65,10 +69,33 @@ Set AWS credential in [aws_envs](./aws_envs) file and run command: ``` bash export ENVIRONMENT_NAME=your_environment_name export REGION=us-east-2 +``` +``` bash docker run --pull=always --env-file aws_envs \ --workdir="/data-center-terraform" \ --entrypoint="python" \ -v "/$PWD/terminate_cluster.py:/data-center-terraform/terminate_cluster.py" \ atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-cluster --aws_region $REGION -``` \ No newline at end of file +``` + +# Connect to product pod +Set your environment name +``` bash +export ENVIRONMENT_NAME=your_environment_name +export REGION=us-east-2 +``` + +SSH to `atlassianlabs/terraform` container +``` bash +docker run --pull=always --env-file aws_envs \ +-e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ +-e REGION=$REGION \ +-it atlassianlabs/terraform bash +``` + +Connect to the product pod. Example below for jira pod with number 0. For other product or pod number change `PRODUCT_POD` accordingly. +``` bash +export PRODUCT_POD=jira-0 +aws eks update-kubeconfig --name atlas-$ENVIRONMENT_NAME-cluster --region $REGION +kubectl exec -it $PRODUCT_POD -n atlassian -- bash \ No newline at end of file diff --git a/app/util/k8s/bzt_on_pod.sh b/app/util/k8s/bzt_on_pod.sh new file mode 100644 index 000000000..fdca91b70 --- /dev/null +++ b/app/util/k8s/bzt_on_pod.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +DCAPT_DOCKER_IMAGE="atlassian/dcapt" +echo "INFO: DCAPT docker image: $DCAPT_DOCKER_IMAGE" + +if [[ -z "$ENVIRONMENT_NAME" ]]; then + echo "ERROR: ENVIRONMENT_NAME variable is not set." + exit 1 +fi +echo "INFO: Environment name: $ENVIRONMENT_NAME" + +if [[ -z "$REGION" ]]; then + echo "ERROR: REGION variable is not set." + exit 1 +fi +echo "INFO: AWS REGION: $REGION" + +if [ $# -eq 0 ]; then + echo "ERROR: No arguments supplied. Product .yml file need to be passed as argument. E.g. 
jira.yml" + exit 1 +fi +echo "INFO: Product .yml: $1" + +echo "INFO: Update kubeconfig" +aws eks update-kubeconfig --name atlas-"$ENVIRONMENT_NAME"-cluster --region "$REGION" + +echo "INFO: Get execution environment pod name" +exec_pod_name=$(kubectl get pods -n atlassian -l=exec=true --no-headers -o custom-columns=":metadata.name") +echo "INFO: Execution environment pod name: $exec_pod_name" + +echo "INFO: Cleanup dc-app-performance-toolkit folder on the exec env pod" +kubectl exec -it "$exec_pod_name" -n atlassian -- rm -rf /dc-app-performance-toolkit + +echo "INFO: Copy latest dc-app-performance-toolkit folder to the exec env pod" +kubectl cp --retries 10 dc-app-performance-toolkit atlassian/"$exec_pod_name":/dc-app-performance-toolkit + +echo "INFO: Run bzt on the exec env pod" +kubectl exec -it "$exec_pod_name" -n atlassian -- docker run --pull=always --shm-size=4g -v "/dc-app-performance-toolkit:/dc-app-performance-toolkit" $DCAPT_DOCKER_IMAGE "$1" +sleep 10 + +echo "INFO: Copy results folder from the exec env pod to local" +kubectl cp --retries 10 atlassian/"$exec_pod_name":dc-app-performance-toolkit/app/results dc-app-performance-toolkit/app/results +if [[ $? -ne 0 ]]; then + echo "ERROR: Copy results folder failed" + exit 1 +fi \ No newline at end of file diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index 5e1193d68..c7fb24fd7 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-11-27" +date: "2024-01-05" --- # Data Center App Performance Toolkit User Guide For Bamboo @@ -19,7 +19,7 @@ test results for the Marketplace approval process. Preferably, use the below rec 1. [Set up an enterprise-scale environment Bamboo Data Center on AWS](#instancesetup). 2. [App-specific actions development](#appspecificaction). -3. [Set up an execution environment for the toolkit](#executionhost). +3. [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). 4. [Running the test scenarios from execution environment against enterprise-scale Bamboo Data Center](#testscenario). --- @@ -27,9 +27,16 @@ test results for the Marketplace approval process. Preferably, use the below rec ## 1. Set up an enterprise-scale environment Bamboo Data Center on k8s #### EC2 CPU Limit -The installation of Bamboo requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. +{{% warning %}} +The installation of DC environment and execution pod requires at least **24** vCPU Cores. +Newly created AWS account often has vCPU limit set to low numbers like 5 vCPU per region. +Check your account current vCPU limit for On-Demand Standard instances by visiting [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) page. +**Applied quota value** is the current CPU limit in the specific region. +Make that current limit is large enough to deploy new cluster. 
The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. +Recommended limit is 30. +{{% /warning %}} #### Setup Bamboo Data Center with an enterprise-scale dataset on k8s @@ -45,6 +52,7 @@ specifically for performance testing during the DC app review process. 4. Set AWS access keys created in step1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) 5. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-bamboo` - `products` - `bamboo` @@ -85,11 +93,6 @@ Data dimensions and values for default enterprise-scale dataset uploaded are lis --- -#### Terminate Bamboo Data Center -Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. - ---- - {{% note %}} You are responsible for the cost of the AWS services running during the reference deployment. For more information, go to [aws.amazon.com/pricing](https://aws.amazon.com/ec2/pricing/). @@ -207,19 +210,13 @@ Note, that `locust_app_specific_action` action execution will start in some time --- -## 3. Setting up an execution environment +### 3. Setting up load configuration for Enterprise-scale runs -For generating performance results suitable for Marketplace approval process use dedicated execution environment. This -is a separate AWS EC2 instance to run the toolkit from. Running the toolkit from a dedicated instance but not from a -local machine eliminates network fluctuations and guarantees stable CPU and memory performance. +Default TerraForm deployment [configuration](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/dcapt.tfvars) +already has a dedicated execution environment pod to run tests from. For more details see `Execution Environment Settings` section in `dcapt.tfvars` file. -1. Go to GitHub and create a fork of [dc-app-performance-toolkit](https://github.com/atlassian/dc-app-performance-toolkit). -1. Clone the fork locally, then edit the `bamboo.yml` configuration file. Set enterprise-scale Bamboo Data Center parameters: - -{{% warning %}} -Do not push to the fork real `application_hostname`, `admin_login` and `admin_password` values for security reasons. -Instead, set those values directly in `.yml` file on execution environment instance. -{{% /warning %}} +1. Check the `bamboo.yml` configuration file. If load configuration settings were changed for dev runs, make sure parameters + were changed back to the defaults: ``` yaml application_hostname: bamboo_host_name or public_ip # Bamboo DC hostname without protocol and port e.g. test-bamboo.atlassian.com or localhost @@ -238,25 +235,8 @@ Instead, set those values directly in `.yml` file on execution environment insta parallel_plans_count: 40 # number of parallel plans execution start_plan_timeout: 60 # maximum timeout of plan to start default_dataset_plan_duration: 60 # expected plan execution duration - ``` - -1. Push your changes to the forked repository. -1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. - * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) - * Storage size: `30` GiB -1. 
Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) - or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). - - ```bash - ssh -i path_to_pem_file ubuntu@INSTANCE_PUBLIC_IP ``` -1. Install [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Setup manage Docker - as a [non-root user](https://docs.docker.com/engine/install/linux-postinstall). -1. Clone forked repository. - - You'll need to run the toolkit for each [test scenario](#testscenario) in the next section. --- @@ -271,14 +251,29 @@ This scenario helps to identify basic performance issues. To receive performance baseline results **without** an app installed and **without** app-specific actions (use code from `master` branch): -1. Use SSH to connect to execution environment. -1. Run toolkit with docker from the execution environment instance: - +1. Before run: + * Make sure `bamboo.yml` and toolkit code base has default configuration from the `master` branch. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * `standalone_extension` set to 0. App-specific actions are not needed for Run1 and Run2. + * `standalone_extension_locust` set to 0. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml + export ENVIRONMENT_NAME=your_environment_name ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh bamboo.yml + ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/bamboo/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary - `results.csv`: aggregated .csv file with all actions and timings @@ -293,14 +288,23 @@ the next steps. For an enterprise-scale environment run, the acceptable success ##### Run 2 (~50 min) -**Performance results generation with the app installed (still use master branch):** +To receive performance results with an app installed (still use master branch): -1. Run toolkit with docker from the execution environment instance: +1. Install the app you want to test. +1. Setup app license. +1. 
Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh bamboo.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to @@ -311,15 +315,30 @@ the next steps. For an enterprise-scale environment run, the acceptable success To receive results for Bamboo DC **with app** and **with app-specific actions**: -1. Apply app-specific code changes to a new branch of forked repo. -1. Use SSH to connect to execution environment. -1. Pull cloned fork repo branch with app-specific actions. -1. Run toolkit with docker from the execution environment instance: +1. Before run: + * Make sure `bamboo.yml` and toolkit code base has code base with your developed app-specific actions. + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * `standalone_extension` set to non 0 and .jmx file has standalone actions implementation in case of JMeter app-specific actions. + * `standalone_extension_locust` set to 1 and Locust app-specific actions code base applied in case of Locust app-specific actions. + * [test_1_selenium_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/bamboo_ui.py#L51-L52) is uncommented and has implementation in case of Selenium app-specific actions. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh bamboo.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to @@ -330,42 +349,25 @@ the next steps. For an enterprise-scale environment run, the acceptable success To generate a performance regression report: -1. Use SSH to connect to execution environment. -1. Install and activate the `virtualenv` as described in `dc-app-performance-toolkit/README.md` -1. 
Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `bamboo_profile.yml` file: - - Under `runName: "without app"`, in the `fullPath` key, insert the full path to results directory of [Run 1](#regressionrun1). - - Under `runName: "with app"`, in the `fullPath` key, insert the full path to results directory of [Run 2](#regressionrun2). - - Under `runName: "with app and app-specific actions"`, in the `fullPath` key, insert the full path to results directory of [Run 3](#run3). -1. Run the following command: +1. Edit the `./app/reports_generation/bamboo_profile.yml` file: + - Under `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). + - Under `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). + - Under `runName: "with app and app-specific actions"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash - python csv_chart_generator.py bamboo_profile.yml + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py bamboo_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file - (with consolidated scenario results), the `.png` chart file and bamboo performance scenario summary report. - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local machine terminal (Git bash terminal for Windows) run command: - - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` - -1. Once completed, in the `./reports` folder you will be able to review the action timings with and without your app to - see its impact on the performance of the instance. If you see an impact (>20%) on any action timing, we recommend - taking a look into the app implementation to understand the root cause of this delta. +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. + If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. 
+In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -381,9 +383,8 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support -For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. - If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). +For failed cluster uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). -In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +In case of any technical questions or issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 194c783fb..1285ca13b 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-11-27" +date: "2024-01-05" --- # Data Center App Performance Toolkit User Guide For Bitbucket @@ -21,7 +21,7 @@ In this document, we cover the use of the Data Center App Performance Toolkit on **[Enterprise-scale environment](#mainenvironmententerprise)**: Bitbucket Data Center environment used to generate Data Center App Performance Toolkit test results for the Marketplace approval process. 4. [Set up an enterprise-scale environment Bitbucket Data Center on AWS](#instancesetup). -5. [Set up an execution environment for the toolkit](#executionhost). +5. [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). 6. [Running the test scenarios from execution environment against enterprise-scale Bitbucket Data Center](#testscenario). --- @@ -64,6 +64,7 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase 4. Set AWS access keys created in step1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) 5. Set **required** variables in `dcapt-small.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-bitbucket-small` - `products` - `bitbucket` @@ -177,24 +178,31 @@ After adding your custom app-specific actions, you should now be ready to run th {{% warning %}} It is recommended to terminate a development environment before creating an enterprise-scale environment. Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. 
+In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} #### EC2 CPU Limit -The installation of 4-nodes Bitbucket requires **48** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. +{{% warning %}} +The installation of 4-pods DC environment and execution pod requires at least **40** vCPU Cores. +Newly created AWS account often has vCPU limit set to low numbers like 5 vCPU per region. +Check your account current vCPU limit for On-Demand Standard instances by visiting [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) page. +**Applied quota value** is the current CPU limit in the specific region. +Make that current limit is large enough to deploy new cluster. The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. - +Recommended limit is 50. +{{% /warning %}} ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. Monthly charges will be based on your actual usage of AWS services, and may vary from the estimates the Calculator has provided. *The prices below are approximate and may vary depending on factors such as (region, instance type, deployment type of DB, etc.) -| Stack | Estimated hourly cost ($) | -| ----- | ------------------------- | -| One Node Bitbucket DC | 1.4 - 2.0 | -| Two Nodes Bitbucket DC | 1.7 - 2.5 | -| Four Nodes Bitbucket DC | 2.4 - 3.6 | +| Stack | Estimated hourly cost ($) | +|-----------------------|---------------------------| +| One pod Bitbucket DC | 1 - 2 +| Two pods Bitbucket DC | 1.5 - 2.5 +| Four pods Bitbucket DC | 2.5 - 4 #### Setup Bitbucket Data Center enterprise-scale environment on k8s. @@ -220,11 +228,18 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large 4. Set AWS access keys created in step1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) 5. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-bitbucket-large` - `products` - `bitbucket` - `bitbucket_license` - one-liner of valid bitbucket license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use this server id for generation `BX02-9YO1-IN86-LO5G`. + {{% /note %}} + 6. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). 7. 
From local terminal (Git Bash for Windows users) start the installation (~40min): @@ -237,33 +252,20 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use this server id for generation `BX02-9YO1-IN86-LO5G`. -{{% /note %}} - {{% note %}} All the datasets use the standard `admin`/`admin` credentials. It's recommended to change default password from UI account page for security reasons. {{% /note %}} -{{% warning %}} -Terminate cluster when it is not used for performance results generation. -{{% /warning %}} - --- -### 5. Setting up an execution environment +### 5. Setting up load configuration for Enterprise-scale runs -For generating performance results suitable for Marketplace approval process use dedicated execution environment. This is a separate AWS EC2 instance to run the toolkit from. Running the toolkit from a dedicated instance but not from a local machine eliminates network fluctuations and guarantees stable CPU and memory performance. +Default TerraForm deployment [configuration](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/dcapt.tfvars) +already has a dedicated execution environment pod to run tests from. For more details see `Execution Environment Settings` section in `dcapt.tfvars` file. -1. Go to GitHub and create a fork of [dc-app-performance-toolkit](https://github.com/atlassian/dc-app-performance-toolkit). -1. Clone the fork locally, then edit the `bitbucket.yml` configuration file. Set enterprise-scale Bitbucket Data Center parameters: - -{{% warning %}} -Do not push to the fork real `application_hostname`, `admin_login` and `admin_password` values for security reasons. -Instead, set those values directly in `.yml` file on execution environment instance. -{{% /warning %}} +1. Check the `bitbucket.yml` configuration file. If load configuration settings were changed for dev runs, make sure parameters + were changed back to the defaults: ``` yaml application_hostname: test_bitbucket_instance.atlassian.com # Bitbucket DC hostname without protocol and port e.g. test-bitbucket.atlassian.com or localhost @@ -278,26 +280,8 @@ Instead, set those values directly in `.yml` file on execution environment insta test_duration: 50m ramp-up: 10m # time to spin all concurrent users total_actions_per_hour: 32700 # number of total JMeter actions per hour - ``` - -1. Push your changes to the forked repository. -1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. - * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) - * Storage size: `30` GiB -1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). - - ```bash - ssh -i path_to_pem_file ubuntu@INSTANCE_PUBLIC_IP ``` -1. Install [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Setup manage Docker as a [non-root user](https://docs.docker.com/engine/install/linux-postinstall). -1. Clone forked repository. - -{{% note %}} -At this stage app-specific actions are not needed yet. 
Use code from `master` branch with your `bitbucket.yml` changes. -{{% /note %}} - You'll need to run the toolkit for each [test scenario](#testscenario) in the next section. --- @@ -319,12 +303,27 @@ This scenario helps to identify basic performance issues without a need to spin To receive performance baseline results **without** an app installed: -1. Use SSH to connect to execution environment. -1. Run toolkit with docker from the execution environment instance: +1. Before run: + * Make sure `bitbucket.yml` and toolkit code base has default configuration from the `master` branch. + * App-specific actions code base is not needed for Run1 and Run2. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/bitbucket/YY-MM-DD-hh-mm-ss` folder: @@ -340,16 +339,23 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 2 (~1 hour) -To receive performance results with an app installed: +To receive performance results with an app installed (still use master branch): 1. Install the app you want to test. 1. Setup app license. -1. Run toolkit with docker from the execution environment instance: +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -358,56 +364,48 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Generating a performance regression report -To generate a performance regression report: - -1. Use SSH to connect to execution environment. -1. Install and activate the `virtualenv` as described in `dc-app-performance-toolkit/README.md` -1. 
Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `performance_profile.yml` file: - - Under `runName: "without app"`, in the `fullPath` key, insert the full path to results directory of [Run 1](#regressionrun1). - - Under `runName: "with app"`, in the `fullPath` key, insert the full path to results directory of [Run 2](#regressionrun2). -1. Run the following command: - ``` bash - python csv_chart_generator.py performance_profile.yml +To generate a performance regression report: + +1. Edit the `./app/reports_generation/performance_profile.yml` file: + - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). + - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local machine terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder you will be able to review the action timings with and without your app to see its impact on the performance of the instance. If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. - -#### Scenario 2: Scalability testing - -The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. For this, you have to run scale testing on your app. - -For many apps and extensions to Atlassian products, there should not be a significant performance difference between operating on a single node or across many nodes in Bitbucket DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Bitbucket DC app in a cluster. - +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. + If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. ##### Run 3 (~1 hour) To receive scalability benchmark results for one-node Bitbucket DC **with** app-specific actions: -1. Apply app-specific code changes to a new branch of forked repo. -1. Use SSH to connect to execution environment. -1. 
Pull cloned fork repo branch with app-specific actions. -1. Run toolkit with docker from the execution environment instance: - - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml - ``` +1. Before run: + * Make sure `bitbucket.yml` and toolkit code base has code base with your developed app-specific actions. + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * [test_1_selenium_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/bitbucket_ui.py#L67-L68) is uncommented and has implementation in case of Selenium app-specific actions. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml + ``` + {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. {{% /note %}} @@ -415,16 +413,16 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~1 hour) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. -[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-bitbucket/#ec2-cpu-limit) section has instructions on how to increase limit if needed. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Bitbucket DC **with** app-specific actions: 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. -2. Open `dcapt.tfvars` file and set `bitbucket_replica_count` value to `2`. -3. From local terminal (Git Bash for Windows users) start scaling (~20 min): +1. Open `dcapt.tfvars` file and set `bitbucket_replica_count` value to `2`. +1. 
From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ @@ -432,12 +430,19 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` -4. Use SSH to connect to execution environment. -5. Run toolkit with docker from the execution environment instance: - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml - ``` +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` + + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -446,20 +451,27 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~1 hour) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. -[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-bitbucket/#ec2-cpu-limit) section has instructions on how to increase limit if needed. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for four-node Bitbucket DC with app-specific actions: 1. Scale your Bitbucket Data Center deployment to 4 nodes as described in [Run 4](#run4). -1. Run toolkit with docker from the execution environment instance: +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -470,37 +482,24 @@ Review `results_summary.log` file under artifacts dir location. 
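For example, from the `dc-app-performance-toolkit` folder the latest summary can be printed directly (a quick sketch; it assumes the default results location and at least one finished Bitbucket run):
``` bash
# Print the summary of the most recent Bitbucket results folder
cat "$(ls -td app/results/bitbucket/*/ | head -1)results_summary.log"
```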
Make sure that o To generate a scalability report: -1. Use SSH to connect to execution environment. -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `scale_profile.yml` file: - - For `runName: "1 Node"`, in the `fullPath` key, insert the full path to results directory of [Run 3](#run3). - - For `runName: "2 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 4](#run4). - - For `runName: "4 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 5](#run5). -1. Run the following command from the `virtualenv` (as described in `dc-app-performance-toolkit/README.md`): - ``` bash - python csv_chart_generator.py scale_profile.yml +1. Edit the `./app/reports_generation/performance_profile.yml` file: + - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). + - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). + - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py scale_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and summary report. - - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder, you will be able to review action timings on Bitbucket Data Center with different numbers of nodes. If you see a significant variation in any action timings between configurations, we recommend taking a look into the app implementation to understand the root cause of this delta. +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. + If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. 
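The linked README is the authoritative source for the termination command. As a rough sketch (assuming the uninstall entry point mirrors the `install.sh` invocation used earlier in this guide), it is run from the `dc-app-performance-toolkit/app/util/k8s` folder:
``` bash
# Sketch only - verify the exact command in the README linked above
docker run --pull=always --env-file aws_envs \
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/logs:/data-center-terraform/logs" \
-it atlassianlabs/terraform ./uninstall.sh -c conf.tfvars
```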
+In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -515,9 +514,8 @@ Do not forget to attach performance testing results to your ECOHELP ticket. 2. Attach two reports folders to your ECOHELP ticket. ## Support -For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. - If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). +For failed cluster uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). -In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +In case of any technical questions or issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 97d0c2ba2..1d624c7d9 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-11-27" +date: "2024-01-05" --- # Data Center App Performance Toolkit User Guide For Confluence @@ -21,7 +21,7 @@ In this document, we cover the use of the Data Center App Performance Toolkit on **[Enterprise-scale environment](#mainenvironmententerprise)**: Confluence Data Center environment used to generate Data Center App Performance Toolkit test results for the Marketplace approval process. 4. [Set up an enterprise-scale environment Confluence Data Center on AWS](#instancesetup). -5. [Set up an execution environment for the toolkit](#executionhost). +5. [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). 6. [Running the test scenarios from execution environment against enterprise-scale Confluence Data Center](#testscenario). --- @@ -253,6 +253,7 @@ App-specific actions are required. Do not proceed with the next step until you h {{% warning %}} It is recommended to terminate a development environment before creating an enterprise-scale environment. Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. +In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. 
To do this, you'll need an **enterprise-scale environment**. @@ -260,9 +261,16 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Confluence Data Center enterprise-scale environment with "large" dataset #### EC2 CPU Limit -The installation of 4-nodes Confluence requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. +{{% warning %}} +The installation of 4-pods DC environment and execution pod requires at least **40** vCPU Cores. +Newly created AWS account often has vCPU limit set to low numbers like 5 vCPU per region. +Check your account current vCPU limit for On-Demand Standard instances by visiting [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) page. +**Applied quota value** is the current CPU limit in the specific region. +Make that current limit is large enough to deploy new cluster. The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. +Recommended limit is 50. +{{% /warning %}} ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. @@ -272,8 +280,8 @@ Monthly charges will be based on your actual usage of AWS services, and may vary | Cluster | Estimated hourly cost ($) | |-------------------------|---------------------------| -| One pod Confluence DC | 0.8 - 1.0 | -| Two pods Confluence DC | 1.2 - 1.5 | +| One pod Confluence DC | 1 - 2 | +| Two pods Confluence DC | 1.5 - 2 | | Four pods Confluence DC | 2.0 - 3.0 | #### Setup Confluence Data Center enterprise-scale environment on k8s. @@ -300,6 +308,7 @@ Below process describes how to install enterprise-scale Confluence DC with "larg 4. Set AWS access keys created in step1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) 5. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-confluence-large` - `products` - `confluence` @@ -330,17 +339,13 @@ It's recommended to change default password from UI account page for security re --- -### 5. Setting up an execution environment +### 5. Setting up load configuration for Enterprise-scale runs -For generating performance results suitable for Marketplace approval process use dedicated execution environment. This is a separate AWS EC2 instance to run the toolkit from. Running the toolkit from a dedicated instance but not from a local machine eliminates network fluctuations and guarantees stable CPU and memory performance. +Default TerraForm deployment [configuration](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/dcapt.tfvars) +already has a dedicated execution environment pod to run tests from. For more details see `Execution Environment Settings` section in `dcapt.tfvars` file. -1. Go to GitHub and create a fork of [dc-app-performance-toolkit](https://github.com/atlassian/dc-app-performance-toolkit). -1. Clone the fork locally, then edit the `confluence.yml` configuration file. 
Set enterprise-scale Confluence Data Center parameters:
-
-{{% warning %}}
-Do not push to the fork real `application_hostname`, `admin_login` and `admin_password` values for security reasons.
-Instead, set those values directly in `.yml` file on execution environment instance.
-{{% /warning %}}
+1. Check the `confluence.yml` configuration file. If load configuration settings were changed for dev runs, make sure parameters
+   were changed back to the defaults:

 ``` yaml
    application_hostname: test_confluence_instance.atlassian.com   # Confluence DC hostname without protocol and port e.g. test-confluence.atlassian.com or localhost
@@ -355,26 +360,8 @@ Instead, set those values directly in `.yml` file on execution environment insta
    test_duration: 45m
    ramp-up: 5m                     # time to spin all concurrent users
    total_actions_per_hour: 20000   # number of total JMeter/Locust actions per hour.
-    ```
-
-1. Push your changes to the forked repository.
-1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/).
-    * OS: select from Quick Start `Ubuntu Server 22.04 LTS`.
-    * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/)
-    * Storage size: `30` GiB
-1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html).
-
-    ```bash
-    ssh -i path_to_pem_file ubuntu@INSTANCE_PUBLIC_IP
    ```
-1. Install [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Setup manage Docker as a [non-root user](https://docs.docker.com/engine/install/linux-postinstall).
-1. Clone forked repository.
-
-{{% note %}}
-At this stage app-specific actions are not needed yet. Use code from `master` branch with your `confluence.yml` changes.
-{{% /note %}}
-
 You'll need to run the toolkit for each [test scenario](#testscenario) in the next section.

 ---
@@ -396,14 +383,28 @@ This scenario helps to identify basic performance issues without a need to spin

 To receive performance baseline results **without** an app installed:

-1. Use SSH to connect to execution environment.
-1. Run toolkit with docker from the execution environment instance:
-
+1. Before run:
+   * Make sure `confluence.yml` and toolkit code base has default configuration from the `master` branch.
+   * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration).
+   * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file.
+   * `standalone_extension` set to 0. App-specific actions are not needed for Run1 and Run2.
+   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file:
+     - `AWS_ACCESS_KEY_ID`
+     - `AWS_SECRET_ACCESS_KEY`
+     - `AWS_SESSION_TOKEN` (only for temporary creds)
+1. 
Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + export ENVIRONMENT_NAME=your_environment_name ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/confluence/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary - `results.csv`: aggregated .csv file with all actions and timings @@ -417,16 +418,23 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 2 (~50 min) -To receive performance results with an app installed: +To receive performance results with an app installed (still use master branch): 1. Install the app you want to test. 1. Setup app license. -1. Run toolkit with docker from the execution environment instance: +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -437,53 +445,56 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o To generate a performance regression report: -1. Use SSH to connect to execution environment. -1. Install and activate the `virtualenv` as described in `dc-app-performance-toolkit/README.md` -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `performance_profile.yml` file: - - Under `runName: "without app"`, in the `fullPath` key, insert the full path to results directory of [Run 1](#regressionrun1). - - Under `runName: "with app"`, in the `fullPath` key, insert the full path to results directory of [Run 2](#regressionrun2). -1. Run the following command: - ``` bash - python csv_chart_generator.py performance_profile.yml +1. Edit the `./app/reports_generation/performance_profile.yml` file: + - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). + - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). +1. 
Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local machine terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder you will be able to review the action timings with and without your app to see its impact on the performance of the instance. If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. + If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. #### Scenario 2: Scalability testing -The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. For this, you have to run scale testing on your app. +The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. +For this, you have to run scale testing on your app. -For many apps and extensions to Atlassian products, there should not be a significant performance difference between operating on a single node or across many nodes in Confluence DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Confluence DC app in a cluster. +For many apps and extensions to Atlassian products, +there should not be a significant performance difference between operating on a single node or across many nodes in +Confluence DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Confluence DC app in a cluster. ##### Run 3 (~50 min) To receive scalability benchmark results for one-node Confluence DC **with** app-specific actions: -1. Apply app-specific code changes to a new branch of forked repo. -1. Use SSH to connect to execution environment. -1. Pull cloned fork repo branch with app-specific actions. -1. Run toolkit with docker from the execution environment instance: +1. Before run: + * Make sure `confluence.yml` and toolkit code base has code base with your developed app-specific actions. + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). 
+ * `standalone_extension` set to non 0 and .jmx file has standalone actions implementation in case of JMeter app-specific actions. + * [test_1_selenium_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/confluence_ui.py#L47-L48) is uncommented and has implementation in case of Selenium app-specific actions. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -492,16 +503,16 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. -[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-confluence/#ec2-cpu-limit) section has instructions on how to increase limit if needed. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Confluence DC **with** app-specific actions: 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. -2. Open `dcapt.tfvars` file and set `confluence_replica_count` value to `2`. -3. From local terminal (Git Bash for Windows users) start scaling (~20 min): +1. Open `dcapt.tfvars` file and set `confluence_replica_count` value to `2`. +1. From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ @@ -509,12 +520,19 @@ To receive scalability benchmark results for two-node Confluence DC **with** app -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` -4. Use SSH to connect to execution environment. -5. Run toolkit with docker from the execution environment instance: - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml - ``` +1. 
Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` + + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -523,20 +541,27 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. -[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-confluence/#ec2-cpu-limit) section has instructions on how to increase limit if needed. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for four-node Confluence DC with app-specific actions: 1. Scale your Confluence Data Center deployment to 4 nodes as described in [Run 4](#run4). -1. Run toolkit with docker from the execution environment instance: +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -547,37 +572,24 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o To generate a scalability report: -1. Use SSH to connect to execution environment. -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `scale_profile.yml` file: - - For `runName: "1 Node"`, in the `fullPath` key, insert the full path to results directory of [Run 3](#run3). - - For `runName: "2 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 4](#run4). 
- - For `runName: "4 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 5](#run5). -1. Run the following command from the `virtualenv` (as described in `dc-app-performance-toolkit/README.md`): - ``` bash - python csv_chart_generator.py scale_profile.yml +1. Edit the `./app/reports_generation/performance_profile.yml` file: + - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). + - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). + - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py scale_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and summary report. - - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder, you will be able to review action timings on Confluence Data Center with different numbers of nodes. If you see a significant variation in any action timings between configurations, we recommend taking a look into the app implementation to understand the root cause of this delta. +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. + If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. +In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -592,9 +604,8 @@ Do not forget to attach performance testing results to your ECOHELP ticket. 2. Attach two reports folders to your ECOHELP ticket. ## Support -For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. 
- If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). +For failed cluster uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). -In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +In case of any technical questions or issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index 63efe8aa0..d7f86dad0 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-11-27" +date: "2024-01-05" --- # Data Center App Performance Toolkit User Guide For Crowd @@ -15,8 +15,8 @@ In this document, we cover the use of the Data Center App Performance Toolkit on **Enterprise-scale environment**: Crowd Data Center environment used to generate Data Center App Performance Toolkit test results for the Marketplace approval process. Preferably, use the [AWS Quick Start for Crowd Data Center](https://aws.amazon.com/quickstart/architecture/atlassian-crowd) with the parameters prescribed below. These parameters provision larger, more powerful infrastructure for your Crowd Data Center. 1. [Set up an enterprise-scale environment Crowd Data Center on AWS](#instancesetup). -2. [App-specific actions development](#appspecificaction). -3. [Set up an execution environment for the toolkit](#executionhost). +2. [App-specific actions development](#appspecificaction). +3. [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). 4. [Running the test scenarios from execution environment against enterprise-scale Crowd Data Center](#testscenario). --- @@ -24,10 +24,16 @@ In this document, we cover the use of the Data Center App Performance Toolkit on ## 1. Set up an enterprise-scale environment Crowd Data Center on k8s #### EC2 CPU Limit -The installation of 4-nodes Crowd requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. +{{% warning %}} +The installation of 4-pods DC environment and execution pod requires at least **24** vCPU Cores. +Newly created AWS account often has vCPU limit set to low numbers like 5 vCPU per region. +Check your account current vCPU limit for On-Demand Standard instances by visiting [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) page. +**Applied quota value** is the current CPU limit in the specific region. 
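The same value can also be checked from a terminal with the AWS CLI (a sketch; assumes AWS CLI v2 is installed and credentials for the target account are configured):
``` bash
# Current "Running On-Demand Standard instances" vCPU quota in the default region
aws service-quotas get-service-quota \
  --service-code ec2 --quota-code L-1216C47A \
  --region us-east-2 --query "Quota.Value"
```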
+Make that current limit is large enough to deploy new cluster. The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. - +Recommended limit is 30. +{{% /warning %}} #### Setup Crowd Data Center with an enterprise-scale dataset on k8s Below process describes how to install Crowd DC with an enterprise-scale dataset included. This configuration was created @@ -42,12 +48,12 @@ specifically for performance testing during the DC app review process. 4. Set AWS access keys created in step1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) 5. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-crowd` - `products` - `crowd` - `crowd_license` - one-liner of valid crowd license without spaces and new line symbols - `region` - **Do not change default region (`us-east-2`). If specific region is required, contact support.** - - `instance_types` - `["m5.xlarge"]` {{% note %}} New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). @@ -64,7 +70,6 @@ specifically for performance testing during the DC app review process. ``` 7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`. - --- Data dimensions and values for an enterprise-scale dataset are listed and described in the following table. @@ -79,11 +84,6 @@ All the datasets use the standard `admin`/`admin` credentials. {{% /note %}} --- -#### Terminate Crowd Data Center -Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. - ---- - {{% note %}} You are responsible for the cost of the AWS services running during the reference deployment. For more information, go to [aws.amazon.com/pricing](https://aws.amazon.com/ec2/pricing/). @@ -93,7 +93,7 @@ To reduce costs, we recommend you to keep your deployment up and running only du --- -## 3. App-specific actions development +## 2. App-specific actions development Data Center App Performance Toolkit has its own set of default [JMeter](https://jmeter.apache.org/) test actions for Crowd Data Center. @@ -114,17 +114,13 @@ Data Center App Performance Toolkit has its own set of default [JMeter](https:// --- -## 4. Setting up an execution environment +### 3. Setting up load configuration for Enterprise-scale runs -For generating performance results suitable for Marketplace approval process use dedicated execution environment. This is a separate AWS EC2 instance to run the toolkit from. Running the toolkit from a dedicated instance but not from a local machine eliminates network fluctuations and guarantees stable CPU and memory performance. +Default TerraForm deployment [configuration](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/dcapt.tfvars) +already has a dedicated execution environment pod to run tests from. For more details see `Execution Environment Settings` section in `dcapt.tfvars` file. -1. Go to GitHub and create a fork of [dc-app-performance-toolkit](https://github.com/atlassian/dc-app-performance-toolkit). -1. Clone the fork locally, then edit the `crowd.yml` configuration file. 
Set enterprise-scale Crowd Data Center parameters: - -{{% warning %}} -Do not push to the fork real `application_hostname`, `admin_login` and `admin_password` values for security reasons. -Instead, set those values directly in `.yml` file on execution environment instance. -{{% /warning %}} +1. Check the `crowd.yml` configuration file. If load configuration settings were changed for dev runs, make sure parameters + were changed back to the defaults: ``` yaml application_hostname: test_crowd_instance.atlassian.com # Crowd DC hostname without protocol and port e.g. test-crowd.atlassian.com or localhost @@ -141,26 +137,11 @@ Instead, set those values directly in `.yml` file on execution environment insta test_duration: 45m ``` -1. Push your changes to the forked repository. -1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. - * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) - * Storage size: `30` GiB -1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). - - ```bash - ssh -i path_to_pem_file ubuntu@INSTANCE_PUBLIC_IP - ``` - -1. Install [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Setup manage Docker as a [non-root user](https://docs.docker.com/engine/install/linux-postinstall). -1. Clone forked repository. - - You'll need to run the toolkit for each [test scenario](#testscenario) in the next section. --- -## 5. Running the test scenarios from execution environment against enterprise-scale Crowd Data Center +## 4. Running the test scenarios from execution environment against enterprise-scale Crowd Data Center Using the Data Center App Performance Toolkit for [Performance and scale testing your Data Center app](/platform/marketplace/developing-apps-for-atlassian-data-center-products/) involves two test scenarios: @@ -177,14 +158,27 @@ This scenario helps to identify basic performance issues without a need to spin To receive performance baseline results **without** an app installed and **without** app-specific actions (use code from `master` branch): -1. Use SSH to connect to execution environment. -1. Run toolkit with docker from the execution environment instance: - +1. Before run: + * Make sure `crowd.yml` and toolkit code base has default configuration from the `master` branch. No app-specific actions code applied. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. 
Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + export ENVIRONMENT_NAME=your_environment_name ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/crowd/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary - `results.csv`: aggregated .csv file with all actions and timings @@ -195,17 +189,25 @@ To receive performance baseline results **without** an app installed and **witho Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. {{% /note %}} -##### Run 2 - +##### Run 2 (~50 min) -**Performance results generation with the app installed (still use master branch):** +To receive performance results with an app installed (still use master branch): -1. Run toolkit with docker from the execution environment instance: +1. Install the app you want to test. +1. Setup app license. +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -214,74 +216,73 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Generating a performance regression report -To generate a performance regression report: - -1. Use SSH to connect to execution environment. -1. Install and activate the `virtualenv` as described in `dc-app-performance-toolkit/README.md` -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `performance_profile.yml` file: - - Under `runName: "without app"`, in the `fullPath` key, insert the full path to results directory of [Run 1](#regressionrun1). - - Under `runName: "with app"`, in the `fullPath` key, insert the full path to results directory of [Run 2](#regressionrun2). -1. Run the following command: - ``` bash - python csv_chart_generator.py performance_profile.yml +To generate a performance regression report: + +1. 
Edit the `./app/reports_generation/performance_profile.yml` file: + - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). + - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local machine terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder you will be able to review the action timings with and without your app to see its impact on the performance of the instance. If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. - +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. + If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. #### Scenario 2: Scalability testing -The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. For this, you have to run scale testing on your app. +The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. +For this, you have to run scale testing on your app. -For many apps and extensions to Atlassian products, there should not be a significant performance difference between operating on a single node or across many nodes in Crowd DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Crowd DC app in a cluster. +For many apps and extensions to Atlassian products, +there should not be a significant performance difference between operating on a single node or across many nodes in +Crowd DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Crowd DC app in a cluster. ###### Run 3 (~50 min) To receive scalability benchmark results for one-node Crowd DC **with** app-specific actions: -1. Apply app-specific code changes to a new branch of forked repo. -1. Use SSH to connect to execution environment. -1. Pull cloned fork repo branch with app-specific actions. -1. Run toolkit with docker from the execution environment instance: +1. Before run: + * Make sure `crowd.yml` and toolkit code base has code base with your developed app-specific actions. 
+ * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. {{% /note %}} - ##### Run 4 (~50 min) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 30. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. -[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-crowd/#ec2-cpu-limit) section has instructions on how to increase limit if needed. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Crowd DC **with** app-specific actions: 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. -2. Open `dcapt.tfvars` file and set `crowd_replica_count` value to `2`. -3. From local terminal (Git Bash for Windows users) start scaling (~20 min): +1. Open `dcapt.tfvars` file and set `crowd_replica_count` value to `2`. +1. From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ @@ -289,9 +290,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` -4. Use SSH to connect to execution environment. - -5. Edit **run parameters** for 2 nodes run. To do it, left uncommented only 2 nodes scenario parameters in `crowd.yml` file. +1. Edit **run parameters** for 2 nodes run. To do it, left uncommented only 2 nodes scenario parameters in `crowd.yml` file. 
``` # 1 node scenario parameters # ramp-up: 20s # time to spin all concurrent threads @@ -305,12 +304,19 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec # ramp-up: 5s # time to spin all concurrent threads # total_actions_per_hour: 720000 # number of total JMeter actions per hour ``` -6. Run toolkit with docker from the execution environment instance: +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -319,9 +325,9 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 30. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. -[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-crowd/#ec2-cpu-limit) section has instructions on how to increase limit if needed. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for four-node Crowd DC with app-specific actions: @@ -340,14 +346,20 @@ To receive scalability benchmark results for four-node Crowd DC with app-specifi # 4 nodes scenario parameters ramp-up: 5s # time to spin all concurrent threads total_actions_per_hour: 720000 # number of total JMeter actions per hour - ``` - -1. Run toolkit with docker from the execution environment instance: + ``` +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. 
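When switching between the 1, 2 and 4 node scenarios it can be worth confirming that the expected number of Crowd pods is actually up before starting the load. This is only a minimal sketch: it assumes `kubectl` is already configured against the cluster and that the deployment uses the default `atlassian` namespace created by the Terraform scripts.
   ``` bash
   # Sanity check before a multi-node run: all Crowd replicas should be in the Running state.
   # The "atlassian" namespace is an assumption based on the default deployment settings.
   kubectl get pods -n atlassian | grep crowd
   ```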
@@ -358,43 +370,30 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o

 ##### Generating a scalability report

 To generate a scalability report:

-1. Use SSH to connect to execution environment.
-1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports:
-   ``` bash
-   sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results
-   ```
-1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder.
-1. Edit the `scale_profile.yml` file:
-   - For `runName: "Node 1"`, in the `fullPath` key, insert the full path to results directory of [Run 3](#run3).
-   - For `runName: "Node 2"`, in the `fullPath` key, insert the full path to results directory of [Run 4](#run4).
-   - For `runName: "Node 4"`, in the `fullPath` key, insert the full path to results directory of [Run 5](#run5).
-1. Run the following command from the activated `virtualenv` (as described in `dc-app-performance-toolkit/README.md`):
-   ``` bash
-   python csv_chart_generator.py scale_profile.yml
+1. Edit the `./app/reports_generation/scale_profile.yml` file:
+   - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3).
+   - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4).
+   - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5).
+1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports:
    ``` bash
+   docker run --pull=always \
+   -v "/$PWD:/dc-app-performance-toolkit" \
+   --workdir="/dc-app-performance-toolkit/app/reports_generation" \
+   --entrypoint="python" \
+   -it atlassian/dcapt csv_chart_generator.py scale_profile.yml
    ```
-1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and summary report.
-
-
-#### Analyzing report
-
-Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive:
-
-1. From local terminal (Git bash terminal for Windows) run command:
-   ``` bash
-   export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip
-   scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports
-   ```
-1. Once completed, in the `./reports` folder you will be able to review action timings on Crowd Data Center with different numbers of nodes. If you see a significant variation in any action timings between configurations, we recommend taking a look into the app implementation to understand the root cause of this delta.
+1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report.
+   If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta.

 {{% warning %}}
-After completing all your tests, delete your Crowd Data Center stacks.
+It is recommended to terminate an enterprise-scale environment after completing all tests.
+Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions.
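As a rough sketch only (the linked README stays the authoritative source), termination mirrors the scaling command used above, but runs `uninstall.sh` instead of `install.sh` from the same `dc-app-performance-toolkit/app/util/k8s` folder:
``` bash
# Assumed to mirror the documented terminate procedure - verify against the README before use.
# Run from the dc-app-performance-toolkit/app/util/k8s folder.
docker run --pull=always --env-file aws_envs \
-v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
-v "/$PWD/logs:/data-center-terraform/logs" \
-it atlassianlabs/terraform ./uninstall.sh -c conf.tfvars
```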
+In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} #### Attaching testing results to ECOHELP ticket {{% warning %}} -It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. +Do not forget to attach performance testing results to your ECOHELP ticket. {{% /warning %}} 1. Make sure you have two reports folders: one with performance profile and second with scale profile results. @@ -403,9 +402,8 @@ Follow [Terminate development environment](https://github.com/atlassian/dc-app-p 2. Attach two reports folders to your ECOHELP ticket. ## Support -For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. - If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). +For failed cluster uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). -In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +In case of any technical questions or issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index 73cee2a21..a3f4a30f0 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-11-27" +date: "2024-01-05" --- # Data Center App Performance Toolkit User Guide For Jira @@ -26,7 +26,7 @@ In this document, we cover the use of the Data Center App Performance Toolkit on **[Enterprise-scale environment](#mainenvironmententerprise)**: Jira Data Center environment used to generate Data Center App Performance Toolkit test results for the Marketplace approval process. 4. [Set up an enterprise-scale environment Jira Data Center on AWS](#instancesetup). -5. [Set up an execution environment for the toolkit](#executionhost). +5. [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). 6. [Running the test scenarios from execution environment against enterprise-scale Jira Data Center](#testscenario). --- @@ -75,6 +75,7 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc 4. Set AWS access keys created in step1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) 5. 
Set **required** variables in `dcapt-small.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-jira-small` - `products` - `jira` @@ -99,10 +100,6 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. -9. Re-index (only for Jira 8.x, for Jira 9.x skip this step): - - Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. - - Select the **Full re-index** option. - - Click **Re-Index** and wait until re-indexing is completed (~2s). {{% note %}} All the datasets use the standard `admin`/`admin` credentials. {{% /note %}} @@ -267,6 +264,7 @@ App-specific actions are required. Do not proceed with the next step until you h {{% warning %}} It is recommended to terminate a development environment before creating an enterprise-scale environment. Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. +In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**. @@ -274,9 +272,16 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Jira Data Center enterprise-scale environment with "large" dataset #### EC2 CPU Limit -The installation of 4-nodes Jira requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. +{{% warning %}} +The installation of 4-pods DC environment and execution pod requires at least **40** vCPU Cores. +Newly created AWS account often has vCPU limit set to low numbers like 5 vCPU per region. +Check your account current vCPU limit for On-Demand Standard instances by visiting [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) page. +**Applied quota value** is the current CPU limit in the specific region. +Make that current limit is large enough to deploy new cluster. The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. +Recommended limit is 50. +{{% /warning %}} #### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. @@ -285,11 +290,11 @@ Monthly charges will be based on your actual usage of AWS services and may vary *The prices below are approximate and may vary depending on such factors like region, instance type, deployment type of DB, and other. 
-| Stack | Estimated hourly cost ($) | -| ----- | ------------------------- | -| One Node Jira DC | 0.8 - 1.1 -| Two Nodes Jira DC | 1.2 - 1.7 -| Four Nodes Jira DC | 2.0 - 3.0 +| Stack | Estimated hourly cost ($) | +|-------------------| ------------------------- | +| One pod Jira DC | 1 - 2 +| Two pods Jira DC | 1.5 - 2 +| Four pods Jira DC | 2.0 - 3.0 #### Setup Jira Data Center enterprise-scale environment on k8s @@ -331,6 +336,7 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat 4. Set AWS access keys created in step1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) 5. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-jira` - `products` - `jira` @@ -361,17 +367,13 @@ It's recommended to change default password from UI account page for security re --- -### 5. Setting up an execution environment +### 5. Setting up load configuration for Enterprise-scale runs -For generating performance results suitable for Marketplace approval process use dedicated execution environment. This is a separate AWS EC2 instance to run the toolkit from. Running the toolkit from a dedicated instance but not from a local machine eliminates network fluctuations and guarantees stable CPU and memory performance. +Default TerraForm deployment [configuration](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/dcapt.tfvars) +already has a dedicated execution environment pod to run tests from. For more details see `Execution Environment Settings` section in `dcapt.tfvars` file. -1. Go to GitHub and create a fork of [dc-app-performance-toolkit](https://github.com/atlassian/dc-app-performance-toolkit). -1. Clone the fork locally, then edit the `jira.yml` configuration file. Set enterprise-scale Jira Data Center parameters: - -{{% warning %}} -Do not push to the fork real `application_hostname`, `admin_login` and `admin_password` values for security reasons. -Instead, set those values directly in `.yml` file on execution environment instance. -{{% /warning %}} +1. Check the `jira.yml` configuration file. If load configuration settings were changed for dev runs, make sure parameters +were changed back to the defaults: ``` yaml application_hostname: test_jira_instance.atlassian.com # Jira DC hostname without protocol and port e.g. test-jira.atlassian.com or localhost @@ -386,31 +388,13 @@ Instead, set those values directly in `.yml` file on execution environment insta test_duration: 45m ramp-up: 3m # time to spin all concurrent users total_actions_per_hour: 54500 # number of total JMeter/Locust actions per hour - ``` - -1. Push your changes to the forked repository. -1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. - * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) - * Storage size: `30` GiB -1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). - - ```bash - ssh -i path_to_pem_file ubuntu@INSTANCE_PUBLIC_IP ``` -1. Install [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Setup manage Docker as a [non-root user](https://docs.docker.com/engine/install/linux-postinstall). -1. 
Clone forked repository. - -{{% note %}} -At this stage app-specific actions are not needed yet. Use code from `master` branch with your `jira.yml` changes. -{{% /note %}} - You'll need to run the toolkit for each [test scenario](#testscenario) in the next section. --- -### 6. Running the test scenarios from execution environment against enterprise-scale Jira Data Center +### 6. Running the test scenarios from execution environment pod against enterprise-scale Jira Data Center Using the Data Center App Performance Toolkit for [Performance and scale testing your Data Center app](/platform/marketplace/developing-apps-for-atlassian-data-center-products/) involves two test scenarios: @@ -427,15 +411,29 @@ This scenario helps to identify basic performance issues without a need to spin To receive performance baseline results **without** an app installed: -1. Use SSH to connect to execution environment. -1. Run toolkit with docker from the execution environment instance: - +1. Before run: + * Make sure `jira.yml` and toolkit code base has default configuration from the `master` branch. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * `standalone_extension` set to 0. App-specific actions are not needed for Run1 and Run2. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + export ENVIRONMENT_NAME=your_environment_name ``` -1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder: + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + ``` +1. View the results files of the run in the local `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary - `results.csv`: aggregated .csv file with all actions and timings - `bzt.log`: logs of the Taurus tool execution @@ -461,9 +459,9 @@ The re-index time for Jira is about ~50-70 minutes. 3. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. 4. Select the **Full re-index** option. 5. Click **Re-Index** and wait until re-indexing is completed. -{{% note %}} +{{% warning %}} Jira will be temporarily unavailable during the re-indexing process - "503 Service Temporarily Unavailable" message will be displayed. Once the process is complete, the system will be fully accessible and operational once again. -{{% /note %}} +{{% /warning %}} 6. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. {{% note %}} @@ -472,14 +470,21 @@ Re-index information window is displayed on the **Indexing page**. If the window 7. Attach the screenshot(s) to your ECOHELP ticket. 
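Before moving on to the with-app run, it can be worth double-checking that the app under test is actually installed and enabled. The snippet below is an optional sketch that uses Jira's standard UPM REST endpoint; the hostname, credentials and app key are placeholders to replace with your instance values.
``` bash
# List installed apps via the UPM REST API and look for your app key in the output.
curl -s -u admin:admin "http://your_jira_hostname/jira/rest/plugins/1.0/" | grep -i "your-app-key"
```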
-**Performance results generation with the app installed:** +**Performance results generation with the app installed (still use master branch):** -1. Run toolkit with docker from the execution environment instance: +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -490,54 +495,57 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o To generate a performance regression report: -1. Use SSH to connect to execution environment. -1. Install and activate the `virtualenv` as described in `dc-app-performance-toolkit/README.md` -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `performance_profile.yml` file: - - Under `runName: "without app"`, in the `fullPath` key, insert the full path to results directory of [Run 1](#regressionrun1). - - Under `runName: "with app"`, in the `fullPath` key, insert the full path to results directory of [Run 2](#regressionrun2). -1. Run the following command: +1. Edit the `./app/reports_generation/performance_profile.yml` file: + - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). + - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash - python csv_chart_generator.py performance_profile.yml + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local machine terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. 
Once completed, in the `./reports` folder you will be able to review the action timings with and without your app to see its impact on the performance of the instance. If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. - +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. +If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. #### Scenario 2: Scalability testing -The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. For this, you have to run scale testing on your app. +The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. +For this, you have to run scale testing on your app. -For many apps and extensions to Atlassian products, there should not be a significant performance difference between operating on a single node or across many nodes in Jira DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Jira DC app in a cluster. +For many apps and extensions to Atlassian products, +there should not be a significant performance difference between operating on a single node or across many nodes in +Jira DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Jira DC app in a cluster. ###### Run 3 (~50 min) To receive scalability benchmark results for one-node Jira DC **with** app-specific actions: -1. Apply app-specific code changes to a new branch of forked repo. -1. Use SSH to connect to execution environment. -1. Pull cloned fork repo branch with app-specific actions. -1. Run toolkit with docker from the execution environment instance: +1. Before run: + * Make sure `jira.yml` and toolkit code base has code base with your developed app-specific actions. + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * `standalone_extension` set to non 0 and .jmx file has standalone actions implementation in case of JMeter app-specific actions. + * [test_1_selenium_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/jira_ui.py#L65-L66) is uncommented and has implementation in case of Selenium app-specific actions. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. 
Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -546,7 +554,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} @@ -554,8 +562,8 @@ Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/ho To receive scalability benchmark results for two-node Jira DC **with** app-specific actions: 1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. -2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. -3. From local terminal (Git Bash for Windows users) start scaling (~20 min): +1. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. +1. From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ @@ -563,14 +571,19 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` -4. Use SSH to connect to execution environment. -5. Run toolkit with docker from the execution environment instance: - - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml - ``` +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -579,7 +592,7 @@ Review `results_summary.log` file under artifacts dir location. 
Make sure that o ##### Run 5 (~50 min) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} @@ -587,12 +600,19 @@ Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/ho To receive scalability benchmark results for four-node Jira DC with app-specific actions: 1. Scale your Jira Data Center deployment to 4 nodes as described in [Run 4](#run4). -1. Run toolkit with docker from the execution environment instance: +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -603,37 +623,25 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o To generate a scalability report: -1. Use SSH to connect to execution environment. -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `scale_profile.yml` file: - - For `runName: "1 Node"`, in the `fullPath` key, insert the full path to results directory of [Run 3](#run3). - - For `runName: "2 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 4](#run4). - - For `runName: "4 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 5](#run5). -1. Run the following command from the activated `virtualenv` (as described in `dc-app-performance-toolkit/README.md`): +1. Edit the `./app/reports_generation/performance_profile.yml` file: + - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). + - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). + - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). +1. 
Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash - python csv_chart_generator.py scale_profile.yml + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py scale_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and summary report. - - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder you will be able to review action timings on Jira Data Center with different numbers of nodes. If you see a significant variation in any action timings between configurations, we recommend taking a look into the app implementation to understand the root cause of this delta. +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. + If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. +In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -648,9 +656,8 @@ Do not forget to attach performance testing results to your ECOHELP ticket. 2. Attach two reports folders to your ECOHELP ticket. ## Support -For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. - If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). +For failed cluster uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). 
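If you only need a quick look at a failing pod before collecting the full log bundle, a generic `kubectl` check can help. This sketch assumes `kubectl` access to the cluster and the default `atlassian` namespace; the documented log collection script remains the preferred way to gather logs for support requests.
``` bash
# Quick triage: list pods and tail the logs of the suspicious one.
kubectl get pods -n atlassian
kubectl logs -n atlassian <pod-name> --tail=200
```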
-In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +In case of any technical questions or issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index f63693e00..633aaf3b8 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-11-27" +date: "2024-01-05" --- # Data Center App Performance Toolkit User Guide For Jira Service Management @@ -27,7 +27,7 @@ In this document, we cover the use of the Data Center App Performance Toolkit on **[Enterprise-scale environment](#mainenvironmententerprise)**: Jira Service Management Data Center environment used to generate Data Center App Performance Toolkit test results for the Marketplace approval process. 4. [Set up an enterprise-scale environment Jira Service Management Data Center on AWS](#instancesetup). -5. [Set up an execution environment for the toolkit](#executionhost). +5. [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). 6. [Running the test scenarios from execution environment against enterprise-scale Jira Service Management Data Center](#testscenario). --- @@ -76,6 +76,7 @@ Below process describes how to install low-tier Jira Service Management DC with 4. Set AWS access keys created in step1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) 5. Set **required** variables in `dcapt-small.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-jsm-small`. - `products` - `jira` @@ -113,9 +114,9 @@ Make sure **English (United States)** language is selected as a default language {{% /warning %}} 1. Clone [Data Center App Performance Toolkit](https://github.com/atlassian/dc-app-performance-toolkit) locally. -2. Follow the [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md) instructions to set up toolkit locally. -3. Navigate to `dc-app-performance-toolkit/app` folder. -4. Open the `jsm.yml` file and fill in the following variables: +1. Follow the [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md) instructions to set up toolkit locally. +1. Navigate to `dc-app-performance-toolkit/app` folder. +1. Open the `jsm.yml` file and fill in the following variables: - `application_hostname`: your_dc_jsm_instance_hostname without protocol. - `application_protocol`: http or https. - `application_port`: for HTTP - 80, for HTTPS - 443, 8080, 2990 or your instance-specific port. @@ -134,15 +135,15 @@ Make sure **English (United States)** language is selected as a default language - `insight`: True or False. Default value is False. Set True to enable Insight specific tests. -5. In case your application relays or extends the functionality of **Insight**. Make sure to set `True` value next to `insight` variable. +1. In case your application relays or extends the functionality of **Insight**. Make sure to set `True` value next to `insight` variable. -6. Run bzt. +1. Run bzt. ``` bash bzt jsm.yml ``` -7. 
Review the resulting table in the console log. All JMeter/Locust and Selenium actions should have 95+% success rate. +1. Review the resulting table in the console log. All JMeter/Locust and Selenium actions should have 95+% success rate. In case some actions does not have 95+% success rate refer to the following logs in `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary @@ -303,6 +304,7 @@ App-specific actions are required. Do not proceed with the next step until you h {{% warning %}} It is recommended to terminate a development environment before creating an enterprise-scale environment. Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. +In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**. @@ -310,9 +312,16 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Jira Service Management Data Center enterprise-scale environment with "large" dataset #### EC2 CPU Limit -The installation of 4-nodes Jira Service Management requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) service shows the limit for On-Demand Standard instances. **Applied quota value** is the current CPU limit in the specific region. +{{% warning %}} +The installation of 4-pods DC environment and execution pod requires at least **40** vCPU Cores. +Newly created AWS account often has vCPU limit set to low numbers like 5 vCPU per region. +Check your account current vCPU limit for On-Demand Standard instances by visiting [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) page. +**Applied quota value** is the current CPU limit in the specific region. +Make that current limit is large enough to deploy new cluster. The limit can be increased by using **Request increase at account-level** button: choose a region, set a quota value which equals a required number of CPU Cores for the installation and press **Request** button. +Recommended limit is 50. +{{% /warning %}} #### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. @@ -320,11 +329,11 @@ Monthly charges will be based on your actual usage of AWS services and may vary *The prices below are approximate and may vary depending on such factors like region, instance type, deployment type of DB, and other. 
-| Stack | Estimated hourly cost ($) | -| ----- | ------------------------- | -| One Node Jira Service Management DC | 0.8 - 1.1 -| Two Nodes Jira Service Management DC | 1.2 - 1.7 -| Four Nodes Jira Service Management DC | 2.0 - 3.0 +| Stack | Estimated hourly cost ($) | +|-------------------------------------| ------------------------- | +| One pod Jira Service Management DC | 1 - 2 +| Two pod Jira Service Management DC | 1.5 - 2 +| Four pod Jira Service Management DC | 2.0 - 3.0 #### Setup Jira Service Management Data Center enterprise-scale environment on k8s @@ -362,6 +371,7 @@ Below process describes how to install enterprise-scale Jira Service Management 4. Set AWS access keys created in step1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) 5. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-jsm-large`. - `products` - `jira` @@ -393,18 +403,14 @@ It's recommended to change default password from UI account page for security re --- -### 5. Setting up an execution environment +### 5. Setting up load configuration for Enterprise-scale runs -For generating performance results suitable for Marketplace approval process use dedicated execution environment. This is a separate AWS EC2 instance to run the toolkit from. Running the toolkit from a dedicated instance but not from a local machine eliminates network fluctuations and guarantees stable CPU and memory performance. +Default TerraForm deployment [configuration](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/dcapt.tfvars) +already has a dedicated execution environment pod to run tests from. For more details see `Execution Environment Settings` section in `dcapt.tfvars` file. -1. Go to GitHub and create a fork of [dc-app-performance-toolkit](https://github.com/atlassian/dc-app-performance-toolkit). -2. Clone the fork locally, then edit the `jsm.yml` configuration file. Set enterprise-scale Jira Service Management Data Center parameters -3. In case your application relays or extends the functionality of **Insight**. Make sure to set `True` next to the `insight` variable. - -{{% warning %}} -Do not push to the fork real `application_hostname`, `admin_login` and `admin_password` values for security reasons. -Instead, set those values directly in `.yml` file on execution environment instance. -{{% /warning %}} +1. Check the `jsm.yml` configuration file. If load configuration settings were changed for dev runs, make sure parameters + were changed back to the defaults: +1. In case your application relays or extends the functionality of **Insight**. Make sure to set `True` next to the `insight` variable. ``` yaml application_hostname: test_jsm_instance.atlassian.com # Jira Service Management DC hostname without protocol and port e.g. test-jsm.atlassian.com or localhost @@ -423,26 +429,8 @@ Instead, set those values directly in `.yml` file on execution environment insta total_actions_per_hour_customers: 15000 # number of total JMeter/Locust actions per hour insight: False # Set True to enable Insight specific tests - ``` - -1. Push your changes to the forked repository. -1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. - * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) - * Storage size: `30` GiB -1. 
Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). - - ```bash - ssh -i path_to_pem_file ubuntu@INSTANCE_PUBLIC_IP ``` -1. Install [Docker](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository). Setup manage Docker as a [non-root user](https://docs.docker.com/engine/install/linux-postinstall). -1. Connect to the AWS EC2 instance and clone forked repository. - -{{% note %}} -At this stage app-specific actions are not needed yet. Use code from `master` branch with your `jsm.yml` changes. -{{% /note %}} - You'll need to run the toolkit for each [test scenario](#testscenario) in the next section. --- @@ -464,12 +452,27 @@ This scenario helps to identify basic performance issues without a need to spin To receive performance baseline results **without** an app installed: -1. Use SSH to connect to execution environment. -1. Run toolkit with docker from the execution environment instance: +1. Before run: + * Make sure `jsm.yml` and toolkit code base has default configuration from the `master` branch. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * `standalone_extension` set to 0. App-specific actions are not needed for Run1 and Run2. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder: @@ -499,9 +502,9 @@ The re-index time for JSM is about ~35-45 minutes. 3. Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. 4. Select the **Full re-index** option. 5. Click **Re-Index** and wait until re-indexing is completed. -{{% note %}} +{{% warning %}} Jira Service Management will be temporarily unavailable during the re-indexing process. Once the process is complete, the system will be fully accessible and operational once again. -{{% /note %}} +{{% /warning %}} 6. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. {{% note %}} @@ -511,14 +514,21 @@ Re-index information window is displayed on the **Indexing page**. If the window 7. Attach the screenshot(s) to your ECOHELP ticket. -**Performance results generation with the app installed:** +**Performance results generation with the app installed (still use master branch):** -1. Run toolkit with docker from the execution environment instance: +1. 
Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -527,56 +537,57 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Generating a performance regression report -To generate a performance regression report: - -1. Use SSH to connect to execution environment. -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Install and activate the `virtualenv` as described in `dc-app-performance-toolkit/README.md` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `performance_profile.yml` file: - - Under `runName: "without app"`, in the `fullPath` key, insert the full path to results directory of [Run 1](#regressionrun1). - - Under `runName: "with app"`, in the `fullPath` key, insert the full path to results directory of [Run 2](#regressionrun2). -1. Run the following command: - - ``` bash - python csv_chart_generator.py performance_profile.yml +To generate a performance regression report: + +1. Edit the `./app/reports_generation/performance_profile.yml` file: + - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). + - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. From local machine terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder you will be able to review the action timings with and without your app to see its impact on the performance of the instance. 
If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. #### Scenario 2: Scalability testing -The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. For this, you have to run scale testing on your app. +The purpose of scalability testing is to reflect the impact on the customer experience when operating across multiple nodes. +For this, you have to run scale testing on your app. -For many apps and extensions to Atlassian products, there should not be a significant performance difference between operating on a single node or across many nodes in Jira Service Management DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Jira Service Management DC app in a cluster. +For many apps and extensions to Atlassian products, +there should not be a significant performance difference between operating on a single node or across many nodes in +Jira Service Management DC deployment. To demonstrate performance impacts of operating your app at scale, we recommend testing your Jira Service Management DC app in a cluster. ###### Run 3 (~50 min) To receive scalability benchmark results for one-node Jira Service Management DC **with** app-specific actions: -1. Apply app-specific code changes to a new branch of forked repo. -1. Use SSH to connect to execution environment. -1. Pull cloned fork repo branch with app-specific actions. -1. Run toolkit with docker from the execution environment instance: +1. Before run: + * Make sure `jsm.yml` and toolkit code base has code base with your developed app-specific actions. + * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. + * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). + * `standalone_extension` set to non 0 and .jmx file has standalone actions implementation in case of JMeter app-specific actions. + * [test_1_selenium_customer_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/jsm_ui_customers.py#L43C9-L44) is uncommented and has implementation in case of Selenium app-specific actions. + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` + - `AWS_SESSION_TOKEN` (only for temporary creds) +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. 
Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -585,7 +596,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jsm/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} @@ -593,8 +604,8 @@ Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/ho To receive scalability benchmark results for two-node Jira Service Management DC **with** app-specific actions: 1. Navigate to `dc-app-perfrormance-toolkit/app/util/k8s` folder. -2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. -3. From local terminal (Git Bash for Windows users) start scaling (~20 min): +1. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. +1. From local terminal (Git Bash for Windows users) start scaling (~20 min): ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ @@ -602,13 +613,19 @@ To receive scalability benchmark results for two-node Jira Service Management DC -v "/$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` -4. Use SSH to connect to execution environment. -5. Run toolkit with docker from the execution environment instance: +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml - ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml + ``` {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. @@ -617,7 +634,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} -Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. +Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jsm/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} @@ -625,13 +642,20 @@ Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/ho To receive scalability benchmark results for four-node Jira Service Management DC with app-specific actions: 1. Scale your Jira Data Center deployment to 4 nodes as described in [Run 4](#run4). -1. Run toolkit with docker from the execution environment instance: - - ``` bash - cd dc-app-performance-toolkit - docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml - ``` +1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: + ``` bash + export ENVIRONMENT_NAME=your_environment_name + ``` + ``` bash + docker run --pull=always --env-file ./app/util/k8s/aws_envs \ + -e REGION=us-east-2 \ + -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ + -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ + -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ + -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml + ``` + {{% note %}} Review `results_summary.log` file under artifacts dir location. Make sure that overall status is `OK` before moving to the next steps. For an enterprise-scale environment run, the acceptable success rate for actions is 95% and above. {{% /note %}} @@ -641,37 +665,24 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o To generate a scalability report: -1. Use SSH to connect to execution environment. -1. Allow current user (for execution environment default user is `ubuntu`) to access Docker generated reports: - ``` bash - sudo chown -R ubuntu:ubuntu /home/ubuntu/dc-app-performance-toolkit/app/results - ``` -1. Navigate to the `dc-app-performance-toolkit/app/reports_generation` folder. -1. Edit the `scale_profile.yml` file: - - For `runName: "1 Node"`, in the `fullPath` key, insert the full path to results directory of [Run 3](#run3). - - For `runName: "2 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 4](#run4). - - For `runName: "4 Nodes"`, in the `fullPath` key, insert the full path to results directory of [Run 5](#run5). -1. Run the following command from the activated `virtualenv` (as described in `dc-app-performance-toolkit/README.md`): - ``` bash - python csv_chart_generator.py scale_profile.yml +1. Edit the `./app/reports_generation/performance_profile.yml` file: + - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). + - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). + - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash + docker run --pull=always \ + -v "/$PWD:/dc-app-performance-toolkit" \ + --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --entrypoint="python" \ + -it atlassian/dcapt csv_chart_generator.py scale_profile.yml ``` -1. In the `dc-app-performance-toolkit/app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and summary report. - - -#### Analyzing report - -Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy report artifacts from execution env to local drive: - -1. 
From local terminal (Git bash terminal for Windows) run command: - ``` bash - export EXEC_ENV_PUBLIC_IP=execution_environment_ec2_instance_public_ip - scp -r -i path_to_exec_env_pem ubuntu@$EXEC_ENV_PUBLIC_IP:/home/ubuntu/dc-app-performance-toolkit/app/results/reports ./reports - ``` -1. Once completed, in the `./reports` folder, you will be able to review action timings on Jira Service Management Data Center with different numbers of nodes. If you see a significant variation in any action timings between configurations, we recommend taking a look into the app implementation to understand the root cause of this delta. +1. In the `./app/results/reports/YY-MM-DD-hh-mm-ss` folder, view the `.csv` file (with consolidated scenario results), the `.png` chart file and performance scenario summary report. + If you see an impact (>20%) on any action timing, we recommend taking a look into the app implementation to understand the root cause of this delta. {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. +In case of any problems with uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -686,9 +697,8 @@ Do not forget to attach performance testing results to your ECOHELP ticket. 2. Attach two reports folders to your ECOHELP ticket. ## Support -For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. - If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). +For failed cluster uninstall use [Force terminate command](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#force-terminate-cluster). -In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +In case of any technical questions or issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. 
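
The regression and scalability reports described in the guide above flag any action whose timing degrades by more than 20% between runs. Below is a minimal sketch of that comparison, assuming the consolidated `.csv` produced by `csv_chart_generator.py` has one row per action with `without app` and `with app` timing columns; the real column names and file layout may differ, and the path used here is hypothetical. It relies on `pandas`, which is already part of the toolkit's requirements.

``` python
# Illustrative sketch only: column names ("Action", "without app", "with app") and the
# report path are assumptions, not the exact schema written by csv_chart_generator.py.
import pandas as pd

THRESHOLD = 0.20  # the guide treats a >20% slowdown on any action as worth investigating


def flag_regressions(csv_path: str) -> pd.DataFrame:
    df = pd.read_csv(csv_path)
    # relative slowdown of the "with app" run against the "without app" baseline
    df["delta"] = (df["with app"] - df["without app"]) / df["without app"]
    return df[df["delta"] > THRESHOLD].sort_values("delta", ascending=False)


if __name__ == "__main__":
    # hypothetical location; generated reports land under app/results/reports/YY-MM-DD-hh-mm-ss/
    print(flag_regressions("app/results/reports/2024-01-08-12-00-00/performance_profile.csv"))
```

The same per-action delta check applies to the scalability report, with the 1, 2 and 4 node runs compared column by column instead of the with/without app pair.
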
From 722758f93cda9e22070174cb2ade57495932cce6 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Mon, 8 Jan 2024 13:00:58 +0100 Subject: [PATCH 122/152] DCA-2164 update dependencies --- app/bamboo.yml | 2 +- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- requirements.txt | 22 +++++++++++----------- 6 files changed, 16 insertions(+), 16 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 22864dae5..4046cbd2e 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -51,7 +51,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.13.0 + - selenium==4.16.0 execution: - scenario: jmeter executor: jmeter diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 215271e10..a32a43492 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -37,7 +37,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.13.0 + - selenium==4.16.0 execution: - scenario: ${load_executor} concurrency: ${concurrency} diff --git a/app/confluence.yml b/app/confluence.yml index 8dc6cba0d..59c7e9d42 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -52,7 +52,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.13.0 + - selenium==4.16.0 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/jira.yml b/app/jira.yml index 08dd8bc2a..237e88522 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -52,7 +52,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.13.0 + - selenium==4.16.0 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/jsm.yml b/app/jsm.yml index 4eaea5eb2..e768bd49a 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -68,7 +68,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.13.0 + - selenium==4.16.0 execution: - scenario: ${load_executor}_agents executor: ${load_executor} diff --git a/requirements.txt b/requirements.txt index e77195061..5996d2880 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,13 +1,13 @@ -matplotlib==3.8.0 -pandas==2.1.1 -numpy==1.26.0 -scipy==1.11.3 -pytest==7.4.2 -locust==2.16.1 -selenium==4.13.0 -filelock==3.12.4 -packaging==23.1 +matplotlib==3.8.2 +pandas==2.1.4 +numpy==1.26.3 +scipy==1.11.4 +pytest==7.4.4 +locust==2.20.1 +selenium==4.16.0 +filelock==3.13.1 +packaging==23.2 prettytable==3.9.0 -bzt==1.16.26 -boto3==1.28.56 +bzt==1.16.27 +boto3==1.34.14 retry==0.9.2 From ceb82551e45e724a3ba2eb92dac72ae4363e460e Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 12:04:25 +0000 Subject: [PATCH 123/152] Update dependency org.codehaus.mojo:exec-maven-plugin to v3.1.1 --- app/util/bamboo/bamboo_dataset_generator/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml index 46b9342c4..2f71984bc 100644 --- a/app/util/bamboo/bamboo_dataset_generator/pom.xml +++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml @@ -18,7 +18,7 @@ org.codehaus.mojo exec-maven-plugin - 3.1.0 + 3.1.1 From 5f612a7a7dfe5daf210367a4acdfd12a1532c2f4 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Mon, 8 Jan 2024 15:43:06 +0200 Subject: [PATCH 124/152] reporting/add-nodes-app-specific-to-results-name --- app/util/analytics/analytics.py 
| 4 +++- app/util/analytics/analytics_utils.py | 9 +++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/app/util/analytics/analytics.py b/app/util/analytics/analytics.py index a15fcc452..3a54b0a63 100644 --- a/app/util/analytics/analytics.py +++ b/app/util/analytics/analytics.py @@ -5,7 +5,8 @@ from util.data_preparation.prepare_data_common import __warnings_filter from util.analytics.analytics_utils import get_os, convert_to_sec, get_timestamp, get_date, is_all_tests_successful, \ - uniq_user_id, generate_report_summary, get_first_elem, generate_test_actions_by_type, get_crowd_sync_test_results + uniq_user_id, generate_report_summary, get_first_elem, generate_test_actions_by_type, get_crowd_sync_test_results, \ + rename_results_dir from util.analytics.application_info import ApplicationSelector, BaseApplication, JIRA, CONFLUENCE, BITBUCKET, JSM, \ CROWD, BAMBOO, INSIGHT from util.analytics.bamboo_post_run_collector import BambooPostRunCollector @@ -257,6 +258,7 @@ def main(): application = ApplicationSelector(application_name).application collector = AnalyticsCollector(application) generate_report_summary(collector) + rename_results_dir(collector) if collector.is_analytics_enabled(): send_analytics(collector) diff --git a/app/util/analytics/analytics_utils.py b/app/util/analytics/analytics_utils.py index 842d98d5d..ab6c83cf7 100644 --- a/app/util/analytics/analytics_utils.py +++ b/app/util/analytics/analytics_utils.py @@ -154,6 +154,15 @@ def generate_report_summary(collector): write_to_file(pretty_report, summary_report_file) +def rename_results_dir(collector): + if bool(collector.app_specific_rates): + os.rename(f'{collector.log_dir}', f'{collector.log_dir}_{collector.nodes_count}-node_app-specific') + print("Directory with app-specific actions was successfully renamed.") + else: + os.rename(f'{collector.log_dir}', f'{collector.log_dir}_{collector.nodes_count}-node') + print("Directory was successfully renamed.") + + def get_os(): """ Get the operating system on which the tests were run. 
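
The `rename_results_dir` helper added in the patch above encodes the run topology into the results folder name: the node count is always appended, and an `_app-specific` suffix is added when app-specific action rates were collected. A minimal standalone sketch of that naming convention, for illustration only (not the toolkit's actual code):

``` python
# Sketch of the naming convention introduced by rename_results_dir in analytics_utils.py:
# append the node count and, when app-specific action rates exist, an "_app-specific" suffix.
def new_results_dir_name(log_dir: str, nodes_count: int, has_app_specific_actions: bool) -> str:
    suffix = "_app-specific" if has_app_specific_actions else ""
    return f"{log_dir}_{nodes_count}-node{suffix}"


# Expected outcomes, assuming a typical timestamped results folder:
assert new_results_dir_name("results/jsm/2024-01-08-15-43-06", 2, True) == \
    "results/jsm/2024-01-08-15-43-06_2-node_app-specific"
assert new_results_dir_name("results/jsm/2024-01-08-15-43-06", 1, False) == \
    "results/jsm/2024-01-08-15-43-06_1-node"
```
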
From 9fbd8d3a22c95786c479472f3bbdbbd9c3469349 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Mon, 8 Jan 2024 15:52:07 +0200 Subject: [PATCH 125/152] update variable --- app/util/analytics/analytics_utils.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/app/util/analytics/analytics_utils.py b/app/util/analytics/analytics_utils.py index ab6c83cf7..e28fe8ad9 100644 --- a/app/util/analytics/analytics_utils.py +++ b/app/util/analytics/analytics_utils.py @@ -155,12 +155,13 @@ def generate_report_summary(collector): def rename_results_dir(collector): + new_results_name = f'{collector.log_dir}_{collector.nodes_count}-node' if bool(collector.app_specific_rates): - os.rename(f'{collector.log_dir}', f'{collector.log_dir}_{collector.nodes_count}-node_app-specific') - print("Directory with app-specific actions was successfully renamed.") + os.rename(f'{collector.log_dir}', f'{new_results_name}_app-specific') + print("Results directory with app-specific actions was successfully renamed.") else: - os.rename(f'{collector.log_dir}', f'{collector.log_dir}_{collector.nodes_count}-node') - print("Directory was successfully renamed.") + os.rename(f'{collector.log_dir}', new_results_name) + print("Results directory was successfully renamed.") def get_os(): From 7d87547c71e5d1a58286dbf98e1ce36cb1860e27 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Mon, 8 Jan 2024 16:25:01 +0200 Subject: [PATCH 126/152] upd renaming function --- app/util/analytics/analytics_utils.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/app/util/analytics/analytics_utils.py b/app/util/analytics/analytics_utils.py index e28fe8ad9..5bb54cd0f 100644 --- a/app/util/analytics/analytics_utils.py +++ b/app/util/analytics/analytics_utils.py @@ -155,13 +155,12 @@ def generate_report_summary(collector): def rename_results_dir(collector): - new_results_name = f'{collector.log_dir}_{collector.nodes_count}-node' + app_specific = "" if bool(collector.app_specific_rates): - os.rename(f'{collector.log_dir}', f'{new_results_name}_app-specific') - print("Results directory with app-specific actions was successfully renamed.") - else: - os.rename(f'{collector.log_dir}', new_results_name) - print("Results directory was successfully renamed.") + app_specific = "_app-specific" + new_results_dir_name = f'{collector.log_dir}_{collector.nodes_count}-node{app_specific}' + os.rename(f'{collector.log_dir}', f'{new_results_dir_name}') + print(f'Results directory was renamed. 
Old name: {collector.log_dir}, new name: {new_results_dir_name}') def get_os(): From 2d32bb45d08878bf165b267687d9bf6a94dbb77f Mon Sep 17 00:00:00 2001 From: bot-dcapt <56587558+bot-dcapt@users.noreply.github.com> Date: Mon, 8 Jan 2024 15:32:42 +0100 Subject: [PATCH 127/152] Python commit, changes to the /dcapt/dc-app-performance-toolkit/app/util/k8s/dcapt-snapshots.json --- app/util/k8s/dcapt-snapshots.json | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 0e2008e11..ae935e162 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -472,6 +472,36 @@ ] } ] + }, + { + "version": "5.2.2", + "data": [ + { + "type": "ebs", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-01d7f772d9d5f1ea3", + "us-west-2": "snap-020d067046649e955", + "us-west-1": "snap-099642ac75a58729b", + "us-east-1": "snap-09a3ae3234cb7dbfe" + } + ] + }, + { + "type": "rds", + "size": "large", + "snapshots": [ + { + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-2-2", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-crowd-5-2-2", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-crowd-5-2-2", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-2-2" + } + ] + } + ], + "build_number": "1943" } ] } From 5911bb26caf3e72318deafe9da904a0c68891fe7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 14:39:33 +0000 Subject: [PATCH 128/152] Update log4j2 monorepo to v2.22.1 --- app/util/bamboo/bamboo_dataset_generator/pom.xml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml index 46b9342c4..e3a1f35d7 100644 --- a/app/util/bamboo/bamboo_dataset_generator/pom.xml +++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml @@ -88,17 +88,17 @@ org.apache.logging.log4j log4j-api - 2.20.0 + 2.22.1 org.apache.logging.log4j log4j-core - 2.20.0 + 2.22.1 org.apache.logging.log4j log4j-slf4j-impl - 2.20.0 + 2.22.1 From fd5a54c83750893b809cb2d556d107376c8765b1 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 14:39:38 +0000 Subject: [PATCH 129/152] Update dependency com.google.guava:guava to v33 --- app/util/bamboo/bamboo_dataset_generator/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml index 46b9342c4..aabda8b58 100644 --- a/app/util/bamboo/bamboo_dataset_generator/pom.xml +++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml @@ -62,7 +62,7 @@ com.google.guava guava - 32.0.0-jre + 33.0.0-jre com.fasterxml.jackson.core From 97c58824345f24a32bdbf601179d8e46ccc78d92 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Mon, 8 Jan 2024 16:14:19 +0100 Subject: [PATCH 130/152] Update java in docker file --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a260b9810..8ee30b5be 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,7 +14,7 @@ ENV CHROME_LATEST_URL="https://dl.google.com/linux/direct/google-chrome-stable_c ENV CHROME_VERSION_URL="https://dl.google.com/linux/chrome/deb/pool/main/g/google-chrome-stable/google-chrome-stable_${CHROME_VERSION}_amd64.deb" RUN apt-get -y update \ - && 
$APT_INSTALL vim git openssh-server wget openjdk-11-jdk \ + && $APT_INSTALL vim git openssh-server wget openjdk-17-jdk \ && python -m pip install --upgrade pip \ && apt-get clean From 577a5e9ae37c3cae4512f2223b69987dc4551a5b Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Tue, 9 Jan 2024 11:24:25 +0200 Subject: [PATCH 131/152] remove 5.1.4 --- app/util/k8s/dcapt-snapshots.json | 30 ------------------------------ 1 file changed, 30 deletions(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index ae935e162..eb238c335 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -443,36 +443,6 @@ }, "crowd": { "versions": [ - { - "version": "5.1.4", - "build_number": "1893", - "data": [ - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-1-4", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-4", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-crowd-5-1-4", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-crowd-5-1-4" - } - ] - }, - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-1": "snap-08c6dc6abc75fe2b4", - "us-east-2": "snap-0a8e229690be9ae30", - "us-west-1": "snap-0206b022c6880fe67", - "us-west-2": "snap-07a9b523b316aeb32" - } - ] - } - ] - }, { "version": "5.2.2", "data": [ From 1a1eb2db76f70ea1b7a6c10ba91ce102f1c5fdfb Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Tue, 9 Jan 2024 11:51:38 +0100 Subject: [PATCH 132/152] Remove -t flag --- app/util/k8s/README.MD | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index 270b8d600..cb4e95b33 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -47,7 +47,7 @@ docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./uninstall.sh -t -c conf.tfvars +-it atlassianlabs/terraform ./uninstall.sh -c conf.tfvars ``` # Collect detailed k8s logs From abb08a82594e9a167dde3ff452016ab68cfd92a2 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Wed, 10 Jan 2024 15:37:08 +0100 Subject: [PATCH 133/152] Remove redundant docs comments --- app/util/k8s/README.MD | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index cb4e95b33..b0b76b41e 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -15,6 +15,7 @@ docker run --pull=always --env-file aws_envs \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` ## Terminate development environment +Set AWS credential in [aws_envs](./aws_envs) file and run command: ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ @@ -39,9 +40,7 @@ docker run --pull=always --env-file aws_envs \ -it atlassianlabs/terraform ./install.sh -c conf.tfvars ``` ## Terminate enterprise-scale environment -Option `-t` deletes Terraform state files for all installed environment in the same region using the same AWS account. - -If state files are needed, e.g. there are other running clusters for other product, do not use `-t` flag in below command. 
+Set AWS credential in [aws_envs](./aws_envs) file and run command: ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ From 271a2a2a6df1a713acd621fb802cea4070f81db9 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Wed, 10 Jan 2024 16:27:52 +0100 Subject: [PATCH 134/152] Add TF tag 2.7.1 --- app/util/k8s/README.MD | 18 ++++++++++-------- ...ps-performance-toolkit-user-guide-bamboo.md | 8 ++++---- ...performance-toolkit-user-guide-bitbucket.md | 16 ++++++++-------- ...erformance-toolkit-user-guide-confluence.md | 16 ++++++++-------- ...pps-performance-toolkit-user-guide-crowd.md | 14 +++++++------- ...apps-performance-toolkit-user-guide-jira.md | 16 ++++++++-------- ...-apps-performance-toolkit-user-guide-jsm.md | 16 ++++++++-------- 7 files changed, 53 insertions(+), 51 deletions(-) diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index b0b76b41e..b721e0a98 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -12,16 +12,17 @@ docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./install.sh -c conf.tfvars +-it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` ## Terminate development environment +Note: install and uninstall commands have to use the same `atlassianlabs/terraform:TAG` image tag. Set AWS credential in [aws_envs](./aws_envs) file and run command: ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./uninstall.sh -c conf.tfvars +-it atlassianlabs/terraform:2.7.1 ./uninstall.sh -c conf.tfvars ``` # Enterprise-scale environment @@ -37,16 +38,17 @@ docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./install.sh -c conf.tfvars +-it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` ## Terminate enterprise-scale environment +Note: install and uninstall commands have to use the same `atlassianlabs/terraform:TAG` image tag. 
Set AWS credential in [aws_envs](./aws_envs) file and run command: ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./uninstall.sh -c conf.tfvars +-it atlassianlabs/terraform:2.7.1 ./uninstall.sh -c conf.tfvars ``` # Collect detailed k8s logs @@ -60,7 +62,7 @@ export REGION=us-east-2 docker run --pull=always --env-file aws_envs \ -v "/$PWD/k8s_logs:/data-center-terraform/k8s_logs" \ -v "/$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs +-it atlassianlabs/terraform:2.7.1 ./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs ``` # Force terminate cluster @@ -79,18 +81,18 @@ atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-clus ``` # Connect to product pod -Set your environment name +Set your environment name: ``` bash export ENVIRONMENT_NAME=your_environment_name export REGION=us-east-2 ``` -SSH to `atlassianlabs/terraform` container +SSH to terraform container: ``` bash docker run --pull=always --env-file aws_envs \ -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -e REGION=$REGION \ --it atlassianlabs/terraform bash +-it atlassianlabs/terraform:2.7.1 bash ``` Connect to the product pod. Example below for jira pod with number 0. For other product or pod number change `PRODUCT_POD` accordingly. diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index c7fb24fd7..449aa88bc 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -71,7 +71,7 @@ specifically for performance testing during the DC app review process. -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bamboo`. 8. Wait for all remote agents to be started and connected. It can take up to 10 minutes. Agents can be checked in `Settings` > `Agents`. @@ -272,7 +272,7 @@ To receive performance baseline results **without** an app installed and **witho -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh bamboo.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bamboo.yml ``` 1. 
View the following main results of the run in the `dc-app-performance-toolkit/app/results/bamboo/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary @@ -303,7 +303,7 @@ To receive performance results with an app installed (still use master branch): -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh bamboo.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bamboo.yml ``` {{% note %}} @@ -337,7 +337,7 @@ To receive results for Bamboo DC **with app** and **with app-specific actions**: -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh bamboo.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bamboo.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 1285ca13b..c07941920 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -84,7 +84,7 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. @@ -248,7 +248,7 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. @@ -323,7 +323,7 @@ To receive performance baseline results **without** an app installed: -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml ``` 1. 
View the following main results of the run in the `dc-app-performance-toolkit/app/results/bitbucket/YY-MM-DD-hh-mm-ss` folder: @@ -354,7 +354,7 @@ To receive performance results with an app installed (still use master branch): -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml ``` {{% note %}} @@ -403,7 +403,7 @@ To receive scalability benchmark results for one-node Bitbucket DC **with** app- -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml ``` {{% note %}} @@ -428,7 +428,7 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash @@ -441,7 +441,7 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml ``` {{% note %}} @@ -470,7 +470,7 @@ To receive scalability benchmark results for four-node Bitbucket DC with app-spe -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 1d624c7d9..745a091cd 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -83,7 +83,7 @@ Below process describes how to install low-tier Confluence DC with "small" datas -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. 
@@ -328,7 +328,7 @@ Below process describes how to install enterprise-scale Confluence DC with "larg -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. @@ -403,7 +403,7 @@ To receive performance baseline results **without** an app installed: -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/confluence/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary @@ -433,7 +433,7 @@ To receive performance results with an app installed (still use master branch): -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml ``` {{% note %}} @@ -493,7 +493,7 @@ To receive scalability benchmark results for one-node Confluence DC **with** app -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml ``` {{% note %}} @@ -518,7 +518,7 @@ To receive scalability benchmark results for two-node Confluence DC **with** app -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 1. 
Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash @@ -531,7 +531,7 @@ To receive scalability benchmark results for two-node Confluence DC **with** app -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml ``` {{% note %}} @@ -560,7 +560,7 @@ To receive scalability benchmark results for four-node Confluence DC with app-sp -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index d7f86dad0..10bee3047 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -66,7 +66,7 @@ specifically for performance testing during the DC app review process. -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`. @@ -177,7 +177,7 @@ To receive performance baseline results **without** an app installed and **witho -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml ``` 1. 
View the following main results of the run in the `dc-app-performance-toolkit/app/results/crowd/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary @@ -206,7 +206,7 @@ To receive performance results with an app installed (still use master branch): -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml ``` {{% note %}} @@ -264,7 +264,7 @@ To receive scalability benchmark results for one-node Crowd DC **with** app-spec -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml ``` {{% note %}} @@ -288,7 +288,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 1. Edit **run parameters** for 2 nodes run. To do it, left uncommented only 2 nodes scenario parameters in `crowd.yml` file. ``` @@ -315,7 +315,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml ``` {{% note %}} @@ -358,7 +358,7 @@ To receive scalability benchmark results for four-node Crowd DC with app-specifi -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh crowd.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index a3f4a30f0..b7b963767 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -95,7 +95,7 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. 
@@ -356,7 +356,7 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -431,7 +431,7 @@ To receive performance baseline results **without** an app installed: -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml ``` 1. View the results files of the run in the local `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary @@ -483,7 +483,7 @@ Re-index information window is displayed on the **Indexing page**. If the window -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml ``` {{% note %}} @@ -544,7 +544,7 @@ To receive scalability benchmark results for one-node Jira DC **with** app-speci -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml ``` {{% note %}} @@ -569,7 +569,7 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 1. 
Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash @@ -582,7 +582,7 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml ``` {{% note %}} @@ -611,7 +611,7 @@ To receive scalability benchmark results for four-node Jira DC with app-specific -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jira.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index 633aaf3b8..a5b098ba4 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -97,7 +97,7 @@ Below process describes how to install low-tier Jira Service Management DC with -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -392,7 +392,7 @@ Below process describes how to install enterprise-scale Jira Service Management -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. @@ -472,7 +472,7 @@ To receive performance baseline results **without** an app installed: -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder: @@ -527,7 +527,7 @@ Re-index information window is displayed on the **Indexing page**. 
If the window -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml ``` {{% note %}} @@ -586,7 +586,7 @@ To receive scalability benchmark results for one-node Jira Service Management DC -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml ``` {{% note %}} @@ -611,7 +611,7 @@ To receive scalability benchmark results for two-node Jira Service Management DC -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars ``` 1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash @@ -624,7 +624,7 @@ To receive scalability benchmark results for two-node Jira Service Management DC -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml ``` {{% note %}} @@ -653,7 +653,7 @@ To receive scalability benchmark results for four-node Jira Service Management D -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform bash bzt_on_pod.sh jsm.yml + -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml ``` {{% note %}} From eccb7c1e8f14a3f0acdf008d13bd7278b8b635cd Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Wed, 10 Jan 2024 16:53:26 +0100 Subject: [PATCH 135/152] Revert "Update java in docker file" --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 8ee30b5be..a260b9810 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,7 +14,7 @@ ENV CHROME_LATEST_URL="https://dl.google.com/linux/direct/google-chrome-stable_c ENV CHROME_VERSION_URL="https://dl.google.com/linux/chrome/deb/pool/main/g/google-chrome-stable/google-chrome-stable_${CHROME_VERSION}_amd64.deb" RUN apt-get -y update \ - && $APT_INSTALL vim git openssh-server wget openjdk-17-jdk \ + && $APT_INSTALL vim git openssh-server wget openjdk-11-jdk \ && python -m pip install --upgrade pip \ && apt-get clean From cc6e5a218eab6d597ba52d036318f556fc3cb4a4 Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Thu, 11 Jan 2024 10:24:31 +0100 Subject: [PATCH 136/152] Revert "Reporting/add specific and node count results" --- app/util/analytics/analytics.py | 4 +--- app/util/analytics/analytics_utils.py | 9 --------- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/app/util/analytics/analytics.py b/app/util/analytics/analytics.py index 3a54b0a63..a15fcc452 100644 --- a/app/util/analytics/analytics.py +++ b/app/util/analytics/analytics.py @@ -5,8 +5,7 @@ from util.data_preparation.prepare_data_common import __warnings_filter from 
util.analytics.analytics_utils import get_os, convert_to_sec, get_timestamp, get_date, is_all_tests_successful, \ - uniq_user_id, generate_report_summary, get_first_elem, generate_test_actions_by_type, get_crowd_sync_test_results, \ - rename_results_dir + uniq_user_id, generate_report_summary, get_first_elem, generate_test_actions_by_type, get_crowd_sync_test_results from util.analytics.application_info import ApplicationSelector, BaseApplication, JIRA, CONFLUENCE, BITBUCKET, JSM, \ CROWD, BAMBOO, INSIGHT from util.analytics.bamboo_post_run_collector import BambooPostRunCollector @@ -258,7 +257,6 @@ def main(): application = ApplicationSelector(application_name).application collector = AnalyticsCollector(application) generate_report_summary(collector) - rename_results_dir(collector) if collector.is_analytics_enabled(): send_analytics(collector) diff --git a/app/util/analytics/analytics_utils.py b/app/util/analytics/analytics_utils.py index 5bb54cd0f..842d98d5d 100644 --- a/app/util/analytics/analytics_utils.py +++ b/app/util/analytics/analytics_utils.py @@ -154,15 +154,6 @@ def generate_report_summary(collector): write_to_file(pretty_report, summary_report_file) -def rename_results_dir(collector): - app_specific = "" - if bool(collector.app_specific_rates): - app_specific = "_app-specific" - new_results_dir_name = f'{collector.log_dir}_{collector.nodes_count}-node{app_specific}' - os.rename(f'{collector.log_dir}', f'{new_results_dir_name}') - print(f'Results directory was renamed. Old name: {collector.log_dir}, new name: {new_results_dir_name}') - - def get_os(): """ Get the operating system on which the tests were run. From a1540026d7224ed8b58e25617e21fb9cde2c1e78 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 11 Jan 2024 11:37:32 +0200 Subject: [PATCH 137/152] added tfstate to termination script --- app/util/k8s/terminate_cluster.py | 58 +++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index ce6160a67..1d807fa86 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -26,6 +26,13 @@ def is_float(element): return False +def retrieve_environment_name(cluster_name): + if cluster_name.endswith('-cluster'): + cluster_name = cluster_name[:-(len('-cluster'))] + environment_name = cluster_name.replace('atlas-', '') + return environment_name + + def wait_for_node_group_delete(eks_client, cluster_name, node_group): timeout = 900 # 15 min attempt = 0 @@ -883,6 +890,55 @@ def delete_unused_volumes(): logging.warning(f"Volume {volume.id} does not have 'persist_days' tag " f"| Name tag {name}: skipping") +def delete_s3_bucket_tf_state(cluster_name): + environment_name = retrieve_environment_name(cluster_name=cluster_name) + s3_client = boto3.client('s3') + bucket_name_template = f'atl-dc-{environment_name}' + response = s3_client.list_buckets() + matching_buckets = [bucket['Name'] for bucket in response['Buckets'] if bucket_name_template in bucket['Name']] + if not matching_buckets: + logging.info(f"Could not find s3 bucket with name contains {bucket_name_template}") + return + for bucket in matching_buckets: + objects_response = s3_client.list_objects_v2(Bucket=bucket) + if 'Contents' in objects_response: + objects = objects_response['Contents'] + for obj in objects: + s3_client.delete_object(Bucket=bucket, Key=obj['Key']) + logging.info(f"Object '{obj['Key']}' deleted successfully from bucket {bucket}.") + versions = 
s3_client.list_object_versions(Bucket=bucket).get('Versions', []) + delete_markers = s3_client.list_object_versions(Bucket=bucket).get('DeleteMarkers', []) + if versions: + for version in versions: + s3_client.delete_object(Bucket=bucket, Key=version['Key'], VersionId=version['VersionId']) + logging.info(f"S3 object version '{version}' deleted successfully from bucket {bucket}.") + if delete_markers: + for delete_marker in delete_markers: + s3_client.delete_object(Bucket=bucket, Key=delete_marker['Key'], VersionId=delete_marker['VersionId']) + logging.info(f"S3 delete marker '{delete_marker['Key']}' deleted successfully from bucket {bucket}.") + try: + s3_client.delete_bucket(Bucket=bucket) + logging.info(f"S3 bucket '{bucket}' was successfully deleted.") + except Exception as e: + logging.warning(f"Could not delete s3 bucket '{bucket}': {e}") + + +def delete_dynamo_bucket_tf_state(cluster_name, aws_region): + environment_name = retrieve_environment_name(cluster_name=cluster_name) + dynamodb_client = boto3.client('dynamodb', region_name=aws_region) + dynamodb_name_template = f'atl_dc_{environment_name}'.replace('-','_') + response = dynamodb_client.list_tables() + matching_tables = [table for table in response['TableNames'] if dynamodb_name_template in table] + if not matching_tables: + logging.info(f"Could not find dynamo db with name contains {dynamodb_name_template}") + return + for table in matching_tables: + try: + dynamodb_client.delete_table(TableName=table) + logging.info(f"Dynamo db '{table}' was successfully deleted.") + except Exception as e: + logging.warning(f"Could not delete dynamo db '{table}': {e}") + def main(): parser = ArgumentParser() @@ -908,6 +964,8 @@ def main(): delete_open_identities_for_cluster(open_identities) remove_cluster_specific_roles_and_policies(cluster_name=args.cluster_name, aws_region=args.aws_region) delete_ebs_volumes_by_id(aws_region=args.aws_region, volumes=volumes) + delete_s3_bucket_tf_state(cluster_name=args.cluster_name) + delete_dynamo_bucket_tf_state(cluster_name=args.cluster_name, aws_region=args.aws_region) return logging.info("--cluster_name parameter was not specified.") From 7982da38e706ab5e59784afb9611c8400336a272 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 11 Jan 2024 11:45:58 +0200 Subject: [PATCH 138/152] added tfstate to termination script --- app/util/k8s/terminate_cluster.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 1d807fa86..6b17ac833 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -890,6 +890,7 @@ def delete_unused_volumes(): logging.warning(f"Volume {volume.id} does not have 'persist_days' tag " f"| Name tag {name}: skipping") + def delete_s3_bucket_tf_state(cluster_name): environment_name = retrieve_environment_name(cluster_name=cluster_name) s3_client = boto3.client('s3') @@ -926,7 +927,7 @@ def delete_s3_bucket_tf_state(cluster_name): def delete_dynamo_bucket_tf_state(cluster_name, aws_region): environment_name = retrieve_environment_name(cluster_name=cluster_name) dynamodb_client = boto3.client('dynamodb', region_name=aws_region) - dynamodb_name_template = f'atl_dc_{environment_name}'.replace('-','_') + dynamodb_name_template = f'atl_dc_{environment_name}'.replace('-', '_') response = dynamodb_client.list_tables() matching_tables = [table for table in response['TableNames'] if dynamodb_name_template in table] if not matching_tables: From 
5e9dd9523f87dc2583c3a00dc44be8fab7bcd9d6 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 11 Jan 2024 12:16:24 +0200 Subject: [PATCH 139/152] remove us-west regions --- app/util/k8s/dcapt-snapshots.json | 60 ------------------------------- 1 file changed, 60 deletions(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index eb238c335..84b5404f3 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -63,8 +63,6 @@ "snapshots": [ { "us-east-2": "snap-0e413a90c64812130", - "us-west-2": "snap-0149ccaaeb547726a", - "us-west-1": "snap-02c808bcecdac15b3", "us-east-1": "snap-0dedc16a22652e0f1" } ] @@ -75,8 +73,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-12-1", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-12-1", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-12-1", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-12-1" } ] @@ -87,8 +83,6 @@ "snapshots": [ { "us-east-2": "snap-0a7b4d27c09013274", - "us-west-2": "snap-0cd172e21b743dae6", - "us-west-1": "snap-0d6437e1830772993", "us-east-1": "snap-07d85f52da6a564ed" } ] @@ -99,8 +93,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-12-1", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-12-1", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-12-1", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-12-1" } ] @@ -120,8 +112,6 @@ "snapshots": [ { "us-east-2": "snap-02757b69de7aeb3f8", - "us-west-2": "snap-00886aef7ae2c209b", - "us-west-1": "snap-0a120110a512b97fb", "us-east-1": "snap-0be0df5470e3a312d" } ] @@ -132,8 +122,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-14", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-4-14", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-4-14", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-14" } ] @@ -144,8 +132,6 @@ "snapshots": [ { "us-east-2": "snap-007de06d38fcd95c6", - "us-west-2": "snap-0edcc47a82eccdbd5", - "us-west-1": "snap-0cb31741d2aa37fde", "us-east-1": "snap-075e397f427e6d6c8" } ] @@ -156,8 +142,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-14", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-4-14", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-4-14", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-4-14" } ] @@ -173,8 +157,6 @@ "snapshots": [ { "us-east-2": "snap-011d04a19c6b93529", - "us-west-2": "snap-0beb90b0d0f478697", - "us-west-1": "snap-0ff5fc36c9666dfbe", "us-east-1": "snap-0edfff503a2605803" } ] @@ -185,8 +167,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-12-1", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-12-1", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-12-1", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-12-1" } ] @@ -197,8 +177,6 @@ "snapshots": [ { "us-east-2": "snap-053193245de30778c", - "us-west-2": "snap-090ac1a5a1738bcca", - "us-west-1": "snap-044bbf0d8785d88a6", "us-east-1": "snap-0c18a374ecc344221" } ] @@ -209,8 +187,6 @@ "snapshots": [ { "us-east-2": 
"arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-12-1", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-12-1", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-12-1", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-12-1" } ] @@ -230,8 +206,6 @@ "snapshots": [ { "us-east-2": "snap-021574360a781464f", - "us-west-2": "snap-0684fba1bfe25da2e", - "us-west-1": "snap-0cfc6520219671dd1", "us-east-1": "snap-00d1f3a18d176ceca" } ] @@ -242,8 +216,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-8-5-4", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-8-5-4", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-8-5-4", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-8-5-4" } ] @@ -254,8 +226,6 @@ "snapshots": [ { "us-east-2": "snap-09802dd4106f2686a", - "us-west-2": "snap-0c063b8fc243497b8", - "us-west-1": "snap-05513a2b127a63d6c", "us-east-1": "snap-08d42b48214eaf3bf" } ] @@ -266,8 +236,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-8-5-4", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-8-5-4", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-8-5-4", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-8-5-4" } ] @@ -284,8 +252,6 @@ "snapshots": [ { "us-east-2": "snap-0b8723cc5a8f8becc", - "us-west-2": "snap-05f02fd97ba755034", - "us-west-1": "snap-065efd48cb9072b1d", "us-east-1": "snap-030f0bb7870b60c73" } ] @@ -296,8 +262,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-17", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-19-17", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-19-17", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-17" } ] @@ -308,8 +272,6 @@ "snapshots": [ { "us-east-2": "snap-062952d964320477f", - "us-west-2": "snap-0dec73af33bec28f5", - "us-west-1": "snap-07d8242990b92dd0c", "us-east-1": "snap-0720b5df2ed27b435" } ] @@ -320,8 +282,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-17", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-19-17", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-19-17", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-19-17" } ] @@ -342,8 +302,6 @@ "snapshots": [ { "us-east-2": "snap-04b1409ae2afa2d65", - "us-west-2": "snap-0d84a0253f8c204a6", - "us-west-1": "snap-0c4d6529a7fd04179", "us-east-1": "snap-02a3125029b85438b" } ] @@ -354,8 +312,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-8", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-8", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-8", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-8" } ] @@ -366,8 +322,6 @@ "snapshots": [ { "us-east-2": "snap-06d634d448d684fba", - "us-west-2": "snap-059f151fb3be40498", - "us-west-1": "snap-011826a57a02a31e2", "us-east-1": "snap-0759f03d54c2138cc" } ] @@ -378,8 +332,6 @@ "snapshots": [ { "us-east-2": 
"arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-8", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-8-9-8", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-8-9-8", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-8-9-8" } ] @@ -395,8 +347,6 @@ "snapshots": [ { "us-east-2": "snap-01e565f1a0c5d3f2c", - "us-west-2": "snap-0ed9ca01d9c27755c", - "us-west-1": "snap-05fcaa3d397e7e507", "us-east-1": "snap-046e472e93ae1ad2b" } ] @@ -407,8 +357,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-20", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-20", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-20", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-20" } ] @@ -419,8 +367,6 @@ "snapshots": [ { "us-east-2": "snap-0de936ce723f9582c", - "us-west-2": "snap-0380b8972fb9f088e", - "us-west-1": "snap-0178b8609cb5396ab", "us-east-1": "snap-02f3a73aef1b80ffe" } ] @@ -431,8 +377,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-20", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-7-21-20", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-7-21-20", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-21-20" } ] @@ -452,8 +396,6 @@ "snapshots": [ { "us-east-2": "snap-01d7f772d9d5f1ea3", - "us-west-2": "snap-020d067046649e955", - "us-west-1": "snap-099642ac75a58729b", "us-east-1": "snap-09a3ae3234cb7dbfe" } ] @@ -464,8 +406,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-2-2", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-crowd-5-2-2", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-crowd-5-2-2", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-2-2" } ] From 610b35f257ffee76eb80241080a95b8eead57b5a Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 11 Jan 2024 12:42:09 +0200 Subject: [PATCH 140/152] fix west regions --- app/util/k8s/dcapt-snapshots.json | 8 -------- 1 file changed, 8 deletions(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 84b5404f3..0b282324b 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -10,8 +10,6 @@ "snapshots": [ { "us-east-2": "snap-084abf5dfca234b9d", - "us-west-2": "snap-0dab6fba99c0995b9", - "us-west-1": "snap-0061ace6d46497f56", "us-east-1": "snap-0934c1aa5c62be5dc" } ] @@ -22,8 +20,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-14", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-4-14", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-4-14", "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-14" } ] @@ -34,8 +30,6 @@ "snapshots": [ { "us-east-2": "snap-00af725c87690569d", - "us-west-2": "snap-06e76435bf1cba625", - "us-west-1": "snap-098b6daa64c6f0a28", "us-east-1": "snap-04417460cb27d17cb" } ] @@ -46,8 +40,6 @@ "snapshots": [ { "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-14", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-4-14", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-4-14", 
"us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-4-14" } ] From a3446cc64e139ad1fa7c02787f3f1c3a77d7437e Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Thu, 11 Jan 2024 17:41:02 +0200 Subject: [PATCH 141/152] bamboo/fix-ds-generator-pom-file --- app/util/bamboo/bamboo_dataset_generator/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/bamboo/bamboo_dataset_generator/run.sh b/app/util/bamboo/bamboo_dataset_generator/run.sh index 1c48a8197..6667246c5 100755 --- a/app/util/bamboo/bamboo_dataset_generator/run.sh +++ b/app/util/bamboo/bamboo_dataset_generator/run.sh @@ -1,3 +1,3 @@ #!/bin/sh [ -z "$BAMBOO_TOKEN" ] && echo "BAMBOO_TOKEN is not set" && exit -mvn compile exec:java -Dexec.mainClass="bamboogenerator.Main" +mvn compile exec:java -Dexec.mainClass="bamboogenerator.Main" -Dexec.cleanupDaemonThreads=false From 1f57db1cbac9cb5171a7c5f0194ee46e31a3f6f4 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Thu, 11 Jan 2024 17:13:19 +0100 Subject: [PATCH 142/152] Increase exec env resources --- app/util/k8s/dcapt.tfvars | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 1d7c0a667..8fc03c955 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -87,8 +87,8 @@ max_cluster_capacity = 6 start_test_deployment = "true" test_deployment_cpu_request = "3" test_deployment_cpu_limit = "4" -test_deployment_mem_request = "6Gi" -test_deployment_mem_limit = "6Gi" +test_deployment_mem_request = "8Gi" +test_deployment_mem_limit = "8Gi" ################################################################################ # Jira/JSM Settings @@ -127,7 +127,7 @@ jira_installation_timeout = 25 # Jira/JSM instance resource configuration jira_cpu = "6" -jira_mem = "20Gi" +jira_mem = "16Gi" jira_min_heap = "12288m" jira_max_heap = "12288m" jira_reserved_code_cache = "2048m" @@ -181,7 +181,7 @@ confluence_installation_timeout = 30 # Confluence instance resource configuration confluence_cpu = "6" -confluence_mem = "20Gi" +confluence_mem = "16Gi" confluence_min_heap = "12288m" confluence_max_heap = "12288m" @@ -419,17 +419,17 @@ bamboo_max_heap = "4096m" # Bamboo Agent instance resource configuration bamboo_agent_cpu = "250m" -bamboo_agent_mem = "1000m" +bamboo_agent_mem = "700m" # Storage bamboo_local_home_size = "200Gi" bamboo_shared_home_size = "400Gi" # Bamboo NFS instance resource configuration -#bamboo_nfs_requests_cpu = "" -#bamboo_nfs_requests_memory = "" -#bamboo_nfs_limits_cpu = "" -#bamboo_nfs_limits_memory = "" +bamboo_nfs_requests_cpu = "1" +bamboo_nfs_requests_memory = "1Gi" +bamboo_nfs_limits_cpu = "2" +bamboo_nfs_limits_memory = "2Gi" # RDS instance configurable attributes. Note that the allowed value of allocated storage and iops may vary based on instance type. # You may want to adjust these values according to your needs. 
From d8dfb27ab545c4ed8171d1b5eee5e96272c407a4 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Thu, 11 Jan 2024 23:43:39 +0200 Subject: [PATCH 143/152] added internal termination --- app/util/k8s/terminate_cluster.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 6b17ac833..0e389bf18 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -29,8 +29,9 @@ def is_float(element): def retrieve_environment_name(cluster_name): if cluster_name.endswith('-cluster'): cluster_name = cluster_name[:-(len('-cluster'))] - environment_name = cluster_name.replace('atlas-', '') - return environment_name + if cluster_name.startswith('atlas-'): + cluster_name = cluster_name[len('atlas-'):] + return cluster_name def wait_for_node_group_delete(eks_client, cluster_name, node_group): @@ -978,6 +979,8 @@ def main(): vpc_name = f'{cluster_name.replace("-cluster", "-vpc")}' terminate_vpc(vpc_name=vpc_name) terminate_open_id_providers(cluster_name=cluster_name) + delete_s3_bucket_tf_state(cluster_name=cluster_name) + delete_dynamo_bucket_tf_state(cluster_name=cluster_name, aws_region=args.aws_region) vpcs = get_vpcs_to_terminate() for vpc_name in vpcs: logging.info(f"Delete all resources for vpc {vpc_name}.") From db18be423469a2b08529543e6004fc0886b6422f Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Fri, 12 Jan 2024 11:17:57 +0200 Subject: [PATCH 144/152] added catch response --- app/locustio/jira/http_actions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/locustio/jira/http_actions.py b/app/locustio/jira/http_actions.py index 9d588b241..a4de72341 100644 --- a/app/locustio/jira/http_actions.py +++ b/app/locustio/jira/http_actions.py @@ -843,7 +843,7 @@ def kanban_board(locust, board_id): # 1055 /rest/greenhopper/1.0/xboard/work/transitions.json locust.get(f'/rest/greenhopper/1.0/xboard/work/transitions.json?' f'projectId={project_id}' - f'&_={timestamp_int()}') + f'&_={timestamp_int()}', catch_response=True) # 1060 /rest/analytics/1.0/publish/bulk locust.post('/rest/analytics/1.0/publish/bulk', From 22d99b8facc89f2f8d0562bdd5e48e4bb33f8763 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Fri, 12 Jan 2024 15:57:24 +0100 Subject: [PATCH 145/152] Update tfvars versions for release 8.0.0 --- app/util/k8s/dcapt-small.tfvars | 6 +++--- app/util/k8s/dcapt.tfvars | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 87ae0c6dd..08f058722 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -87,10 +87,10 @@ jira_image_repository = "atlassian/jira-software" # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions # Jira version. -jira_version_tag = "9.4.10" +jira_version_tag = "9.12.1" # JSM version # ! REQUIRED for JSM ! -# jira_version_tag = "5.4.10" +# jira_version_tag = "5.12.1" # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large jira_dataset_size = "small" @@ -228,7 +228,7 @@ confluence_collaborative_editing_enabled = true ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "8.9.5" +bitbucket_version_tag = "8.9.8" # Dataset size. Used only when snapshots_json_file_path is defined. 
Defaults to large bitbucket_dataset_size = "small" diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 8fc03c955..12c83da8a 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -108,11 +108,11 @@ jira_image_repository = "atlassian/jira-software" # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions # Jira version -jira_version_tag = "9.4.10" +jira_version_tag = "9.12.1" # JSM version # ! REQUIRED for JSM ! -# jira_version_tag = "5.4.10" +# jira_version_tag = "5.12.1" # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large. jira_dataset_size = "large" @@ -231,7 +231,7 @@ confluence_collaborative_editing_enabled = true ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "8.9.5" +bitbucket_version_tag = "8.9.8" # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large bitbucket_dataset_size = "large" From f6d7ea9bfa27eb893ba4e57d513913f589ee1e53 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Sun, 14 Jan 2024 22:14:23 +0200 Subject: [PATCH 146/152] fix resolution jira request issue --- app/locustio/common_utils.py | 4 ++++ app/locustio/jira/requests_params.py | 3 ++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/app/locustio/common_utils.py b/app/locustio/common_utils.py index 5034d361b..93ca24099 100644 --- a/app/locustio/common_utils.py +++ b/app/locustio/common_utils.py @@ -125,6 +125,10 @@ def failure_check(self, response, action_name): if hasattr(response, 'error') or not response: if 'login' in action_name: self.login_failed = True + try: + logger.error(response.json()) + except json.decoder.JSONDecodeError: + logger.error(response.text()) events.request.fire(request_type="Action", name=f"locust_{action_name}", response_time=0, diff --git a/app/locustio/jira/requests_params.py b/app/locustio/jira/requests_params.py index dee8dc1a1..b1c20a6b8 100644 --- a/app/locustio/jira/requests_params.py +++ b/app/locustio/jira/requests_params.py @@ -83,6 +83,7 @@ def prepare_issue_body(issue_body_dict: dict, user): form_token = issue_body_dict['form_token'] issue_type = issue_body_dict['issue_type'] resolution_done = issue_body_dict['resolution_done'] + resolution_string = f'&resolution={resolution_done}' if resolution_done else '' fields_to_retain = issue_body_dict['fields_to_retain'] custom_fields_to_retain = issue_body_dict['custom_fields_to_retain'] @@ -91,7 +92,7 @@ def prepare_issue_body(issue_body_dict: dict, user): f"&description={description}&timetracking_originalestimate={timetracking_originalestimate}" \ f"&timetracking_remainingestimate={timetracking_remainingestimate}" \ f"&is_create_issue={is_create_issue}" \ - f"&hasWorkStarted={has_work_started}&resolution={resolution_done}" + f"&hasWorkStarted={has_work_started}{resolution_string}" fields_to_retain_body = '' custom_fields_to_retain_body = '' for field in fields_to_retain: From 1fd8c557573922162dd682500f294c03334a5ba5 Mon Sep 17 00:00:00 2001 From: Serhii Moroz Date: Mon, 15 Jan 2024 23:38:46 +0200 Subject: [PATCH 147/152] fix related comment --- app/locustio/common_utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/app/locustio/common_utils.py b/app/locustio/common_utils.py index 93ca24099..bb13802f2 100644 --- a/app/locustio/common_utils.py +++ b/app/locustio/common_utils.py @@ 
-125,10 +125,8 @@ def failure_check(self, response, action_name): if hasattr(response, 'error') or not response: if 'login' in action_name: self.login_failed = True - try: + if response.headers.get('Content-Type') == 'application/json': logger.error(response.json()) - except json.decoder.JSONDecodeError: - logger.error(response.text()) events.request.fire(request_type="Action", name=f"locust_{action_name}", response_time=0, From 26eabb3320606335464e398cee012dfd2355258e Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Tue, 16 Jan 2024 12:04:47 +0100 Subject: [PATCH 148/152] add .gitattributes file to set line endings unix style --- .gitattributes | 1 + 1 file changed, 1 insertion(+) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..390d21218 --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +*.sh text eol=lf \ No newline at end of file From f7bab31028551443e472a5722151334d6f001c81 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Tue, 16 Jan 2024 16:17:55 +0100 Subject: [PATCH 149/152] Update workdir syntax for windows git bash users --- app/util/k8s/README.MD | 5 +++-- docs/dc-apps-performance-toolkit-user-guide-bamboo.md | 4 ++-- ...dc-apps-performance-toolkit-user-guide-bitbucket.md | 10 ++++++---- ...c-apps-performance-toolkit-user-guide-confluence.md | 10 ++++++---- docs/dc-apps-performance-toolkit-user-guide-crowd.md | 10 ++++++---- docs/dc-apps-performance-toolkit-user-guide-jira.md | 8 ++++---- docs/dc-apps-performance-toolkit-user-guide-jsm.md | 10 ++++++---- 7 files changed, 33 insertions(+), 24 deletions(-) diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index b721e0a98..f612d687a 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -74,7 +74,7 @@ export REGION=us-east-2 ``` bash docker run --pull=always --env-file aws_envs \ ---workdir="/data-center-terraform" \ +--workdir="//data-center-terraform" \ --entrypoint="python" \ -v "/$PWD/terminate_cluster.py:/data-center-terraform/terminate_cluster.py" \ atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-cluster --aws_region $REGION @@ -99,4 +99,5 @@ Connect to the product pod. Example below for jira pod with number 0. For other ``` bash export PRODUCT_POD=jira-0 aws eks update-kubeconfig --name atlas-$ENVIRONMENT_NAME-cluster --region $REGION -kubectl exec -it $PRODUCT_POD -n atlassian -- bash \ No newline at end of file +kubectl exec -it $PRODUCT_POD -n atlassian -- bash +``` \ No newline at end of file diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index 449aa88bc..1cabadf25 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -353,11 +353,11 @@ To generate a performance regression report: - Under `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). - Under `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). - Under `runName: "with app and app-specific actions"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: +1. 
Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py bamboo_profile.yml ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index c07941920..504a17f93 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -369,10 +369,11 @@ To generate a performance regression report: 1. Edit the `./app/reports_generation/performance_profile.yml` file: - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: + ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` @@ -486,10 +487,11 @@ To generate a scalability report: - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: + ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py scale_profile.yml ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 745a091cd..1f34efd3a 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -448,10 +448,11 @@ To generate a performance regression report: 1. Edit the `./app/reports_generation/performance_profile.yml` file: - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash +1. 
Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: + ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` @@ -576,10 +577,11 @@ To generate a scalability report: - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: + ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py scale_profile.yml ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index 10bee3047..1edd592a4 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -221,10 +221,11 @@ To generate a performance regression report: 1. Edit the `./app/reports_generation/performance_profile.yml` file: - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: + ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` @@ -374,10 +375,11 @@ To generate a scalability report: - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash +1. 
Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: + ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py scale_profile.yml ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index b7b963767..7c964cdfa 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -498,11 +498,11 @@ To generate a performance regression report: 1. Edit the `./app/reports_generation/performance_profile.yml` file: - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` @@ -627,11 +627,11 @@ To generate a scalability report: - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py scale_profile.yml ``` diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index a5b098ba4..2243e4add 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -542,10 +542,11 @@ To generate a performance regression report: 1. Edit the `./app/reports_generation/performance_profile.yml` file: - For `runName: "without app"`, in the `relativePath` key, insert the relative path to results directory of [Run 1](#regressionrun1). - For `runName: "with app"`, in the `relativePath` key, insert the relative path to results directory of [Run 2](#regressionrun2). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash +1. 
Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: + ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py performance_profile.yml ``` @@ -669,10 +670,11 @@ To generate a scalability report: - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). -1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command to generate reports: ``` bash +1. Navigate locally to `dc-app-performance-toolkit` folder and run the following command from local terminal (Git Bash for Windows users) to generate reports: + ``` bash docker run --pull=always \ -v "/$PWD:/dc-app-performance-toolkit" \ - --workdir="/dc-app-performance-toolkit/app/reports_generation" \ + --workdir="//dc-app-performance-toolkit/app/reports_generation" \ --entrypoint="python" \ -it atlassian/dcapt csv_chart_generator.py scale_profile.yml ``` From d4599aca9f19596f0a038d87f9e0ddc9763bc4e8 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Tue, 16 Jan 2024 17:10:14 +0100 Subject: [PATCH 150/152] Fix docs --- docs/dc-apps-performance-toolkit-user-guide-bamboo.md | 2 +- docs/dc-apps-performance-toolkit-user-guide-crowd.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index 1cabadf25..b82dec709 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -210,7 +210,7 @@ Note, that `locust_app_specific_action` action execution will start in some time --- -### 3. Setting up load configuration for Enterprise-scale runs +## 3. Setting up load configuration for Enterprise-scale runs Default TerraForm deployment [configuration](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/dcapt.tfvars) already has a dedicated execution environment pod to run tests from. For more details see `Execution Environment Settings` section in `dcapt.tfvars` file. diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index 1edd592a4..4f2747322 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -114,7 +114,7 @@ Data Center App Performance Toolkit has its own set of default [JMeter](https:// --- -### 3. Setting up load configuration for Enterprise-scale runs +## 3. Setting up load configuration for Enterprise-scale runs Default TerraForm deployment [configuration](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/dcapt.tfvars) already has a dedicated execution environment pod to run tests from. For more details see `Execution Environment Settings` section in `dcapt.tfvars` file. 
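The `--workdir="//dc-app-performance-toolkit/..."` change in the guides above reads as a Git Bash (MSYS) path-conversion workaround rather than a Docker requirement: on Windows, Git Bash rewrites arguments that look like absolute POSIX paths into Windows paths before Docker sees them, which breaks the in-container working directory, while a doubled leading slash suppresses that rewriting and is harmless on Linux/macOS. A hedged illustration (the exact mangled path depends on where Git for Windows is installed):

``` bash
# The report-generation command as the guides now show it. With a single
# leading slash, Git Bash on Windows may rewrite "/dc-app-performance-toolkit/..."
# into something like "C:/Program Files/Git/dc-app-performance-toolkit/..."
# before docker runs; "//" disables that conversion and still resolves to the
# same directory inside the Linux container.
docker run --pull=always \
  -v "/$PWD:/dc-app-performance-toolkit" \
  --workdir="//dc-app-performance-toolkit/app/reports_generation" \
  --entrypoint="python" \
  -it atlassian/dcapt csv_chart_generator.py performance_profile.yml

# Equivalent workaround some setups use instead (assumption, not what these
# guides do): disable MSYS path conversion for the one command.
# MSYS_NO_PATHCONV=1 docker run ... --workdir="/dc-app-performance-toolkit/app/reports_generation" ...
```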
From a4400d4ee9d46be56ca32611519f606fec14c361 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Wed, 17 Jan 2024 12:42:42 +0100 Subject: [PATCH 151/152] Release 8.0.0 --- app/bamboo.yml | 2 +- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/crowd.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 4046cbd2e..7378db9b8 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. # Action percentage for JMeter load executor view_all_builds: 15 diff --git a/app/bitbucket.yml b/app/bitbucket.yml index a32a43492..34bc62a23 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. services: - module: shellexec diff --git a/app/confluence.yml b/app/confluence.yml index 59c7e9d42..0624d14c8 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. extended_metrics: False # Action percentage for JMeter and Locust load executors diff --git a/app/crowd.yml b/app/crowd.yml index d36f529dc..33c6de9d7 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -32,7 +32,7 @@ settings: JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. services: - module: shellexec diff --git a/app/jira.yml b/app/jira.yml index 237e88522..10868e875 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. # Action percentage for Jmeter and Locust load executors create_issue: 4 diff --git a/app/jsm.yml b/app/jsm.yml index e768bd49a..2c90b5724 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -23,7 +23,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. environment_compliance_check: True # Pre-test environment compliance validation. Set to "False" to skip it. # Action percentage for Jmeter and Locust load executors agent_browse_projects: 10 From e3812032a3292eaeea242e6fdd1fb52ba1b79fba Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Wed, 17 Jan 2024 18:00:21 +0100 Subject: [PATCH 152/152] Fix readme versions, fix comment in bamboo.yml --- README.md | 6 +++--- app/bamboo.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 127409ed8..82df5131c 100644 --- a/README.md +++ b/README.md @@ -5,13 +5,13 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat ## Supported versions * Supported Jira versions: - * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.1` and `9.4.10` + * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.1` and `9.4.14` * Supported Jira Service Management versions: - * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.1` and `5.4.10` + * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.1` and `5.4.14` * Supported Confluence versions: - * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.14` and `8.5.4` + * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.5.4` and `7.19.17` * Supported Bitbucket Server versions: * Bitbucket Server [Long Term Support 
release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.8` and `7.21.20` diff --git a/app/bamboo.yml b/app/bamboo.yml index 7378db9b8..b4dbfb335 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -13,7 +13,7 @@ settings: admin_login: admin admin_password: admin load_executor: jmeter - concurrency: 200 # number of Bamboo users for Selenium actions + concurrency: 200 # number of Bamboo users for Jmeter actions test_duration: 45m ramp-up: 5m # time to spin all concurrent threads total_actions_per_hour: 2000 # number of total JMeter actions per hour
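With the 8.0.0 release above, `allow_analytics` defaults to `Yes` in every product yml. For runs that must not report analytics, the flag can be flipped back locally before executing the toolkit; a rough sketch using GNU sed (on macOS, `sed -i ''` is needed instead):

``` bash
# Sketch: opt out of run analytics locally after the 8.0.0 default change.
cd dc-app-performance-toolkit/app
sed -i 's/^\( *allow_analytics:\) Yes/\1 No/' \
  jira.yml jsm.yml confluence.yml bitbucket.yml crowd.yml bamboo.yml
grep -n "allow_analytics" *.yml   # confirm every file now reads "No"
```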