diff --git a/CODEOWNERS b/CODEOWNERS index db4dbae86..c4a79c422 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ # Default code owners - Atlassian Data Center App Performance Toolkit -* @ometelytsia @SergeyMoroz0703 @opopovss @OlehStefanyshyn @dmika1 \ No newline at end of file +* @ometelytsia @SergeyMoroz0703 @OlehStefanyshyn \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 00cc2f782..6eb75064f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ # bzt run: docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml # interactive run: docker run -it --entrypoint="/bin/bash" -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt -FROM python:3.11-slim +FROM python:3.11-slim-bullseye ENV APT_INSTALL="apt-get -y install --no-install-recommends" diff --git a/README.md b/README.md index 9785d2c88..2286db13e 100644 --- a/README.md +++ b/README.md @@ -5,22 +5,22 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat ## Supported versions * Supported Jira versions: - * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.20.22` and `9.4.6` + * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.4.8` and `8.20.24` * Supported Jira Service Management versions: - * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `4.20.22` and `5.4.6` + * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.4.8` and `4.20.24` * Supported Confluence versions: - * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.9`, `7.13.17` and `8.1.4` platform release + * Confluence [Long Term Support 
release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.11`, `7.13.18` and `8.1.4` platform release * Supported Bitbucket Server versions: - * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.21.11`, `7.17.16`, and `8.8.3` platform release. + * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.21.14` and `8.9.2` * Supported Crowd versions: - * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.0.5` + * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.1.4` * Supported Bamboo versions: - * Bamboo [Long Term Support release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.1` + * Bamboo [Long Term Support release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.3` ## Support In case of technical questions, issues or problems with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. @@ -95,7 +95,7 @@ git --version ``` We recommend using [virtualenv](https://virtualenv.pypa.io/en/latest/) for Taurus. See example setup below. -## Example setup for clean Ubuntu 20.04 +## Example setup for clean Ubuntu 22.04 JDK setup (if missing): ``` sudo apt-get update diff --git a/app/bamboo.yml b/app/bamboo.yml index 2ff62cfa7..32f0ef018 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -18,7 +18,7 @@ settings: ramp-up: 5m # time to spin all concurrent threads total_actions_per_hour: 2000 # number of total JMeter actions per hour WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: Yes # Allow sending basic run analytics to Atlassian. 
These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for JMeter load executor @@ -49,7 +49,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: jmeter executor: jmeter diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 9e3495c6a..a01519649 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -18,7 +18,7 @@ settings: ramp-up: 10m # time to spin all concurrent users total_actions_per_hour: 32700 WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: @@ -36,7 +36,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: ${load_executor} concurrency: ${concurrency} diff --git a/app/confluence.yml b/app/confluence.yml index d80d0cd02..54403ab72 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -18,7 +18,7 @@ settings: ramp-up: 5m # time to spin all concurrent users total_actions_per_hour: 20000 WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
# Action percentage for JMeter and Locust load executors @@ -51,7 +51,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/crowd.yml b/app/crowd.yml index 992736743..4493a48de 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -9,7 +9,7 @@ settings: application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 4990, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /crowd in case of url like http://localhost:4990/crowd + application_postfix: /crowd # e.g. /crowd in case of url like http://localhost:4990/crowd admin_login: admin admin_password: admin application_name: crowd @@ -30,7 +30,7 @@ settings: # ramp-up: 5s # time to spin all concurrent threads # total_actions_per_hour: 720000 # number of total JMeter actions per hour - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: diff --git a/app/jira.yml b/app/jira.yml index b21d24016..1dc2d0e9d 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -9,7 +9,7 @@ settings: application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. + application_postfix: /jira # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. 
admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. @@ -18,7 +18,7 @@ settings: ramp-up: 3m # time to spin all concurrent users total_actions_per_hour: 54500 # number of total JMeter/Locust actions per hour WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors @@ -51,7 +51,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/jmeter/confluence.jmx b/app/jmeter/confluence.jmx index dd3b3a954..60127d893 100644 --- a/app/jmeter/confluence.jmx +++ b/app/jmeter/confluence.jmx @@ -5362,7 +5362,7 @@ if (response.contains("Successfully added emoji")) { - ${application.postfix}/rest/emoticons/1.0/custom/delete-your-upload/${emoji_shortcut} + ${application.postfix}/rest/emoticons/1.0/custom/delete-your-upload?shortcut=${emoji_shortcut} DELETE true false diff --git a/app/jsm.yml b/app/jsm.yml index 5d9ad18e2..db0c188dc 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -9,7 +9,7 @@ settings: application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. + application_postfix: /jira # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. 
admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. @@ -21,7 +21,7 @@ settings: total_actions_per_hour_customers: 15000 insight: False # Set True to enable Insight specific tests WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors @@ -67,7 +67,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: ${load_executor}_agents executor: ${load_executor} diff --git a/app/selenium_ui/conftest.py b/app/selenium_ui/conftest.py index 4e5952549..60839ce1a 100644 --- a/app/selenium_ui/conftest.py +++ b/app/selenium_ui/conftest.py @@ -4,19 +4,17 @@ import functools import json import os +import re import sys -import time from datetime import timezone -import re from pprint import pprint +from time import sleep, time import filelock import pytest from selenium.common.exceptions import WebDriverException -from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver import Chrome from selenium.webdriver.chrome.options import Options -from time import sleep from util.conf import CONFLUENCE_SETTINGS, JIRA_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS, BAMBOO_SETTINGS from util.exceptions import WebDriverExceptionPostpone @@ -24,7 +22,7 @@ JIRA_DATASET_PROJECTS, JIRA_DATASET_SCRUM_BOARDS, JIRA_DATASET_USERS, JIRA_DATASET_CUSTOM_ISSUES, BITBUCKET_USERS, \ BITBUCKET_PROJECTS, BITBUCKET_REPOS, BITBUCKET_PRS, CONFLUENCE_BLOGS, CONFLUENCE_PAGES, CONFLUENCE_CUSTOM_PAGES, \ CONFLUENCE_USERS, ENV_TAURUS_ARTIFACT_DIR, JSM_DATASET_REQUESTS, JSM_DATASET_CUSTOMERS, 
JSM_DATASET_AGENTS, \ - JSM_DATASET_SERVICE_DESKS_L, JSM_DATASET_SERVICE_DESKS_M, JSM_DATASET_SERVICE_DESKS_S, JSM_DATASET_CUSTOM_ISSUES,\ + JSM_DATASET_SERVICE_DESKS_L, JSM_DATASET_SERVICE_DESKS_M, JSM_DATASET_SERVICE_DESKS_S, JSM_DATASET_CUSTOM_ISSUES, \ JSM_DATASET_INSIGHT_SCHEMAS, JSM_DATASET_INSIGHT_ISSUES, BAMBOO_USERS, BAMBOO_BUILD_PLANS SCREEN_WIDTH = 1920 @@ -134,7 +132,7 @@ def wrapper(*args, **kwargs): if globals.login_failed: pytest.skip("login is failed") node_ip = "" - start = time.time() + start = time() error_msg = 'Success' full_exception = '' if args: @@ -148,14 +146,14 @@ def wrapper(*args, **kwargs): # https://docs.python.org/2/library/sys.html#sys.exc_info exc_type, full_exception = sys.exc_info()[:2] error_msg = f"Failed measure: {interaction} - {exc_type.__name__}" - end = time.time() + end = time() timing = str(int((end - start) * 1000)) lockfile = f'{selenium_results_file}.lock' with filelock.SoftFileLock(lockfile): with open(selenium_results_file, "a+") as jtl_file: - timestamp = round(time.time() * 1000) + timestamp = round(time() * 1000) if explicit_timing: jtl_file.write(f"{timestamp},{explicit_timing*1000},{interaction},,{error_msg}," f",{success},0,0,0,0,,0\n") @@ -178,10 +176,8 @@ def wrapper(*args, **kwargs): def webdriver(app_settings): def driver_init(): chrome_options = Options() - capabilities = DesiredCapabilities.CHROME - capabilities["goog:loggingPrefs"] = {"performance": "ALL"} if app_settings.webdriver_visible and is_docker(): - raise SystemExit("ERROR: WEBDRIVER_VISIBLE is True in .yml, but Docker container does not have a display.") + raise Exception("ERROR: WEBDRIVER_VISIBLE is True in .yml, but Docker container does not have a display.") if not app_settings.webdriver_visible: chrome_options.add_argument("--headless") if not app_settings.secure: @@ -191,7 +187,8 @@ def driver_init(): chrome_options.add_argument("--disable-infobars") chrome_options.add_argument('lang=en') chrome_options.add_experimental_option('prefs', 
{'intl.accept_languages': 'en,en_US'}) - driver = Chrome(options=chrome_options, desired_capabilities=capabilities) + chrome_options.set_capability('goog:loggingPrefs', {'performance': 'ALL'}) + driver = Chrome(options=chrome_options) driver.app_settings = app_settings return driver @@ -271,7 +268,7 @@ def get_wait_browser_metrics(webdriver, expected_metrics): return data print(f'Waiting for browser metrics, attempt {i}, sleep {sleep_time}') - time.sleep(sleep_time) + sleep(sleep_time) return {} @@ -287,7 +284,7 @@ def measure_dom_requests(webdriver, interaction, description=''): success = True with filelock.SoftFileLock(lockfile): with open(selenium_results_file, "a+") as jtl_file: - timestamp = round(time.time() * 1000) + timestamp = round(time() * 1000) jtl_file.write( f"{timestamp},{timing},{interaction},,{error_msg},,{success},0,0,0,0,{webdriver.node_ip},0\n") print(f"{timestamp},{timing},{interaction},,{error_msg},,{success},0,0,0,0,{webdriver.node_ip},0\n") @@ -351,7 +348,7 @@ def measure_browser_navi_metrics(webdriver, dataset, expected_metrics): for metric in metrics: interaction = metric['key'] ready_for_user_timing = metric['ready_for_user'] - timestamp = round(time.time() * 1000) + timestamp = round(time() * 1000) node_ip = webdriver.node_ip jtl_file.write( f"{timestamp},{ready_for_user_timing},{interaction},,{error_msg},,{success},0,0,0,0,{node_ip},0\n") @@ -430,7 +427,7 @@ def get_screen_shots(request, webdriver): action_name = request.node.rep_call.head_line error_text = request.node.rep_call.longreprtext with open(selenium_error_file, mode) as err_file: - timestamp = round(time.time() * 1000) + timestamp = round(time() * 1000) dt = datetime.datetime.now() utc_time = dt.replace(tzinfo=timezone.utc) str_time = utc_time.strftime("%m-%d-%Y, %H:%M:%S") diff --git a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py index c93ecf88c..f0e4b5b1a 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ 
b/app/selenium_ui/jsm/pages/customer_pages.py @@ -36,7 +36,7 @@ def open_profile_menu(self): def logout(self): self.get_element(TopPanelSelectors.logout_button).click() - self.wait_until_invisible(TopPanelSelectors.profile_icon) + self.wait_until_visible(LoginPageLocators.login_field) class CustomerPortals(BasePage): diff --git a/app/util/bamboo/bamboo_dataset_generator/README.md b/app/util/bamboo/bamboo_dataset_generator/README.md index 5a9a55bb1..d5731a06c 100644 --- a/app/util/bamboo/bamboo_dataset_generator/README.md +++ b/app/util/bamboo/bamboo_dataset_generator/README.md @@ -7,6 +7,11 @@ Configuration located inside: [src/main/java/bamboogenerator/Main.java](src/main **Client Configuration** - `BAMBOO_SERVER_URL` - the URL of Bamboo + + For TerraForm deployment URL should have port and postfix + ``` + BAMBOO_SERVER_URL = "http://my-bamboo.amazonaws.com:80/bamboo" + ``` - `ADMIN_USER_NAME` - the username of admin account diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml index f09606be9..823c8fab9 100644 --- a/app/util/bamboo/bamboo_dataset_generator/pom.xml +++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml @@ -62,7 +62,7 @@ com.google.guava guava - 29.0-jre + 32.0.0-jre com.fasterxml.jackson.core diff --git a/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/Main.java b/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/Main.java index 2a543cf5a..f27a1fff3 100644 --- a/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/Main.java +++ b/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/Main.java @@ -27,7 +27,9 @@ public class Main { private static final Logger LOG = LoggerFactory.getLogger(Main.class); - private static final String BAMBOO_SERVER_URL = "http://0.0.0.0:8085"; + // e.g. for TerraForm deployment: BAMBOO_SERVER_URL = "http://my-bamboo.amazonaws.com:80/bamboo" + // e.g.
for localhost deployment: BAMBOO_SERVER_URL = "http://0.0.0.0:8085" + private static final String BAMBOO_SERVER_URL = "http://my-bamboo.amazonaws.com:80/bamboo"; private static final String ADMIN_USER_NAME = "admin"; // NOTE: Please make sure you haven't changed these values after initial run diff --git a/app/util/conf.py b/app/util/conf.py index a021d4995..db0c93623 100644 --- a/app/util/conf.py +++ b/app/util/conf.py @@ -2,7 +2,7 @@ from util.project_paths import JIRA_YML, CONFLUENCE_YML, BITBUCKET_YML, JSM_YML, CROWD_YML, BAMBOO_YML -TOOLKIT_VERSION = '7.4.1' +TOOLKIT_VERSION = '7.5.0' UNSUPPORTED_VERSION = '6.3.0' diff --git a/app/util/jira/populate_db.sh b/app/util/jira/populate_db.sh index cce3cdb88..1ee377dc5 100644 --- a/app/util/jira/populate_db.sh +++ b/app/util/jira/populate_db.sh @@ -43,8 +43,8 @@ JIRA_DB_PASS="Password1!" # Jira/JSM supported versions -SUPPORTED_JIRA_VERSIONS=(8.20.22 9.4.6) -SUPPORTED_JSM_VERSIONS=(4.20.22 5.4.6) +SUPPORTED_JIRA_VERSIONS=(8.20.24 9.4.8) +SUPPORTED_JSM_VERSIONS=(4.20.24 5.4.8) SUPPORTED_VERSIONS=("${SUPPORTED_JIRA_VERSIONS[@]}") # JSM section diff --git a/app/util/jira/upload_attachments.sh b/app/util/jira/upload_attachments.sh index f5616ad84..695289770 100644 --- a/app/util/jira/upload_attachments.sh +++ b/app/util/jira/upload_attachments.sh @@ -29,8 +29,8 @@ JIRA_VERSION_FILE="/media/atl/jira/shared/jira-software.version" # Jira/JSM supported versions -SUPPORTED_JIRA_VERSIONS=(8.20.22 9.4.6) -SUPPORTED_JSM_VERSIONS=(4.20.22 5.4.6) +SUPPORTED_JIRA_VERSIONS=(8.20.24 9.4.8) +SUPPORTED_JSM_VERSIONS=(4.20.24 5.4.8) SUPPORTED_VERSIONS=("${SUPPORTED_JIRA_VERSIONS[@]}") if [[ ${jsm} == 1 ]]; then diff --git a/app/util/jmeter/start_jmeter_ui.py b/app/util/jmeter/start_jmeter_ui.py index 13c4e4a98..fa1ddefe5 100644 --- a/app/util/jmeter/start_jmeter_ui.py +++ b/app/util/jmeter/start_jmeter_ui.py @@ -137,7 +137,7 @@ def print_settings(self, settings): print(setting.replace('\n', '')) def launch_jmeter_ui(self): - jmeter_path = 
JMETER_HOME / self.env_settings['JMETER_VERSION'] / 'bin' / 'jmeter' + jmeter_path = JMETER_HOME / str(self.env_settings['JMETER_VERSION']) / 'bin' / 'jmeter' command = [str(jmeter_path), "-p", str(PROPERTIES), "-t", str(self.jmx)] print("JMeter start command: {}".format(' '.join(command))) print("Working dir: {}".format(APP_DIR)) diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD new file mode 100644 index 000000000..d612862e4 --- /dev/null +++ b/app/util/k8s/README.MD @@ -0,0 +1,73 @@ +# Development environment +## Create development environment +* set AWS credential in [aws_envs](./aws_envs) file +* set correct values in [dcapt-small.tfvars](./dcapt-small.tfvars) file: + * `environment_name` + * `products` + * `license` +* run install development environment command: +``` bash +docker run --pull=always --env-file aws_envs \ +-v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./install.sh -c config.tfvars +``` +## Terminate development environment +``` bash +docker run --pull=always --env-file aws_envs \ +-v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars +``` + +# Enterprise-scale environment +## Create enterprise-scale environment +* set AWS credential in [aws_envs](./aws_envs) file +* set correct values in [dcapt.tfvars](./dcapt.tfvars) file: + * `environment_name` + * `products` + * `license` +* run install enterprise-scale environment command: +``` bash +docker run --pull=always --env-file aws_envs \ +-v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./install.sh -c config.tfvars +``` +## Terminate enterprise-scale environment +``` bash +docker run --pull=always --env-file aws_envs \ +-v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ 
+-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars +``` + +# Collect detailed k8s logs +Set AWS credential in [aws_envs](./aws_envs) file and run command: +``` bash +export ENVIRONMENT_NAME=your_environment_name +export REGION=us-east-2 + +docker run --pull=always --env-file aws_envs \ +-v "$PWD/k8s_logs:/data-center-terraform/k8s_logs" \ +-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs +``` + +# Force terminate cluster +Set AWS credential in [aws_envs](./aws_envs) file and run command: +``` bash +export ENVIRONMENT_NAME=your_environment_name +export REGION=us-east-2 + +docker run --pull=always --env-file aws_envs \ +--workdir="/data-center-terraform" \ +--entrypoint="python" \ +-v "$PWD/terminate_cluster.py:/data-center-terraform/terminate_cluster.py" \ +atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-cluster --aws_region $REGION +``` + +# Non default product version or aws region +File [dcapt-snapshots.json](./dcapt-snapshots.json) has all available RDS and EBS snapshots IDs for all supported product +versions and AWS regions. +Set `version_tag`, `shared_home_snapshot_id` and `db_snapshot_id` values correspondingly to product version and region. 
\ No newline at end of file diff --git a/app/util/k8s/aws_envs b/app/util/k8s/aws_envs new file mode 100644 index 000000000..6e515f78e --- /dev/null +++ b/app/util/k8s/aws_envs @@ -0,0 +1,3 @@ +# aws_envs file should contain AWS variables needed for authorization (without quotes) +AWS_ACCESS_KEY_ID=abc +AWS_SECRET_ACCESS_KEY=efg diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 91ba64bd0..c50270074 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -85,34 +85,34 @@ jira_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions # # Jira version -jira_version_tag = "9.4.6" +jira_version_tag = "9.4.8" # JSM version -# jira_version_tag = "5.4.6" +# jira_version_tag = "5.4.8" # Shared home restore configuration. # Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. # -# Jira 9.4.6 DCAPT small dataset EBS snapshot - jira_shared_home_snapshot_id = "snap-0651a00c1234ca355" -# Jira 8.20.22 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-060af366a595cf019" -# JSM 5.4.6 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-0b989ff3e3236e707" -# JSM 4.20.22 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-02cf7f70e3872320f" +# Jira 9.4.8 DCAPT small dataset EBS snapshot + jira_shared_home_snapshot_id = "snap-0005a8c3cc297b294" +# Jira 8.20.24 DCAPT small dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0c3cb60ddc50c1136" +# JSM 5.4.8 DCAPT small dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-02f299ef7f1f524b2" +# JSM 4.20.24 DCAPT small dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0971e128b8d1d2af9" # Database restore configuration. # Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. 
# Build number stored within the snapshot and Jira license are also required, so that Jira can be fully setup prior to start. # -# Jira 9.4.6 DCAPT small dataset RDS snapshot - jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-6" -# Jira 8.20.22 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-22" -# JSM 5.4.6 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-6" -# JSM 4.20.22 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-22" +# Jira 9.4.8 DCAPT small dataset RDS snapshot + jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-8" +# Jira 8.20.24 DCAPT small dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-24" +# JSM 5.4.8 DCAPT small dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-8" +# JSM 4.20.24 DCAPT small dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-24" # Helm chart version of Jira # jira_helm_chart_version = "" @@ -160,6 +160,10 @@ jira_db_name = "jira" jira_db_master_username = "atljira" jira_db_master_password = "Password1!" +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. 
+# jira_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Confluence Settings ################################################################################ @@ -177,32 +181,32 @@ confluence_license = "confluence-license" confluence_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -confluence_version_tag = "7.19.9" +confluence_version_tag = "7.19.11" # Shared home restore configuration. # 8.1.4 DCAPT small dataset EBS snapshot # confluence_shared_home_snapshot_id = "snap-0815ada397b953b93" -# 7.19.9 DCAPT small dataset EBS snapshot -confluence_shared_home_snapshot_id = "snap-04a21b844f187b645" -# 7.13.17 DCAPT small dataset EBS snapshot -# confluence_shared_home_snapshot_id = "snap-02107cfb60888ccbf" +# 7.19.11 DCAPT small dataset EBS snapshot +confluence_shared_home_snapshot_id = "snap-00ede7dca448a6243" +# 7.13.18 DCAPT small dataset EBS snapshot +# confluence_shared_home_snapshot_id = "snap-055811dae848f13ae" # Database restore configuration. # Build number stored within the snapshot and Confluence license are also required, so that Confluence can be fully setup prior to start. 
# 8.1.4 DCAPT small dataset RDS snapshot # confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-8-1-4" -# 7.19.9 DCAPT small dataset RDS snapshot -confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-9" -# 7.13.17 DCAPT small dataset RDS snapshot -# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-13-17" +# 7.19.11 DCAPT small dataset RDS snapshot +confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-11" +# 7.13.18 DCAPT small dataset RDS snapshot +# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-13-18" # Build number for a specific Confluence version can be found in the link below: # https://developer.atlassian.com/server/confluence/confluence-build-information -# 8.1.1 +# 8.1.4 # confluence_db_snapshot_build_number = "9003" -# 7.19.6 +# 7.19.11 confluence_db_snapshot_build_number = "8804" -# 7.13.14 +# 7.13.18 # confluence_db_snapshot_build_number = "8703" # Helm chart version of Confluence @@ -258,6 +262,9 @@ confluence_db_master_password = "Password1!" # Enables Collaborative editing in Confluence confluence_collaborative_editing_enabled = true +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# confluence_custom_values_file = "/path/to/values.yaml" ################################################################################ # Bitbucket Settings @@ -276,27 +283,23 @@ bitbucket_license = "bitbucket-license" bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "7.21.11" +bitbucket_version_tag = "7.21.14" # Shared home restore configuration. 
# Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # -# 7.21.11 DCAPT small dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-01f510d0c4405ce78" -# 8.8.3 DCAPT small dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-01f510d0c4405ce78" -# 7.17.16 DCAPT small dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-0c945dbcbd3e4ebff" +# 7.21.14 DCAPT small dataset EBS snapshot +bitbucket_shared_home_snapshot_id = "snap-03893c494ba7edcf4" +# 8.9.2 DCAPT small dataset EBS snapshot +#bitbucket_shared_home_snapshot_id = "snap-0fb8cd6bf387057c0" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # -# 7.21.11 DCAPT small dataset RDS snapshot - bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-11" -# 8.8.3 DCAPT small dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-8-3" -# 7.17.16 DCAPT small dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-17-16" +# 7.21.14 DCAPT small dataset RDS snapshot +bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14" +# 8.9.2 DCAPT small dataset RDS snapshot +#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2" # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" @@ -369,3 +372,7 @@ bitbucket_db_name = "bitbucket" # If password is not provided, a random password will be generated. bitbucket_db_master_username = "atlbitbucket" bitbucket_db_master_password = "Password1!" + +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. 
+# bitbucket_custom_values_file = "/path/to/values.yaml" diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 524066431..51e46840f 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -2,17 +2,17 @@ "jira": { "versions": [ { - "version": "9.4.6", + "version": "9.4.8", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-6", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-6", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-4-6", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-4-6" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-8", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-4-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-4-8" } ] }, @@ -21,10 +21,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-4-6", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-6", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-4-6", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-4-6" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-4-8", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-4-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-4-8" } ] }, @@ -33,10 +33,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-00dbfdccd242ed1b3", - "us-east-2": "snap-051b68559232b9c52", - "us-west-1": "snap-06680b532a5124558", - "us-west-2": "snap-02a7ba9c66ef7867a" + 
"us-east-1": "snap-0640210f62a262aaf", + "us-east-2": "snap-0d619095feaa2eca5", + "us-west-1": "snap-00f6a0fc8ba4c4cce", + "us-west-2": "snap-0d23a05be5f527030" } ] }, @@ -45,27 +45,27 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-0d43933fdd4672594", - "us-east-2": "snap-0651a00c1234ca355", - "us-west-1": "snap-008c03d701800ff3f", - "us-west-2": "snap-0c47ff88937f1a169" + "us-east-1": "snap-05a61b57dbb4f9834", + "us-east-2": "snap-0005a8c3cc297b294", + "us-west-1": "snap-0dfb346bb01f4709a", + "us-west-2": "snap-0c17be9ae98bbd1ed" } ] } ] }, { - "version": "8.20.22", + "version": "8.20.24", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-8-20-22", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-22", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-8-20-22", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-8-20-22" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-8-20-24", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-24", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-8-20-24", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-8-20-24" } ] }, @@ -74,10 +74,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-8-20-22", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-22", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-8-20-22", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-8-20-22" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-8-20-24", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-24", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-8-20-24", + "us-west-2": 
"arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-8-20-24" } ] }, @@ -86,10 +86,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-0d641c27d1335f1e4", - "us-east-2": "snap-07eabc725b2784dd8", - "us-west-1": "snap-021b200aee83f4c42", - "us-west-2": "snap-0c447a319a3062d00" + "us-east-1": "snap-029edbed07ab594e0", + "us-east-2": "snap-0b5f4473954e6d959", + "us-west-1": "snap-0ffaa992ba449a53d", + "us-west-2": "snap-01e69efc1d8943038" } ] }, @@ -98,10 +98,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-08fa076e03f4d7489", - "us-east-2": "snap-060af366a595cf019", - "us-west-1": "snap-033251c6028eb205a", - "us-west-2": "snap-09919bd45a9bf234c" + "us-east-1": "snap-083d2d8f5797f907e", + "us-east-2": "snap-0c3cb60ddc50c1136", + "us-west-1": "snap-07de609e058d28a03", + "us-west-2": "snap-01fa045458071eda5" } ] } @@ -112,17 +112,17 @@ "jsm": { "versions": [ { - "version": "5.4.6", + "version": "5.4.8", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-6", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-6", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-4-6", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-4-6" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-8", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-4-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-4-8" } ] }, @@ -131,10 +131,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-4-6", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-6", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-4-6", - "us-west-2": 
"arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-4-6" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-4-8", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-4-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-4-8" } ] }, @@ -143,10 +143,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-08c764486b2faa09a", - "us-east-2": "snap-0a65d52f20fc43d4e", - "us-west-1": "snap-05eff0f22140b59e8", - "us-west-2": "snap-0b32ffd526d769aea" + "us-east-1": "snap-0a13271b63872a2a6", + "us-east-2": "snap-0fb58e8d005edeb32", + "us-west-1": "snap-05d6aa53717fb3c6c", + "us-west-2": "snap-043842d9319f25659" } ] }, @@ -155,27 +155,27 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-0390ceabfc3292865", - "us-east-2": "snap-0b989ff3e3236e707", - "us-west-1": "snap-03a0c97a99f84fcf9", - "us-west-2": "snap-0528f3b63c100cf48" + "us-east-1": "snap-08bc969471d0ee11f", + "us-east-2": "snap-02f299ef7f1f524b2", + "us-west-1": "snap-0fa5b8bd27f66e6c3", + "us-west-2": "snap-063211f90e6d81bbd" } ] } ] }, { - "version": "4.20.22", + "version": "4.20.24", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-4-20-22", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-22", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-4-20-22", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-4-20-22" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-4-20-24", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-24", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-4-20-24", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-4-20-24" } ] }, @@ -184,10 +184,10 @@ "size": "small", "snapshots": 
[ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-4-20-22", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-22", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-4-20-22", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-4-20-22" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-4-20-24", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-24", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-4-20-24", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-4-20-24" } ] }, @@ -196,10 +196,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-0ff68d6d06e69fffb", - "us-east-2": "snap-02cf7f70e3872320f", - "us-west-1": "snap-076395fc4d09c9020", - "us-west-2": "snap-0f148cab48b6efea3" + "us-east-1": "snap-0c95405b316f28ec8", + "us-east-2": "snap-0cae5febc8127250b", + "us-west-1": "snap-081f548dda005c97e", + "us-west-2": "snap-08e23754ddc402ec4" } ] }, @@ -208,10 +208,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-0ac5442e3b594cbd2", - "us-east-2": "snap-0b9d0488da3b12e0a", - "us-west-1": "snap-08bc0efc05e414230", - "us-west-2": "snap-0a05f25582e8ccd9f" + "us-east-1": "snap-0d933d20b989beb7b", + "us-east-2": "snap-0971e128b8d1d2af9", + "us-west-1": "snap-0c6d8b6aa53b93e78", + "us-west-2": "snap-0734518fb7d55f7ce" } ] } @@ -222,17 +222,17 @@ "confluence": { "versions": [ { - "version": "7.19.9", + "version": "7.19.11", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-9", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-9", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-19-9", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-19-9" + "us-east-1": 
"arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-11", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-11", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-19-11", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-19-11" } ] }, @@ -241,10 +241,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-19-9", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-9", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-19-9", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-19-9" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-19-11", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-11", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-19-11", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-19-11" } ] }, @@ -253,10 +253,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-036dea82bc99ce12c", - "us-east-2": "snap-0bd74575c95014c10", - "us-west-1": "snap-0312783bdbba635ae", - "us-west-2": "snap-01e1d66f4372a9f03" + "us-east-1": "snap-03b3a8541b7466ec3", + "us-east-2": "snap-09365c581a158a979", + "us-west-1": "snap-01bc9fdb49bc6641e", + "us-west-2": "snap-061919924738ea4c3" } ] }, @@ -265,27 +265,27 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-0bf417b074f07022f", - "us-east-2": "snap-04a21b844f187b645", - "us-west-1": "snap-047a000c8a98ef356", - "us-west-2": "snap-0374a93b1da17e01d" + "us-east-1": "snap-0dad75f94da7f317b", + "us-east-2": "snap-00ede7dca448a6243", + "us-west-1": "snap-0f72ad2146e3a19c3", + "us-west-2": "snap-09ff2c4be549518a0" } ] } ] }, { - "version": "7.13.17", + "version": "7.13.18", "data": [ { "type": "rds", 
"size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-13-17", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-13-17", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-13-17", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-13-17" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-13-18", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-13-18", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-13-18", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-13-18" } ] }, @@ -294,10 +294,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-13-17", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-13-17", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-13-17", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-13-17" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-13-18", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-13-18", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-13-18", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-13-18" } ] }, @@ -306,10 +306,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-0cca34405ae9b37e5", - "us-east-2": "snap-08abae6cf1937e958", - "us-west-1": "snap-0f6ea8d66f79f2e74", - "us-west-2": "snap-023c61d7135250437" + "us-east-1": "snap-08156f8bb0099942f", + "us-east-2": "snap-04cc3d8455b1ef6e9", + "us-west-1": "snap-039e3a985cf126fc0", + "us-west-2": "snap-0ce8a0947cd581752" } ] }, @@ -318,10 +318,10 @@ "size": "small", "snapshots": [ { - "us-east-1": 
"snap-045cd3b59a68e281d", - "us-east-2": "snap-02107cfb60888ccbf", - "us-west-1": "snap-051a51af7954fe568", - "us-west-2": "snap-05978ae2c8615fb38" + "us-east-1": "snap-01df9653b5c8f9f64", + "us-east-2": "snap-055811dae848f13ae", + "us-west-1": "snap-058e37561a1cce3e9", + "us-west-2": "snap-0bb261f0b3266d136" } ] } @@ -385,17 +385,17 @@ "bitbucket": { "versions": [ { - "version": "7.17.16", + "version": "7.21.14", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-17-16", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-17-16", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-7-17-16", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-7-17-16" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-21-14", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-14", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-7-21-14", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-7-21-14" } ] }, @@ -404,10 +404,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-17-16", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-17-16", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-7-17-16", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-7-17-16" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-14", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-14", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14" } ] }, @@ -416,10 +416,10 @@ "size": 
"large", "snapshots": [ { - "us-east-1": "snap-0849ecbad09d7aa15", - "us-east-2": "snap-06fceac7bdcc3844c", - "us-west-1": "snap-06147e64d7ae138ff", - "us-west-2": "snap-04f83a4e7360b64c9" + "us-east-1": "snap-01873c2840b4dd3c3", + "us-east-2": "snap-0ccb8c3d34ff171f1", + "us-west-1": "snap-0cde4bd0ed0358d0e", + "us-west-2": "snap-0f8b60b668f3bbb66" } ] }, @@ -428,27 +428,27 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-017f427062d1eef34", - "us-east-2": "snap-0c945dbcbd3e4ebff", - "us-west-1": "snap-0bef4d24419315e7d", - "us-west-2": "snap-07826f85a1de3ccf6" + "us-east-1": "snap-0a869d88cb2829bc4", + "us-east-2": "snap-03893c494ba7edcf4", + "us-west-1": "snap-020733b644be01f62", + "us-west-2": "snap-07e784e26a282e18c" } ] } ] }, { - "version": "7.21.11", + "version": "8.9.2", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-21-11", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-11", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-7-21-11", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-7-21-11" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-8-9-2", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-2", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-8-9-2", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-8-9-2" } ] }, @@ -457,10 +457,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-11", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-11", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-11", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-11" + "us-east-1": 
"arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-2", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-2", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2" } ] }, @@ -469,10 +469,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-05a4660e8b2b672cc", - "us-east-2": "snap-0456406e413ff835b", - "us-west-1": "snap-037d014b99517b76a", - "us-west-2": "snap-08f8db72740f75e87" + "us-east-1": "snap-0c5d82e754dd8f536", + "us-east-2": "snap-0d933f3d40000e877", + "us-west-1": "snap-039620171723e6e2c", + "us-west-2": "snap-01a121265b13ee758" } ] }, @@ -481,63 +481,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-08d61da85a886e5cc", - "us-east-2": "snap-0a6edc9184118695c", - "us-west-1": "snap-0cb0694c018915313", - "us-west-2": "snap-0be44afd82c89b714" - } - ] - } - ] - }, - { - "version": "8.8.3", - "data": [ - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-8-8-3", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-8-3", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-8-8-3", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-8-8-3" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-8-3", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-8-3", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-8-8-3", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-8-8-3" - } - ] - }, - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-1": "snap-0c5a9cd13b3259403", - "us-east-2": 
"snap-04138d264fb24f2e7", - "us-west-1": "snap-0d24266fe20d821f7", - "us-west-2": "snap-09d3c3152fc0123ce" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-1": "snap-009ef66688cdb0919", - "us-east-2": "snap-01f510d0c4405ce78", - "us-west-1": "snap-0b35c2aed07dd644b", - "us-west-2": "snap-0c5e05bb471a65d37" + "us-east-1": "snap-039acd2608c3bce3f", + "us-east-2": "snap-0fb8cd6bf387057c0", + "us-west-1": "snap-012dc94feaaa30490", + "us-west-2": "snap-0a0f1b03ec0e8bf36" } ] } @@ -548,17 +495,17 @@ "crowd": { "versions": [ { - "version": "5.0.5", + "version": "5.1.4", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-0-5", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-0-5", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-crowd-5-0-5", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-crowd-5-0-5" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-1-4", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-4", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-crowd-5-1-4", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-crowd-5-1-4" } ] }, @@ -567,10 +514,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-07b043e36e6b0df13", - "us-east-2": "snap-0da31ed523c51a0af", - "us-west-1": "snap-065aeb9f22113f544", - "us-west-2": "snap-0c70c43eba3e0e5a0" + "us-east-1": "", + "us-east-2": "snap-0a8e229690be9ae30", + "us-west-1": "", + "us-west-2": "" } ] } diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 5c88e5812..4d59db180 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -31,7 +31,7 @@ resource_tags = {Name: "dcapt-testing"} # Instance types that is preferred for EKS node group. 
# Confluence, Bamboo, Jira - use default value # Bitbucket - ["m5.4xlarge"] -# Crowd - ["c5.xlarge"] +# Crowd - ["m5.xlarge"] # ! REQUIRED ! instance_types = ["m5.2xlarge"] instance_disk_size = 100 @@ -84,34 +84,34 @@ jira_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions # # Jira version -jira_version_tag = "9.4.6" +jira_version_tag = "9.4.8" # JSM version -# jira_version_tag = "5.4.6" +# jira_version_tag = "5.4.8" # Shared home restore configuration. # Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. # -# Jira 9.4.6 DCAPT large dataset EBS snapshot -jira_shared_home_snapshot_id = "snap-051b68559232b9c52" -# Jira 8.20.22 DCAPT large dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-07eabc725b2784dd8" -# JSM 5.4.6 DCAPT large dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-0a65d52f20fc43d4e" -# JSM 4.20.22 DCAPT large dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-02cf7f70e3872320f" +# Jira 9.4.8 DCAPT large dataset EBS snapshot +jira_shared_home_snapshot_id = "snap-0d619095feaa2eca5" +# Jira 8.20.24 DCAPT large dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0b5f4473954e6d959" +# JSM 5.4.8 DCAPT large dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0fb58e8d005edeb32" +# JSM 4.20.24 DCAPT large dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0cae5febc8127250b" # Database restore configuration. # Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. # Build number stored within the snapshot and Jira license are also required, so that Jira can be fully setup prior to start. 
# -# Jira 9.4.6 DCAPT large dataset RDS snapshot -jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-6" -# Jira 8.20.22 DCAPT large dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-22" -# JSM 5.4.6 DCAPT large dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-6" -# JSM 4.20.22 DCAPT large dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-22" +# Jira 9.4.8 DCAPT large dataset RDS snapshot +jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-8" +# Jira 8.20.24 DCAPT large dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-24" +# JSM 5.4.8 DCAPT large dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-8" +# JSM 4.20.24 DCAPT large dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-24" # Helm chart version of Jira # jira_helm_chart_version = "" @@ -153,6 +153,10 @@ jira_db_name = "jira" jira_db_master_username = "atljira" jira_db_master_password = "Password1!" +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. 
+# jira_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Confluence Settings ################################################################################ @@ -170,17 +174,17 @@ confluence_license = "confluence-license" confluence_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -confluence_version_tag = "7.19.9" +confluence_version_tag = "7.19.11" # Shared home restore configuration. # Make sure confluence version set in `confluence_version_tag` match the snapshot version. # # 8.1.4 DCAPT large dataset EBS snapshot # confluence_shared_home_snapshot_id = "snap-0125fdfcf37dabef5" -# 7.19.9 DCAPT large dataset EBS snapshot -confluence_shared_home_snapshot_id = "snap-0bd74575c95014c10" -# 7.13.17 DCAPT large dataset EBS snapshot -# confluence_shared_home_snapshot_id = "snap-08abae6cf1937e958" +# 7.19.11 DCAPT large dataset EBS snapshot +confluence_shared_home_snapshot_id = "snap-09365c581a158a979" +# 7.13.18 DCAPT large dataset EBS snapshot +# confluence_shared_home_snapshot_id = "snap-04cc3d8455b1ef6e9" # Database restore configuration. # Make sure confluence version set in `confluence_version_tag` match the snapshot version. 
@@ -188,18 +192,18 @@ confluence_shared_home_snapshot_id = "snap-0bd74575c95014c10" # # 8.1.4 DCAPT large dataset RDS snapshot # confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-8-1-4" -# 7.19.9 DCAPT large dataset RDS snapshot -confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-9" -# 7.13.17 DCAPT large dataset RDS snapshot -# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-13-17" +# 7.19.11 DCAPT large dataset RDS snapshot +confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-11" +# 7.13.18 DCAPT large dataset RDS snapshot +# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-13-18" # Build number for a specific Confluence version can be found in the link below: # https://developer.atlassian.com/server/confluence/confluence-build-information # 8.1.4 # confluence_db_snapshot_build_number = "9003" -# 7.19.9 +# 7.19.11 confluence_db_snapshot_build_number = "8804" -# 7.13.17 +# 7.13.18 # confluence_db_snapshot_build_number = "8703" # Helm chart version of Confluence @@ -208,7 +212,7 @@ confluence_db_snapshot_build_number = "8804" # Installation timeout # Different variables can influence how long it takes the application from installation to ready state. These # can be dataset restoration, resource requirements, number of replicas and others. -confluence_installation_timeout = 25 +confluence_installation_timeout = 30 # Confluence instance resource configuration confluence_cpu = "4" @@ -249,6 +253,14 @@ confluence_db_master_password = "Password1!" # Enables Collaborative editing in Confluence confluence_collaborative_editing_enabled = true +# Use AWS S3 to store attachments. 
See: https://confluence.atlassian.com/doc/configuring-s3-object-storage-1206794554.html +# Terraform will automatically create S3 bucket, IAM role and policy +#confluence_s3_attachments_storage = true + +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# confluence_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Bitbucket Settings ################################################################################ @@ -266,27 +278,23 @@ bitbucket_license = "bitbucket-license" bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "7.21.11" +bitbucket_version_tag = "7.21.14" # Shared home restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # -# 7.21.11 DCAPT large dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-0456406e413ff835b" -# 8.8.3 DCAPT large dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-04138d264fb24f2e7" -# 7.17.16 DCAPT large dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-06fceac7bdcc3844c" +# 7.21.14 DCAPT large dataset EBS snapshot +bitbucket_shared_home_snapshot_id = "snap-0ccb8c3d34ff171f1" +# 8.9.2 DCAPT large dataset EBS snapshot +#bitbucket_shared_home_snapshot_id = "snap-0d933f3d40000e877" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. 
# -# 7.21.11 DCAPT large dataset RDS snapshot -bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-11" -# 8.8.3 DCAPT large dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-8-3" -# 7.17.16 DCAPT large dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-17-16" +# 7.21.14 DCAPT large dataset RDS snapshot +bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-14" +# 8.9.2 DCAPT large dataset RDS snapshot +#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-2" # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" @@ -357,6 +365,10 @@ bitbucket_db_name = "bitbucket" bitbucket_db_master_username = "atlbitbucket" bitbucket_db_master_password = "Password1!" +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# bitbucket_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Crowd Settings ################################################################################ @@ -374,7 +386,7 @@ crowd_license = "crowd-license" crowd_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -crowd_version_tag = "5.0.5" +crowd_version_tag = "5.1.4" # Dataset Restore @@ -383,8 +395,8 @@ crowd_version_tag = "5.0.5" # This volume will be mounted to the NFS server and used when the product is started. # Make sure the snapshot is available in the region you are deploying to and it follows all product requirements. 
# -# Crowd 5.0.5 DCAPT large dataset EBS snapshot -crowd_shared_home_snapshot_id = "snap-0da31ed523c51a0af" +# Crowd 5.1.4 DCAPT large dataset EBS snapshot +crowd_shared_home_snapshot_id = "snap-0a8e229690be9ae30" # Database restore configuration # If you want to restore the database from a snapshot, uncomment the following line and provide the snapshot identifier. @@ -392,9 +404,9 @@ crowd_shared_home_snapshot_id = "snap-0da31ed523c51a0af" # The snapshot should be in the same AWS account and region as the environment to be deployed. # Please also provide crowd_db_master_username and crowd_db_master_password that matches the ones in snapshot # -# Crowd 5.0.2 DCAPT large dataset RDS snapshot -crowd_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-0-5" -crowd_db_snapshot_build_number = "1794" +# Crowd 5.1.4 DCAPT large dataset RDS snapshot +crowd_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-4" +crowd_db_snapshot_build_number = "1893" # Helm chart version of Crowd and Crowd agent instances. By default the latest version is installed. # crowd_helm_chart_version = "" @@ -406,19 +418,19 @@ crowd_installation_timeout = 20 # Crowd instance resource configuration crowd_cpu = "2" -crowd_mem = "3Gi" -crowd_min_heap = "1024m" -crowd_max_heap = "1024m" +crowd_mem = "8Gi" +crowd_min_heap = "2048m" +crowd_max_heap = "2048m" # Storage -crowd_local_home_size = "10Gi" -crowd_shared_home_size = "10Gi" +crowd_local_home_size = "15Gi" +crowd_shared_home_size = "15Gi" # Crowd NFS instance resource configuration crowd_nfs_requests_cpu = "1" crowd_nfs_requests_memory = "1Gi" crowd_nfs_limits_cpu = "1" -crowd_nfs_limits_memory = "1Gi" +crowd_nfs_limits_memory = "2Gi" # RDS instance configurable attributes. Note that the allowed value of allocated storage and iops may vary based on instance type. # You may want to adjust these values according to your needs. 
@@ -443,6 +455,10 @@ crowd_termination_grace_period = 0 crowd_db_master_username = "atlcrowd" crowd_db_master_password = "Password1!" +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# crowd_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Bamboo Settings ################################################################################ @@ -457,8 +473,8 @@ bamboo_license = "bamboo-license" # By default, latest supported by DCAPT version is set. # https://hub.docker.com/r/atlassian/bamboo/tags # https://hub.docker.com/r/atlassian/bamboo-agent-base/tags -bamboo_version_tag = "9.2.1" -bamboo_agent_version_tag = "9.2.1" +bamboo_version_tag = "9.2.3" +bamboo_agent_version_tag = "9.2.3" # Helm chart version of Bamboo and Bamboo agent instances # bamboo_helm_chart_version = "" @@ -530,3 +546,32 @@ bamboo_db_name = "bamboo" # See https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-bamboo # bamboo_dataset_url = "https://centaurus-datasets.s3.amazonaws.com/bamboo/dcapt-bamboo.zip" + +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# bamboo_custom_values_file = "/path/to/values.yaml" + +################################################################################ +# Monitoring settings +################################################################################ + +# Deploy https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack Helm chart +# to kube-monitoring namespace. Defaults to false. +# monitoring_enabled = true + +# Create Grafana service of LoadBalancer type. 
Defaults to false. To restric access to LB URL +# the list of CIRDs from whitelist_cidr will be automatically applied. +# monitoring_grafana_expose_lb = true + +# Prometheus Persistent Volume Claim size. Defaults to 10Gi. +# Out of the box EKS cluster is created with gp2 storage class which does not allow volume expansion, +# i.e. if you expect a high volume of metrics or metrics with high cardinality it is recommended +# to override the default Prometheus 10Gi PVC storage request when creating enabling monitoring for the first time. +# prometheus_pvc_disk_size = "100Gi" + +# Grafana Persistent Volume Claim size. Defaults to 10Gi. +# grafana_pvc_disk_size = "20Gi" + +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# monitoring_custom_values_file = "/path/to/values.yaml" diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py new file mode 100644 index 000000000..e8a17cb60 --- /dev/null +++ b/app/util/k8s/terminate_cluster.py @@ -0,0 +1,731 @@ +import logging +from argparse import ArgumentParser +from datetime import datetime, timedelta +from time import sleep, time + +import boto3 +import botocore +from boto3.exceptions import Boto3Error +from botocore import exceptions + +US_EAST_2 = "us-east-2" +US_EAST_1 = "us-east-1" +REGIONS = [US_EAST_2, US_EAST_1] + + +def is_float(element): + try: + float(element) + return True + except ValueError: + return False + + +def wait_for_node_group_delete(eks_client, cluster_name, node_group): + timeout = 900 # 15 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + status_info = eks_client.describe_nodegroup(clusterName=cluster_name, nodegroupName=node_group)['nodegroup'] + except eks_client.exceptions.ResourceNotFoundException: + logging.info(f"Node group {node_group} for 
cluster {cluster_name} was successfully deleted.") + break + if status_info['status'] == "DELETING": + logging.info(f"Node group {node_group} for cluster {cluster_name} status is {status_info['status']}. " + f"Attempt {attempt}/{attempts}. Sleeping {sleep_time} seconds.") + + sleep(sleep_time) + attempt += 1 + else: + logging.error(f"Node group {node_group} for cluster {cluster_name} has " + f"unexpected status: {status_info['status']}.") + logging.error(f"Health status: {status_info['health']}") + return + else: + logging.error(f"Node group {node_group} for cluster {cluster_name} was not deleted in {timeout} seconds.") + + +def wait_for_cluster_delete(eks_client, cluster_name): + timeout = 600 # 10 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + status = eks_client.describe_cluster(name=cluster_name)['cluster']['status'] + except eks_client.exceptions.ResourceNotFoundException: + logging.info(f"Cluster {cluster_name} was successfully deleted.") + break + logging.info(f"Cluster {cluster_name} status is {status}. " + f"Attempt {attempt}/{attempts}. Sleeping {sleep_time} seconds.") + sleep(sleep_time) + attempt += 1 + else: + logging.error(f"Cluster {cluster_name} was not deleted in {timeout} seconds.") + + +def wait_for_rds_delete(rds_client, db_name): + timeout = 600 # 10 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + status = \ + rds_client.describe_db_instances(DBInstanceIdentifier=db_name)['DBInstances'][0]['DBInstanceStatus'] + except rds_client.exceptions.DBInstanceNotFoundFault: + logging.info(f"RDS {db_name} was successfully deleted.") + break + logging.info(f"RDS {db_name} status is {status}. " + f"Attempt {attempt}/{attempts}. 
Sleeping {sleep_time} seconds.") + sleep(sleep_time) + attempt += 1 + else: + logging.error(f"RDS {db_name} was not deleted in {timeout} seconds.") + + +def delete_nodegroup(aws_region, cluster_name): + try: + eks_client = boto3.client('eks', region_name=aws_region) + autoscaling_client = boto3.client('autoscaling', region_name=aws_region) + node_groups = eks_client.list_nodegroups(clusterName=cluster_name)['nodegroups'] + + if node_groups: + for node_group in node_groups: + autoscaling_group_name = None + try: + autoscaling_group_name = eks_client.describe_nodegroup( + clusterName=cluster_name, + nodegroupName=node_group)['nodegroup']['resources']['autoScalingGroups'][0]['name'] + autoscaling_client.delete_auto_scaling_group(AutoScalingGroupName=autoscaling_group_name, + ForceDelete=True) + except Boto3Error as e: + logging.error(f"Deleting autoscaling group {autoscaling_group_name} failed with error: {e}") + + try: + eks_client.delete_nodegroup(clusterName=cluster_name, nodegroupName=node_group) + wait_for_node_group_delete(eks_client, cluster_name, node_group) + except Boto3Error as e: + logging.error(f"Deleting node group {node_group} failed with error: {e}") + else: + logging.info(f"Cluster {cluster_name} does not have nodegroups.") + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + logging.info(f"No cluster found for name: {cluster_name}") + else: + raise e + + +def delete_cluster(aws_region, cluster_name): + try: + eks_client = boto3.client('eks', region_name=aws_region) + eks_client.delete_cluster(name=cluster_name) + wait_for_cluster_delete(eks_client, cluster_name) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + logging.info(f"No cluster found for name: {cluster_name}") + else: + raise e + + +def delete_lb(aws_region, vpc_id): + elb_client = boto3.client('elb', region_name=aws_region) + try: + lb_names = [lb['LoadBalancerName'] 
+ for lb in elb_client.describe_load_balancers()['LoadBalancerDescriptions'] + if lb['VPCId'] == vpc_id] + except exceptions.EndpointConnectionError as e: + logging.error(f"Could not connect to the ELBv2 endpoint URL: {e}") + return + if lb_names: + for lb_name in lb_names: + try: + logging.info(f"Deleting load balancer: {lb_name} for vpc id: {vpc_id}") + elb_client.delete_load_balancer(LoadBalancerName=lb_name) + except Boto3Error as e: + logging.error(f"Deleting load balancer {lb_name} failed with error: {e}") + + +def wait_for_nat_gateway_delete(ec2, nat_gateway_id): + timeout = 600 # 10 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + status = ec2.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])['NatGateways'][0]['State'] + except ec2.exceptions.ResourceNotFoundException: + logging.info(f"NAT gateway with id {nat_gateway_id} was not found.") + break + + if status == 'deleted': + logging.info(f"NAT gateway with id {nat_gateway_id} was successfully deleted.") + break + + logging.info(f"NAT gateway with id {nat_gateway_id} status is {status}. " + f"Attempt {attempt}/{attempts}. 
Sleeping {sleep_time} seconds.") + sleep(sleep_time) + attempt += 1 + + else: + logging.error(f"NAT gateway with id {nat_gateway_id} was not deleted in {timeout} seconds.") + + +def delete_nat_gateway(aws_region, vpc_id): + ec2_client = boto3.client('ec2', region_name=aws_region) + filters = [{'Name': 'vpc-id', 'Values': [f'{vpc_id}', ]}, ] + try: + nat_gateway = ec2_client.describe_nat_gateways(Filters=filters) + except exceptions.EndpointConnectionError as e: + logging.error(f"Could not retrieve NAT gateways: {e}") + return + nat_gateway_ids = [nat['NatGatewayId'] for nat in nat_gateway['NatGateways']] + if nat_gateway_ids: + for nat_gateway_id in nat_gateway_ids: + logging.info(f"Deleting NAT gateway with id: {nat_gateway_id}") + try: + ec2_client.delete_nat_gateway(NatGatewayId=nat_gateway_id) + wait_for_nat_gateway_delete(ec2_client, nat_gateway_id) + except Boto3Error as e: + logging.error(f"Deleting NAT gateway with id {nat_gateway_id} failed with error: {e}") + + +def delete_igw(ec2_resource, vpc_id): + vpc_resource = ec2_resource.Vpc(vpc_id) + igws = vpc_resource.internet_gateways.all() + if igws: + for igw in igws: + for retry in range(5): + try: + logging.info(f"Detaching and Removing igw id: {igw.id}") + igw.detach_from_vpc( + VpcId=vpc_id + ) + igw.delete() + break + except exceptions.ClientError as e: + if "Network vpc-" in str(e) and "has some mapped public address(es)" in str(e): + logging.warning(f"Detaching igw failed with error: {e}. 
Retrying in 1 minute...") + sleep(120) + except Boto3Error as e: + logging.error(f"Deleting igw failed with error: {e}") + + +def delete_subnets(ec2_resource, vpc_id): + vpc_resource = ec2_resource.Vpc(vpc_id) + subnets_all = vpc_resource.subnets.all() + subnets = [ec2_resource.Subnet(subnet.id) for subnet in subnets_all] + + if subnets: + try: + for sub in subnets: + logging.info(f"Removing subnet with id: {sub.id}") + sub.delete() + except Boto3Error as e: + logging.error(f"Delete of subnet failed with error: {e}") + + +def delete_route_tables(ec2_resource, vpc_id): + vpc_resource = ec2_resource.Vpc(vpc_id) + rtbs = vpc_resource.route_tables.all() + if rtbs: + try: + for rtb in rtbs: + if rtb.associations_attribute and rtb.associations_attribute[0]['Main'] == True: + logging.info(f"{rtb.id} is the main route table, skipping...") + continue + logging.info(f"Removing rtb-id: {rtb.id}") + table = ec2_resource.RouteTable(rtb.id) + table.delete() + except Boto3Error as e: + logging.error(f"Delete of route table failed with error: {e}") + + +def delete_security_groups(ec2_resource, vpc_id): + vpc_resource = ec2_resource.Vpc(vpc_id) + sgps = vpc_resource.security_groups.all() + if sgps: + try: + for sg in sgps: + if sg.group_name == 'default': + logging.info(f"{sg.id} is the default security group, skipping...") + continue + if sg.ip_permissions: + logging.info(f"Removing ingress rules for security group with id: {sg.id}") + sg.revoke_ingress(IpPermissions=sg.ip_permissions) + if sg.ip_permissions_egress: + logging.info(f"Removing egress rules for security group with id: {sg.id}") + sg.revoke_egress(IpPermissions=sg.ip_permissions_egress) + for sg in sgps: + if sg.group_name == 'default': + logging.info(f"{sg.id} is the default security group, skipping...") + continue + logging.info(f"Removing security group with id: {sg.id}") + sg.delete() + except Boto3Error as e: + logging.error(f"Delete of security group failed with error: {e}") + + +def 
get_vpc_region_by_name(vpc_name): + for rgn in REGIONS: + ec2_resource = boto3.resource('ec2', region_name=rgn) + filters = [{'Name': 'tag:Name', 'Values': [vpc_name]}] + vpc = list(ec2_resource.vpcs.filter(Filters=filters)) + if vpc: + return rgn + logging.info(f"VPC {vpc_name} NOT found in {rgn} region.") + + logging.warning(f"VPC {vpc_name} NOT found in the following regions: {REGIONS}.") + + +def delete_rds(aws_region, vpc_id): + rds_client = boto3.client('rds', region_name=aws_region) + try: + db_instances = rds_client.describe_db_instances()['DBInstances'] + except exceptions.EndpointConnectionError as e: + logging.error(f"Could not connect to the RDS endpoint URL: {e}") + return + db_names_and_subnets = [(db_instance['DBInstanceIdentifier'], db_instance['DBSubnetGroup']['DBSubnetGroupName']) + for db_instance in db_instances + if vpc_id == db_instance['DBSubnetGroup']['VpcId']] + for db_name, subnet_name in db_names_and_subnets: + try: + logging.info(f"Deleting RDS {db_name} for VPC id: {vpc_id}.") + rds_client.delete_db_instance( + DBInstanceIdentifier=db_name, SkipFinalSnapshot=True, DeleteAutomatedBackups=True) + wait_for_rds_delete(rds_client, db_name) + logging.info(f"Deleting RDS subnet group {subnet_name}") + rds_client.delete_db_subnet_group(DBSubnetGroupName=subnet_name) + except Boto3Error as e: + logging.error(f"Delete RDS {db_name} failed with error: {e}") + + +def terminate_vpc(vpc_name, aws_region=None): + if not aws_region: + aws_region = get_vpc_region_by_name(vpc_name) + + if aws_region: + ec2_resource = boto3.resource('ec2', region_name=aws_region) + filters = [{'Name': 'tag:Name', 'Values': [vpc_name]}] + vpc = list(ec2_resource.vpcs.filter(Filters=filters)) + if not vpc: + logging.warning(f"VPC {vpc_name} NOT found in region {aws_region}.") + return + vpc_id = vpc[0].id + logging.info(f"Checking RDS for VPC {vpc_name}.") + delete_rds(aws_region, vpc_id) + + logging.info(f"Checking load balancers for VPC {vpc_name}.") + 
delete_lb(aws_region, vpc_id) + + logging.info(f"Checking NAT gateway for VPC {vpc_name}.") + delete_nat_gateway(aws_region, vpc_id) + + logging.info(f"Checking internet gateway for VPC {vpc_name}.") + delete_igw(ec2_resource, vpc_id) + + logging.info(f"Checking subnets for VPC {vpc_name}.") + delete_subnets(ec2_resource, vpc_id) + + logging.info(f"Checking route tables for VPC {vpc_name}.") + delete_route_tables(ec2_resource, vpc_id) + + logging.info(f"Checking security groups for VPC {vpc_name}.") + delete_security_groups(ec2_resource, vpc_id) + + logging.info(f"Deleting VPC {vpc_name}.") + try: + ec2_resource.Vpc(vpc_id).delete() + except Boto3Error as e: + logging.error(f"Deleting VPC {vpc_name} failed with error: {e}.") + + logging.info(f"Release EIP for {vpc_name}.") + release_eip(aws_region, vpc_name) + + +def get_cluster_region_by_name(cluster_name): + for rgn in REGIONS: + eks_client = boto3.client('eks', region_name=rgn) + clusters = eks_client.list_clusters()['clusters'] + if cluster_name in clusters: + logging.info(f"Cluster {cluster_name} found in {rgn} region.") + return rgn + else: + logging.info(f"Cluster {cluster_name} NOT found in {rgn} region.") + + logging.warning(f"Cluster {cluster_name} NOT found in the following regions: {REGIONS}.") + + +def terminate_cluster(cluster_name, aws_region=None): + # If no region is provided, get the region by cluster name + if not aws_region: + aws_region = get_cluster_region_by_name(cluster_name) + + if not aws_region: + raise ValueError("Could not determine the AWS region for the given cluster name.") + + # Delete the nodegroup and cluster in the specified region + delete_nodegroup(aws_region, cluster_name) + delete_cluster(aws_region, cluster_name) + + +def release_eip(aws_region, vpc_name): + ec2_client = boto3.client('ec2', region_name=aws_region) + addresses_dict = ec2_client.describe_addresses() + for eip_dict in addresses_dict['Addresses']: + if not eip_dict.get("Tags"): + logging.warning(f"EIP 
{eip_dict['AllocationId']} does not have tags. Review and terminate manually.") + return + name = next((tag["Value"] for tag in eip_dict["Tags"] if tag["Key"] == "Name"), None) + if name and vpc_name in name: + logging.info(f"Releasing EIP {eip_dict['PublicIp']} with name: {name}") + ec2_client.release_address(AllocationId=eip_dict['AllocationId']) + + +def retrieve_ebs_volumes(aws_region, cluster_name): + ec2 = boto3.resource('ec2', aws_region) + volumes = [] + + # Get all volumes in the region + response = ec2.volumes.all() + + for volume in response: + # Check if the volume is in use + if volume.state == "in-use": + logging.info(f"Volume {volume.id} is in use: skipping") + else: + # Check if the volume has the cluster_name in any of its tag values + cluster_tag = next((tag["Value"] for tag in volume.tags if cluster_name in tag["Value"]), None) + if cluster_tag: + volumes.append(volume.id) + + # Check for 'dynamic-pvc' or 'nfs-shared-home' in the name + name = next((tag["Value"] for tag in volume.tags if tag["Key"] == "Name"), None) + if "dynamic-pvc" in name or "nfs-shared-home" in name: + logging.info(f"Volume {volume.id} is not in use and " + f"has 'dynamic-pvc' or 'nfs-shared-home' in name: deleting...") + volumes.append(volume.id) + + print(f"Found volumes: {volumes}") + return volumes + + +def delete_ebs_volumes_by_id(aws_region, volumes): + ec2 = boto3.resource('ec2', aws_region) + + # Terminate the volumes + for volume_id in volumes: + try: + volume = ec2.Volume(volume_id) + if volume.state == "in-use": + print(f"Volume {volume_id} is in use: skipping") + continue + volume.delete() + print(f"Terminated volume: {volume_id}") + except Exception as e: + print(f"Failed to terminate volume {volume_id}: {e}") + + +def get_clusters_to_terminate(): + clusters_to_terminate = [] + for rgn in REGIONS: + eks_client = boto3.client('eks', region_name=rgn) + clusters = eks_client.list_clusters()['clusters'] + for cluster in clusters: + cluster_info = 
eks_client.describe_cluster(name=cluster)['cluster'] + created_date = cluster_info['createdAt'] + persist_days = cluster_info['tags'].get('persist_days', 0) + if not is_float(persist_days): + persist_days = 0 + created_date_timestamp = created_date.timestamp() + persist_seconds = float(persist_days) * 24 * 60 * 60 + now = time() + if created_date_timestamp + persist_seconds > now: + logging.info(f"Cluster {cluster} is not EOL yet, skipping...") + else: + logging.info(f"Cluster {cluster} is EOL and should be deleted.") + clusters_to_terminate.append(cluster) + return clusters_to_terminate + + +def terminate_open_id_providers(cluster_name=None): + iam_client = boto3.client('iam') + providers = iam_client.list_open_id_connect_providers()['OpenIDConnectProviderList'] + for provider in providers: + tags = iam_client.list_open_id_connect_provider_tags(OpenIDConnectProviderArn=provider['Arn'])['Tags'] + created_date = iam_client.get_open_id_connect_provider(OpenIDConnectProviderArn=provider['Arn'])['CreateDate'] + + name = next((tag["Value"] for tag in tags if tag["Key"] == "Name"), None) + if name and cluster_name and cluster_name in name: + logging.info(f"Deleting Open ID provider with name: {name}") + iam_client.delete_open_id_connect_provider(OpenIDConnectProviderArn=provider['Arn']) + return + if name == 'Alfred': + logging.info(f"Skipping Alfred Open ID provider") + continue + persist_days = next((tag["Value"] for tag in tags if tag["Key"] == "persist_days"), None) + if persist_days: + if not is_float(persist_days): + persist_days = 0 + created_date_timestamp = created_date.timestamp() + persist_seconds = float(persist_days) * 24 * 60 * 60 + now = time() + if created_date_timestamp + persist_seconds > now: + logging.info(f"Open ID provider {name} is not EOL yet, skipping...") + else: + logging.info(f"Open ID provider {name} is EOL and should be deleted.") + iam_client.delete_open_id_connect_provider(OpenIDConnectProviderArn=provider['Arn']) + + +def 
retrieve_open_identities(cluster_name, aws_region): + open_identities = [] + + try: + eks_client = boto3.client("eks", region_name=aws_region) + response = eks_client.describe_cluster(name=cluster_name) + + identity_provider = response["cluster"]["identity"]["oidc"]["issuer"] + identity_id = identity_provider.split('/id/')[-1] + open_identities.append(identity_id) + print(f"Open identity providers: {open_identities}") + except Exception as e: + print(f"Failed to retrieve Open identity providers from {cluster_name}. Skipping...") + print(f"Error details: {e}") + + return open_identities + + +def delete_open_identities_for_cluster(open_identities): + if not open_identities: + print("No OpenID Connect providers to delete.") + return + + iam_client = boto3.client('iam') + + for identity in open_identities: + try: + providers = iam_client.list_open_id_connect_providers()['OpenIDConnectProviderList'] + for provider in providers: + provider_identity_id = provider['Arn'].split('/id/')[-1] + if provider_identity_id == identity: + iam_client.delete_open_id_connect_provider(OpenIDConnectProviderArn=provider['Arn']) + print(f"Deleted identity provider: {identity}") + else: + print(f"Identity '{identity}' not found in provider '{provider['Arn']}'") + except Exception as e: + print(f"Failed to delete identity provider: {identity}") + print(f"Error details: {e}") + + +def get_vpcs_to_terminate(): + vpcs_to_terminate = [] + for rgn in REGIONS: + ec2_resource = boto3.resource('ec2', region_name=rgn) + vpcs = ec2_resource.vpcs.all() + for vpc in vpcs: + + if vpc.is_default: + logging.info(f"Skipping default VPC for {rgn} region with id: {vpc.id}") + continue + + vpc_name = next((tag["Value"] for tag in vpc.tags if tag["Key"] == "Name"), None) + if "Atlassian-Standard-Infrastructure" in vpc_name: + logging.info(f"Skipping ASI CloudFormation VPC for {rgn} region with id: {vpc.id}") + continue + + # mark for remove all VPC without instances + if not list(vpc.instances.all()): + 
cluster_name = vpc_name.replace("-vpc", "-cluster") + if cluster_name in boto3.client('eks', region_name=rgn).list_clusters()['clusters']: + logging.info(f"Skipping VPC {vpc_name}, because this vpc has a cluster...") + continue + logging.info(f"VPC {vpc_name} tagged for termination.") + vpcs_to_terminate.append(vpc_name) + + return vpcs_to_terminate + + +def release_unused_eips(): + for rgn in REGIONS: + ec2_client = boto3.client('ec2', region_name=rgn) + addresses_dict = ec2_client.describe_addresses() + for eip_dict in addresses_dict['Addresses']: + if "NetworkInterfaceId" not in eip_dict: + eip_name = next((tag["Value"] for tag in eip_dict["Tags"] if tag["Key"] == "Name"), None) + cluster_name = eip_name.split("-vpc")[0] + "-cluster" + if cluster_name in boto3.client('eks', region_name=rgn).list_clusters()['clusters']: + logging.info(f"Skipping EIP {eip_name}, because this EIP has a cluster...") + continue + logging.info(f"Releasing EIP {eip_dict['PublicIp']} with name: {eip_name}") + ec2_client.release_address(AllocationId=eip_dict['AllocationId']) + + +def role_filter(role): + if role["RoleName"].startswith("atlas-"): + tags = boto3.client("iam").list_role_tags(RoleName=role["RoleName"]) + persist_days = None + for tag in tags["Tags"]: + if tag["Key"] == "persist_days": + try: + persist_days = float(tag["Value"]) + except ValueError: + ... 
+ if persist_days: + eol_time = role['CreateDate'] + timedelta(days=float(persist_days)) + return datetime.now(role['CreateDate'].tzinfo) > eol_time + return False + + +def remove_cluster_specific_roles_and_policies(cluster_name, aws_region): + iam_client = boto3.client("iam", region_name=aws_region) + + # Get and filter roles by cluster name prefix + all_roles = iam_client.list_roles() + cluster_roles = [role for role in all_roles["Roles"] if role["RoleName"].startswith(cluster_name)] + + for role in cluster_roles: + role_name = role["RoleName"] + + attached_policies = iam_client.list_attached_role_policies(RoleName=role_name) + + for policy in attached_policies["AttachedPolicies"]: + # Detach policy from the role + iam_client.detach_role_policy(RoleName=role_name, PolicyArn=policy["PolicyArn"]) + print(f" Detached policy {policy['PolicyName']} from role {role_name}") + + if cluster_name in policy['PolicyName']: + # Delete the policy + iam_client.delete_policy(PolicyArn=policy["PolicyArn"]) + print(f" Deleted policy {policy['PolicyName']}") + + # Delete the role + iam_client.delete_role(RoleName=role_name) + print(f"Deleted Role: {role_name}") + + +def remove_role_and_policies(role_name, active_clusters): + if role_name.startswith(tuple(active_clusters)): + logging.info(f"There is an active cluster which can be using role {role_name}. 
Skip.") + return + logging.info(f"Role {role_name} is EOL and should be deleted.") + iam_client = boto3.client("iam") + attached_policies = iam_client.list_attached_role_policies(RoleName=role_name).get("AttachedPolicies") + for policy in attached_policies: + logging.info(f"Detach {policy['PolicyArn']} from {role_name}") + iam_client.detach_role_policy(PolicyArn=policy["PolicyArn"], RoleName=role_name) + if policy["PolicyName"].endswith("_Fleet-Enrollment") or policy["PolicyName"].endswith("_LaaS-policy"): + logging.info(f"Delete policy {policy['PolicyName']}") + iam_client.delete_policy(PolicyArn=policy["PolicyArn"]) + logging.info(f"Delete role {role_name}") + iam_client.delete_role(RoleName=role_name) + logging.info(f"Role {role_name} deleted successfully") + + +def get_role_names_to_terminate(): + iam_client = boto3.client("iam") + roles_paginated = iam_client.list_roles(MaxItems=1000) + all_roles = roles_paginated["Roles"] + while roles_paginated.get("Marker"): + roles_paginated = iam_client.list_roles(Marker=roles_paginated["Marker"], MaxItems=1000) + all_roles.extend(roles_paginated["Roles"]) + logging.info(f"Roles count: {len(all_roles)}") + filtered_roles = list(filter(role_filter, all_roles)) + return list(map(lambda role: role["RoleName"], filtered_roles)) + + +def delete_unused_volumes(): + for rgn in REGIONS: + logging.info(f"Region: {rgn}") + ec2_resource = boto3.resource('ec2', region_name=rgn) + volumes = ec2_resource.volumes.all() + # Filter unused volumes + for volume in volumes: + if volume.state == "in-use": + logging.info(f"Volume {volume.id} is in use: skipping") + else: + if not volume.tags: + logging.warning(f"Volume {volume} does not have tags!") + continue + # Delete unused volumes with specific tags or names + persist_days = next((tag["Value"] for tag in volume.tags if tag["Key"] == "persist_days"), None) + if persist_days: + eol_time = volume.create_time + timedelta(days=float(persist_days)) + if datetime.now(volume.create_time.tzinfo) < 
eol_time: + logging.info(f"Volume {volume.id} is not EOL yet, skipping...") + else: + logging.info(f"Volume {volume.id} is EOL, deleting...") + volume.delete() + else: + name = next((tag["Value"] for tag in volume.tags if tag["Key"] == "Name"), None) + if "dynamic-pvc" or "nfs-shared-home" in name: + logging.info(f"Volume {volume.id} is not in use and " + f"has 'dynamic-pvc' or 'nfs-shared-home' in name: deleting...") + volume.delete() + else: + logging.warning(f"Volume {volume.id} does not have 'persist_days' tag " + f"| Name tag {name}: skipping") + + +def main(): + parser = ArgumentParser() + parser.add_argument("--cluster_name", type=str, help='Cluster name to terminate.') + parser.add_argument('--aws_region', type=str, help='AWS region where the cluster is located (e.g., "us-east-2").') + parser.add_argument('--all', action='store_true', help='Terminate all clusters in all regions.') + args = parser.parse_args() + + if not args.all: + if not args.cluster_name: + raise SystemExit("--cluster_name argument is not provided.") + if not args.aws_region: + raise SystemExit("--aws_region argument is not provided.") + + if args.cluster_name and args.aws_region: + logging.info(f"Delete all resources for cluster {args.cluster_name}.") + open_identities = retrieve_open_identities(cluster_name=args.cluster_name, aws_region=args.aws_region) + terminate_cluster(cluster_name=args.cluster_name, aws_region=args.aws_region) + vpc_name = f'{args.cluster_name.replace("-cluster", "-vpc")}' + logging.info(f"Delete VPC for cluster {args.cluster_name}.") + terminate_vpc(vpc_name=vpc_name, aws_region=args.aws_region) + volumes = retrieve_ebs_volumes(aws_region=args.aws_region, cluster_name=args.cluster_name) + delete_open_identities_for_cluster(open_identities) + remove_cluster_specific_roles_and_policies(cluster_name=args.cluster_name, aws_region=args.aws_region) + delete_ebs_volumes_by_id(aws_region=args.aws_region, volumes=volumes) + return + + logging.info(f"--cluster_name 
parameter was not specified.") + logging.info("Searching for clusters to remove.") + clusters = get_clusters_to_terminate() + for cluster_name in clusters: + logging.info(f"Delete all resources and VPC for cluster {cluster_name}.") + terminate_cluster(cluster_name=cluster_name) + vpc_name = f'{cluster_name.replace("-cluster", "-vpc")}' + terminate_vpc(vpc_name=vpc_name) + terminate_open_id_providers(cluster_name=cluster_name) + vpcs = get_vpcs_to_terminate() + for vpc_name in vpcs: + logging.info(f"Delete all resources for vpc {vpc_name}.") + terminate_vpc(vpc_name=vpc_name) + logging.info("Release unused EIPs") + release_unused_eips() + logging.info("Terminate open ID providers") + terminate_open_id_providers() + role_names = get_role_names_to_terminate() + active_clusters = [] + for region in REGIONS: + eks_client = boto3.client("eks", region_name=region) + active_clusters.extend(eks_client.list_clusters().get("clusters")) + for role_name in role_names: + remove_role_and_policies(role_name, active_clusters) + logging.info("Terminate unused and expired ebs volumes") + delete_unused_volumes() + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + main() diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index e873b8dc8..0842c9316 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Bamboo @@ -26,44 +26,59 @@ test results for the Marketplace approval process. Preferably, use the below rec ## 1. 
Set up an enterprise-scale environment Bamboo Data Center on k8s -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Bamboo Data Center environment and AWS on k8s. +#### EC2 CPU Limit +The installation of Bamboo requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. #### Setup Bamboo Data Center with an enterprise-scale dataset on k8s Below process describes how to install Bamboo DC with an enterprise-scale dataset included. This configuration was created specifically for performance testing during the DC app review process. -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. 
Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-bamboo` - `products` - `bamboo` - `bamboo_license` - one-liner of valid bamboo license without spaces and new line symbols - `region` - **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. 
From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bamboo`. -9. Wait for all remote agents to be started and connected. It can take up to 10 minutes. Agents can be checked in `Settings` > `Agents`. +6. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bamboo`. +7. Wait for all remote agents to be started and connected. It can take up to 10 minutes. Agents can be checked in `Settings` > `Agents`. {{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. +All the datasets use the standard `admin`/`admin` credentials. {{% /note %}} --- @@ -79,12 +94,8 @@ Data dimensions and values for default enterprise-scale dataset uploaded are lis --- -#### Troubleshooting -See [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/) page. - #### Terminate Bamboo Data Center - -Follow steps described on [Uninstallation and cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) page. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. 
--- @@ -224,7 +235,7 @@ Instead, set those values directly in `.yml` file on execution environment insta application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 8085, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /babmoo in case of url like http://localhost:8085/bamboo + application_postfix: /bamboo # e.g. /babmoo in case of url like http://localhost:8085/bamboo admin_login: admin admin_password: admin load_executor: jmeter @@ -240,7 +251,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) @@ -274,8 +285,7 @@ To receive performance baseline results **without** an app installed and **witho ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/bamboo/YY-MM-DD-hh-mm-ss` folder: @@ -298,8 +308,7 @@ the next steps. 
For an enterprise-scale environment run, the acceptable success ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml ``` {{% note %}} @@ -309,7 +318,7 @@ the next steps. For an enterprise-scale environment run, the acceptable success ##### Run 3 (~50 min) -To receive scalability benchmark results for one-node Bamboo DC **with app** and **with app-specific actions**: +To receive results for Bamboo DC **with app** and **with app-specific actions**: 1. Apply app-specific code changes to a new branch of forked repo. 1. Use SSH to connect to execution environment. @@ -318,8 +327,7 @@ To receive scalability benchmark results for one-node Bamboo DC **with app** and ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml ``` {{% note %}} @@ -366,7 +374,7 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -384,7 +392,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. 
## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. -If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). +If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 32e7cce31..91fc257ea 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Bitbucket @@ -60,9 +60,6 @@ the process can be continued after switching to the `7.1.0` DCAPT version. ### 1. 
Setting up Bitbucket Data Center development environment -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Bitbucket Data Center environment and AWS on k8s. - #### AWS cost estimation for the development environment {{% note %}} @@ -83,39 +80,38 @@ See [Set up an enterprise-scale environment Bitbucket Data Center on AWS](#insta Below process describes how to install low-tier Bitbucket DC with "small" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt-small.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars - ``` -6. Set **required** variables in `dcapt-small.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. 
Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt-small.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-bitbucket-small` - `products` - `bitbucket` - `bitbucket_license` - one-liner of valid bitbucket license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + - `instance_types` - `["t3.2xlarge"]` + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Bitbucket version specified in **bitbucket_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~20 min): - ```bash - ./install.sh -c dcapt-small.tfvars +6. From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). 
-Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. @@ -204,8 +200,26 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Bitbucket Data Center enterprise-scale environment with "large" dataset -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Bitbucket Data Center environment and AWS on k8s. +{{% warning %}} +It is recommended to terminate a development environment before creating an enterprise-scale environment. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. +{{% /warning %}} + +#### EC2 CPU Limit +The installation of 4-nodes Bitbucket requires **48** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. 
To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. @@ -232,43 +246,34 @@ Data dimensions and values for an enterprise-scale dataset are listed and descri | Total files number | ~750 000 | -{{% warning %}} -It is recommended to terminate a development environment before creating an enterprise-scale environment. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. -If you want to keep a development environment up, read [How do I deal with a pre-existing state in multiple environments?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#:~:text=How%20do%20I%20deal%20with%20pre%2Dexisting%20state%20in%20multiple%20environment%3F) -{{% /warning %}} - Below process describes how to install enterprise-scale Bitbucket DC with "large" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). 
-3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-bitbucket-large` - `products` - `bitbucket` - `bitbucket_license` - one-liner of valid bitbucket license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** - `instance_types` - `["m5.4xlarge"]` -7. Optional variables to override: +5. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). 
- Make sure that the Bitbucket version specified in **bitbucket_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars - ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. +6. From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars + ``` +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. {{% note %}} New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). @@ -315,7 +320,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). 
@@ -357,8 +362,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/bitbucket/YY-MM-DD-hh-mm-ss` folder: @@ -382,8 +386,7 @@ To receive performance results with an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` {{% note %}} @@ -440,8 +443,7 @@ To receive scalability benchmark results for one-node Bitbucket DC **with** app- ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` {{% note %}} @@ -452,24 +454,27 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~1 hour) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. 
+[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-bitbucket/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Bitbucket DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `bitbucket_replica_count` value to `2`. 3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` {{% note %}} @@ -480,8 +485,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~1 hour) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. 
+[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-bitbucket/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for four-node Bitbucket DC with app-specific actions: @@ -491,8 +496,7 @@ To receive scalability benchmark results for four-node Bitbucket DC with app-spe ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` {{% note %}} @@ -534,7 +538,7 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -551,7 +555,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. -If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). 
+If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 9b3ef2a7f..d5b37bce9 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Confluence @@ -60,9 +60,6 @@ the process can be continued after switching to the `6.3.0` DCAPT version. ### 1. Setting up Confluence Data Center development environment -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Confluence Data Center environment and AWS on k8s. - #### AWS cost estimation for the development environment {{% note %}} @@ -83,39 +80,37 @@ See [Set up an enterprise-scale environment Confluence Data Center on AWS](#inst Below process describes how to install low-tier Confluence DC with "small" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. 
Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt-small.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars - ``` -6. Set **required** variables in `dcapt-small.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt-small.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-confluence-small` - `products` - `confluence` - `confluence_license` - one-liner of valid confluence license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. 
Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Confluence version specified in **confluence_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~20 min): - ```bash - ./install.sh -c dcapt-small.tfvars +6. From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. @@ -280,12 +275,30 @@ App-specific actions are required. Do not proceed with the next step until you h --- ## Enterprise-scale environment +{{% warning %}} +It is recommended to terminate a development environment before creating an enterprise-scale environment. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. 
+{{% /warning %}} + After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**. ### 4. Setting up Confluence Data Center enterprise-scale environment with "large" dataset -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Confluence Data Center environment and AWS on k8s. +#### EC2 CPU Limit +The installation of 4-nodes Confluence requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. 
@@ -312,57 +325,45 @@ Data dimensions and values for an enterprise-scale dataset are listed and descri | Spaces | ~5 000 | | Users | ~5 000 | -{{% warning %}} -It is recommended to terminate a development environment before creating an enterprise-scale environment. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. -If you want to keep a development environment up, read [How do I deal with a pre-existing state in multiple environments?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#:~:text=How%20do%20I%20deal%20with%20pre%2Dexisting%20state%20in%20multiple%20environment%3F) -{{% /warning %}} - Below process describes how to install enterprise-scale Confluence DC with "large" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. 
- ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-confluence-large` - `products` - `confluence` - `confluence_license` - one-liner of valid confluence license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use this server id for generation `BX02-9YO1-IN86-LO5G`. + {{% /note %}} + +5. Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Confluence version specified in **confluence_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars - ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use this server id for generation `BX02-9YO1-IN86-LO5G`. -{{% /note %}} +6. 
From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars + ``` +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. It's recommended to change default password from UI account page for security reasons. {{% /note %}} -{{% warning %}} -Terminate cluster when it is not used for performance results generation. -{{% /warning %}} - --- ### 5. Setting up an execution environment @@ -394,7 +395,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). @@ -436,8 +437,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` 1. 
View the following main results of the run in the `dc-app-performance-toolkit/app/results/confluence/YY-MM-DD-hh-mm-ss` folder: @@ -461,8 +461,7 @@ To receive performance results with an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` {{% note %}} @@ -519,8 +518,7 @@ To receive scalability benchmark results for one-node Confluence DC **with** app ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` {{% note %}} @@ -531,24 +529,27 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-confluence/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Confluence DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `confluence_replica_count` value to `2`. 3. 
From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` {{% note %}} @@ -559,8 +560,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-confluence/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} To receive scalability benchmark results for four-node Confluence DC with app-specific actions: @@ -570,8 +571,7 @@ To receive scalability benchmark results for four-node Confluence DC with app-sp ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` {{% note %}} @@ -613,7 +613,7 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -630,7 +630,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. -If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). 
+For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index e787770a7..98c63c59b 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Crowd @@ -23,45 +23,57 @@ In this document, we cover the use of the Data Center App Performance Toolkit on ## 1. Set up an enterprise-scale environment Crowd Data Center on k8s -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Crowd Data Center environment and AWS on k8s. +#### EC2 CPU Limit +The installation of 4-nodes Crowd requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. 
To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. #### Setup Crowd Data Center with an enterprise-scale dataset on k8s Below process describes how to install Crowd DC with an enterprise-scale dataset included. This configuration was created specifically for performance testing during the DC app review process. -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. 
Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-crowd` - `products` - `crowd` - `crowd_license` - one-liner of valid crowd license without spaces and new line symbols - `region` - **Do not change default region (`us-east-2`). If specific region is required, contact support.** - - `instance_types` - `["c5.xlarge"]` -7. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars + - `instance_types` - `["m5.xlarge"]` + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`. +6. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`. 
-{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} --- @@ -77,12 +89,8 @@ All the datasets use the standard `admin`/`admin` credentials. {{% /note %}} --- -#### Troubleshooting -See [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/) page. - #### Terminate Crowd Data Center - -Follow steps described on [Uninstallation and cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) page. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. --- @@ -145,7 +153,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). @@ -184,8 +192,7 @@ To receive performance baseline results **without** an app installed and **witho ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` 1. 
View the following main results of the run in the `dc-app-performance-toolkit/app/results/crowd/YY-MM-DD-hh-mm-ss` folder: @@ -207,8 +214,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` {{% note %}} @@ -266,8 +272,7 @@ To receive scalability benchmark results for one-node Crowd DC **with** app-spec ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` {{% note %}} @@ -278,17 +283,21 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-crowd/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Crowd DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `crowd_replica_count` value to `2`. 3. 
From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. @@ -310,8 +319,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` {{% note %}} @@ -322,8 +330,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-crowd/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} To receive scalability benchmark results for four-node Crowd DC with app-specific actions: @@ -348,8 +356,7 @@ To receive scalability benchmark results for four-node Crowd DC with app-specifi ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` {{% note %}} @@ -396,7 +403,8 @@ After completing all your tests, delete your Crowd Data Center stacks. #### Attaching testing results to ECOHELP ticket {{% warning %}} -Do not forget to attach performance testing results to your ECOHELP ticket. +It is recommended to terminate an enterprise-scale environment after completing all tests. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} 1. Make sure you have two reports folders: one with performance profile and second with scale profile results. @@ -407,7 +415,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. -If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). 
+If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md b/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md index 9ec39830e..b1b5547c7 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Jira (CloudFormation deployment) @@ -71,7 +71,7 @@ All important parameters are listed and described in this section. 
For all other | Parameter | Recommended value | | --------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Jira Product | Software | -| Version | The Data Center App Performance Toolkit officially supports `8.20.22`, `9.4.6` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | +| Version | The Data Center App Performance Toolkit officially supports `8.20.24`, `9.4.8` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | **Cluster nodes** @@ -171,7 +171,7 @@ Make sure **English (United States)** language is selected as a default language - `application_protocol`: http or https. - `application_port`: for HTTP - 80, for HTTPS - 443, 8080, 2990 or your instance-specific port. - `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. when using self-signed SSL certificate. - - `application_postfix`: it is empty by default; e.g., /jira for url like this http://localhost:2990/jira. + - `application_postfix`: set to empty for CloudFormation deployment; e.g., /jira for url like this http://localhost:2990/jira. - `admin_login`: admin user username. - `admin_password`: admin user password. - `load_executor`: executor for load tests. Valid options are [jmeter](https://jmeter.apache.org/) (default) or [locust](https://locust.io/). @@ -392,7 +392,7 @@ All important parameters are listed and described in this section. 
For all other | Parameter | Recommended Value | | --------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Jira Product | Software | -| Version | The Data Center App Performance Toolkit officially supports `8.20.22`, `9.4.6` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | +| Version | The Data Center App Performance Toolkit officially supports `8.20.24`, `9.4.8` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | **Cluster nodes** @@ -719,8 +719,8 @@ Instead, set those values directly in `.yml` file on execution environment insta application_hostname: test_jira_instance.atlassian.com # Jira DC hostname without protocol and port e.g. test-jira.atlassian.com or localhost application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc - secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /jira in case of url like http://localhost:2990/jira + secure: True # set False to allow insecure connections, e.g. when using self-signed SSL certificate + application_postfix: # set to empty for CloudFromation deployment. e.g. /jira in case of url like http://localhost:2990/jira admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. @@ -732,7 +732,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. 
* Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). @@ -774,8 +774,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder: @@ -817,8 +816,7 @@ If your Amazon RDS DB instance class is lower than `db.m5.xlarge` it is required ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} @@ -876,8 +874,7 @@ To receive scalability benchmark results for one-node Jira DC **with** app-speci ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} @@ -922,8 +919,7 @@ In case if index synchronization is failed by some reason (e.g. 
application stat ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` @@ -949,8 +945,7 @@ To receive scalability benchmark results for four-node Jira DC with app-specific ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index a76c6abbc..c851159f7 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Jira @@ -46,9 +46,6 @@ DCAPT has fully transitioned to Terraform deployment. If you still wish to use C ### 1. Setting up Jira Data Center development environment -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Jira Data Center environment and AWS on k8s. - #### AWS cost estimation for the development environment {{% note %}} @@ -69,44 +66,42 @@ See [Set up an enterprise-scale environment Jira Data Center on AWS](#instancese Below process describes how to install low-tier Jira DC with "small" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. 
Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt-small.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars - ``` -6. Set **required** variables in `dcapt-small.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt-small.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-jira-small` - `products` - `jira` - `jira_license` - one-liner of valid jira license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. 
Optional variables to override: - `jira_version_tag` - Jira version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Jira version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~20 min): - ```bash - ./install.sh -c dcapt-small.tfvars +6. From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -9. Re-index: +7. Re-index (only for Jira 8.x, for Jira 9.x skip this step): - Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. - Select the **Full re-index** option. - Click **Re-Index** and wait until re-indexing is completed (~2s). -10. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} +8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. @@ -269,12 +264,30 @@ App-specific actions are required. Do not proceed with the next step until you h --- ## Enterprise-scale environment +{{% warning %}} +It is recommended to terminate a development environment before creating an enterprise-scale environment. 
+Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. +{{% /warning %}} + After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**. ### 4. Setting up Jira Data Center enterprise-scale environment with "large" dataset -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Jira Data Center environment and AWS on k8s. +#### EC2 CPU Limit +The installation of 4-nodes Jira requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. 
#### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. @@ -318,57 +331,45 @@ Data dimensions and values for an enterprise-scale dataset are listed and descri All the datasets use the standard `admin`/`admin` credentials. {{% /note %}} -{{% warning %}} -It is recommended to terminate a development environment before creating an enterprise-scale environment. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. -If you want to keep a development environment up, read [How do I deal with a pre-existing state in multiple environments?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#:~:text=How%20do%20I%20deal%20with%20pre%2Dexisting%20state%20in%20multiple%20environment%3F) -{{% /warning %}} - Below process describes how to install enterprise-scale Jira DC with "large" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. 
Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: - - `environment_name` - any name for you environment, e.g. `dcapt-jira-large` +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: + - `environment_name` - any name for you environment, e.g. `dcapt-jira` - `products` - `jira` - `jira_license` - one-liner of valid jira license without spaces and new line symbols - - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. Optional variables to override: - `jira_version_tag` - Jira version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Jira version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars - ``` +6. 
From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars + ``` 9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use this server id for generation `BX02-9YO1-IN86-LO5G`. -{{% /note %}} - {{% note %}} All the datasets use the standard `admin`/`admin` credentials. It's recommended to change default password from UI account page for security reasons. {{% /note %}} -{{% warning %}} -Terminate cluster when it is not used for performance results generation. -{{% /warning %}} - --- ### 5. Setting up an execution environment @@ -400,7 +401,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). 
@@ -442,8 +443,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder: @@ -474,10 +474,14 @@ This increase in re-index time is due to a known issue which affects Jira 9.4.x, 4. Select the **Full re-index** option. 5. Click **Re-Index** and wait until re-indexing is completed. {{% note %}} -Jira will be temporarily unavailable during the re-indexing process. Once the process is complete, the system will be fully accessible and operational once again. +Jira will be temporarily unavailable during the re-indexing process - "503 Service Temporarily Unavailable" message will be displayed. Once the process is complete, the system will be fully accessible and operational once again. {{% /note %}} 6. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. +{{% note %}} +Re-index information window is displayed on the **Indexing page**. If the window is not displayed, log in to Jira one more time and navigate to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. If you use the direct link to the **Indexing** page, refresh the page after the re-index is finished. +{{% /note %}} + 7. Attach the screenshot(s) to your ECOHELP ticket. **Performance results generation with the app installed:** @@ -486,8 +490,7 @@ Jira will be temporarily unavailable during the re-indexing process. 
Once the pr ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} @@ -545,8 +548,7 @@ To receive scalability benchmark results for one-node Jira DC **with** app-speci ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} @@ -557,25 +559,28 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Jira DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. 3. 
From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` @@ -587,8 +592,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} To receive scalability benchmark results for four-node Jira DC with app-specific actions: @@ -598,8 +603,7 @@ To receive scalability benchmark results for four-node Jira DC with app-specific ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} @@ -641,7 +645,7 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -658,7 +662,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. -If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). +If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. 
+For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md b/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md index 0f29a228e..93310e73a 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Jira Service Management (CloudFormation deployment) @@ -71,7 +71,7 @@ All important parameters are listed and described in this section. For all other | Parameter | Recommended value | | --------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Jira Product | ServiceManagement | -| Version | The Data Center App Performance Toolkit officially supports `4.20.22`, `5.4.6` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | +| Version | The Data Center App Performance Toolkit officially supports `4.20.24`, `5.4.8` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | **Cluster nodes** @@ -294,7 +294,7 @@ Make sure **English (United States)** language is selected as a default language - `application_protocol`: http or https. 
- `application_port`: for HTTP - 80, for HTTPS - 443, 8080, 2990 or your instance-specific port. - `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. when using self-signed SSL certificate. - - `application_postfix`: it is empty by default; e.g., /jira for url like this http://localhost:2990/jira. + - `application_postfix`: set to empty for CloudFormation deployment; e.g., /jira for url like this http://localhost:2990/jira. - `admin_login`: admin user username. - `admin_password`: admin user password. - `load_executor`: executor for load tests. Valid options are [jmeter](https://jmeter.apache.org/) (default) or [locust](https://locust.io/). @@ -553,7 +553,7 @@ All important parameters are listed and described in this section. For all other | Parameter | Recommended Value | | --------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Jira Product | ServiceManagement | -| Version | The Data Center App Performance Toolkit officially supports `4.20.22`, `5.4.6` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | +| Version | The Data Center App Performance Toolkit officially supports `4.20.24`, `5.4.8` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | **Cluster nodes** @@ -874,8 +874,8 @@ Instead, set those values directly in `.yml` file on execution environment insta application_hostname: test_jsm_instance.atlassian.com # Jira Service Management DC hostname without protocol and port e.g. test-jsm.atlassian.com or localhost application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc - secure: True # Set False to allow insecure connections, e.g. 
when using self-signed SSL certificate - application_postfix: # e.g. /jira in case of url like http://localhost:2990/jira + secure: True # set False to allow insecure connections, e.g. when using self-signed SSL certificate + application_postfix: # set to empty for CloudFormation deployment; e.g. /jira in case of url like http://localhost:2990/jira admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. @@ -891,7 +891,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). @@ -933,8 +933,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` 1. 
View the following main results of the run in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder: @@ -976,8 +975,7 @@ If your Amazon RDS DB instance class is lower than `db.m5.xlarge` it is required ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -1035,8 +1033,7 @@ To receive scalability benchmark results for one-node Jira Service Management DC ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -1081,8 +1078,7 @@ In case if index synchronization is failed by some reason (e.g. application stat ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -1107,8 +1103,7 @@ To receive scalability benchmark results for four-node Jira Service Management D ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index e674fcfa8..5f6693251 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: 
build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Jira Service Management @@ -47,9 +47,6 @@ DCAPT has fully transitioned to Terraform deployment. If you still wish to use C ### 1. Setting up Jira Service Management Data Center development environment -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Jira Service Management Data Center environment and AWS on k8s. - #### AWS cost estimation for the development environment {{% note %}} @@ -70,40 +67,38 @@ See [Set up an enterprise-scale environment Jira Service Management Data Center Below process describes how to install low-tier Jira Service Management DC with "small" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt-small.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars) file to the `data-center-terraform` folder. 
- ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars - ``` -6. Set **required** variables in `dcapt-small.tfvars` file: - - `environment_name` - any name for you environment, e.g. `dcapt-jira-small`. +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step 1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt-small.tfvars` file: + - `environment_name` - any name for your environment, e.g. `dcapt-jsm-small`. + - `products` - `jira` + - `jira_image_repository` - `atlassian/jira-servicemanagement` - make sure to select the **Jira Service Management** application. + - `jira_license` - one-liner of valid Jira Service Management license without spaces and new line symbols. + - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. Optional variables to override: + - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). + - Make sure that the Jira Service Management version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~20 min): - ```bash - ./install.sh -c dcapt-small.tfvars +6. 
From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. @@ -125,7 +120,7 @@ Make sure **English (United States)** language is selected as a default language - `application_protocol`: http or https. - `application_port`: for HTTP - 80, for HTTPS - 443, 8080, 2990 or your instance-specific port. - `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. when using self-signed SSL certificate. - - `application_postfix`: it is empty by default; e.g., /jira for url like this http://localhost:2990/jira. + - `application_postfix`: /jira # default value for Terraform deployment; e.g., /jira for url like this http://localhost:2990/jira. - `admin_login`: admin user username. - `admin_password`: admin user password. - `load_executor`: executor for load tests. Valid options are [jmeter](https://jmeter.apache.org/) (default) or [locust](https://locust.io/). @@ -305,12 +300,30 @@ App-specific actions are required. 
Do not proceed with the next step until you h --- ## Enterprise-scale environment +{{% warning %}} +It is recommended to terminate a development environment before creating an enterprise-scale environment. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. +{{% /warning %}} + After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**. ### 4. Setting up Jira Service Management Data Center enterprise-scale environment with "large" dataset -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Jira Service Management Data Center environment and AWS on k8s. +#### EC2 CPU Limit +The installation of 4-nodes Jira Service Management requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. 
To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. #### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. @@ -349,58 +362,46 @@ Data dimensions and values for an enterprise-scale dataset are listed and descri All the datasets use the standard `admin`/`admin` credentials. {{% /note %}} -{{% warning %}} -It is recommended to terminate a development environment before creating an enterprise-scale environment. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. -If you want to keep a development environment up, read [How do I deal with a pre-existing state in multiple environments?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#:~:text=How%20do%20I%20deal%20with%20pre%2Dexisting%20state%20in%20multiple%20environment%3F) -{{% /warning %}} - Below process describes how to install enterprise-scale Jira Service Management DC with "large" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. 
Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: - - `environment_name` - any name for you environment, e.g. `dcapt-jira-large`. +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step 1 in `aws_envs` file: - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for your environment, e.g. `dcapt-jsm-large`. - `products` - `jira` - `jira_image_repository` - `atlassian/jira-servicemanagement` - make sure to select the **Jira Service Management** application. - `jira_license` - one-liner of valid Jira Service Management license without spaces and new line symbols. - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. 
Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. Optional variables to override: - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Jira Service Management version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars - ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use this server id for generation `BX02-9YO1-IN86-LO5G`. -{{% /note %}} +6. From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars + ``` +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. It's recommended to change default password from UI account page for security reasons. {{% /note %}} -{{% warning %}} -Terminate cluster when it is not used for performance results generation. -{{% /warning %}} - --- ### 5. 
Setting up an execution environment @@ -421,7 +422,7 @@ Instead, set those values directly in `.yml` file on execution environment insta application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: /jira # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. + application_postfix: /jira # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. @@ -437,7 +438,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). @@ -479,8 +480,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` 1. 
View the following main results of the run in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder: @@ -515,6 +515,10 @@ Jira Service Management will be temporarily unavailable during the re-indexing p {{% /note %}} 6. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. +{{% note %}} +Re-index information window is displayed on the **Indexing page**. If the window is not displayed, log in to Jira Service Management one more time and navigate to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. If you use the direct link to the **Indexing** page, refresh the page after the re-index is finished. +{{% /note %}} + 7. Attach the screenshot(s) to your ECOHELP ticket. @@ -524,8 +528,7 @@ Jira Service Management will be temporarily unavailable during the re-indexing p ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -583,8 +586,7 @@ To receive scalability benchmark results for one-node Jira Service Management DC ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -595,25 +597,28 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. 
+Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jsm/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Jira Service Management DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. 3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --pull=always --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -624,8 +629,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. 
+Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jsm/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for four-node Jira Service Management DC with app-specific actions: @@ -635,8 +640,7 @@ To receive scalability benchmark results for four-node Jira Service Management D ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -677,7 +681,8 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo 1. Once completed, in the `./reports` folder, you will be able to review action timings on Jira Service Management Data Center with different numbers of nodes. If you see a significant variation in any action timings between configurations, we recommend taking a look into the app implementation to understand the root cause of this delta. {{% warning %}} -After completing all your tests, delete your Jira Service Management Data Center stacks. +It is recommended to terminate an enterprise-scale environment after completing all tests. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -694,7 +699,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. 
-If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). +If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/renovate.json b/renovate.json new file mode 100644 index 000000000..9ecafca3b --- /dev/null +++ b/renovate.json @@ -0,0 +1,7 @@ +{ + "extends": [ + "config:base" + ], + "baseBranches": ["dev"], + "ignorePaths": ["src/test/**"] +} diff --git a/requirements.txt b/requirements.txt index 94d91b740..798691b5d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,11 +1,12 @@ -matplotlib==3.7.1 -pandas==2.0.1 -numpy==1.24.3 -scipy==1.10.1 -pytest==7.3.1 +matplotlib==3.7.2 +pandas==2.0.3 +numpy==1.25.1 +scipy==1.11.1 +pytest==7.4.0 locust==2.15.1 -selenium==4.9.0 -filelock==3.12.0 +selenium==4.10.0 +filelock==3.12.2 packaging==23.1 -prettytable==3.7.0 -bzt==1.16.22 +prettytable==3.8.0 +bzt==1.16.23 +boto3==1.28.11