From edcaf6ac15d17dba4dc8583a002568a8f1f0a83c Mon Sep 17 00:00:00 2001 From: Renovate Bot Date: Sun, 24 Apr 2022 18:56:02 +0000 Subject: [PATCH 01/37] Add renovate.json --- renovate.json | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 renovate.json diff --git a/renovate.json b/renovate.json new file mode 100644 index 000000000..f45d8f110 --- /dev/null +++ b/renovate.json @@ -0,0 +1,5 @@ +{ + "extends": [ + "config:base" + ] +} From 8c4a2e2ae730572147dc171dc2ca45defd6776ee Mon Sep 17 00:00:00 2001 From: Dariusz Mika Date: Fri, 2 Jun 2023 13:35:34 +0200 Subject: [PATCH 02/37] Change SystemExit to Exception on webdriver visible in docker error --- app/selenium_ui/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/selenium_ui/conftest.py b/app/selenium_ui/conftest.py index 4e5952549..54c48660f 100644 --- a/app/selenium_ui/conftest.py +++ b/app/selenium_ui/conftest.py @@ -181,7 +181,7 @@ def driver_init(): capabilities = DesiredCapabilities.CHROME capabilities["goog:loggingPrefs"] = {"performance": "ALL"} if app_settings.webdriver_visible and is_docker(): - raise SystemExit("ERROR: WEBDRIVER_VISIBLE is True in .yml, but Docker container does not have a display.") + raise Exception("ERROR: WEBDRIVER_VISIBLE is True in .yml, but Docker container does not have a display.") if not app_settings.webdriver_visible: chrome_options.add_argument("--headless") if not app_settings.secure: From 1a598d9f394a356b8d54eb2b7b8ec35db3536dc1 Mon Sep 17 00:00:00 2001 From: opopovss <86659792+opopovss@users.noreply.github.com> Date: Thu, 8 Jun 2023 11:01:32 +0300 Subject: [PATCH 03/37] Changed locator for selenium_log_out action for JSM (#1046) * Changed locator for selenium_log_out action for JSM * uncomment jsm_ui_customers.py after the test run --- app/selenium_ui/jsm/pages/customer_pages.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py 
index c93ecf88c..f0e4b5b1a 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ b/app/selenium_ui/jsm/pages/customer_pages.py @@ -36,7 +36,7 @@ def open_profile_menu(self): def logout(self): self.get_element(TopPanelSelectors.logout_button).click() - self.wait_until_invisible(TopPanelSelectors.profile_icon) + self.wait_until_visible(LoginPageLocators.login_field) class CustomerPortals(BasePage): From 76ae77185d42850d223ce59b77d49e916b05dd96 Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Tue, 13 Jun 2023 14:47:08 +0100 Subject: [PATCH 04/37] DCA-2009 docker TF usage (#1052) --- app/util/k8s/README.MD | 75 +++ app/util/k8s/aws_envs | 3 + app/util/k8s/terminate_cluster.py | 731 ++++++++++++++++++++++++++++++ requirements.txt | 1 + 4 files changed, 810 insertions(+) create mode 100644 app/util/k8s/README.MD create mode 100644 app/util/k8s/aws_envs create mode 100644 app/util/k8s/terminate_cluster.py diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD new file mode 100644 index 000000000..a75f081e5 --- /dev/null +++ b/app/util/k8s/README.MD @@ -0,0 +1,75 @@ +# Development environment +## Create development environment +* set AWS credential in [aws_envs](./aws_envs) file +* set correct values in [dcapt-small.tfvars](./dcapt-small.tfvars) file: + * `environment_name` + * `products` + * `license` +* run install development environment command: +``` bash +docker run --env-file aws_envs \ +-v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/.terraform:/data-center-terraform/.terraform" \ +-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./install.sh -c config.tfvars +``` +## Terminate development environment +``` bash +docker run --env-file aws_envs \ +-v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/.terraform:/data-center-terraform/.terraform" \ +-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars +``` + +# 
Enterprise-scale environment +## Create enterprise-scale environment +* set AWS credential in [aws_envs](./aws_envs) file +* set correct values in [dcapt.tfvars](./dcapt.tfvars) file: + * `environment_name` + * `products` + * `license` +* run install enterprise-scale environment command: +``` bash +docker run --env-file aws_envs \ +-v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/.terraform:/data-center-terraform/.terraform" \ +-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./install.sh -c config.tfvars +``` +## Terminate enterprise-scale environment +``` bash +docker run --env-file aws_envs \ +-v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ +-v "$PWD/.terraform:/data-center-terraform/.terraform" \ +-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars +``` + +# Collect detailed k8s logs +Set AWS credential in [aws_envs](./aws_envs) file and run command: +``` bash +export CLUSTER_NAME=your_cluster_name +export REGION=us-east-2 +docker run --env-file aws_envs \ +-v "$PWD/k8s_logs:/data-center-terraform/k8s_logs" \ +-v "$PWD/logs:/data-center-terraform/logs" \ +-it atlassianlabs/terraform ./scripts/collect_k8s_logs.sh $CLUSTER_NAME $REGION k8s_logs +``` + +# Force terminate cluster +Set AWS credential in [aws_envs](./aws_envs) file and run command: +``` bash +export CLUSTER_NAME=your_cluster_name +export REGION=us-east-2 +docker run --env-file aws_envs \ +--workdir="/data-center-terraform" \ +--entrypoint="python" \ +-v "$PWD/terminate_cluster.py:/data-center-terraform/terminate_cluster.py" \ +atlassian/dcapt terminate_cluster.py --cluster_name $CLUSTER_NAME --aws_region $REGION +``` + +# Non default product version or aws region +File [dcapt-snapshots.json](./dcapt-snapshots.json) has all available RDS and EBS snapshots IDs for all supported product +versions and AWS regions. 
+Set `version_tag`, `shared_home_snapshot_id` and `db_snapshot_id` values correspondingly to product version and region. \ No newline at end of file diff --git a/app/util/k8s/aws_envs b/app/util/k8s/aws_envs new file mode 100644 index 000000000..6e515f78e --- /dev/null +++ b/app/util/k8s/aws_envs @@ -0,0 +1,3 @@ +# aws_envs file should contain AWS variables needed for authorization (without quotes) +AWS_ACCESS_KEY_ID=abc +AWS_SECRET_ACCESS_KEY=efg diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py new file mode 100644 index 000000000..e8a17cb60 --- /dev/null +++ b/app/util/k8s/terminate_cluster.py @@ -0,0 +1,731 @@ +import logging +from argparse import ArgumentParser +from datetime import datetime, timedelta +from time import sleep, time + +import boto3 +import botocore +from boto3.exceptions import Boto3Error +from botocore import exceptions + +US_EAST_2 = "us-east-2" +US_EAST_1 = "us-east-1" +REGIONS = [US_EAST_2, US_EAST_1] + + +def is_float(element): + try: + float(element) + return True + except ValueError: + return False + + +def wait_for_node_group_delete(eks_client, cluster_name, node_group): + timeout = 900 # 15 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + status_info = eks_client.describe_nodegroup(clusterName=cluster_name, nodegroupName=node_group)['nodegroup'] + except eks_client.exceptions.ResourceNotFoundException: + logging.info(f"Node group {node_group} for cluster {cluster_name} was successfully deleted.") + break + if status_info['status'] == "DELETING": + logging.info(f"Node group {node_group} for cluster {cluster_name} status is {status_info['status']}. " + f"Attempt {attempt}/{attempts}. 
Sleeping {sleep_time} seconds.") + + sleep(sleep_time) + attempt += 1 + else: + logging.error(f"Node group {node_group} for cluster {cluster_name} has " + f"unexpected status: {status_info['status']}.") + logging.error(f"Health status: {status_info['health']}") + return + else: + logging.error(f"Node group {node_group} for cluster {cluster_name} was not deleted in {timeout} seconds.") + + +def wait_for_cluster_delete(eks_client, cluster_name): + timeout = 600 # 10 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + status = eks_client.describe_cluster(name=cluster_name)['cluster']['status'] + except eks_client.exceptions.ResourceNotFoundException: + logging.info(f"Cluster {cluster_name} was successfully deleted.") + break + logging.info(f"Cluster {cluster_name} status is {status}. " + f"Attempt {attempt}/{attempts}. Sleeping {sleep_time} seconds.") + sleep(sleep_time) + attempt += 1 + else: + logging.error(f"Cluster {cluster_name} was not deleted in {timeout} seconds.") + + +def wait_for_rds_delete(rds_client, db_name): + timeout = 600 # 10 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + status = \ + rds_client.describe_db_instances(DBInstanceIdentifier=db_name)['DBInstances'][0]['DBInstanceStatus'] + except rds_client.exceptions.DBInstanceNotFoundFault: + logging.info(f"RDS {db_name} was successfully deleted.") + break + logging.info(f"RDS {db_name} status is {status}. " + f"Attempt {attempt}/{attempts}. 
Sleeping {sleep_time} seconds.") + sleep(sleep_time) + attempt += 1 + else: + logging.error(f"RDS {db_name} was not deleted in {timeout} seconds.") + + +def delete_nodegroup(aws_region, cluster_name): + try: + eks_client = boto3.client('eks', region_name=aws_region) + autoscaling_client = boto3.client('autoscaling', region_name=aws_region) + node_groups = eks_client.list_nodegroups(clusterName=cluster_name)['nodegroups'] + + if node_groups: + for node_group in node_groups: + autoscaling_group_name = None + try: + autoscaling_group_name = eks_client.describe_nodegroup( + clusterName=cluster_name, + nodegroupName=node_group)['nodegroup']['resources']['autoScalingGroups'][0]['name'] + autoscaling_client.delete_auto_scaling_group(AutoScalingGroupName=autoscaling_group_name, + ForceDelete=True) + except Boto3Error as e: + logging.error(f"Deleting autoscaling group {autoscaling_group_name} failed with error: {e}") + + try: + eks_client.delete_nodegroup(clusterName=cluster_name, nodegroupName=node_group) + wait_for_node_group_delete(eks_client, cluster_name, node_group) + except Boto3Error as e: + logging.error(f"Deleting node group {node_group} failed with error: {e}") + else: + logging.info(f"Cluster {cluster_name} does not have nodegroups.") + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + logging.info(f"No cluster found for name: {cluster_name}") + else: + raise e + + +def delete_cluster(aws_region, cluster_name): + try: + eks_client = boto3.client('eks', region_name=aws_region) + eks_client.delete_cluster(name=cluster_name) + wait_for_cluster_delete(eks_client, cluster_name) + except botocore.exceptions.ClientError as e: + if e.response['Error']['Code'] == 'ResourceNotFoundException': + logging.info(f"No cluster found for name: {cluster_name}") + else: + raise e + + +def delete_lb(aws_region, vpc_id): + elb_client = boto3.client('elb', region_name=aws_region) + try: + lb_names = [lb['LoadBalancerName'] 
+ for lb in elb_client.describe_load_balancers()['LoadBalancerDescriptions'] + if lb['VPCId'] == vpc_id] + except exceptions.EndpointConnectionError as e: + logging.error(f"Could not connect to the ELBv2 endpoint URL: {e}") + return + if lb_names: + for lb_name in lb_names: + try: + logging.info(f"Deleting load balancer: {lb_name} for vpc id: {vpc_id}") + elb_client.delete_load_balancer(LoadBalancerName=lb_name) + except Boto3Error as e: + logging.error(f"Deleting load balancer {lb_name} failed with error: {e}") + + +def wait_for_nat_gateway_delete(ec2, nat_gateway_id): + timeout = 600 # 10 min + attempt = 0 + sleep_time = 10 + attempts = timeout // sleep_time + + while attempt < attempts: + try: + status = ec2.describe_nat_gateways(NatGatewayIds=[nat_gateway_id])['NatGateways'][0]['State'] + except ec2.exceptions.ResourceNotFoundException: + logging.info(f"NAT gateway with id {nat_gateway_id} was not found.") + break + + if status == 'deleted': + logging.info(f"NAT gateway with id {nat_gateway_id} was successfully deleted.") + break + + logging.info(f"NAT gateway with id {nat_gateway_id} status is {status}. " + f"Attempt {attempt}/{attempts}. 
Sleeping {sleep_time} seconds.") + sleep(sleep_time) + attempt += 1 + + else: + logging.error(f"NAT gateway with id {nat_gateway_id} was not deleted in {timeout} seconds.") + + +def delete_nat_gateway(aws_region, vpc_id): + ec2_client = boto3.client('ec2', region_name=aws_region) + filters = [{'Name': 'vpc-id', 'Values': [f'{vpc_id}', ]}, ] + try: + nat_gateway = ec2_client.describe_nat_gateways(Filters=filters) + except exceptions.EndpointConnectionError as e: + logging.error(f"Could not retrieve NAT gateways: {e}") + return + nat_gateway_ids = [nat['NatGatewayId'] for nat in nat_gateway['NatGateways']] + if nat_gateway_ids: + for nat_gateway_id in nat_gateway_ids: + logging.info(f"Deleting NAT gateway with id: {nat_gateway_id}") + try: + ec2_client.delete_nat_gateway(NatGatewayId=nat_gateway_id) + wait_for_nat_gateway_delete(ec2_client, nat_gateway_id) + except Boto3Error as e: + logging.error(f"Deleting NAT gateway with id {nat_gateway_id} failed with error: {e}") + + +def delete_igw(ec2_resource, vpc_id): + vpc_resource = ec2_resource.Vpc(vpc_id) + igws = vpc_resource.internet_gateways.all() + if igws: + for igw in igws: + for retry in range(5): + try: + logging.info(f"Detaching and Removing igw id: {igw.id}") + igw.detach_from_vpc( + VpcId=vpc_id + ) + igw.delete() + break + except exceptions.ClientError as e: + if "Network vpc-" in str(e) and "has some mapped public address(es)" in str(e): + logging.warning(f"Detaching igw failed with error: {e}. 
Retrying in 1 minute...") + sleep(120) + except Boto3Error as e: + logging.error(f"Deleting igw failed with error: {e}") + + +def delete_subnets(ec2_resource, vpc_id): + vpc_resource = ec2_resource.Vpc(vpc_id) + subnets_all = vpc_resource.subnets.all() + subnets = [ec2_resource.Subnet(subnet.id) for subnet in subnets_all] + + if subnets: + try: + for sub in subnets: + logging.info(f"Removing subnet with id: {sub.id}") + sub.delete() + except Boto3Error as e: + logging.error(f"Delete of subnet failed with error: {e}") + + +def delete_route_tables(ec2_resource, vpc_id): + vpc_resource = ec2_resource.Vpc(vpc_id) + rtbs = vpc_resource.route_tables.all() + if rtbs: + try: + for rtb in rtbs: + if rtb.associations_attribute and rtb.associations_attribute[0]['Main'] == True: + logging.info(f"{rtb.id} is the main route table, skipping...") + continue + logging.info(f"Removing rtb-id: {rtb.id}") + table = ec2_resource.RouteTable(rtb.id) + table.delete() + except Boto3Error as e: + logging.error(f"Delete of route table failed with error: {e}") + + +def delete_security_groups(ec2_resource, vpc_id): + vpc_resource = ec2_resource.Vpc(vpc_id) + sgps = vpc_resource.security_groups.all() + if sgps: + try: + for sg in sgps: + if sg.group_name == 'default': + logging.info(f"{sg.id} is the default security group, skipping...") + continue + if sg.ip_permissions: + logging.info(f"Removing ingress rules for security group with id: {sg.id}") + sg.revoke_ingress(IpPermissions=sg.ip_permissions) + if sg.ip_permissions_egress: + logging.info(f"Removing egress rules for security group with id: {sg.id}") + sg.revoke_egress(IpPermissions=sg.ip_permissions_egress) + for sg in sgps: + if sg.group_name == 'default': + logging.info(f"{sg.id} is the default security group, skipping...") + continue + logging.info(f"Removing security group with id: {sg.id}") + sg.delete() + except Boto3Error as e: + logging.error(f"Delete of security group failed with error: {e}") + + +def 
get_vpc_region_by_name(vpc_name): + for rgn in REGIONS: + ec2_resource = boto3.resource('ec2', region_name=rgn) + filters = [{'Name': 'tag:Name', 'Values': [vpc_name]}] + vpc = list(ec2_resource.vpcs.filter(Filters=filters)) + if vpc: + return rgn + logging.info(f"VPC {vpc_name} NOT found in {rgn} region.") + + logging.warning(f"VPC {vpc_name} NOT found in the following regions: {REGIONS}.") + + +def delete_rds(aws_region, vpc_id): + rds_client = boto3.client('rds', region_name=aws_region) + try: + db_instances = rds_client.describe_db_instances()['DBInstances'] + except exceptions.EndpointConnectionError as e: + logging.error(f"Could not connect to the RDS endpoint URL: {e}") + return + db_names_and_subnets = [(db_instance['DBInstanceIdentifier'], db_instance['DBSubnetGroup']['DBSubnetGroupName']) + for db_instance in db_instances + if vpc_id == db_instance['DBSubnetGroup']['VpcId']] + for db_name, subnet_name in db_names_and_subnets: + try: + logging.info(f"Deleting RDS {db_name} for VPC id: {vpc_id}.") + rds_client.delete_db_instance( + DBInstanceIdentifier=db_name, SkipFinalSnapshot=True, DeleteAutomatedBackups=True) + wait_for_rds_delete(rds_client, db_name) + logging.info(f"Deleting RDS subnet group {subnet_name}") + rds_client.delete_db_subnet_group(DBSubnetGroupName=subnet_name) + except Boto3Error as e: + logging.error(f"Delete RDS {db_name} failed with error: {e}") + + +def terminate_vpc(vpc_name, aws_region=None): + if not aws_region: + aws_region = get_vpc_region_by_name(vpc_name) + + if aws_region: + ec2_resource = boto3.resource('ec2', region_name=aws_region) + filters = [{'Name': 'tag:Name', 'Values': [vpc_name]}] + vpc = list(ec2_resource.vpcs.filter(Filters=filters)) + if not vpc: + logging.warning(f"VPC {vpc_name} NOT found in region {aws_region}.") + return + vpc_id = vpc[0].id + logging.info(f"Checking RDS for VPC {vpc_name}.") + delete_rds(aws_region, vpc_id) + + logging.info(f"Checking load balancers for VPC {vpc_name}.") + 
delete_lb(aws_region, vpc_id) + + logging.info(f"Checking NAT gateway for VPC {vpc_name}.") + delete_nat_gateway(aws_region, vpc_id) + + logging.info(f"Checking internet gateway for VPC {vpc_name}.") + delete_igw(ec2_resource, vpc_id) + + logging.info(f"Checking subnets for VPC {vpc_name}.") + delete_subnets(ec2_resource, vpc_id) + + logging.info(f"Checking route tables for VPC {vpc_name}.") + delete_route_tables(ec2_resource, vpc_id) + + logging.info(f"Checking security groups for VPC {vpc_name}.") + delete_security_groups(ec2_resource, vpc_id) + + logging.info(f"Deleting VPC {vpc_name}.") + try: + ec2_resource.Vpc(vpc_id).delete() + except Boto3Error as e: + logging.error(f"Deleting VPC {vpc_name} failed with error: {e}.") + + logging.info(f"Release EIP for {vpc_name}.") + release_eip(aws_region, vpc_name) + + +def get_cluster_region_by_name(cluster_name): + for rgn in REGIONS: + eks_client = boto3.client('eks', region_name=rgn) + clusters = eks_client.list_clusters()['clusters'] + if cluster_name in clusters: + logging.info(f"Cluster {cluster_name} found in {rgn} region.") + return rgn + else: + logging.info(f"Cluster {cluster_name} NOT found in {rgn} region.") + + logging.warning(f"Cluster {cluster_name} NOT found in the following regions: {REGIONS}.") + + +def terminate_cluster(cluster_name, aws_region=None): + # If no region is provided, get the region by cluster name + if not aws_region: + aws_region = get_cluster_region_by_name(cluster_name) + + if not aws_region: + raise ValueError("Could not determine the AWS region for the given cluster name.") + + # Delete the nodegroup and cluster in the specified region + delete_nodegroup(aws_region, cluster_name) + delete_cluster(aws_region, cluster_name) + + +def release_eip(aws_region, vpc_name): + ec2_client = boto3.client('ec2', region_name=aws_region) + addresses_dict = ec2_client.describe_addresses() + for eip_dict in addresses_dict['Addresses']: + if not eip_dict.get("Tags"): + logging.warning(f"EIP 
{eip_dict['AllocationId']} does not have tags. Review and terminate manually.") + return + name = next((tag["Value"] for tag in eip_dict["Tags"] if tag["Key"] == "Name"), None) + if name and vpc_name in name: + logging.info(f"Releasing EIP {eip_dict['PublicIp']} with name: {name}") + ec2_client.release_address(AllocationId=eip_dict['AllocationId']) + + +def retrieve_ebs_volumes(aws_region, cluster_name): + ec2 = boto3.resource('ec2', aws_region) + volumes = [] + + # Get all volumes in the region + response = ec2.volumes.all() + + for volume in response: + # Check if the volume is in use + if volume.state == "in-use": + logging.info(f"Volume {volume.id} is in use: skipping") + else: + # Check if the volume has the cluster_name in any of its tag values + cluster_tag = next((tag["Value"] for tag in volume.tags if cluster_name in tag["Value"]), None) + if cluster_tag: + volumes.append(volume.id) + + # Check for 'dynamic-pvc' or 'nfs-shared-home' in the name + name = next((tag["Value"] for tag in volume.tags if tag["Key"] == "Name"), None) + if "dynamic-pvc" in name or "nfs-shared-home" in name: + logging.info(f"Volume {volume.id} is not in use and " + f"has 'dynamic-pvc' or 'nfs-shared-home' in name: deleting...") + volumes.append(volume.id) + + print(f"Found volumes: {volumes}") + return volumes + + +def delete_ebs_volumes_by_id(aws_region, volumes): + ec2 = boto3.resource('ec2', aws_region) + + # Terminate the volumes + for volume_id in volumes: + try: + volume = ec2.Volume(volume_id) + if volume.state == "in-use": + print(f"Volume {volume_id} is in use: skipping") + continue + volume.delete() + print(f"Terminated volume: {volume_id}") + except Exception as e: + print(f"Failed to terminate volume {volume_id}: {e}") + + +def get_clusters_to_terminate(): + clusters_to_terminate = [] + for rgn in REGIONS: + eks_client = boto3.client('eks', region_name=rgn) + clusters = eks_client.list_clusters()['clusters'] + for cluster in clusters: + cluster_info = 
eks_client.describe_cluster(name=cluster)['cluster'] + created_date = cluster_info['createdAt'] + persist_days = cluster_info['tags'].get('persist_days', 0) + if not is_float(persist_days): + persist_days = 0 + created_date_timestamp = created_date.timestamp() + persist_seconds = float(persist_days) * 24 * 60 * 60 + now = time() + if created_date_timestamp + persist_seconds > now: + logging.info(f"Cluster {cluster} is not EOL yet, skipping...") + else: + logging.info(f"Cluster {cluster} is EOL and should be deleted.") + clusters_to_terminate.append(cluster) + return clusters_to_terminate + + +def terminate_open_id_providers(cluster_name=None): + iam_client = boto3.client('iam') + providers = iam_client.list_open_id_connect_providers()['OpenIDConnectProviderList'] + for provider in providers: + tags = iam_client.list_open_id_connect_provider_tags(OpenIDConnectProviderArn=provider['Arn'])['Tags'] + created_date = iam_client.get_open_id_connect_provider(OpenIDConnectProviderArn=provider['Arn'])['CreateDate'] + + name = next((tag["Value"] for tag in tags if tag["Key"] == "Name"), None) + if name and cluster_name and cluster_name in name: + logging.info(f"Deleting Open ID provider with name: {name}") + iam_client.delete_open_id_connect_provider(OpenIDConnectProviderArn=provider['Arn']) + return + if name == 'Alfred': + logging.info(f"Skipping Alfred Open ID provider") + continue + persist_days = next((tag["Value"] for tag in tags if tag["Key"] == "persist_days"), None) + if persist_days: + if not is_float(persist_days): + persist_days = 0 + created_date_timestamp = created_date.timestamp() + persist_seconds = float(persist_days) * 24 * 60 * 60 + now = time() + if created_date_timestamp + persist_seconds > now: + logging.info(f"Open ID provider {name} is not EOL yet, skipping...") + else: + logging.info(f"Open ID provider {name} is EOL and should be deleted.") + iam_client.delete_open_id_connect_provider(OpenIDConnectProviderArn=provider['Arn']) + + +def 
retrieve_open_identities(cluster_name, aws_region): + open_identities = [] + + try: + eks_client = boto3.client("eks", region_name=aws_region) + response = eks_client.describe_cluster(name=cluster_name) + + identity_provider = response["cluster"]["identity"]["oidc"]["issuer"] + identity_id = identity_provider.split('/id/')[-1] + open_identities.append(identity_id) + print(f"Open identity providers: {open_identities}") + except Exception as e: + print(f"Failed to retrieve Open identity providers from {cluster_name}. Skipping...") + print(f"Error details: {e}") + + return open_identities + + +def delete_open_identities_for_cluster(open_identities): + if not open_identities: + print("No OpenID Connect providers to delete.") + return + + iam_client = boto3.client('iam') + + for identity in open_identities: + try: + providers = iam_client.list_open_id_connect_providers()['OpenIDConnectProviderList'] + for provider in providers: + provider_identity_id = provider['Arn'].split('/id/')[-1] + if provider_identity_id == identity: + iam_client.delete_open_id_connect_provider(OpenIDConnectProviderArn=provider['Arn']) + print(f"Deleted identity provider: {identity}") + else: + print(f"Identity '{identity}' not found in provider '{provider['Arn']}'") + except Exception as e: + print(f"Failed to delete identity provider: {identity}") + print(f"Error details: {e}") + + +def get_vpcs_to_terminate(): + vpcs_to_terminate = [] + for rgn in REGIONS: + ec2_resource = boto3.resource('ec2', region_name=rgn) + vpcs = ec2_resource.vpcs.all() + for vpc in vpcs: + + if vpc.is_default: + logging.info(f"Skipping default VPC for {rgn} region with id: {vpc.id}") + continue + + vpc_name = next((tag["Value"] for tag in vpc.tags if tag["Key"] == "Name"), None) + if "Atlassian-Standard-Infrastructure" in vpc_name: + logging.info(f"Skipping ASI CloudFormation VPC for {rgn} region with id: {vpc.id}") + continue + + # mark for remove all VPC without instances + if not list(vpc.instances.all()): + 
cluster_name = vpc_name.replace("-vpc", "-cluster") + if cluster_name in boto3.client('eks', region_name=rgn).list_clusters()['clusters']: + logging.info(f"Skipping VPC {vpc_name}, because this vpc has a cluster...") + continue + logging.info(f"VPC {vpc_name} tagged for termination.") + vpcs_to_terminate.append(vpc_name) + + return vpcs_to_terminate + + +def release_unused_eips(): + for rgn in REGIONS: + ec2_client = boto3.client('ec2', region_name=rgn) + addresses_dict = ec2_client.describe_addresses() + for eip_dict in addresses_dict['Addresses']: + if "NetworkInterfaceId" not in eip_dict: + eip_name = next((tag["Value"] for tag in eip_dict["Tags"] if tag["Key"] == "Name"), None) + cluster_name = eip_name.split("-vpc")[0] + "-cluster" + if cluster_name in boto3.client('eks', region_name=rgn).list_clusters()['clusters']: + logging.info(f"Skipping EIP {eip_name}, because this EIP has a cluster...") + continue + logging.info(f"Releasing EIP {eip_dict['PublicIp']} with name: {eip_name}") + ec2_client.release_address(AllocationId=eip_dict['AllocationId']) + + +def role_filter(role): + if role["RoleName"].startswith("atlas-"): + tags = boto3.client("iam").list_role_tags(RoleName=role["RoleName"]) + persist_days = None + for tag in tags["Tags"]: + if tag["Key"] == "persist_days": + try: + persist_days = float(tag["Value"]) + except ValueError: + ... 
+ if persist_days: + eol_time = role['CreateDate'] + timedelta(days=float(persist_days)) + return datetime.now(role['CreateDate'].tzinfo) > eol_time + return False + + +def remove_cluster_specific_roles_and_policies(cluster_name, aws_region): + iam_client = boto3.client("iam", region_name=aws_region) + + # Get and filter roles by cluster name prefix + all_roles = iam_client.list_roles() + cluster_roles = [role for role in all_roles["Roles"] if role["RoleName"].startswith(cluster_name)] + + for role in cluster_roles: + role_name = role["RoleName"] + + attached_policies = iam_client.list_attached_role_policies(RoleName=role_name) + + for policy in attached_policies["AttachedPolicies"]: + # Detach policy from the role + iam_client.detach_role_policy(RoleName=role_name, PolicyArn=policy["PolicyArn"]) + print(f" Detached policy {policy['PolicyName']} from role {role_name}") + + if cluster_name in policy['PolicyName']: + # Delete the policy + iam_client.delete_policy(PolicyArn=policy["PolicyArn"]) + print(f" Deleted policy {policy['PolicyName']}") + + # Delete the role + iam_client.delete_role(RoleName=role_name) + print(f"Deleted Role: {role_name}") + + +def remove_role_and_policies(role_name, active_clusters): + if role_name.startswith(tuple(active_clusters)): + logging.info(f"There is an active cluster which can be using role {role_name}. 
Skip.") + return + logging.info(f"Role {role_name} is EOL and should be deleted.") + iam_client = boto3.client("iam") + attached_policies = iam_client.list_attached_role_policies(RoleName=role_name).get("AttachedPolicies") + for policy in attached_policies: + logging.info(f"Detach {policy['PolicyArn']} from {role_name}") + iam_client.detach_role_policy(PolicyArn=policy["PolicyArn"], RoleName=role_name) + if policy["PolicyName"].endswith("_Fleet-Enrollment") or policy["PolicyName"].endswith("_LaaS-policy"): + logging.info(f"Delete policy {policy['PolicyName']}") + iam_client.delete_policy(PolicyArn=policy["PolicyArn"]) + logging.info(f"Delete role {role_name}") + iam_client.delete_role(RoleName=role_name) + logging.info(f"Role {role_name} deleted successfully") + + +def get_role_names_to_terminate(): + iam_client = boto3.client("iam") + roles_paginated = iam_client.list_roles(MaxItems=1000) + all_roles = roles_paginated["Roles"] + while roles_paginated.get("Marker"): + roles_paginated = iam_client.list_roles(Marker=roles_paginated["Marker"], MaxItems=1000) + all_roles.extend(roles_paginated["Roles"]) + logging.info(f"Roles count: {len(all_roles)}") + filtered_roles = list(filter(role_filter, all_roles)) + return list(map(lambda role: role["RoleName"], filtered_roles)) + + +def delete_unused_volumes(): + for rgn in REGIONS: + logging.info(f"Region: {rgn}") + ec2_resource = boto3.resource('ec2', region_name=rgn) + volumes = ec2_resource.volumes.all() + # Filter unused volumes + for volume in volumes: + if volume.state == "in-use": + logging.info(f"Volume {volume.id} is in use: skipping") + else: + if not volume.tags: + logging.warning(f"Volume {volume} does not have tags!") + continue + # Delete unused volumes with specific tags or names + persist_days = next((tag["Value"] for tag in volume.tags if tag["Key"] == "persist_days"), None) + if persist_days: + eol_time = volume.create_time + timedelta(days=float(persist_days)) + if datetime.now(volume.create_time.tzinfo) < 
eol_time: + logging.info(f"Volume {volume.id} is not EOL yet, skipping...") + else: + logging.info(f"Volume {volume.id} is EOL, deleting...") + volume.delete() + else: + name = next((tag["Value"] for tag in volume.tags if tag["Key"] == "Name"), None) + if "dynamic-pvc" or "nfs-shared-home" in name: + logging.info(f"Volume {volume.id} is not in use and " + f"has 'dynamic-pvc' or 'nfs-shared-home' in name: deleting...") + volume.delete() + else: + logging.warning(f"Volume {volume.id} does not have 'persist_days' tag " + f"| Name tag {name}: skipping") + + +def main(): + parser = ArgumentParser() + parser.add_argument("--cluster_name", type=str, help='Cluster name to terminate.') + parser.add_argument('--aws_region', type=str, help='AWS region where the cluster is located (e.g., "us-east-2").') + parser.add_argument('--all', action='store_true', help='Terminate all clusters in all regions.') + args = parser.parse_args() + + if not args.all: + if not args.cluster_name: + raise SystemExit("--cluster_name argument is not provided.") + if not args.aws_region: + raise SystemExit("--aws_region argument is not provided.") + + if args.cluster_name and args.aws_region: + logging.info(f"Delete all resources for cluster {args.cluster_name}.") + open_identities = retrieve_open_identities(cluster_name=args.cluster_name, aws_region=args.aws_region) + terminate_cluster(cluster_name=args.cluster_name, aws_region=args.aws_region) + vpc_name = f'{args.cluster_name.replace("-cluster", "-vpc")}' + logging.info(f"Delete VPC for cluster {args.cluster_name}.") + terminate_vpc(vpc_name=vpc_name, aws_region=args.aws_region) + volumes = retrieve_ebs_volumes(aws_region=args.aws_region, cluster_name=args.cluster_name) + delete_open_identities_for_cluster(open_identities) + remove_cluster_specific_roles_and_policies(cluster_name=args.cluster_name, aws_region=args.aws_region) + delete_ebs_volumes_by_id(aws_region=args.aws_region, volumes=volumes) + return + + logging.info(f"--cluster_name 
parameter was not specified.") + logging.info("Searching for clusters to remove.") + clusters = get_clusters_to_terminate() + for cluster_name in clusters: + logging.info(f"Delete all resources and VPC for cluster {cluster_name}.") + terminate_cluster(cluster_name=cluster_name) + vpc_name = f'{cluster_name.replace("-cluster", "-vpc")}' + terminate_vpc(vpc_name=vpc_name) + terminate_open_id_providers(cluster_name=cluster_name) + vpcs = get_vpcs_to_terminate() + for vpc_name in vpcs: + logging.info(f"Delete all resources for vpc {vpc_name}.") + terminate_vpc(vpc_name=vpc_name) + logging.info("Release unused EIPs") + release_unused_eips() + logging.info("Terminate open ID providers") + terminate_open_id_providers() + role_names = get_role_names_to_terminate() + active_clusters = [] + for region in REGIONS: + eks_client = boto3.client("eks", region_name=region) + active_clusters.extend(eks_client.list_clusters().get("clusters")) + for role_name in role_names: + remove_role_and_policies(role_name, active_clusters) + logging.info("Terminate unused and expired ebs volumes") + delete_unused_volumes() + + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + main() diff --git a/requirements.txt b/requirements.txt index 94d91b740..07aa9f681 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,3 +9,4 @@ filelock==3.12.0 packaging==23.1 prettytable==3.7.0 bzt==1.16.22 +boto3==1.26.149 From 61c9b6c53fe6e0a701f7a0d5b838e46d5f0a965a Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Wed, 14 Jun 2023 12:10:53 +0100 Subject: [PATCH 05/37] Update selnium version to 4.10.0 (#1055) --- app/bamboo.yml | 2 +- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- requirements.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index ddf9173c0..68dc10e9e 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -49,7 +49,7 @@ services: - python util/post_run/cleanup_results_dir.py 
- module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: jmeter executor: jmeter diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 052173b4b..9adee6b6f 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -36,7 +36,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: ${load_executor} concurrency: ${concurrency} diff --git a/app/confluence.yml b/app/confluence.yml index e9d6712b0..e04110af3 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -51,7 +51,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/jira.yml b/app/jira.yml index ae29de8ed..a3bd56e4f 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -51,7 +51,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/jsm.yml b/app/jsm.yml index 4fb1d62f3..0e39a6194 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -67,7 +67,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.0 + - selenium==4.10.0 execution: - scenario: ${load_executor}_agents executor: ${load_executor} diff --git a/requirements.txt b/requirements.txt index 07aa9f681..dcd908c42 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ numpy==1.24.3 scipy==1.10.1 pytest==7.3.1 locust==2.15.1 -selenium==4.9.0 +selenium==4.10.0 filelock==3.12.0 packaging==23.1 prettytable==3.7.0 From 33618992b03d18ac8ff0339689432ddb605964ac Mon Sep 17 00:00:00 2001 From: opopovss <86659792+opopovss@users.noreply.github.com> Date: Wed, 14 Jun 2023 17:03:50 +0300 Subject: [PATCH 06/37] downgrade selenium version to 4.9.1 
(#1056) --- app/bamboo.yml | 2 +- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- requirements.txt | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 68dc10e9e..23bfde585 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -49,7 +49,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.10.0 + - selenium==4.9.1 execution: - scenario: jmeter executor: jmeter diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 9adee6b6f..e4eaa6a5b 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -36,7 +36,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.10.0 + - selenium==4.9.1 execution: - scenario: ${load_executor} concurrency: ${concurrency} diff --git a/app/confluence.yml b/app/confluence.yml index e04110af3..2219bdc99 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -51,7 +51,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.10.0 + - selenium==4.9.1 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/jira.yml b/app/jira.yml index a3bd56e4f..4f13b9288 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -51,7 +51,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.10.0 + - selenium==4.9.1 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/jsm.yml b/app/jsm.yml index 0e39a6194..068e7e893 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -67,7 +67,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.10.0 + - selenium==4.9.1 execution: - scenario: ${load_executor}_agents executor: ${load_executor} diff --git a/requirements.txt b/requirements.txt index dcd908c42..702a73b0a 100644 --- a/requirements.txt +++ b/requirements.txt @@ 
-4,7 +4,7 @@ numpy==1.24.3 scipy==1.10.1 pytest==7.3.1 locust==2.15.1 -selenium==4.10.0 +selenium==4.9.1 filelock==3.12.0 packaging==23.1 prettytable==3.7.0 From 297a0185fa024af038e9556892d0127be52856be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 Jun 2023 22:32:31 +0000 Subject: [PATCH 07/37] Bump guava in /app/util/bamboo/bamboo_dataset_generator Bumps [guava](https://github.com/google/guava) from 29.0-jre to 32.0.0-jre. - [Release notes](https://github.com/google/guava/releases) - [Commits](https://github.com/google/guava/commits) --- updated-dependencies: - dependency-name: com.google.guava:guava dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- app/util/bamboo/bamboo_dataset_generator/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml index f09606be9..823c8fab9 100644 --- a/app/util/bamboo/bamboo_dataset_generator/pom.xml +++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml @@ -62,7 +62,7 @@ com.google.guava guava - 29.0-jre + 32.0.0-jre com.fasterxml.jackson.core From a94d8d16fb613d539ce43031455e9bd74e6a6d53 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Thu, 15 Jun 2023 10:30:01 +0300 Subject: [PATCH 08/37] dev rebase --- app/bamboo.yml | 2 +- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- app/util/conf.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 2ff62cfa7..ddf9173c0 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. 
For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for JMeter load executor view_all_builds: 15 view_build_result: 15 diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 9e3495c6a..052173b4b 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: - module: shellexec prepare: diff --git a/app/confluence.yml b/app/confluence.yml index d80d0cd02..e9d6712b0 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
# Action percentage for JMeter and Locust load executors extended_metrics: False view_page: 33 diff --git a/app/jira.yml b/app/jira.yml index b21d24016..ae29de8ed 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors create_issue: 4 search_jql: 11 diff --git a/app/jsm.yml b/app/jsm.yml index 5d9ad18e2..4fb1d62f3 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -23,7 +23,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
# Action percentage for Jmeter and Locust load executors agent_browse_projects: 10 agent_view_request: 24 diff --git a/app/util/conf.py b/app/util/conf.py index a021d4995..db0c93623 100644 --- a/app/util/conf.py +++ b/app/util/conf.py @@ -2,7 +2,7 @@ from util.project_paths import JIRA_YML, CONFLUENCE_YML, BITBUCKET_YML, JSM_YML, CROWD_YML, BAMBOO_YML -TOOLKIT_VERSION = '7.4.1' +TOOLKIT_VERSION = '7.5.0' UNSUPPORTED_VERSION = '6.3.0' From 4714afa167e5a2f860f31ad3f0d097d11a5cf0a1 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Thu, 15 Jun 2023 10:31:00 +0300 Subject: [PATCH 09/37] dev rebase --- app/crowd.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/crowd.yml b/app/crowd.yml index 992736743..9ab60e7cc 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -32,7 +32,7 @@ settings: JMETER_VERSION: 5.4.3 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
services: - module: shellexec prepare: From aaac243364cce9e331b91a6bf8498526d149ec8c Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Thu, 15 Jun 2023 12:00:21 +0200 Subject: [PATCH 10/37] Pin docker base image (#1058) --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 00cc2f782..6eb75064f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ # bzt run: docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml # interactive run: docker run -it --entrypoint="/bin/bash" -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt -FROM python:3.11-slim +FROM python:3.11-slim-bullseye ENV APT_INSTALL="apt-get -y install --no-install-recommends" From b9806bd114ea96e92f0b0df2038a28ed473dbd34 Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Mon, 19 Jun 2023 15:16:50 +0200 Subject: [PATCH 11/37] Fix delete emoji upload test (#1061) --- app/jmeter/confluence.jmx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/jmeter/confluence.jmx b/app/jmeter/confluence.jmx index dd3b3a954..60127d893 100644 --- a/app/jmeter/confluence.jmx +++ b/app/jmeter/confluence.jmx @@ -5362,7 +5362,7 @@ if (response.contains("Successfully added emoji")) { - ${application.postfix}/rest/emoticons/1.0/custom/delete-your-upload/${emoji_shortcut} + ${application.postfix}/rest/emoticons/1.0/custom/delete-your-upload?shortcut=${emoji_shortcut} DELETE true false From 88b935819a9bccfb594f6dd2219fe558b829bc71 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Wed, 21 Jun 2023 10:22:09 +0300 Subject: [PATCH 12/37] upd configuration --- renovate.json | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/renovate.json b/renovate.json index f45d8f110..9ecafca3b 100644 --- a/renovate.json +++ b/renovate.json @@ -1,5 +1,7 @@ { "extends": [ "config:base" - ] + ], + "baseBranches": ["dev"], + "ignorePaths": ["src/test/**"] } From 37ceb6fe80f5a6527e2359d326e5061d237a02de Mon Sep 17 
00:00:00 2001 From: OlehStefanyshyn Date: Wed, 21 Jun 2023 12:04:45 +0300 Subject: [PATCH 13/37] sync tfrvars --- app/util/k8s/dcapt-small.tfvars | 11 ++++++++ app/util/k8s/dcapt.tfvars | 49 +++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 91ba64bd0..edfa2e329 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -160,6 +160,10 @@ jira_db_name = "jira" jira_db_master_username = "atljira" jira_db_master_password = "Password1!" +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# jira_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Confluence Settings ################################################################################ @@ -258,6 +262,9 @@ confluence_db_master_password = "Password1!" # Enables Collaborative editing in Confluence confluence_collaborative_editing_enabled = true +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# confluence_custom_values_file = "/path/to/values.yaml" ################################################################################ # Bitbucket Settings @@ -369,3 +376,7 @@ bitbucket_db_name = "bitbucket" # If password is not provided, a random password will be generated. bitbucket_db_master_username = "atlbitbucket" bitbucket_db_master_password = "Password1!" + +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. 
+# bitbucket_custom_values_file = "/path/to/values.yaml" \ No newline at end of file diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 5c88e5812..bbee97a75 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -153,6 +153,10 @@ jira_db_name = "jira" jira_db_master_username = "atljira" jira_db_master_password = "Password1!" +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# jira_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Confluence Settings ################################################################################ @@ -249,6 +253,14 @@ confluence_db_master_password = "Password1!" # Enables Collaborative editing in Confluence confluence_collaborative_editing_enabled = true +# Use AWS S3 to store attachments. See: https://confluence.atlassian.com/doc/configuring-s3-object-storage-1206794554.html +# Terraform will automatically create S3 bucket, IAM role and policy +#confluence_s3_attachments_storage = true + +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# confluence_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Bitbucket Settings ################################################################################ @@ -357,6 +369,10 @@ bitbucket_db_name = "bitbucket" bitbucket_db_master_username = "atlbitbucket" bitbucket_db_master_password = "Password1!" +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. 
Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# bitbucket_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Crowd Settings ################################################################################ @@ -443,6 +459,10 @@ crowd_termination_grace_period = 0 crowd_db_master_username = "atlcrowd" crowd_db_master_password = "Password1!" +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# crowd_custom_values_file = "/path/to/values.yaml" + ################################################################################ # Bamboo Settings ################################################################################ @@ -530,3 +550,32 @@ bamboo_db_name = "bamboo" # See https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-bamboo # bamboo_dataset_url = "https://centaurus-datasets.s3.amazonaws.com/bamboo/dcapt-bamboo.zip" + +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# bamboo_custom_values_file = "/path/to/values.yaml" + +################################################################################ +# Monitoring settings +################################################################################ + +# Deploy https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack Helm chart +# to kube-monitoring namespace. Defaults to false. +# monitoring_enabled = true + +# Create Grafana service of LoadBalancer type. Defaults to false. To restric access to LB URL +# the list of CIRDs from whitelist_cidr will be automatically applied. 
+# monitoring_grafana_expose_lb = true + +# Prometheus Persistent Volume Claim size. Defaults to 10Gi. +# Out of the box EKS cluster is created with gp2 storage class which does not allow volume expansion, +# i.e. if you expect a high volume of metrics or metrics with high cardinality it is recommended +# to override the default Prometheus 10Gi PVC storage request when creating enabling monitoring for the first time. +# prometheus_pvc_disk_size = "100Gi" + +# Grafana Persistent Volume Claim size. Defaults to 10Gi. +# grafana_pvc_disk_size = "20Gi" + +# Custom values file location. Defaults to an empty string which means only values from config.tfvars +# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. +# monitoring_custom_values_file = "/path/to/values.yaml" From d01d72beb77190f1caa233076adc5e2a41cd89c5 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Wed, 21 Jun 2023 12:13:36 +0300 Subject: [PATCH 14/37] add empty line --- app/util/k8s/dcapt-small.tfvars | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index edfa2e329..4e98d508b 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -379,4 +379,4 @@ bitbucket_db_master_password = "Password1!" # Custom values file location. Defaults to an empty string which means only values from config.tfvars # are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. 
-# bitbucket_custom_values_file = "/path/to/values.yaml" \ No newline at end of file +# bitbucket_custom_values_file = "/path/to/values.yaml" From 8d051f31b72ef16cf0bc41c8aa7810bfaf94342d Mon Sep 17 00:00:00 2001 From: Dariusz Mika Date: Thu, 22 Jun 2023 15:13:17 +0200 Subject: [PATCH 15/37] Add info about 503 error during reindex to jira docs --- docs/dc-apps-performance-toolkit-user-guide-jira.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index a76c6abbc..f8ce5547c 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -474,7 +474,7 @@ This increase in re-index time is due to a known issue which affects Jira 9.4.x, 4. Select the **Full re-index** option. 5. Click **Re-Index** and wait until re-indexing is completed. {{% note %}} -Jira will be temporarily unavailable during the re-indexing process. Once the process is complete, the system will be fully accessible and operational once again. +Jira will be temporarily unavailable during the re-indexing process - "503 Service Temporarily Unavailable" message will be displayed. Once the process is complete, the system will be fully accessible and operational once again. {{% /note %}} 6. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. 
From ca697eafc998f9290458c0496f9697f06b079872 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Wed, 5 Jul 2023 09:50:22 +0300 Subject: [PATCH 16/37] add bb 8.9 support, cut bb 7.17 support --- app/util/k8s/dcapt-small.tfvars | 12 ++--- app/util/k8s/dcapt-snapshots.json | 87 ++++++------------------------- app/util/k8s/dcapt.tfvars | 12 ++--- 3 files changed, 25 insertions(+), 86 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 4e98d508b..978011e1e 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -290,20 +290,16 @@ bitbucket_version_tag = "7.21.11" # # 7.21.11 DCAPT small dataset EBS snapshot bitbucket_shared_home_snapshot_id = "snap-01f510d0c4405ce78" -# 8.8.3 DCAPT small dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-01f510d0c4405ce78" -# 7.17.16 DCAPT small dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-0c945dbcbd3e4ebff" +# 8.9.2 DCAPT small dataset EBS snapshot +#bitbucket_shared_home_snapshot_id = "snap-0fb8cd6bf387057c0" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. 
# # 7.21.11 DCAPT small dataset RDS snapshot bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-11" -# 8.8.3 DCAPT small dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-8-3" -# 7.17.16 DCAPT small dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-17-16" +# 8.9.2 DCAPT small dataset RDS snapshot +#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2" # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 524066431..f98df5467 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -384,59 +384,6 @@ }, "bitbucket": { "versions": [ - { - "version": "7.17.16", - "data": [ - { - "type": "rds", - "size": "large", - "snapshots": [ - { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-17-16", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-17-16", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-7-17-16", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-7-17-16" - } - ] - }, - { - "type": "rds", - "size": "small", - "snapshots": [ - { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-17-16", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-17-16", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-7-17-16", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-7-17-16" - } - ] - }, - { - "type": "ebs", - "size": "large", - "snapshots": [ - { - "us-east-1": "snap-0849ecbad09d7aa15", - "us-east-2": "snap-06fceac7bdcc3844c", - "us-west-1": "snap-06147e64d7ae138ff", - 
"us-west-2": "snap-04f83a4e7360b64c9" - } - ] - }, - { - "type": "ebs", - "size": "small", - "snapshots": [ - { - "us-east-1": "snap-017f427062d1eef34", - "us-east-2": "snap-0c945dbcbd3e4ebff", - "us-west-1": "snap-0bef4d24419315e7d", - "us-west-2": "snap-07826f85a1de3ccf6" - } - ] - } - ] - }, { "version": "7.21.11", "data": [ @@ -491,17 +438,17 @@ ] }, { - "version": "8.8.3", + "version": "8.9.2", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-8-8-3", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-8-3", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-8-8-3", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-8-8-3" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-8-9-2", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-2", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-8-9-2", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-8-9-2" } ] }, @@ -510,10 +457,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-8-3", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-8-3", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-8-8-3", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-8-8-3" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-2", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-2", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2" } ] }, @@ -522,10 +469,10 @@ "size": "large", "snapshots": [ { - "us-east-1": 
"snap-0c5a9cd13b3259403", - "us-east-2": "snap-04138d264fb24f2e7", - "us-west-1": "snap-0d24266fe20d821f7", - "us-west-2": "snap-09d3c3152fc0123ce" + "us-east-1": "snap-0c5d82e754dd8f536", + "us-east-2": "snap-0d933f3d40000e877", + "us-west-1": "snap-039620171723e6e2c", + "us-west-2": "snap-01a121265b13ee758" } ] }, @@ -534,10 +481,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-009ef66688cdb0919", - "us-east-2": "snap-01f510d0c4405ce78", - "us-west-1": "snap-0b35c2aed07dd644b", - "us-west-2": "snap-0c5e05bb471a65d37" + "us-east-1": "snap-039acd2608c3bce3f", + "us-east-2": "snap-0fb8cd6bf387057c0", + "us-west-1": "snap-012dc94feaaa30490", + "us-west-2": "snap-0a0f1b03ec0e8bf36" } ] } diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index bbee97a75..3335fa5a9 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -285,20 +285,16 @@ bitbucket_version_tag = "7.21.11" # # 7.21.11 DCAPT large dataset EBS snapshot bitbucket_shared_home_snapshot_id = "snap-0456406e413ff835b" -# 8.8.3 DCAPT large dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-04138d264fb24f2e7" -# 7.17.16 DCAPT large dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-06fceac7bdcc3844c" +# 8.9.2 DCAPT large dataset EBS snapshot +#bitbucket_shared_home_snapshot_id = "snap-0d933f3d40000e877" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. 
# # 7.21.11 DCAPT large dataset RDS snapshot bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-11" -# 8.8.3 DCAPT large dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-8-3" -# 7.17.16 DCAPT large dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-17-16" +# 8.9.2 DCAPT large dataset RDS snapshot +#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-2" # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" From 31d09cd025274db80585f3bd1dc1b375d372f27b Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Fri, 7 Jul 2023 12:14:08 +0200 Subject: [PATCH 17/37] Modify codeowners (#1071) --- CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index db4dbae86..c4a79c422 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ # Default code owners - Atlassian Data Center App Performance Toolkit -* @ometelytsia @SergeyMoroz0703 @opopovss @OlehStefanyshyn @dmika1 \ No newline at end of file +* @ometelytsia @SergeyMoroz0703 @OlehStefanyshyn \ No newline at end of file From c9693f3a31678fd95762112354b52bcf7c2a30fa Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Tue, 11 Jul 2023 17:23:08 +0200 Subject: [PATCH 18/37] Bump jira/jsm version (#1072) * Bump jira/jsm version * updtate tfvars files --- README.md | 6 +- app/util/k8s/dcapt-small.tfvars | 36 ++++---- app/util/k8s/dcapt-snapshots.json | 136 +++++++++++++++--------------- app/util/k8s/dcapt.tfvars | 36 ++++---- 4 files changed, 107 insertions(+), 107 deletions(-) diff --git a/README.md b/README.md index 9785d2c88..148752063 100644 --- a/README.md +++ b/README.md @@ -5,16 +5,16 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat ## Supported versions * Supported Jira versions: - * Jira [Long Term Support 
release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.20.22` and `9.4.6` + * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.4.8` and `8.20.24` * Supported Jira Service Management versions: - * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `4.20.22` and `5.4.6` + * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.4.8` and `4.20.24` * Supported Confluence versions: * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.9`, `7.13.17` and `8.1.4` platform release * Supported Bitbucket Server versions: - * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.21.11`, `7.17.16`, and `8.8.3` platform release. + * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.2` and `7.21.11` * Supported Crowd versions: * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.0.5` diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 978011e1e..4c5113afc 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -85,34 +85,34 @@ jira_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions # # Jira version -jira_version_tag = "9.4.6" +jira_version_tag = "9.4.8" # JSM version -# jira_version_tag = "5.4.6" +# jira_version_tag = "5.4.8" # Shared home restore configuration. 
# Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. # -# Jira 9.4.6 DCAPT small dataset EBS snapshot - jira_shared_home_snapshot_id = "snap-0651a00c1234ca355" -# Jira 8.20.22 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-060af366a595cf019" -# JSM 5.4.6 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-0b989ff3e3236e707" -# JSM 4.20.22 DCAPT small dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-02cf7f70e3872320f" +# Jira 9.4.8 DCAPT small dataset EBS snapshot + jira_shared_home_snapshot_id = "snap-0005a8c3cc297b294" +# Jira 8.20.24 DCAPT small dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0c3cb60ddc50c1136" +# JSM 5.4.8 DCAPT small dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-02f299ef7f1f524b2" +# JSM 4.20.24 DCAPT small dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0971e128b8d1d2af9" # Database restore configuration. # Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. # Build number stored within the snapshot and Jira license are also required, so that Jira can be fully setup prior to start. 
# -# Jira 9.4.6 DCAPT small dataset RDS snapshot - jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-6" -# Jira 8.20.22 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-22" -# JSM 5.4.6 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-6" -# JSM 4.20.22 DCAPT small dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-22" +# Jira 9.4.8 DCAPT small dataset RDS snapshot + jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-8" +# Jira 8.20.24 DCAPT small dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-24" +# JSM 5.4.8 DCAPT small dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-8" +# JSM 4.20.24 DCAPT small dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-24" # Helm chart version of Jira # jira_helm_chart_version = "" diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index f98df5467..f064a3f48 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -2,17 +2,17 @@ "jira": { "versions": [ { - "version": "9.4.6", + "version": "9.4.8", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-6", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-6", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-4-6", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-4-6" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-8", + "us-east-2": 
"arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-9-4-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-9-4-8" } ] }, @@ -21,10 +21,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-4-6", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-6", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-4-6", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-4-6" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-4-8", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-9-4-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-9-4-8" } ] }, @@ -33,10 +33,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-00dbfdccd242ed1b3", - "us-east-2": "snap-051b68559232b9c52", - "us-west-1": "snap-06680b532a5124558", - "us-west-2": "snap-02a7ba9c66ef7867a" + "us-east-1": "snap-0c98167037a14f031", + "us-east-2": "snap-07099cb68dacf9e8e", + "us-west-1": "snap-07b153b928501f065", + "us-west-2": "snap-0bd8c254fbf4feb25" } ] }, @@ -45,27 +45,27 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-0d43933fdd4672594", - "us-east-2": "snap-0651a00c1234ca355", - "us-west-1": "snap-008c03d701800ff3f", - "us-west-2": "snap-0c47ff88937f1a169" + "us-east-1": "snap-05a61b57dbb4f9834", + "us-east-2": "snap-0005a8c3cc297b294", + "us-west-1": "snap-0dfb346bb01f4709a", + "us-west-2": "snap-0c17be9ae98bbd1ed" } ] } ] }, { - "version": "8.20.22", + "version": "8.20.24", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-8-20-22", - "us-east-2": 
"arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-22", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-8-20-22", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-8-20-22" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-8-20-24", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-24", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-8-20-24", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-8-20-24" } ] }, @@ -74,10 +74,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-8-20-22", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-22", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-8-20-22", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-8-20-22" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-8-20-24", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-8-20-24", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jira-small-8-20-24", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jira-small-8-20-24" } ] }, @@ -86,10 +86,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-0d641c27d1335f1e4", - "us-east-2": "snap-07eabc725b2784dd8", - "us-west-1": "snap-021b200aee83f4c42", - "us-west-2": "snap-0c447a319a3062d00" + "us-east-1": "snap-029edbed07ab594e0", + "us-east-2": "snap-0b5f4473954e6d959", + "us-west-1": "snap-0ffaa992ba449a53d", + "us-west-2": "snap-01e69efc1d8943038" } ] }, @@ -98,10 +98,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-08fa076e03f4d7489", - "us-east-2": "snap-060af366a595cf019", - "us-west-1": "snap-033251c6028eb205a", - "us-west-2": "snap-09919bd45a9bf234c" + "us-east-1": "snap-083d2d8f5797f907e", + "us-east-2": "snap-0c3cb60ddc50c1136", + 
"us-west-1": "snap-07de609e058d28a03", + "us-west-2": "snap-01fa045458071eda5" } ] } @@ -112,17 +112,17 @@ "jsm": { "versions": [ { - "version": "5.4.6", + "version": "5.4.8", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-6", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-6", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-4-6", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-4-6" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-8", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-5-4-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-5-4-8" } ] }, @@ -131,10 +131,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-4-6", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-6", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-4-6", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-4-6" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-4-8", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-8", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-5-4-8", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-5-4-8" } ] }, @@ -143,10 +143,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-08c764486b2faa09a", - "us-east-2": "snap-0a65d52f20fc43d4e", - "us-west-1": "snap-05eff0f22140b59e8", - "us-west-2": "snap-0b32ffd526d769aea" + "us-east-1": "snap-0a13271b63872a2a6", + "us-east-2": "snap-0fb58e8d005edeb32", + "us-west-1": "snap-05d6aa53717fb3c6c", + "us-west-2": "snap-043842d9319f25659" } ] }, @@ -155,27 
+155,27 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-0390ceabfc3292865", - "us-east-2": "snap-0b989ff3e3236e707", - "us-west-1": "snap-03a0c97a99f84fcf9", - "us-west-2": "snap-0528f3b63c100cf48" + "us-east-1": "snap-08bc969471d0ee11f", + "us-east-2": "snap-02f299ef7f1f524b2", + "us-west-1": "snap-0fa5b8bd27f66e6c3", + "us-west-2": "snap-063211f90e6d81bbd" } ] } ] }, { - "version": "4.20.22", + "version": "4.20.24", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-4-20-22", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-22", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-4-20-22", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-4-20-22" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-4-20-24", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-24", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-4-20-24", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-4-20-24" } ] }, @@ -184,10 +184,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-4-20-22", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-22", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-4-20-22", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-4-20-22" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-4-20-24", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-4-20-24", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-jsm-small-4-20-24", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-jsm-small-4-20-24" } ] }, @@ -196,10 +196,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-0ff68d6d06e69fffb", - "us-east-2": 
"snap-02cf7f70e3872320f", - "us-west-1": "snap-076395fc4d09c9020", - "us-west-2": "snap-0f148cab48b6efea3" + "us-east-1": "snap-0c95405b316f28ec8", + "us-east-2": "snap-0cae5febc8127250b", + "us-west-1": "snap-081f548dda005c97e", + "us-west-2": "snap-08e23754ddc402ec4" } ] }, @@ -208,10 +208,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-0ac5442e3b594cbd2", - "us-east-2": "snap-0b9d0488da3b12e0a", - "us-west-1": "snap-08bc0efc05e414230", - "us-west-2": "snap-0a05f25582e8ccd9f" + "us-east-1": "snap-0d933d20b989beb7b", + "us-east-2": "snap-0971e128b8d1d2af9", + "us-west-1": "snap-0c6d8b6aa53b93e78", + "us-west-2": "snap-0734518fb7d55f7ce" } ] } diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 3335fa5a9..daee307b1 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -84,34 +84,34 @@ jira_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions # # Jira version -jira_version_tag = "9.4.6" +jira_version_tag = "9.4.8" # JSM version -# jira_version_tag = "5.4.6" +# jira_version_tag = "5.4.8" # Shared home restore configuration. # Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. 
# -# Jira 9.4.6 DCAPT large dataset EBS snapshot -jira_shared_home_snapshot_id = "snap-051b68559232b9c52" -# Jira 8.20.22 DCAPT large dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-07eabc725b2784dd8" -# JSM 5.4.6 DCAPT large dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-0a65d52f20fc43d4e" -# JSM 4.20.22 DCAPT large dataset EBS snapshot -# jira_shared_home_snapshot_id = "snap-02cf7f70e3872320f" +# Jira 9.4.8 DCAPT large dataset EBS snapshot +jira_shared_home_snapshot_id = "snap-07099cb68dacf9e8e" +# Jira 8.20.24 DCAPT large dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0b5f4473954e6d959" +# JSM 5.4.8 DCAPT large dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0fb58e8d005edeb32" +# JSM 4.20.24 DCAPT large dataset EBS snapshot +# jira_shared_home_snapshot_id = "snap-0cae5febc8127250b" # Database restore configuration. # Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. # Build number stored within the snapshot and Jira license are also required, so that Jira can be fully setup prior to start. 
# -# Jira 9.4.6 DCAPT large dataset RDS snapshot -jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-6" -# Jira 8.20.22 DCAPT large dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-22" -# JSM 5.4.6 DCAPT large dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-6" -# JSM 4.20.22 DCAPT large dataset RDS snapshot -# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-22" +# Jira 9.4.8 DCAPT large dataset RDS snapshot +jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-8" +# Jira 8.20.24 DCAPT large dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-8-20-24" +# JSM 5.4.8 DCAPT large dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-8" +# JSM 4.20.24 DCAPT large dataset RDS snapshot +# jira_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-4-20-24" # Helm chart version of Jira # jira_helm_chart_version = "" From fff06e93e95d13424b82c33ca99001b348a8a2bf Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Wed, 12 Jul 2023 09:56:12 +0200 Subject: [PATCH 19/37] update jira/jsm sh scripts versions (#1073) --- app/util/jira/populate_db.sh | 4 ++-- app/util/jira/upload_attachments.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/app/util/jira/populate_db.sh b/app/util/jira/populate_db.sh index cce3cdb88..1ee377dc5 100644 --- a/app/util/jira/populate_db.sh +++ b/app/util/jira/populate_db.sh @@ -43,8 +43,8 @@ JIRA_DB_PASS="Password1!" 
# Jira/JSM supported versions -SUPPORTED_JIRA_VERSIONS=(8.20.22 9.4.6) -SUPPORTED_JSM_VERSIONS=(4.20.22 5.4.6) +SUPPORTED_JIRA_VERSIONS=(8.20.24 9.4.8) +SUPPORTED_JSM_VERSIONS=(4.20.24 5.4.8) SUPPORTED_VERSIONS=("${SUPPORTED_JIRA_VERSIONS[@]}") # JSM section diff --git a/app/util/jira/upload_attachments.sh b/app/util/jira/upload_attachments.sh index f5616ad84..695289770 100644 --- a/app/util/jira/upload_attachments.sh +++ b/app/util/jira/upload_attachments.sh @@ -29,8 +29,8 @@ JIRA_VERSION_FILE="/media/atl/jira/shared/jira-software.version" # Jira/JSM supported versions -SUPPORTED_JIRA_VERSIONS=(8.20.22 9.4.6) -SUPPORTED_JSM_VERSIONS=(4.20.22 5.4.6) +SUPPORTED_JIRA_VERSIONS=(8.20.24 9.4.8) +SUPPORTED_JSM_VERSIONS=(4.20.24 5.4.8) SUPPORTED_VERSIONS=("${SUPPORTED_JIRA_VERSIONS[@]}") if [[ ${jsm} == 1 ]]; then From c81c85ebd160b4bdd553f2eafec3333defaa4bf5 Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Mon, 17 Jul 2023 10:17:53 +0200 Subject: [PATCH 20/37] DCA-2009 docker docs (#1070) * Modify jira docs * Update all docs with tf docker --- ...s-performance-toolkit-user-guide-bamboo.md | 59 ++++---- ...erformance-toolkit-user-guide-bitbucket.md | 112 +++++++-------- ...rformance-toolkit-user-guide-confluence.md | 129 ++++++++--------- ...ps-performance-toolkit-user-guide-crowd.md | 69 +++++---- ...pps-performance-toolkit-user-guide-jira.md | 133 ++++++++--------- ...apps-performance-toolkit-user-guide-jsm.md | 134 ++++++++---------- 6 files changed, 294 insertions(+), 342 deletions(-) diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index e873b8dc8..082cb9b64 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-07-06" --- # Data Center App Performance Toolkit User Guide For Bamboo @@ -26,44 
+26,43 @@ test results for the Marketplace approval process. Preferably, use the below rec ## 1. Set up an enterprise-scale environment Bamboo Data Center on k8s -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Bamboo Data Center environment and AWS on k8s. - #### Setup Bamboo Data Center with an enterprise-scale dataset on k8s Below process describes how to install Bamboo DC with an enterprise-scale dataset included. This configuration was created specifically for performance testing during the DC app review process. -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. 
Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-bamboo` - `products` - `bamboo` - `bamboo_license` - one-liner of valid bamboo license without spaces and new line symbols - `region` - **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bamboo`. -9. Wait for all remote agents to be started and connected. It can take up to 10 minutes. Agents can be checked in `Settings` > `Agents`. +6. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bamboo`. +7. Wait for all remote agents to be started and connected. It can take up to 10 minutes. Agents can be checked in `Settings` > `Agents`. {{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. +All the datasets use the standard `admin`/`admin` credentials. 
{{% /note %}} --- @@ -79,12 +78,8 @@ Data dimensions and values for default enterprise-scale dataset uploaded are lis --- -#### Troubleshooting -See [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/) page. - #### Terminate Bamboo Data Center - -Follow steps described on [Uninstallation and cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) page. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. --- @@ -366,7 +361,7 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -384,7 +379,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. -If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). 
+If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 32e7cce31..03cbce1c1 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-07-06" --- # Data Center App Performance Toolkit User Guide For Bitbucket @@ -60,9 +60,6 @@ the process can be continued after switching to the `7.1.0` DCAPT version. ### 1. Setting up Bitbucket Data Center development environment -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Bitbucket Data Center environment and AWS on k8s. - #### AWS cost estimation for the development environment {{% note %}} @@ -83,39 +80,37 @@ See [Set up an enterprise-scale environment Bitbucket Data Center on AWS](#insta Below process describes how to install low-tier Bitbucket DC with "small" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. 
Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt-small.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars - ``` -6. Set **required** variables in `dcapt-small.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt-small.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-bitbucket-small` - `products` - `bitbucket` - `bitbucket_license` - one-liner of valid bitbucket license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. 
Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Bitbucket version specified in **bitbucket_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~20 min): - ```bash - ./install.sh -c dcapt-small.tfvars +6. From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. @@ -204,8 +199,10 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Bitbucket Data Center enterprise-scale environment with "large" dataset -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Bitbucket Data Center environment and AWS on k8s. +{{% warning %}} +It is recommended to terminate a development environment before creating an enterprise-scale environment. 
+Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. +{{% /warning %}} ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. @@ -232,43 +229,34 @@ Data dimensions and values for an enterprise-scale dataset are listed and descri | Total files number | ~750 000 | -{{% warning %}} -It is recommended to terminate a development environment before creating an enterprise-scale environment. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. -If you want to keep a development environment up, read [How do I deal with a pre-existing state in multiple environments?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#:~:text=How%20do%20I%20deal%20with%20pre%2Dexisting%20state%20in%20multiple%20environment%3F) -{{% /warning %}} - Below process describes how to install enterprise-scale Bitbucket DC with "large" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. 
Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-bitbucket-large` - `products` - `bitbucket` - `bitbucket_license` - one-liner of valid bitbucket license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** - `instance_types` - `["m5.4xlarge"]` -7. Optional variables to override: +5. Optional variables to override: - `bitbucket_version_tag` - Bitbucket version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Bitbucket version specified in **bitbucket_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars - ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. +6. 
From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars + ``` +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. {{% note %}} New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). @@ -458,11 +446,15 @@ The same article has instructions on how to increase limit if needed. To receive scalability benchmark results for two-node Bitbucket DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `bitbucket_replica_count` value to `2`. 3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: @@ -534,7 +526,7 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. 
+Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions.
diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 9b3ef2a7f..c5ef42543 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-07-06" --- # Data Center App Performance Toolkit User Guide For Confluence @@ -60,9 +60,6 @@ the process can be continued after switching to the `6.3.0` DCAPT version. ### 1. Setting up Confluence Data Center development environment -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Confluence Data Center environment and AWS on k8s. - #### AWS cost estimation for the development environment {{% note %}} @@ -83,39 +80,37 @@ See [Set up an enterprise-scale environment Confluence Data Center on AWS](#inst Below process describes how to install low-tier Confluence DC with "small" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. 
Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt-small.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars - ``` -6. Set **required** variables in `dcapt-small.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt-small.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-confluence-small` - `products` - `confluence` - `confluence_license` - one-liner of valid confluence license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Confluence version specified in **confluence_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~20 min): - ```bash - ./install.sh -c dcapt-small.tfvars +6. 
From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. @@ -280,13 +275,15 @@ App-specific actions are required. Do not proceed with the next step until you h --- ## Enterprise-scale environment +{{% warning %}} +It is recommended to terminate a development environment before creating an enterprise-scale environment. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. +{{% /warning %}} + After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**. ### 4. Setting up Confluence Data Center enterprise-scale environment with "large" dataset -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Confluence Data Center environment and AWS on k8s. 
- ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. Monthly charges will be based on your actual usage of AWS services, and may vary from the estimates the Calculator has provided. @@ -312,57 +309,45 @@ Data dimensions and values for an enterprise-scale dataset are listed and descri | Spaces | ~5 000 | | Users | ~5 000 | -{{% warning %}} -It is recommended to terminate a development environment before creating an enterprise-scale environment. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. -If you want to keep a development environment up, read [How do I deal with a pre-existing state in multiple environments?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#:~:text=How%20do%20I%20deal%20with%20pre%2Dexisting%20state%20in%20multiple%20environment%3F) -{{% /warning %}} - Below process describes how to install enterprise-scale Confluence DC with "large" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. 
Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-confluence-large` - `products` - `confluence` - `confluence_license` - one-liner of valid confluence license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use this server id for generation `BX02-9YO1-IN86-LO5G`. + {{% /note %}} + +5. Optional variables to override: - `confluence_version_tag` - Confluence version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Confluence version specified in **confluence_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars - ``` -9. Copy product URL from the console output. 
Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use this server id for generation `BX02-9YO1-IN86-LO5G`. -{{% /note %}} +6. From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars + ``` +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. It's recommended to change default password from UI account page for security reasons. {{% /note %}} -{{% warning %}} -Terminate cluster when it is not used for performance results generation. -{{% /warning %}} - --- ### 5. Setting up an execution environment @@ -537,11 +522,15 @@ The same article has instructions on how to increase limit if needed. To receive scalability benchmark results for two-node Confluence DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `confluence_replica_count` value to `2`. 3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. 5. 
Run toolkit with docker from the execution environment instance: @@ -613,7 +602,7 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -630,7 +619,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. -If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). +If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. 
diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index e787770a7..f8b0e307e 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-07-06" --- # Data Center App Performance Toolkit User Guide For Crowd @@ -23,45 +23,41 @@ In this document, we cover the use of the Data Center App Performance Toolkit on ## 1. Set up an enterprise-scale environment Crowd Data Center on k8s -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Crowd Data Center environment and AWS on k8s. - #### Setup Crowd Data Center with an enterprise-scale dataset on k8s Below process describes how to install Crowd DC with an enterprise-scale dataset included. This configuration was created specifically for performance testing during the DC app review process. -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. 
Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-crowd` - `products` - `crowd` - `crowd_license` - one-liner of valid crowd license without spaces and new line symbols - `region` - **Do not change default region (`us-east-2`). If specific region is required, contact support.** - `instance_types` - `["c5.xlarge"]` -7. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`. +6. Copy product URL from the console output. 
Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`. -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} --- @@ -77,12 +73,8 @@ All the datasets use the standard `admin`/`admin` credentials. {{% /note %}} --- -#### Troubleshooting -See [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/) page. - #### Terminate Crowd Data Center - -Follow steps described on [Uninstallation and cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) page. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. --- @@ -284,11 +276,15 @@ The same article has instructions on how to increase limit if needed. To receive scalability benchmark results for two-node Crowd DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `crowd_replica_count` value to `2`. 3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. @@ -396,7 +392,8 @@ After completing all your tests, delete your Crowd Data Center stacks. #### Attaching testing results to ECOHELP ticket {{% warning %}} -Do not forget to attach performance testing results to your ECOHELP ticket. 
+It is recommended to terminate an enterprise-scale environment after completing all tests. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} 1. Make sure you have two reports folders: one with performance profile and second with scale profile results. @@ -407,7 +404,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. -If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). +If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. 
diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index f8ce5547c..bd8fb5837 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-07-06" --- # Data Center App Performance Toolkit User Guide For Jira @@ -46,9 +46,6 @@ DCAPT has fully transitioned to Terraform deployment. If you still wish to use C ### 1. Setting up Jira Data Center development environment -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Jira Data Center environment and AWS on k8s. - #### AWS cost estimation for the development environment {{% note %}} @@ -69,44 +66,42 @@ See [Set up an enterprise-scale environment Jira Data Center on AWS](#instancese Below process describes how to install low-tier Jira DC with "small" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. 
Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt-small.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars - ``` -6. Set **required** variables in `dcapt-small.tfvars` file: +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt-small.tfvars` file: - `environment_name` - any name for you environment, e.g. `dcapt-jira-small` - `products` - `jira` - `jira_license` - one-liner of valid jira license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. Optional variables to override: - `jira_version_tag` - Jira version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Jira version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~20 min): - ```bash - ./install.sh -c dcapt-small.tfvars +6. 
From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -9. Re-index: +7. Re-index (only for Jira 8.x, for Jira 9.x skip this step): - Go to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. - Select the **Full re-index** option. - Click **Re-Index** and wait until re-indexing is completed (~2s). -10. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} +8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. @@ -269,13 +264,15 @@ App-specific actions are required. Do not proceed with the next step until you h --- ## Enterprise-scale environment +{{% warning %}} +It is recommended to terminate a development environment before creating an enterprise-scale environment. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. +{{% /warning %}} + After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**. ### 4. 
Setting up Jira Data Center enterprise-scale environment with "large" dataset -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Jira Data Center environment and AWS on k8s. - #### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. Monthly charges will be based on your actual usage of AWS services and may vary from the estimates the Calculator has provided. @@ -318,57 +315,45 @@ Data dimensions and values for an enterprise-scale dataset are listed and descri All the datasets use the standard `admin`/`admin` credentials. {{% /note %}} -{{% warning %}} -It is recommended to terminate a development environment before creating an enterprise-scale environment. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. -If you want to keep a development environment up, read [How do I deal with a pre-existing state in multiple environments?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#:~:text=How%20do%20I%20deal%20with%20pre%2Dexisting%20state%20in%20multiple%20environment%3F) -{{% /warning %}} - Below process describes how to install enterprise-scale Jira DC with "large" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). 
{{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: - - `environment_name` - any name for you environment, e.g. `dcapt-jira-large` +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: + - `environment_name` - any name for you environment, e.g. `dcapt-jira` - `products` - `jira` - `jira_license` - one-liner of valid jira license without spaces and new line symbols - - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. Optional variables to override: - `jira_version_tag` - Jira version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). 
- Make sure that the Jira version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars - ``` +6. From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars + ``` 9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use this server id for generation `BX02-9YO1-IN86-LO5G`. -{{% /note %}} - {{% note %}} All the datasets use the standard `admin`/`admin` credentials. It's recommended to change default password from UI account page for security reasons. {{% /note %}} -{{% warning %}} -Terminate cluster when it is not used for performance results generation. -{{% /warning %}} - --- ### 5. Setting up an execution environment @@ -563,11 +548,15 @@ The same article has instructions on how to increase limit if needed. To receive scalability benchmark results for two-node Jira DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. 3. 
From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: @@ -641,7 +630,7 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo {{% warning %}} It is recommended to terminate an enterprise-scale environment after completing all tests. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -658,7 +647,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. -If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). +If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. 
+For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index e674fcfa8..2a68911d5 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-07-06" --- # Data Center App Performance Toolkit User Guide For Jira Service Management @@ -47,9 +47,6 @@ DCAPT has fully transitioned to Terraform deployment. If you still wish to use C ### 1. Setting up Jira Service Management Data Center development environment -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Jira Service Management Data Center environment and AWS on k8s. - #### AWS cost estimation for the development environment {{% note %}} @@ -70,40 +67,38 @@ See [Set up an enterprise-scale environment Jira Service Management Data Center Below process describes how to install low-tier Jira Service Management DC with "small" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). 
+1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt-small.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt-small.tfvars - ``` -6. Set **required** variables in `dcapt-small.tfvars` file: - - `environment_name` - any name for you environment, e.g. `dcapt-jira-small`. +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step 1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt-small.tfvars` file: + - `environment_name` - any name for your environment, e.g. `dcapt-jsm-small`. - `products` - `jira` - `jira_image_repository` - `atlassian/jira-servicemanagement` - make sure to select the **Jira Service Management** application. - `jira_license` - one-liner of valid Jira Service Management license without spaces and new line symbols. - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). + Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. 
Optional variables to override: - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Jira Service Management version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~20 min): - ```bash - ./install.sh -c dcapt-small.tfvars +6. From local terminal (Git bash terminal for Windows) start the installation (~20 min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use `BX02-9YO1-IN86-LO5G` Server ID for generation. -{{% /note %}} +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. @@ -305,13 +300,15 @@ App-specific actions are required. Do not proceed with the next step until you h --- ## Enterprise-scale environment +{{% warning %}} +It is recommended to terminate a development environment before creating an enterprise-scale environment. +Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. 
+{{% /warning %}} + After adding your custom app-specific actions, you should now be ready to run the required tests for the Marketplace Data Center Apps Approval process. To do this, you'll need an **enterprise-scale environment**. ### 4. Setting up Jira Service Management Data Center enterprise-scale environment with "large" dataset -We recommend that you use the [official documentation](https://atlassian-labs.github.io/data-center-terraform/) -how to deploy a Jira Service Management Data Center environment and AWS on k8s. - #### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. Monthly charges will be based on your actual usage of AWS services and may vary from the estimates the Calculator has provided. @@ -349,58 +346,46 @@ Data dimensions and values for an enterprise-scale dataset are listed and descri All the datasets use the standard `admin`/`admin` credentials. {{% /note %}} -{{% warning %}} -It is recommended to terminate a development environment before creating an enterprise-scale environment. -Follow [Uninstallation and Cleanup](https://atlassian-labs.github.io/data-center-terraform/userguide/CLEANUP/) instructions. -If you want to keep a development environment up, read [How do I deal with a pre-existing state in multiple environments?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#:~:text=How%20do%20I%20deal%20with%20pre%2Dexisting%20state%20in%20multiple%20environment%3F) -{{% /warning %}} - Below process describes how to install enterprise-scale Jira Service Management DC with "large" dataset included: -1. Read [requirements](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#requirements) - section of the official documentation. -2. Set up [environment](https://atlassian-labs.github.io/data-center-terraform/userguide/PREREQUISITES/#environment-setup). -3. 
Set up [AWS security credentials](https://atlassian-labs.github.io/data-center-terraform/userguide/INSTALLATION/#1-set-up-aws-security-credentials). +1. Create [access keys for IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html#Using_CreateAccessKey). {{% warning %}} Do not use `root` user credentials for cluster creation. Instead, [create an admin user](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-set-up.html#create-an-admin). {{% /warning %}} -4. Clone the project repo: - ```bash - git clone -b 2.4.0 https://github.com/atlassian-labs/data-center-terraform.git && cd data-center-terraform - ``` -5. Copy [`dcapt.tfvars`](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars) file to the `data-center-terraform` folder. - ``` bash - wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/k8s/dcapt.tfvars - ``` -6. Set **required** variables in `dcapt.tfvars` file: - - `environment_name` - any name for you environment, e.g. `dcapt-jira-large`. +2. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. +3. Set AWS access keys created in step 1 in `aws_envs` file: + - `AWS_ACCESS_KEY_ID` + - `AWS_SECRET_ACCESS_KEY` +4. Set **required** variables in `dcapt.tfvars` file: + - `environment_name` - any name for your environment, e.g. `dcapt-jsm-large`. - `products` - `jira` - `jira_image_repository` - `atlassian/jira-servicemanagement` - make sure to select the **Jira Service Management** application. - `jira_license` - one-liner of valid Jira Service Management license without spaces and new line symbols. - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). If specific region is required, contact support.** -7. Optional variables to override: + + {{% note %}} + New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). 
+ Use `BX02-9YO1-IN86-LO5G` Server ID for generation. + {{% /note %}} + +5. Optional variables to override: - `jira_version_tag` - Jira Service Management version to deploy. Supported versions see in [README.md](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/README.md). - Make sure that the Jira Service Management version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. -8. From local terminal (Git bash terminal for Windows) start the installation (~40min): - ```bash - ./install.sh -c dcapt.tfvars - ``` -9. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. - -{{% note %}} -New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). -Use this server id for generation `BX02-9YO1-IN86-LO5G`. -{{% /note %}} +6. From local terminal (Git bash terminal for Windows) start the installation (~40min): + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars + ``` +7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. {{% note %}} All the datasets use the standard `admin`/`admin` credentials. It's recommended to change default password from UI account page for security reasons. {{% /note %}} -{{% warning %}} -Terminate cluster when it is not used for performance results generation. -{{% /warning %}} - --- ### 5. Setting up an execution environment @@ -601,11 +586,15 @@ The same article has instructions on how to increase limit if needed. 
To receive scalability benchmark results for two-node Jira Service Management DC **with** app-specific actions: -1. Navigate to `data-center-terraform` folder. +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder. 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. 3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): - ```bash - ./install.sh -c dcapt.tfvars + ``` bash + docker run --env-file aws_envs \ + -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ + -v "$PWD/.terraform:/data-center-terraform/.terraform" \ + -v "$PWD/logs:/data-center-terraform/logs" \ + -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` 4. Use SSH to connect to execution environment. 5. Run toolkit with docker from the execution environment instance: @@ -677,7 +666,8 @@ Use [scp](https://man7.org/linux/man-pages/man1/scp.1.html) command to copy repo 1. Once completed, in the `./reports` folder, you will be able to review action timings on Jira Service Management Data Center with different numbers of nodes. If you see a significant variation in any action timings between configurations, we recommend taking a look into the app implementation to understand the root cause of this delta. {{% warning %}} -After completing all your tests, delete your Jira Service Management Data Center stacks. +It is recommended to terminate an enterprise-scale environment after completing all tests. +Follow [Terminate enterprise-scale environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-enterprise-scale-environment) instructions. {{% /warning %}} #### Attaching testing results to ECOHELP ticket @@ -694,7 +684,7 @@ Do not forget to attach performance testing results to your ECOHELP ticket. ## Support For Terraform deploy related questions see [Troubleshooting tips](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/)page. 
-If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. -For instructions on how to do this, see [How to troubleshoot a failed Helm release installation?](https://atlassian-labs.github.io/data-center-terraform/troubleshooting/TROUBLESHOOTING/#_1). +If the installation script fails on installing Helm release or any other reason, collect the logs, zip and share to [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. +For instructions on how to collect detailed logs, see [Collect detailed k8s logs](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#collect-detailed-k8s-logs). In case of the above problem or any other technical questions, issues with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. 
From 3ab514a689646b58b94d3268d76deef2f9d15a36 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Tue, 18 Jul 2023 11:28:21 +0300 Subject: [PATCH 21/37] DCA-2058/2059/2060 bump bb/confluence/crowd versions --- README.md | 8 +++--- app/util/k8s/dcapt-small.tfvars | 34 +++++++++++----------- app/util/k8s/dcapt.tfvars | 50 ++++++++++++++++----------------- 3 files changed, 46 insertions(+), 46 deletions(-) diff --git a/README.md b/README.md index 148752063..42592b2be 100644 --- a/README.md +++ b/README.md @@ -11,16 +11,16 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.4.8` and `4.20.24` * Supported Confluence versions: - * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.9`, `7.13.17` and `8.1.4` platform release + * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.11`, `7.13.18` and `8.1.4` platform release * Supported Bitbucket Server versions: - * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.2` and `7.21.11` + * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.2` and `7.21.14` * Supported Crowd versions: - * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.0.5` + * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.1.3` * Supported Bamboo versions: - * Bamboo [Long Term Support release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.1` + * Bamboo [Long Term Support 
release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.3` ## Support In case of technical questions, issues or problems with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel. diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index 4c5113afc..dde001e5b 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -181,32 +181,32 @@ confluence_license = "confluence-license" confluence_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -confluence_version_tag = "7.19.9" +confluence_version_tag = "7.19.11" # Shared home restore configuration. # 8.1.4 DCAPT small dataset EBS snapshot # confluence_shared_home_snapshot_id = "snap-0815ada397b953b93" -# 7.19.9 DCAPT small dataset EBS snapshot -confluence_shared_home_snapshot_id = "snap-04a21b844f187b645" -# 7.13.17 DCAPT small dataset EBS snapshot -# confluence_shared_home_snapshot_id = "snap-02107cfb60888ccbf" +# 7.19.11 DCAPT small dataset EBS snapshot +confluence_shared_home_snapshot_id = "snap-00ede7dca448a6243" +# 7.13.18 DCAPT small dataset EBS snapshot +# confluence_shared_home_snapshot_id = "snap-055811dae848f13ae" # Database restore configuration. # Build number stored within the snapshot and Confluence license are also required, so that Confluence can be fully setup prior to start. 
# 8.1.4 DCAPT small dataset RDS snapshot # confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-8-1-4" -# 7.19.9 DCAPT small dataset RDS snapshot -confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-9" -# 7.13.17 DCAPT small dataset RDS snapshot -# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-13-17" +# 7.19.11 DCAPT small dataset RDS snapshot +confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-11" +# 7.13.18 DCAPT small dataset RDS snapshot +# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-13-18" # Build number for a specific Confluence version can be found in the link below: # https://developer.atlassian.com/server/confluence/confluence-build-information -# 8.1.1 +# 8.1.4 # confluence_db_snapshot_build_number = "9003" -# 7.19.6 +# 7.19.11 confluence_db_snapshot_build_number = "8804" -# 7.13.14 +# 7.13.18 # confluence_db_snapshot_build_number = "8703" # Helm chart version of Confluence @@ -283,21 +283,21 @@ bitbucket_license = "bitbucket-license" bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "7.21.11" +bitbucket_version_tag = "7.21.14" # Shared home restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # -# 7.21.11 DCAPT small dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-01f510d0c4405ce78" +# 7.21.14 DCAPT small dataset EBS snapshot +bitbucket_shared_home_snapshot_id = "snap-03893c494ba7edcf4" # 8.9.2 DCAPT small dataset EBS snapshot #bitbucket_shared_home_snapshot_id = "snap-0fb8cd6bf387057c0" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. 
# -# 7.21.11 DCAPT small dataset RDS snapshot - bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-11" +# 7.21.14 DCAPT small dataset RDS snapshot + bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14" # 8.9.2 DCAPT small dataset RDS snapshot #bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2" diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index daee307b1..8ccd118fd 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -174,17 +174,17 @@ confluence_license = "confluence-license" confluence_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -confluence_version_tag = "7.19.9" +confluence_version_tag = "7.19.11" # Shared home restore configuration. # Make sure confluence version set in `confluence_version_tag` match the snapshot version. # # 8.1.4 DCAPT large dataset EBS snapshot # confluence_shared_home_snapshot_id = "snap-0125fdfcf37dabef5" -# 7.19.9 DCAPT large dataset EBS snapshot -confluence_shared_home_snapshot_id = "snap-0bd74575c95014c10" -# 7.13.17 DCAPT large dataset EBS snapshot -# confluence_shared_home_snapshot_id = "snap-08abae6cf1937e958" +# 7.19.11 DCAPT large dataset EBS snapshot +confluence_shared_home_snapshot_id = "snap-09365c581a158a979" +# 7.13.18 DCAPT large dataset EBS snapshot +# confluence_shared_home_snapshot_id = "snap-04cc3d8455b1ef6e9" # Database restore configuration. # Make sure confluence version set in `confluence_version_tag` match the snapshot version. 
@@ -192,18 +192,18 @@ confluence_shared_home_snapshot_id = "snap-0bd74575c95014c10" # # 8.1.4 DCAPT large dataset RDS snapshot # confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-8-1-4" -# 7.19.9 DCAPT large dataset RDS snapshot -confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-9" -# 7.13.17 DCAPT large dataset RDS snapshot -# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-13-17" +# 7.19.11 DCAPT large dataset RDS snapshot +confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-11" +# 7.13.18 DCAPT large dataset RDS snapshot +# confluence_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-13-18" # Build number for a specific Confluence version can be found in the link below: # https://developer.atlassian.com/server/confluence/confluence-build-information # 8.1.4 # confluence_db_snapshot_build_number = "9003" -# 7.19.9 +# 7.19.11 confluence_db_snapshot_build_number = "8804" -# 7.13.17 +# 7.13.18 # confluence_db_snapshot_build_number = "8703" # Helm chart version of Confluence @@ -212,7 +212,7 @@ confluence_db_snapshot_build_number = "8804" # Installation timeout # Different variables can influence how long it takes the application from installation to ready state. These # can be dataset restoration, resource requirements, number of replicas and others. -confluence_installation_timeout = 25 +confluence_installation_timeout = 30 # Confluence instance resource configuration confluence_cpu = "4" @@ -278,21 +278,21 @@ bitbucket_license = "bitbucket-license" bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "7.21.11" +bitbucket_version_tag = "7.21.14" # Shared home restore configuration. 
# Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # -# 7.21.11 DCAPT large dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-0456406e413ff835b" +# 7.21.14 DCAPT large dataset EBS snapshot +bitbucket_shared_home_snapshot_id = "snap-0ccb8c3d34ff171f1" # 8.9.2 DCAPT large dataset EBS snapshot #bitbucket_shared_home_snapshot_id = "snap-0d933f3d40000e877" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # -# 7.21.11 DCAPT large dataset RDS snapshot -bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-11" +# 7.21.14 DCAPT large dataset RDS snapshot +bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-14" # 8.9.2 DCAPT large dataset RDS snapshot #bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-2" @@ -386,7 +386,7 @@ crowd_license = "crowd-license" crowd_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -crowd_version_tag = "5.0.5" +crowd_version_tag = "5.1.3" # Dataset Restore @@ -395,8 +395,8 @@ crowd_version_tag = "5.0.5" # This volume will be mounted to the NFS server and used when the product is started. # Make sure the snapshot is available in the region you are deploying to and it follows all product requirements. # -# Crowd 5.0.5 DCAPT large dataset EBS snapshot -crowd_shared_home_snapshot_id = "snap-0da31ed523c51a0af" +# Crowd 5.1.3 DCAPT large dataset EBS snapshot +crowd_shared_home_snapshot_id = "snap-0799b1778e63d824b" # Database restore configuration # If you want to restore the database from a snapshot, uncomment the following line and provide the snapshot identifier. @@ -404,9 +404,9 @@ crowd_shared_home_snapshot_id = "snap-0da31ed523c51a0af" # The snapshot should be in the same AWS account and region as the environment to be deployed. 
# Please also provide crowd_db_master_username and crowd_db_master_password that matches the ones in snapshot # -# Crowd 5.0.2 DCAPT large dataset RDS snapshot -crowd_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-0-5" -crowd_db_snapshot_build_number = "1794" +# Crowd 5.1.3 DCAPT large dataset RDS snapshot +crowd_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-3" +crowd_db_snapshot_build_number = "1892" # Helm chart version of Crowd and Crowd agent instances. By default the latest version is installed. # crowd_helm_chart_version = "" @@ -473,8 +473,8 @@ bamboo_license = "bamboo-license" # By default, latest supported by DCAPT version is set. # https://hub.docker.com/r/atlassian/bamboo/tags # https://hub.docker.com/r/atlassian/bamboo-agent-base/tags -bamboo_version_tag = "9.2.1" -bamboo_agent_version_tag = "9.2.1" +bamboo_version_tag = "9.2.3" +bamboo_agent_version_tag = "9.2.3" # Helm chart version of Bamboo and Bamboo agent instances # bamboo_helm_chart_version = "" From 9d8c45eda433d2c82be97de9ccb66b1775d52c65 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Tue, 18 Jul 2023 12:29:52 +0300 Subject: [PATCH 22/37] update bb small docs / add instance type info (#1075) --- docs/dc-apps-performance-toolkit-user-guide-bitbucket.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 03cbce1c1..173ec14d9 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -93,6 +93,7 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase - `products` - `bitbucket` - `bitbucket_license` - one-liner of valid bitbucket license without spaces and new line symbols - `region` - AWS region for deployment. **Do not change default region (`us-east-2`). 
If specific region is required, contact support.** + - `instance_types` - `["t3.2xlarge"]` {{% note %}} New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). From 0c063f4a27df0407fc7b82e1d3eaf1c308f7cd13 Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Tue, 18 Jul 2023 17:26:15 +0200 Subject: [PATCH 23/37] new jira snapshots (#1076) * new jira snapshots * all regions --- app/util/k8s/dcapt-snapshots.json | 8 ++++---- app/util/k8s/dcapt.tfvars | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index f064a3f48..b1a2ce4e5 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -33,10 +33,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-0c98167037a14f031", - "us-east-2": "snap-07099cb68dacf9e8e", - "us-west-1": "snap-07b153b928501f065", - "us-west-2": "snap-0bd8c254fbf4feb25" + "us-east-1": "snap-0640210f62a262aaf", + "us-east-2": "snap-0d619095feaa2eca5", + "us-west-1": "snap-00f6a0fc8ba4c4cce", + "us-west-2": "snap-0d23a05be5f527030" } ] }, diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index daee307b1..216c86a22 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -92,7 +92,7 @@ jira_version_tag = "9.4.8" # Make sure Jira/JSM version set in `jira_version_tag` match the snapshot version. 
# # Jira 9.4.8 DCAPT large dataset EBS snapshot -jira_shared_home_snapshot_id = "snap-07099cb68dacf9e8e" +jira_shared_home_snapshot_id = "snap-0d619095feaa2eca5" # Jira 8.20.24 DCAPT large dataset EBS snapshot # jira_shared_home_snapshot_id = "snap-0b5f4473954e6d959" # JSM 5.4.8 DCAPT large dataset EBS snapshot From 73c660eae4d8df85c20c62b3be950e262c07f865 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Mon, 24 Jul 2023 12:36:53 +0300 Subject: [PATCH 24/37] jira/jsm/update-reindex-info-in-docs (#1078) --- docs/dc-apps-performance-toolkit-user-guide-jira.md | 4 ++++ docs/dc-apps-performance-toolkit-user-guide-jsm.md | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index bd8fb5837..ba8a94409 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -463,6 +463,10 @@ Jira will be temporarily unavailable during the re-indexing process - "503 Servi {{% /note %}} 6. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. +{{% note %}} +Re-index information window is displayed on the **Indexing page**. If the window is not displayed, log in to Jira one more time and navigate to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. If you use the direct link to the **Indexing** page, refresh the page after the re-index is finished. +{{% /note %}} + 7. Attach the screenshot(s) to your ECOHELP ticket. 
**Performance results generation with the app installed:** diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index 2a68911d5..a7734c441 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -500,6 +500,10 @@ Jira Service Management will be temporarily unavailable during the re-indexing p {{% /note %}} 6. **Take a screenshot of the acknowledgment screen** displaying the re-index time and Lucene index timing. +{{% note %}} +Re-index information window is displayed on the **Indexing page**. If the window is not displayed, log in to Jira Service Management one more time and navigate to **![cog icon](/platform/marketplace/images/cog.png) > System > Indexing**. If you use the direct link to the **Indexing** page, refresh the page after the re-index is finished. +{{% /note %}} + 7. Attach the screenshot(s) to your ECOHELP ticket. From 9e0cabc74037b510280df4e35484c788d8c82e11 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Mon, 24 Jul 2023 12:37:34 +0300 Subject: [PATCH 25/37] terraform/add-cpu-limit-info-to-docs (#1079) * terraform/add-cpu-limit-info-to-docs * add 4-nodes clarification --- ...s-performance-toolkit-user-guide-bamboo.md | 16 +++++++++++++ ...erformance-toolkit-user-guide-bitbucket.md | 24 +++++++++++++++---- ...rformance-toolkit-user-guide-confluence.md | 24 +++++++++++++++---- ...ps-performance-toolkit-user-guide-crowd.md | 24 +++++++++++++++---- ...pps-performance-toolkit-user-guide-jira.md | 24 +++++++++++++++---- ...apps-performance-toolkit-user-guide-jsm.md | 24 +++++++++++++++---- 6 files changed, 116 insertions(+), 20 deletions(-) diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index 082cb9b64..15fc71957 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -26,6 
+26,22 @@ test results for the Marketplace approval process. Preferably, use the below rec ## 1. Set up an enterprise-scale environment Bamboo Data Center on k8s +#### EC2 CPU Limit +The installation of Bamboo requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. + #### Setup Bamboo Data Center with an enterprise-scale dataset on k8s Below process describes how to install Bamboo DC with an enterprise-scale dataset included. 
This configuration was created diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 173ec14d9..339451863 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -205,6 +205,22 @@ It is recommended to terminate a development environment before creating an ente Follow [Terminate development environment](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/util/k8s/README.MD#terminate-development-environment) instructions. {{% /warning %}} +#### EC2 CPU Limit +The installation of 4-nodes Bitbucket requires **48** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. + ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. 
Monthly charges will be based on your actual usage of AWS services, and may vary from the estimates the Calculator has provided. @@ -441,8 +457,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~1 hour) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-bitbucket/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Bitbucket DC **with** app-specific actions: @@ -473,8 +489,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~1 hour) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-bitbucket/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} To receive scalability benchmark results for four-node Bitbucket DC with app-specific actions: diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index c5ef42543..bc8b3e963 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -284,6 +284,22 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Confluence Data Center enterprise-scale environment with "large" dataset +#### EC2 CPU Limit +The installation of 4-nodes Confluence requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. + ### AWS cost estimation ### [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. 
Monthly charges will be based on your actual usage of AWS services, and may vary from the estimates the Calculator has provided. @@ -516,8 +532,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-confluence/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Confluence DC **with** app-specific actions: @@ -548,8 +564,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-confluence/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} To receive scalability benchmark results for four-node Confluence DC with app-specific actions: diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index f8b0e307e..1e282fbc0 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -23,6 +23,22 @@ In this document, we cover the use of the Data Center App Performance Toolkit on ## 1. Set up an enterprise-scale environment Crowd Data Center on k8s +#### EC2 CPU Limit +The installation of 4-nodes Crowd requires **16** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. + #### Setup Crowd Data Center with an enterprise-scale dataset on k8s Below process describes how to install Crowd DC with an enterprise-scale dataset included. 
This configuration was created @@ -270,8 +286,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-crowd/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Crowd DC **with** app-specific actions: @@ -318,8 +334,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-crowd/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} To receive scalability benchmark results for four-node Crowd DC with app-specific actions: diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index ba8a94409..a33e835a9 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -273,6 +273,22 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Jira Data Center enterprise-scale environment with "large" dataset +#### EC2 CPU Limit +The installation of 4-nodes Jira requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. + #### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. 
Monthly charges will be based on your actual usage of AWS services and may vary from the estimates the Calculator has provided. @@ -546,8 +562,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Jira DC **with** app-specific actions: @@ -580,8 +596,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} To receive scalability benchmark results for four-node Jira DC with app-specific actions: diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index a7734c441..74bc03429 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -309,6 +309,22 @@ After adding your custom app-specific actions, you should now be ready to run th ### 4. Setting up Jira Service Management Data Center enterprise-scale environment with "large" dataset +#### EC2 CPU Limit +The installation of 4-nodes Jira Service Management requires **32** CPU Cores. Make sure that the current EC2 CPU limit is set to higher number of CPU Cores. [AWS Service Quotas](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) service shows the limit for All Standard Spot Instance Requests. **Applied quota value** is the current CPU limit in the specific region. + +The limit can be increased by creating AWS Support ticket. To request the limit increase fill in [Amazon EC2 Limit increase request form](https://aws.amazon.com/contact-us/ec2-request/): + +| Parameter | Value | +|-----------------------|---------------------------------------------------------------------------------| +| Limit type | EC2 Instances | +| Severity | Urgent business impacting question | +| Region | US East (Ohio) _or your specific region the product is going to be deployed in_ | +| Primary Instance Type | All Standard (A, C, D, H, I, M, R, T, Z) instances | +| Limit | Instance Limit | +| New limit value | _The needed limit of CPU Cores_ | +| Case description | _Give a small description of your case_ | +Select the **Contact Option** and click **Submit** button. + #### AWS cost estimation [AWS Pricing Calculator](https://calculator.aws/) provides an estimate of usage charges for AWS services based on certain information you provide. 
Monthly charges will be based on your actual usage of AWS services and may vary from the estimates the Calculator has provided. @@ -584,8 +600,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jsm/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} To receive scalability benchmark results for two-node Jira Service Management DC **with** app-specific actions: @@ -617,8 +633,8 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. -Use [vCPU limits calculator](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-on-demand-instance-vcpu-increase/) to see current limit. -The same article has instructions on how to increase limit if needed. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +[EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jsm/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} To receive scalability benchmark results for four-node Jira Service Management DC with app-specific actions: From 0a84611703dc9bc121ce3938b6a675412e7b2bef Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Mon, 24 Jul 2023 13:56:44 +0300 Subject: [PATCH 26/37] update version --- app/util/k8s/dcapt-small.tfvars | 10 +-- app/util/k8s/dcapt-snapshots.json | 120 +++++++++++++++--------------- app/util/k8s/dcapt.tfvars | 10 +-- 3 files changed, 70 insertions(+), 70 deletions(-) diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index dde001e5b..b819d61f8 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -283,23 +283,23 @@ bitbucket_license = "bitbucket-license" bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "7.21.14" +bitbucket_version_tag = "8.9.2" # Shared home restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # # 7.21.14 DCAPT small dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-03893c494ba7edcf4" +#bitbucket_shared_home_snapshot_id = "snap-03893c494ba7edcf4" # 8.9.2 DCAPT small dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-0fb8cd6bf387057c0" +bitbucket_shared_home_snapshot_id = "snap-0fb8cd6bf387057c0" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. 
# # 7.21.14 DCAPT small dataset RDS snapshot - bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14" +#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14" # 8.9.2 DCAPT small dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2" +bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2" # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index f064a3f48..635472f78 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -222,17 +222,17 @@ "confluence": { "versions": [ { - "version": "7.19.9", + "version": "7.19.11", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-9", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-9", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-19-9", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-19-9" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-11", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-11", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-19-11", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-19-11" } ] }, @@ -241,10 +241,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-19-9", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-9", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-19-9", - "us-west-2": 
"arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-19-9" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-19-11", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-11", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-19-11", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-19-11" } ] }, @@ -253,10 +253,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-036dea82bc99ce12c", - "us-east-2": "snap-0bd74575c95014c10", - "us-west-1": "snap-0312783bdbba635ae", - "us-west-2": "snap-01e1d66f4372a9f03" + "us-east-1": "snap-03b3a8541b7466ec3", + "us-east-2": "snap-09365c581a158a979", + "us-west-1": "snap-01bc9fdb49bc6641e", + "us-west-2": "snap-061919924738ea4c3" } ] }, @@ -265,27 +265,27 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-0bf417b074f07022f", - "us-east-2": "snap-04a21b844f187b645", - "us-west-1": "snap-047a000c8a98ef356", - "us-west-2": "snap-0374a93b1da17e01d" + "us-east-1": "snap-0dad75f94da7f317b", + "us-east-2": "snap-00ede7dca448a6243", + "us-west-1": "snap-0f72ad2146e3a19c3", + "us-west-2": "snap-09ff2c4be549518a0" } ] } ] }, { - "version": "7.13.17", + "version": "7.13.18", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-13-17", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-13-17", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-13-17", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-13-17" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-13-18", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-13-18", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-7-13-18", + "us-west-2": 
"arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-7-13-18" } ] }, @@ -294,10 +294,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-13-17", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-13-17", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-13-17", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-13-17" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-13-18", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-13-18", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-confluence-small-7-13-18", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-confluence-small-7-13-18" } ] }, @@ -306,10 +306,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-0cca34405ae9b37e5", - "us-east-2": "snap-08abae6cf1937e958", - "us-west-1": "snap-0f6ea8d66f79f2e74", - "us-west-2": "snap-023c61d7135250437" + "us-east-1": "snap-08156f8bb0099942f", + "us-east-2": "snap-04cc3d8455b1ef6e9", + "us-west-1": "snap-039e3a985cf126fc0", + "us-west-2": "snap-0ce8a0947cd581752" } ] }, @@ -318,10 +318,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-045cd3b59a68e281d", - "us-east-2": "snap-02107cfb60888ccbf", - "us-west-1": "snap-051a51af7954fe568", - "us-west-2": "snap-05978ae2c8615fb38" + "us-east-1": "snap-01df9653b5c8f9f64", + "us-east-2": "snap-055811dae848f13ae", + "us-west-1": "snap-058e37561a1cce3e9", + "us-west-2": "snap-0bb261f0b3266d136" } ] } @@ -385,17 +385,17 @@ "bitbucket": { "versions": [ { - "version": "7.21.11", + "version": "7.21.14", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-21-11", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-11", - 
"us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-7-21-11", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-7-21-11" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-21-14", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-14", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-7-21-14", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-7-21-14" } ] }, @@ -404,10 +404,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-11", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-11", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-11", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-11" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-14", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-14", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14" } ] }, @@ -416,10 +416,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-05a4660e8b2b672cc", - "us-east-2": "snap-0456406e413ff835b", - "us-west-1": "snap-037d014b99517b76a", - "us-west-2": "snap-08f8db72740f75e87" + "us-east-1": "snap-01873c2840b4dd3c3", + "us-east-2": "snap-0ccb8c3d34ff171f1", + "us-west-1": "snap-0cde4bd0ed0358d0e", + "us-west-2": "snap-0f8b60b668f3bbb66" } ] }, @@ -428,10 +428,10 @@ "size": "small", "snapshots": [ { - "us-east-1": "snap-08d61da85a886e5cc", - "us-east-2": "snap-0a6edc9184118695c", - "us-west-1": "snap-0cb0694c018915313", - "us-west-2": "snap-0be44afd82c89b714" + "us-east-1": "snap-0a869d88cb2829bc4", + "us-east-2": 
"snap-03893c494ba7edcf4", + "us-west-1": "snap-020733b644be01f62", + "us-west-2": "snap-07e784e26a282e18c" } ] } @@ -495,17 +495,17 @@ "crowd": { "versions": [ { - "version": "5.0.5", + "version": "5.1.3", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-0-5", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-0-5", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-crowd-5-0-5", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-crowd-5-0-5" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-1-3", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-3", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-crowd-5-1-3", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-crowd-5-1-3" } ] }, @@ -514,10 +514,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-07b043e36e6b0df13", - "us-east-2": "snap-0da31ed523c51a0af", - "us-west-1": "snap-065aeb9f22113f544", - "us-west-2": "snap-0c70c43eba3e0e5a0" + "us-east-1": "snap-07faa3e5be795352c", + "us-east-2": "snap-0799b1778e63d824b", + "us-west-1": "snap-073ba0d9dbb31fed6", + "us-west-2": "snap-03da698dad0107ffa" } ] } diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 8ccd118fd..fc6107d0d 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -278,23 +278,23 @@ bitbucket_license = "bitbucket-license" bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "7.21.14" +bitbucket_version_tag = "8.9.2" # Shared home restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. 
# # 7.21.14 DCAPT large dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-0ccb8c3d34ff171f1" +#bitbucket_shared_home_snapshot_id = "snap-0ccb8c3d34ff171f1" # 8.9.2 DCAPT large dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-0d933f3d40000e877" +bitbucket_shared_home_snapshot_id = "snap-0d933f3d40000e877" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # # 7.21.14 DCAPT large dataset RDS snapshot -bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-14" +#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-14" # 8.9.2 DCAPT large dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-2" +bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-2" # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" From d42e7f046176d65904afa07035bcf2d554c7e6de Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Wed, 26 Jul 2023 15:55:13 +0200 Subject: [PATCH 27/37] DCA-2049 Selenium 4.10.0, JMeter 5.6.2, update requirements.txt (#1080) * DCA-2049 Selenium 4.10.0, JMeter 5.6.2, update requirements.txt * fix --- app/bamboo.yml | 4 ++-- app/bitbucket.yml | 4 ++-- app/confluence.yml | 4 ++-- app/crowd.yml | 2 +- app/jira.yml | 4 ++-- app/jsm.yml | 4 ++-- app/selenium_ui/conftest.py | 27 ++++++++++++--------------- app/util/k8s/README.MD | 14 ++++++-------- requirements.txt | 20 ++++++++++---------- 9 files changed, 39 insertions(+), 44 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 23bfde585..485665e85 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -18,7 +18,7 @@ settings: ramp-up: 5m # time to spin all concurrent threads total_actions_per_hour: 2000 # number of total JMeter actions per hour WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.6.2 LANGUAGE: 
en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for JMeter load executor @@ -49,7 +49,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.1 + - selenium==4.10.0 execution: - scenario: jmeter executor: jmeter diff --git a/app/bitbucket.yml b/app/bitbucket.yml index e4eaa6a5b..34fdcf14a 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -18,7 +18,7 @@ settings: ramp-up: 10m # time to spin all concurrent users total_actions_per_hour: 32700 WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.6.2 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: @@ -36,7 +36,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.1 + - selenium==4.10.0 execution: - scenario: ${load_executor} concurrency: ${concurrency} diff --git a/app/confluence.yml b/app/confluence.yml index 2219bdc99..424a7ba1b 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -18,7 +18,7 @@ settings: ramp-up: 5m # time to spin all concurrent users total_actions_per_hour: 20000 WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.6.2 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
# Action percentage for JMeter and Locust load executors @@ -51,7 +51,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.1 + - selenium==4.10.0 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/crowd.yml b/app/crowd.yml index 9ab60e7cc..f87bc8049 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -30,7 +30,7 @@ settings: # ramp-up: 5s # time to spin all concurrent threads # total_actions_per_hour: 720000 # number of total JMeter actions per hour - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.6.2 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: diff --git a/app/jira.yml b/app/jira.yml index 4f13b9288..82401ba1d 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -18,7 +18,7 @@ settings: ramp-up: 3m # time to spin all concurrent users total_actions_per_hour: 54500 # number of total JMeter/Locust actions per hour WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.6.2 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
# Action percentage for Jmeter and Locust load executors @@ -51,7 +51,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.1 + - selenium==4.10.0 execution: - scenario: ${load_executor} executor: ${load_executor} diff --git a/app/jsm.yml b/app/jsm.yml index 068e7e893..ab27a647f 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -21,7 +21,7 @@ settings: total_actions_per_hour_customers: 15000 insight: False # Set True to enable Insight specific tests WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.4.3 + JMETER_VERSION: 5.6.2 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors @@ -67,7 +67,7 @@ services: - python util/post_run/cleanup_results_dir.py - module: pip-install packages: - - selenium==4.9.1 + - selenium==4.10.0 execution: - scenario: ${load_executor}_agents executor: ${load_executor} diff --git a/app/selenium_ui/conftest.py b/app/selenium_ui/conftest.py index 54c48660f..60839ce1a 100644 --- a/app/selenium_ui/conftest.py +++ b/app/selenium_ui/conftest.py @@ -4,19 +4,17 @@ import functools import json import os +import re import sys -import time from datetime import timezone -import re from pprint import pprint +from time import sleep, time import filelock import pytest from selenium.common.exceptions import WebDriverException -from selenium.webdriver.common.desired_capabilities import DesiredCapabilities from selenium.webdriver import Chrome from selenium.webdriver.chrome.options import Options -from time import sleep from util.conf import CONFLUENCE_SETTINGS, JIRA_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS, BAMBOO_SETTINGS from util.exceptions import WebDriverExceptionPostpone @@ -24,7 +22,7 @@ JIRA_DATASET_PROJECTS, JIRA_DATASET_SCRUM_BOARDS, 
JIRA_DATASET_USERS, JIRA_DATASET_CUSTOM_ISSUES, BITBUCKET_USERS, \ BITBUCKET_PROJECTS, BITBUCKET_REPOS, BITBUCKET_PRS, CONFLUENCE_BLOGS, CONFLUENCE_PAGES, CONFLUENCE_CUSTOM_PAGES, \ CONFLUENCE_USERS, ENV_TAURUS_ARTIFACT_DIR, JSM_DATASET_REQUESTS, JSM_DATASET_CUSTOMERS, JSM_DATASET_AGENTS, \ - JSM_DATASET_SERVICE_DESKS_L, JSM_DATASET_SERVICE_DESKS_M, JSM_DATASET_SERVICE_DESKS_S, JSM_DATASET_CUSTOM_ISSUES,\ + JSM_DATASET_SERVICE_DESKS_L, JSM_DATASET_SERVICE_DESKS_M, JSM_DATASET_SERVICE_DESKS_S, JSM_DATASET_CUSTOM_ISSUES, \ JSM_DATASET_INSIGHT_SCHEMAS, JSM_DATASET_INSIGHT_ISSUES, BAMBOO_USERS, BAMBOO_BUILD_PLANS SCREEN_WIDTH = 1920 @@ -134,7 +132,7 @@ def wrapper(*args, **kwargs): if globals.login_failed: pytest.skip("login is failed") node_ip = "" - start = time.time() + start = time() error_msg = 'Success' full_exception = '' if args: @@ -148,14 +146,14 @@ def wrapper(*args, **kwargs): # https://docs.python.org/2/library/sys.html#sys.exc_info exc_type, full_exception = sys.exc_info()[:2] error_msg = f"Failed measure: {interaction} - {exc_type.__name__}" - end = time.time() + end = time() timing = str(int((end - start) * 1000)) lockfile = f'{selenium_results_file}.lock' with filelock.SoftFileLock(lockfile): with open(selenium_results_file, "a+") as jtl_file: - timestamp = round(time.time() * 1000) + timestamp = round(time() * 1000) if explicit_timing: jtl_file.write(f"{timestamp},{explicit_timing*1000},{interaction},,{error_msg}," f",{success},0,0,0,0,,0\n") @@ -178,8 +176,6 @@ def wrapper(*args, **kwargs): def webdriver(app_settings): def driver_init(): chrome_options = Options() - capabilities = DesiredCapabilities.CHROME - capabilities["goog:loggingPrefs"] = {"performance": "ALL"} if app_settings.webdriver_visible and is_docker(): raise Exception("ERROR: WEBDRIVER_VISIBLE is True in .yml, but Docker container does not have a display.") if not app_settings.webdriver_visible: @@ -191,7 +187,8 @@ def driver_init(): chrome_options.add_argument("--disable-infobars") 
chrome_options.add_argument('lang=en') chrome_options.add_experimental_option('prefs', {'intl.accept_languages': 'en,en_US'}) - driver = Chrome(options=chrome_options, desired_capabilities=capabilities) + chrome_options.set_capability('goog:loggingPrefs', {'performance': 'ALL'}) + driver = Chrome(options=chrome_options) driver.app_settings = app_settings return driver @@ -271,7 +268,7 @@ def get_wait_browser_metrics(webdriver, expected_metrics): return data print(f'Waiting for browser metrics, attempt {i}, sleep {sleep_time}') - time.sleep(sleep_time) + sleep(sleep_time) return {} @@ -287,7 +284,7 @@ def measure_dom_requests(webdriver, interaction, description=''): success = True with filelock.SoftFileLock(lockfile): with open(selenium_results_file, "a+") as jtl_file: - timestamp = round(time.time() * 1000) + timestamp = round(time() * 1000) jtl_file.write( f"{timestamp},{timing},{interaction},,{error_msg},,{success},0,0,0,0,{webdriver.node_ip},0\n") print(f"{timestamp},{timing},{interaction},,{error_msg},,{success},0,0,0,0,{webdriver.node_ip},0\n") @@ -351,7 +348,7 @@ def measure_browser_navi_metrics(webdriver, dataset, expected_metrics): for metric in metrics: interaction = metric['key'] ready_for_user_timing = metric['ready_for_user'] - timestamp = round(time.time() * 1000) + timestamp = round(time() * 1000) node_ip = webdriver.node_ip jtl_file.write( f"{timestamp},{ready_for_user_timing},{interaction},,{error_msg},,{success},0,0,0,0,{node_ip},0\n") @@ -430,7 +427,7 @@ def get_screen_shots(request, webdriver): action_name = request.node.rep_call.head_line error_text = request.node.rep_call.longreprtext with open(selenium_error_file, mode) as err_file: - timestamp = round(time.time() * 1000) + timestamp = round(time() * 1000) dt = datetime.datetime.now() utc_time = dt.replace(tzinfo=timezone.utc) str_time = utc_time.strftime("%m-%d-%Y, %H:%M:%S") diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index a75f081e5..adc3f251f 100644 --- 
a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -9,7 +9,6 @@ ``` bash docker run --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ --v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -17,7 +16,6 @@ docker run --env-file aws_envs \ ``` bash docker run --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ --v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars ``` @@ -33,7 +31,6 @@ docker run --env-file aws_envs \ ``` bash docker run --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ --v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` @@ -41,7 +38,6 @@ docker run --env-file aws_envs \ ``` bash docker run --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ --v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars ``` @@ -49,24 +45,26 @@ docker run --env-file aws_envs \ # Collect detailed k8s logs Set AWS credential in [aws_envs](./aws_envs) file and run command: ``` bash -export CLUSTER_NAME=your_cluster_name +export ENVIRONMENT_NAME=your_environment_name export REGION=us-east-2 + docker run --env-file aws_envs \ -v "$PWD/k8s_logs:/data-center-terraform/k8s_logs" \ -v "$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform ./scripts/collect_k8s_logs.sh $CLUSTER_NAME $REGION k8s_logs +-it atlassianlabs/terraform ./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs ``` # Force terminate cluster Set AWS credential in [aws_envs](./aws_envs) file 
and run command: ``` bash -export CLUSTER_NAME=your_cluster_name +export ENVIRONMENT_NAME=your_environment_name export REGION=us-east-2 + docker run --env-file aws_envs \ --workdir="/data-center-terraform" \ --entrypoint="python" \ -v "$PWD/terminate_cluster.py:/data-center-terraform/terminate_cluster.py" \ -atlassian/dcapt terminate_cluster.py --cluster_name $CLUSTER_NAME --aws_region $REGION +atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-cluster --aws_region $REGION ``` # Non default product version or aws region diff --git a/requirements.txt b/requirements.txt index 702a73b0a..798691b5d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,12 +1,12 @@ -matplotlib==3.7.1 -pandas==2.0.1 -numpy==1.24.3 -scipy==1.10.1 -pytest==7.3.1 +matplotlib==3.7.2 +pandas==2.0.3 +numpy==1.25.1 +scipy==1.11.1 +pytest==7.4.0 locust==2.15.1 -selenium==4.9.1 -filelock==3.12.0 +selenium==4.10.0 +filelock==3.12.2 packaging==23.1 -prettytable==3.7.0 -bzt==1.16.22 -boto3==1.26.149 +prettytable==3.8.0 +bzt==1.16.23 +boto3==1.28.11 From 4f63f39333ef7cb17ffd255bfb1397d3e539a227 Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Wed, 26 Jul 2023 16:50:44 +0200 Subject: [PATCH 28/37] Fix default postfix for jira/jsm, update docs (#1081) * Fix default postfix for jira/jsm, update docs * align indent --- app/crowd.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- docs/dc-apps-performance-toolkit-user-guide-bamboo.md | 2 +- docs/dc-apps-performance-toolkit-user-guide-jira-cf.md | 6 +++--- docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md | 6 +++--- docs/dc-apps-performance-toolkit-user-guide-jsm.md | 4 ++-- 7 files changed, 12 insertions(+), 12 deletions(-) diff --git a/app/crowd.yml b/app/crowd.yml index f87bc8049..4dae44c23 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -9,7 +9,7 @@ settings: application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 4990, etc secure: True # Set False to allow insecure connections, e.g. 
when using self-signed SSL certificate - application_postfix: # e.g. /crowd in case of url like http://localhost:4990/crowd + application_postfix: /crowd # e.g. /crowd in case of url like http://localhost:4990/crowd admin_login: admin admin_password: admin application_name: crowd diff --git a/app/jira.yml b/app/jira.yml index 82401ba1d..d0040bb4f 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -9,7 +9,7 @@ settings: application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. + application_postfix: /jira # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. diff --git a/app/jsm.yml b/app/jsm.yml index ab27a647f..31f59be6e 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -9,7 +9,7 @@ settings: application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. + application_postfix: /jira # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. 
diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index 15fc71957..681ce0151 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -235,7 +235,7 @@ Instead, set those values directly in `.yml` file on execution environment insta application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 8085, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /babmoo in case of url like http://localhost:8085/bamboo + application_postfix: /bamboo # e.g. /bamboo in case of url like http://localhost:8085/bamboo admin_login: admin admin_password: admin load_executor: jmeter diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md b/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md index 9ec39830e..f6376e4db 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md @@ -171,7 +171,7 @@ Make sure **English (United States)** language is selected as a default language - `application_protocol`: http or https. - `application_port`: for HTTP - 80, for HTTPS - 443, 8080, 2990 or your instance-specific port. - `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. when using self-signed SSL certificate. - - `application_postfix`: it is empty by default; e.g., /jira for url like this http://localhost:2990/jira. + - `application_postfix`: set to empty for CloudFormation deployment; e.g., /jira for url like this http://localhost:2990/jira. - `admin_login`: admin user username. - `admin_password`: admin user password. - `load_executor`: executor for load tests. Valid options are [jmeter](https://jmeter.apache.org/) (default) or [locust](https://locust.io/). 
@@ -719,8 +719,8 @@ Instead, set those values directly in `.yml` file on execution environment insta application_hostname: test_jira_instance.atlassian.com # Jira DC hostname without protocol and port e.g. test-jira.atlassian.com or localhost application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc - secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /jira in case of url like http://localhost:2990/jira + secure: True # set False to allow insecure connections, e.g. when using self-signed SSL certificate + application_postfix: # set to empty for CloudFormation deployment. e.g. /jira in case of url like http://localhost:2990/jira admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md b/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md index 0f29a228e..d54c7c52b 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md @@ -294,7 +294,7 @@ Make sure **English (United States)** language is selected as a default language - `application_protocol`: http or https. - `application_port`: for HTTP - 80, for HTTPS - 443, 8080, 2990 or your instance-specific port. - `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. when using self-signed SSL certificate. - - `application_postfix`: it is empty by default; e.g., /jira for url like this http://localhost:2990/jira. + - `application_postfix`: set to empty for CloudFormation deployment; e.g., /jira for url like this http://localhost:2990/jira. - `admin_login`: admin user username. - `admin_password`: admin user password. - `load_executor`: executor for load tests. Valid options are [jmeter](https://jmeter.apache.org/) (default) or [locust](https://locust.io/). 
@@ -874,8 +874,8 @@ Instead, set those values directly in `.yml` file on execution environment insta application_hostname: test_jsm_instance.atlassian.com # Jira Service Management DC hostname without protocol and port e.g. test-jsm.atlassian.com or localhost application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc - secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: # e.g. /jira in case of url like http://localhost:2990/jira + secure: True # set False to allow insecure connections, e.g. when using self-signed SSL certificate + application_postfix: # set to empty for CloudFormation deployment. e.g. /jira in case of url like http://localhost:2990/jira admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index 74bc03429..255f6f86a 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -120,7 +120,7 @@ Make sure **English (United States)** language is selected as a default language - `application_protocol`: http or https. - `application_port`: for HTTP - 80, for HTTPS - 443, 8080, 2990 or your instance-specific port. - `secure`: True or False. Default value is True. Set False to allow insecure connections, e.g. when using self-signed SSL certificate. - - `application_postfix`: it is empty by default; e.g., /jira for url like this http://localhost:2990/jira. + - `application_postfix`: /jira # default value for TerraForm deployment; e.g., /jira for url like this http://localhost:2990/jira. - `admin_login`: admin user username. - `admin_password`: admin user password. - `load_executor`: executor for load tests. Valid options are [jmeter](https://jmeter.apache.org/) (default) or [locust](https://locust.io/). 
@@ -422,7 +422,7 @@ Instead, set those values directly in `.yml` file on execution environment insta application_protocol: http # http or https application_port: 80 # 80, 443, 8080, 2990, etc secure: True # Set False to allow insecure connections, e.g. when using self-signed SSL certificate - application_postfix: /jira # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. + application_postfix: /jira # e.g. /jira for TerraForm deployment url like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`. Leave this value blank for url without postfix. admin_login: admin admin_password: admin load_executor: jmeter # jmeter and locust are supported. jmeter by default. From 7771bc2dcb880c7cb3234f3f10430b06f8f6a90e Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Tue, 1 Aug 2023 11:40:36 +0300 Subject: [PATCH 29/37] bitbucket/set-bb7-as-main-version --- README.md | 2 +- app/util/k8s/dcapt-small.tfvars | 10 +++++----- app/util/k8s/dcapt.tfvars | 10 +++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index 42592b2be..c0a936a10 100644 --- a/README.md +++ b/README.md @@ -14,7 +14,7 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.19.11`, `7.13.18` and `8.1.4` platform release * Supported Bitbucket Server versions: - * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.2` and `7.21.14` + * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.21.14` and `8.9.2` * Supported Crowd versions: * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.1.3` diff 
--git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars index b819d61f8..c50270074 100644 --- a/app/util/k8s/dcapt-small.tfvars +++ b/app/util/k8s/dcapt-small.tfvars @@ -283,23 +283,23 @@ bitbucket_license = "bitbucket-license" bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "8.9.2" +bitbucket_version_tag = "7.21.14" # Shared home restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # # 7.21.14 DCAPT small dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-03893c494ba7edcf4" +bitbucket_shared_home_snapshot_id = "snap-03893c494ba7edcf4" # 8.9.2 DCAPT small dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-0fb8cd6bf387057c0" +#bitbucket_shared_home_snapshot_id = "snap-0fb8cd6bf387057c0" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. 
# # 7.21.14 DCAPT small dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14" +bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-14" # 8.9.2 DCAPT small dataset RDS snapshot -bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2" +#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-2" # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index da68a4aa2..642ae3c45 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -278,23 +278,23 @@ bitbucket_license = "bitbucket-license" bitbucket_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "8.9.2" +bitbucket_version_tag = "7.21.14" # Shared home restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. # # 7.21.14 DCAPT large dataset EBS snapshot -#bitbucket_shared_home_snapshot_id = "snap-0ccb8c3d34ff171f1" +bitbucket_shared_home_snapshot_id = "snap-0ccb8c3d34ff171f1" # 8.9.2 DCAPT large dataset EBS snapshot -bitbucket_shared_home_snapshot_id = "snap-0d933f3d40000e877" +#bitbucket_shared_home_snapshot_id = "snap-0d933f3d40000e877" # Database restore configuration. # Make sure Bitbucket version set in `bitbucket_version_tag` match the snapshot version. 
# # 7.21.14 DCAPT large dataset RDS snapshot -#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-14" +bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-14" # 8.9.2 DCAPT large dataset RDS snapshot -bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-2" +#bitbucket_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-2" # Helm chart version of Bitbucket #bitbucket_helm_chart_version = "" From 2de5435fc1a5a93c17efdf86043902a726fbbd2d Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Thu, 3 Aug 2023 13:34:47 +0200 Subject: [PATCH 30/37] Downgrade JMeter version to 5.5 due to problem with emoji test (#1085) --- app/bamboo.yml | 2 +- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/crowd.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- app/util/jmeter/start_jmeter_ui.py | 2 +- docs/dc-apps-performance-toolkit-user-guide-bamboo.md | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 485665e85..ff6c8e26d 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -18,7 +18,7 @@ settings: ramp-up: 5m # time to spin all concurrent threads total_actions_per_hour: 2000 # number of total JMeter actions per hour WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.6.2 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
# Action percentage for JMeter load executor diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 34fdcf14a..87bb74395 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -18,7 +18,7 @@ settings: ramp-up: 10m # time to spin all concurrent users total_actions_per_hour: 32700 WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.6.2 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: diff --git a/app/confluence.yml b/app/confluence.yml index 424a7ba1b..dbc5fc68e 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -18,7 +18,7 @@ settings: ramp-up: 5m # time to spin all concurrent users total_actions_per_hour: 20000 WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.6.2 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for JMeter and Locust load executors diff --git a/app/crowd.yml b/app/crowd.yml index 4dae44c23..056f02626 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -30,7 +30,7 @@ settings: # ramp-up: 5s # time to spin all concurrent threads # total_actions_per_hour: 720000 # number of total JMeter actions per hour - JMETER_VERSION: 5.6.2 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
services: diff --git a/app/jira.yml b/app/jira.yml index d0040bb4f..02f8b2ecc 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -18,7 +18,7 @@ settings: ramp-up: 3m # time to spin all concurrent users total_actions_per_hour: 54500 # number of total JMeter/Locust actions per hour WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.6.2 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors diff --git a/app/jsm.yml b/app/jsm.yml index 31f59be6e..d6c7cb2d4 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -21,7 +21,7 @@ settings: total_actions_per_hour_customers: 15000 insight: False # Set True to enable Insight specific tests WEBDRIVER_VISIBLE: False - JMETER_VERSION: 5.6.2 + JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
# Action percentage for Jmeter and Locust load executors diff --git a/app/util/jmeter/start_jmeter_ui.py b/app/util/jmeter/start_jmeter_ui.py index 13c4e4a98..fa1ddefe5 100644 --- a/app/util/jmeter/start_jmeter_ui.py +++ b/app/util/jmeter/start_jmeter_ui.py @@ -137,7 +137,7 @@ def print_settings(self, settings): print(setting.replace('\n', '')) def launch_jmeter_ui(self): - jmeter_path = JMETER_HOME / self.env_settings['JMETER_VERSION'] / 'bin' / 'jmeter' + jmeter_path = JMETER_HOME / str(self.env_settings['JMETER_VERSION']) / 'bin' / 'jmeter' command = [str(jmeter_path), "-p", str(PROPERTIES), "-t", str(self.jmx)] print("JMeter start command: {}".format(' '.join(command))) print("Working dir: {}".format(APP_DIR)) diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index 681ce0151..9ef18877a 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -320,7 +320,7 @@ the next steps. For an enterprise-scale environment run, the acceptable success ##### Run 3 (~50 min) -To receive scalability benchmark results for one-node Bamboo DC **with app** and **with app-specific actions**: +To receive results for Bamboo DC **with app** and **with app-specific actions**: 1. Apply app-specific code changes to a new branch of forked repo. 1. Use SSH to connect to execution environment. 
From 902e5fcba01e14061f7f274381e3a1959cf6ca78 Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Thu, 3 Aug 2023 15:46:18 +0200 Subject: [PATCH 31/37] Bamboo docs improvements (#1086) --- app/util/bamboo/bamboo_dataset_generator/README.md | 5 +++++ .../src/main/java/bamboogenerator/Main.java | 4 +++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/app/util/bamboo/bamboo_dataset_generator/README.md b/app/util/bamboo/bamboo_dataset_generator/README.md index 5a9a55bb1..d5731a06c 100644 --- a/app/util/bamboo/bamboo_dataset_generator/README.md +++ b/app/util/bamboo/bamboo_dataset_generator/README.md @@ -7,6 +7,11 @@ Configuration located inside: [src/main/java/bamboogenerator/Main.java](src/main **Client Configuration** - `BAMBOO_SERVER_URL` - the URL of Bamboo + + For TerraForm deployment URL should have port and postfix + ``` + BAMBOO_SERVER_URL = "http://my-bamboo.amazonaws.com:80/bamboo" + ``` - `ADMIN_USER_NAME` - the username of admin account diff --git a/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/Main.java b/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/Main.java index 2a543cf5a..f27a1fff3 100644 --- a/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/Main.java +++ b/app/util/bamboo/bamboo_dataset_generator/src/main/java/bamboogenerator/Main.java @@ -27,7 +27,9 @@ public class Main { private static final Logger LOG = LoggerFactory.getLogger(Main.class); - private static final String BAMBOO_SERVER_URL = "http://0.0.0.0:8085"; + // e.g. for TerraForm deployment: BAMBOO_SERVER_URL = "http://my-babmoo.amazonaws.com:80/bamboo" + // e.g. 
for localhost deployment: BAMBOO_SERVER_URL = "http://0.0.0.0:8085" + private static final String BAMBOO_SERVER_URL = "http://my-babmoo.amazonaws.com:80/bamboo"; private static final String ADMIN_USER_NAME = "admin"; // NOTE: Please make sure you haven't changed these values after initial run From 28e0eee226360fa9efa7fd2140745b0f2d377d4d Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Thu, 10 Aug 2023 13:15:44 +0300 Subject: [PATCH 32/37] update Crowd resources --- app/util/k8s/dcapt.tfvars | 14 +++++++------- ...dc-apps-performance-toolkit-user-guide-crowd.md | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 642ae3c45..4616a04bf 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -31,7 +31,7 @@ resource_tags = {Name: "dcapt-testing"} # Instance types that is preferred for EKS node group. # Confluence, Bamboo, Jira - use default value # Bitbucket - ["m5.4xlarge"] -# Crowd - ["c5.xlarge"] +# Crowd - ["m5.xlarge"] # ! REQUIRED ! instance_types = ["m5.2xlarge"] instance_disk_size = 100 @@ -418,19 +418,19 @@ crowd_installation_timeout = 20 # Crowd instance resource configuration crowd_cpu = "2" -crowd_mem = "3Gi" -crowd_min_heap = "1024m" -crowd_max_heap = "1024m" +crowd_mem = "8Gi" +crowd_min_heap = "2048m" +crowd_max_heap = "2048m" # Storage -crowd_local_home_size = "10Gi" -crowd_shared_home_size = "10Gi" +crowd_local_home_size = "15Gi" +crowd_shared_home_size = "15Gi" # Crowd NFS instance resource configuration crowd_nfs_requests_cpu = "1" crowd_nfs_requests_memory = "1Gi" crowd_nfs_limits_cpu = "1" -crowd_nfs_limits_memory = "1Gi" +crowd_nfs_limits_memory = "2Gi" # RDS instance configurable attributes. Note that the allowed value of allocated storage and iops may vary based on instance type. # You may want to adjust these values according to your needs. 
diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index 1e282fbc0..185dfbc76 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -57,7 +57,7 @@ specifically for performance testing during the DC app review process. - `products` - `crowd` - `crowd_license` - one-liner of valid crowd license without spaces and new line symbols - `region` - **Do not change default region (`us-east-2`). If specific region is required, contact support.** - - `instance_types` - `["c5.xlarge"]` + - `instance_types` - `["m5.xlarge"]` {{% note %}} New trial license could be generated on [my atlassian](https://my.atlassian.com/license/evaluation). From b554f96eaa00d42194dc4a29f6011a03cd91c7a6 Mon Sep 17 00:00:00 2001 From: OlehStefanyshyn Date: Thu, 10 Aug 2023 13:49:11 +0300 Subject: [PATCH 33/37] bump Crowd 5.1.4 --- README.md | 2 +- app/util/k8s/dcapt-snapshots.json | 18 +++++++++--------- app/util/k8s/dcapt.tfvars | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/README.md b/README.md index c0a936a10..82a56a7a2 100644 --- a/README.md +++ b/README.md @@ -17,7 +17,7 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `7.21.14` and `8.9.2` * Supported Crowd versions: - * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.1.3` + * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.1.4` * Supported Bamboo versions: * Bamboo [Long Term Support release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.3` diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 91c071772..51e46840f 100644 --- 
a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -495,17 +495,17 @@ "crowd": { "versions": [ { - "version": "5.1.3", + "version": "5.1.4", "data": [ { "type": "rds", "size": "large", "snapshots": [ { - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-1-3", - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-3", - "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-crowd-5-1-3", - "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-crowd-5-1-3" + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-1-4", + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-4", + "us-west-1": "arn:aws:rds:us-west-1:585036043680:snapshot:dcapt-crowd-5-1-4", + "us-west-2": "arn:aws:rds:us-west-2:585036043680:snapshot:dcapt-crowd-5-1-4" } ] }, @@ -514,10 +514,10 @@ "size": "large", "snapshots": [ { - "us-east-1": "snap-07faa3e5be795352c", - "us-east-2": "snap-0799b1778e63d824b", - "us-west-1": "snap-073ba0d9dbb31fed6", - "us-west-2": "snap-03da698dad0107ffa" + "us-east-1": "", + "us-east-2": "snap-0a8e229690be9ae30", + "us-west-1": "", + "us-west-2": "" } ] } diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 4616a04bf..4d59db180 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -386,7 +386,7 @@ crowd_license = "crowd-license" crowd_replica_count = 1 # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -crowd_version_tag = "5.1.3" +crowd_version_tag = "5.1.4" # Dataset Restore @@ -395,8 +395,8 @@ crowd_version_tag = "5.1.3" # This volume will be mounted to the NFS server and used when the product is started. # Make sure the snapshot is available in the region you are deploying to and it follows all product requirements. 
# -# Crowd 5.1.3 DCAPT large dataset EBS snapshot -crowd_shared_home_snapshot_id = "snap-0799b1778e63d824b" +# Crowd 5.1.4 DCAPT large dataset EBS snapshot +crowd_shared_home_snapshot_id = "snap-0a8e229690be9ae30" # Database restore configuration # If you want to restore the database from a snapshot, uncomment the following line and provide the snapshot identifier. @@ -404,9 +404,9 @@ crowd_shared_home_snapshot_id = "snap-0799b1778e63d824b" # The snapshot should be in the same AWS account and region as the environment to be deployed. # Please also provide crowd_db_master_username and crowd_db_master_password that matches the ones in snapshot # -# Crowd 5.1.3 DCAPT large dataset RDS snapshot -crowd_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-3" -crowd_db_snapshot_build_number = "1892" +# Crowd 5.1.4 DCAPT large dataset RDS snapshot +crowd_db_snapshot_id = "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-1-4" +crowd_db_snapshot_build_number = "1893" # Helm chart version of Crowd and Crowd agent instances. By default the latest version is installed. 
# crowd_helm_chart_version = "" From 31968a58c9072eb9333bb8ec7c59a4f3867796d3 Mon Sep 17 00:00:00 2001 From: Alex Metelytsia Date: Wed, 16 Aug 2023 13:13:04 +0200 Subject: [PATCH 34/37] Docs updates for 7.5.0 (#1089) --- README.md | 2 +- app/util/k8s/README.MD | 12 ++++----- ...s-performance-toolkit-user-guide-bamboo.md | 15 +++++------ ...erformance-toolkit-user-guide-bitbucket.md | 21 ++++++---------- ...rformance-toolkit-user-guide-confluence.md | 25 ++++++++----------- ...ps-performance-toolkit-user-guide-crowd.md | 23 +++++++---------- ...-performance-toolkit-user-guide-jira-cf.md | 23 +++++++---------- ...pps-performance-toolkit-user-guide-jira.md | 25 ++++++++----------- ...s-performance-toolkit-user-guide-jsm-cf.md | 23 +++++++---------- ...apps-performance-toolkit-user-guide-jsm.md | 25 ++++++++----------- 10 files changed, 78 insertions(+), 116 deletions(-) diff --git a/README.md b/README.md index 82a56a7a2..2286db13e 100644 --- a/README.md +++ b/README.md @@ -95,7 +95,7 @@ git --version ``` We recommend using [virtualenv](https://virtualenv.pypa.io/en/latest/) for Taurus. See example setup below. 
-## Example setup for clean Ubuntu 20.04 +## Example setup for clean Ubuntu 22.04 JDK setup (if missing): ``` sudo apt-get update diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index adc3f251f..d612862e4 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -7,14 +7,14 @@ * `license` * run install development environment command: ``` bash -docker run --env-file aws_envs \ +docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` ## Terminate development environment ``` bash -docker run --env-file aws_envs \ +docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars @@ -29,14 +29,14 @@ docker run --env-file aws_envs \ * `license` * run install enterprise-scale environment command: ``` bash -docker run --env-file aws_envs \ +docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./install.sh -c config.tfvars ``` ## Terminate enterprise-scale environment ``` bash -docker run --env-file aws_envs \ +docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform ./uninstall.sh -t -c config.tfvars @@ -48,7 +48,7 @@ Set AWS credential in [aws_envs](./aws_envs) file and run command: export ENVIRONMENT_NAME=your_environment_name export REGION=us-east-2 -docker run --env-file aws_envs \ +docker run --pull=always --env-file aws_envs \ -v "$PWD/k8s_logs:/data-center-terraform/k8s_logs" \ -v "$PWD/logs:/data-center-terraform/logs" \ -it atlassianlabs/terraform 
./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs @@ -60,7 +60,7 @@ Set AWS credential in [aws_envs](./aws_envs) file and run command: export ENVIRONMENT_NAME=your_environment_name export REGION=us-east-2 -docker run --env-file aws_envs \ +docker run --pull=always --env-file aws_envs \ --workdir="/data-center-terraform" \ --entrypoint="python" \ -v "$PWD/terminate_cluster.py:/data-center-terraform/terminate_cluster.py" \ diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md index 9ef18877a..0842c9316 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-07-06" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Bamboo @@ -68,7 +68,7 @@ specifically for performance testing during the DC app review process. 5. From local terminal (Git bash terminal for Windows) start the installation (~40min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -251,7 +251,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. 
Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) @@ -285,8 +285,7 @@ To receive performance baseline results **without** an app installed and **witho ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/bamboo/YY-MM-DD-hh-mm-ss` folder: @@ -309,8 +308,7 @@ the next steps. For an enterprise-scale environment run, the acceptable success ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml ``` {{% note %}} @@ -329,8 +327,7 @@ To receive results for Bamboo DC **with app** and **with app-specific actions**: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bamboo.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 339451863..91fc257ea 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-07-06" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Bitbucket @@ -320,7 +320,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. 
Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). @@ -362,8 +362,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/bitbucket/YY-MM-DD-hh-mm-ss` folder: @@ -387,8 +386,7 @@ To receive performance results with an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` {{% note %}} @@ -445,8 +443,7 @@ To receive scalability benchmark results for one-node Bitbucket DC **with** app- ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` {{% note %}} @@ -467,7 +464,7 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- 2. 
Open `dcapt.tfvars` file and set `bitbucket_replica_count` value to `2`. 3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -477,8 +474,7 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- 5. Run toolkit with docker from the execution environment instance: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` {{% note %}} @@ -500,8 +496,7 @@ To receive scalability benchmark results for four-node Bitbucket DC with app-spe ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt bitbucket.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index bc8b3e963..d5b37bce9 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-07-06" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Confluence @@ -104,7 +104,7 @@ Below process describes how to install low-tier Confluence DC with "small" datas - Make sure that the Confluence version specified in **confluence_version_tag** is consistent with the EBS and RDS snapshot versions. 
Additionally, ensure that corresponding version snapshot lines are uncommented. 6. From local terminal (Git bash terminal for Windows) start the installation (~20 min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -351,7 +351,7 @@ Below process describes how to install enterprise-scale Confluence DC with "larg - Make sure that the Confluence version specified in **confluence_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 6. From local terminal (Git bash terminal for Windows) start the installation (~40min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -395,7 +395,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). 
@@ -437,8 +437,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/confluence/YY-MM-DD-hh-mm-ss` folder: @@ -462,8 +461,7 @@ To receive performance results with an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` {{% note %}} @@ -520,8 +518,7 @@ To receive scalability benchmark results for one-node Confluence DC **with** app ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` {{% note %}} @@ -542,7 +539,7 @@ To receive scalability benchmark results for two-node Confluence DC **with** app 2. Open `dcapt.tfvars` file and set `confluence_replica_count` value to `2`. 3. From local terminal (Git bash terminal for Windows) start scaling (~20 min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -552,8 +549,7 @@ To receive scalability benchmark results for two-node Confluence DC **with** app 5. 
Run toolkit with docker from the execution environment instance: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` {{% note %}} @@ -575,8 +571,7 @@ To receive scalability benchmark results for four-node Confluence DC with app-sp ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt confluence.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index 185dfbc76..98c63c59b 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-07-06" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Crowd @@ -66,7 +66,7 @@ specifically for performance testing during the DC app review process. 5. From local terminal (Git bash terminal for Windows) start the installation (~40min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -153,7 +153,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. 
* Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). @@ -192,8 +192,7 @@ To receive performance baseline results **without** an app installed and **witho ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/crowd/YY-MM-DD-hh-mm-ss` folder: @@ -215,8 +214,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` {{% note %}} @@ -274,8 +272,7 @@ To receive scalability benchmark results for one-node Crowd DC **with** app-spec ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` {{% note %}} @@ -296,7 +293,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec 2. Open `dcapt.tfvars` file and set `crowd_replica_count` value to `2`. 3. 
From local terminal (Git bash terminal for Windows) start scaling (~20 min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -322,8 +319,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` {{% note %}} @@ -360,8 +356,7 @@ To receive scalability benchmark results for four-node Crowd DC with app-specifi ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt crowd.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md b/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md index f6376e4db..b1b5547c7 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira-cf.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Jira (CloudFormation deployment) @@ -71,7 +71,7 @@ All important parameters are listed and described in this section. 
For all other | Parameter | Recommended value | | --------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Jira Product | Software | -| Version | The Data Center App Performance Toolkit officially supports `8.20.22`, `9.4.6` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | +| Version | The Data Center App Performance Toolkit officially supports `8.20.24`, `9.4.8` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | **Cluster nodes** @@ -392,7 +392,7 @@ All important parameters are listed and described in this section. For all other | Parameter | Recommended Value | | --------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Jira Product | Software | -| Version | The Data Center App Performance Toolkit officially supports `8.20.22`, `9.4.6` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | +| Version | The Data Center App Performance Toolkit officially supports `8.20.24`, `9.4.8` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | **Cluster nodes** @@ -732,7 +732,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. 
* Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). @@ -774,8 +774,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder: @@ -817,8 +816,7 @@ If your Amazon RDS DB instance class is lower than `db.m5.xlarge` it is required ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} @@ -876,8 +874,7 @@ To receive scalability benchmark results for one-node Jira DC **with** app-speci ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} @@ -922,8 +919,7 @@ In case if index synchronization is failed by some reason (e.g. 
application stat ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` @@ -949,8 +945,7 @@ To receive scalability benchmark results for four-node Jira DC with app-specific ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index a33e835a9..c851159f7 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-07-06" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Jira @@ -90,7 +90,7 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc - Make sure that the Jira version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 6. 
From local terminal (Git bash terminal for Windows) start the installation (~20 min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -357,7 +357,7 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat - Make sure that the Jira version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 6. From local terminal (Git bash terminal for Windows) start the installation (~20 min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -401,7 +401,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). 
@@ -443,8 +443,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder: @@ -491,8 +490,7 @@ Re-index information window is displayed on the **Indexing page**. If the window ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} @@ -550,8 +548,7 @@ To receive scalability benchmark results for one-node Jira DC **with** app-speci ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} @@ -572,7 +569,7 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. 3. 
From local terminal (Git bash terminal for Windows) start scaling (~20 min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -583,8 +580,7 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` @@ -607,8 +603,7 @@ To receive scalability benchmark results for four-node Jira DC with app-specific ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md b/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md index d54c7c52b..93310e73a 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm-cf.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-04-20" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Jira Service Management (CloudFormation deployment) @@ -71,7 +71,7 @@ All important parameters are listed and described in this section. 
For all other | Parameter | Recommended value | | --------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Jira Product | ServiceManagement | -| Version | The Data Center App Performance Toolkit officially supports `4.20.22`, `5.4.6` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | +| Version | The Data Center App Performance Toolkit officially supports `4.20.24`, `5.4.8` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | **Cluster nodes** @@ -553,7 +553,7 @@ All important parameters are listed and described in this section. For all other | Parameter | Recommended Value | | --------- |--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | Jira Product | ServiceManagement | -| Version | The Data Center App Performance Toolkit officially supports `4.20.22`, `5.4.6` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | +| Version | The Data Center App Performance Toolkit officially supports `4.20.24`, `5.4.8` ([Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html)) | **Cluster nodes** @@ -891,7 +891,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. 
* Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). @@ -933,8 +933,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder: @@ -976,8 +975,7 @@ If your Amazon RDS DB instance class is lower than `db.m5.xlarge` it is required ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -1035,8 +1033,7 @@ To receive scalability benchmark results for one-node Jira Service Management DC ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -1081,8 +1078,7 @@ In case if index synchronization is failed by some reason (e.g. 
application stat ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -1107,8 +1103,7 @@ To receive scalability benchmark results for four-node Jira Service Management D ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md index 255f6f86a..5f6693251 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2023-07-06" +date: "2023-08-15" --- # Data Center App Performance Toolkit User Guide For Jira Service Management @@ -92,7 +92,7 @@ Below process describes how to install low-tier Jira Service Management DC with - Make sure that the Jira Service Management version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 6. 
From local terminal (Git bash terminal for Windows) start the installation (~20 min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt-small.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -389,7 +389,7 @@ Below process describes how to install enterprise-scale Jira Service Management - Make sure that the Jira Service Management version specified in **jira_version_tag** is consistent with the EBS and RDS snapshot versions. Additionally, ensure that corresponding version snapshot lines are uncommented. 6. From local terminal (Git bash terminal for Windows) start the installation (~40min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -438,7 +438,7 @@ Instead, set those values directly in `.yml` file on execution environment insta 1. Push your changes to the forked repository. 1. [Launch AWS EC2 instance](https://console.aws.amazon.com/ec2/). - * OS: select from Quick Start `Ubuntu Server 20.04 LTS`. + * OS: select from Quick Start `Ubuntu Server 22.04 LTS`. * Instance type: [`c5.2xlarge`](https://aws.amazon.com/ec2/instance-types/c5/) * Storage size: `30` GiB 1. Connect to the instance using [SSH](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) or the [AWS Systems Manager Sessions Manager](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager.html). 
@@ -480,8 +480,7 @@ To receive performance baseline results **without** an app installed: ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder: @@ -529,8 +528,7 @@ Re-index information window is displayed on the **Indexing page**. If the window ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -588,8 +586,7 @@ To receive scalability benchmark results for one-node Jira Service Management DC ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -610,7 +607,7 @@ To receive scalability benchmark results for two-node Jira Service Management DC 2. Open `dcapt.tfvars` file and set `jira_replica_count` value to `2`. 3. 
From local terminal (Git bash terminal for Windows) start scaling (~20 min): ``` bash - docker run --env-file aws_envs \ + docker run --pull=always --env-file aws_envs \ -v "$PWD/dcapt.tfvars:/data-center-terraform/config.tfvars" \ -v "$PWD/.terraform:/data-center-terraform/.terraform" \ -v "$PWD/logs:/data-center-terraform/logs" \ @@ -621,8 +618,7 @@ To receive scalability benchmark results for two-node Jira Service Management DC ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} @@ -644,8 +640,7 @@ To receive scalability benchmark results for four-node Jira Service Management D ``` bash cd dc-app-performance-toolkit - docker pull atlassian/dcapt - docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml + docker run --pull=always --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jsm.yml ``` {{% note %}} From 4e8224aef55062c5cc8f752bdd9314047d7419d5 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Wed, 16 Aug 2023 13:18:49 +0200 Subject: [PATCH 35/37] Release 7.5.0 --- app/bamboo.yml | 2 +- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/crowd.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index ff6c8e26d..32f0ef018 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. 
These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for JMeter load executor view_all_builds: 15 view_build_result: 15 diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 87bb74395..a01519649 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: - module: shellexec prepare: diff --git a/app/confluence.yml b/app/confluence.yml index dbc5fc68e..54403ab72 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
# Action percentage for JMeter and Locust load executors extended_metrics: False view_page: 33 diff --git a/app/crowd.yml b/app/crowd.yml index 056f02626..4493a48de 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -32,7 +32,7 @@ settings: JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: - module: shellexec prepare: diff --git a/app/jira.yml b/app/jira.yml index 02f8b2ecc..1dc2d0e9d 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors create_issue: 4 search_jql: 11 diff --git a/app/jsm.yml b/app/jsm.yml index d6c7cb2d4..db0c188dc 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -23,7 +23,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
+ allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors agent_browse_projects: 10 agent_view_request: 24 From 5f546840248a1fc3a60894df5b0612d0cbb5c95a Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Wed, 16 Aug 2023 13:26:21 +0200 Subject: [PATCH 36/37] Revert allow_analytics --- app/bamboo.yml | 2 +- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/crowd.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index 32f0ef018..ff6c8e26d 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for JMeter load executor view_all_builds: 15 view_build_result: 15 diff --git a/app/bitbucket.yml b/app/bitbucket.yml index a01519649..87bb74395 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. 
These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: - module: shellexec prepare: diff --git a/app/confluence.yml b/app/confluence.yml index 54403ab72..dbc5fc68e 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for JMeter and Locust load executors extended_metrics: False view_page: 33 diff --git a/app/crowd.yml b/app/crowd.yml index 4493a48de..056f02626 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -32,7 +32,7 @@ settings: JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: - module: shellexec prepare: diff --git a/app/jira.yml b/app/jira.yml index 1dc2d0e9d..02f8b2ecc 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. 
These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors create_issue: 4 search_jql: 11 diff --git a/app/jsm.yml b/app/jsm.yml index db0c188dc..d6c7cb2d4 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -23,7 +23,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors agent_browse_projects: 10 agent_view_request: 24 From 1405557d178d6bfd2fb9bdadd48cc27d98d976e6 Mon Sep 17 00:00:00 2001 From: Oleksandr Metelytsia Date: Wed, 16 Aug 2023 13:27:38 +0200 Subject: [PATCH 37/37] Release 7.5.0 --- app/bamboo.yml | 2 +- app/bitbucket.yml | 2 +- app/confluence.yml | 2 +- app/crowd.yml | 2 +- app/jira.yml | 2 +- app/jsm.yml | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/app/bamboo.yml b/app/bamboo.yml index ff6c8e26d..32f0ef018 100644 --- a/app/bamboo.yml +++ b/app/bamboo.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. 
For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for JMeter load executor view_all_builds: 15 view_build_result: 15 diff --git a/app/bitbucket.yml b/app/bitbucket.yml index 87bb74395..a01519649 100644 --- a/app/bitbucket.yml +++ b/app/bitbucket.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: - module: shellexec prepare: diff --git a/app/confluence.yml b/app/confluence.yml index dbc5fc68e..54403ab72 100644 --- a/app/confluence.yml +++ b/app/confluence.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
# Action percentage for JMeter and Locust load executors extended_metrics: False view_page: 33 diff --git a/app/crowd.yml b/app/crowd.yml index 056f02626..4493a48de 100644 --- a/app/crowd.yml +++ b/app/crowd.yml @@ -32,7 +32,7 @@ settings: JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. services: - module: shellexec prepare: diff --git a/app/jira.yml b/app/jira.yml index 02f8b2ecc..1dc2d0e9d 100644 --- a/app/jira.yml +++ b/app/jira.yml @@ -20,7 +20,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. + allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors create_issue: 4 search_jql: 11 diff --git a/app/jsm.yml b/app/jsm.yml index d6c7cb2d4..db0c188dc 100644 --- a/app/jsm.yml +++ b/app/jsm.yml @@ -23,7 +23,7 @@ settings: WEBDRIVER_VISIBLE: False JMETER_VERSION: 5.5 LANGUAGE: en_US.utf8 - allow_analytics: No # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. 
+ allow_analytics: Yes # Allow sending basic run analytics to Atlassian. These analytics help us to understand how the tool is being used and help us to continue to invest in this tooling. For more details please see our README. # Action percentage for Jmeter and Locust load executors agent_browse_projects: 10 agent_view_request: 24