diff --git a/Dockerfile b/Dockerfile index 848564930..fb016ad96 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ # bzt run: docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml # interactive run: docker run -it --entrypoint="/bin/bash" -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt -FROM blazemeter/taurus:1.16.4 +FROM blazemeter/taurus:1.16.3 ENV APT_INSTALL="apt-get -y install --no-install-recommends" diff --git a/app/util/analytics/log_reader.py b/app/util/analytics/log_reader.py index 9f752443e..130d1744a 100644 --- a/app/util/analytics/log_reader.py +++ b/app/util/analytics/log_reader.py @@ -132,9 +132,12 @@ def __init__(self): def get_results_log(self): lines = [] - with open(self.results_log_path, 'r') as res_file: - for line in csv.DictReader(res_file): - lines.append(line) + if os.path.exists(self.results_log_path) and os.path.getsize(self.results_log_path) > 0: + with open(self.results_log_path, 'r') as res_file: + for line in csv.DictReader(res_file): + lines.append(line) + else: + raise SystemExit(f"ERROR: file {self.results_log_path} does not exist or empty.") headers_list = list(lines[0].keys()) self.validate_headers(headers_list, self.header_validation) self.validate_file_not_empty(lines) diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml index 5242c2fcd..eba3a8c27 100644 --- a/app/util/bamboo/bamboo_dataset_generator/pom.xml +++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml @@ -72,7 +72,7 @@ com.fasterxml.jackson.core jackson-databind - 2.12.4 + 2.12.6.1 com.jayway.jsonpath @@ -115,4 +115,4 @@ https://packages.atlassian.com/mvn/maven-external/ - \ No newline at end of file + diff --git a/app/util/conf.py b/app/util/conf.py index e6d64acd0..e4f319ad9 100644 --- a/app/util/conf.py +++ b/app/util/conf.py @@ -2,8 +2,8 @@ from util.project_paths import JIRA_YML, CONFLUENCE_YML, BITBUCKET_YML, JSM_YML, CROWD_YML, BAMBOO_YML -TOOLKIT_VERSION 
= '6.2.0' -UNSUPPORTED_VERSION = '4.2.0' +TOOLKIT_VERSION = '6.2.1' +UNSUPPORTED_VERSION = '5.0.0' def read_yml_file(file): diff --git a/app/util/jira/index-sync.sh b/app/util/jira/index-sync.sh index c15eedb31..597289e7e 100644 --- a/app/util/jira/index-sync.sh +++ b/app/util/jira/index-sync.sh @@ -36,9 +36,5 @@ function find_word_in_log() { fi } -find_word_in_log "Index restore started" -find_word_in_log "indexes - 60%" -find_word_in_log "indexes - 80%" -find_word_in_log "indexes - 100%" -find_word_in_log "Index restore complete" +find_word_in_log "Index restore complete\|Done recovering indexes from snapshot found in shared home" echo "DCAPT util script execution is finished successfully." diff --git a/app/util/jtl_convertor/jtls-to-csv.py b/app/util/jtl_convertor/jtls-to-csv.py index 17d6299ff..d3cbaa4a1 100644 --- a/app/util/jtl_convertor/jtls-to-csv.py +++ b/app/util/jtl_convertor/jtls-to-csv.py @@ -64,11 +64,11 @@ def __reset_file_stream(stream: IO) -> None: def __convert_jtl_to_csv(input_file_path: Path, output_file_path: Path, default_test_actions: list) -> None: if not input_file_path.exists(): - raise SystemExit(f'Input file {output_file_path} does not exist') + raise SystemExit(f'ERROR: Input file {output_file_path} does not exist') start = time.time() convert_to_csv(output_csv=output_file_path, input_jtl=input_file_path, default_test_actions=default_test_actions) if not output_file_path.exists(): - raise SystemExit(f'Something went wrong. Output file {output_file_path} does not exist') + raise SystemExit(f'ERROR: Something went wrong. Output file {output_file_path} does not exist') print(f'Created file {output_file_path}. Converted from jtl to csv in {time.time() - start} ') @@ -99,7 +99,7 @@ def __create_results_csv(csv_list: List[Path], results_file_path: Path) -> None: __read_csv_without_first_line(results_file_stream, temp_csv_path) if not results_file_path.exists(): - raise SystemExit(f'Something went wrong. 
Output file {results_file_path} does not exist') + raise SystemExit(f'ERROR: Something went wrong. Output file {results_file_path} does not exist') print(f'Created file {results_file_path}') @@ -108,15 +108,23 @@ def __validate_file_names(file_names: List[str]): for file_name in file_names: if '.' not in file_name: - raise SystemExit(f'File name {file_name} does not have extension') + raise SystemExit(f'ERROR: File name {file_name} does not have extension') file_name_without_extension = __get_file_name_without_extension(file_name) if file_name_without_extension in file_names_set: - raise SystemExit(f'Duplicated file name {file_name_without_extension}') + raise SystemExit(f'ERROR: Duplicated file name {file_name_without_extension}') file_names_set.add(file_name_without_extension) +def __validate_file_length(file_names: List[str]): + for file_name in file_names: + lines_count = sum(1 for _ in open(ENV_TAURUS_ARTIFACT_DIR / file_name)) + if lines_count <= 1: + raise SystemExit(f'ERROR: File {ENV_TAURUS_ARTIFACT_DIR / file_name} does not have content.\n' + f'See logs for detailed error: {ENV_TAURUS_ARTIFACT_DIR}') + + def __pathname_pattern_expansion(args: List[str]) -> List[str]: file_names: List[str] = [] for arg in args: @@ -177,6 +185,7 @@ def main(): args = sys.argv[1:] file_names = __pathname_pattern_expansion(args) __validate_file_names(file_names) + __validate_file_length(file_names) with tempfile.TemporaryDirectory() as tmp_dir: temp_csv_list: List[Path] = [] diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md index 2f1a999a4..9a2206670 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-jira.md +++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2022-05-06" +date: "2022-05-16" --- # Data Center App Performance Toolkit User Guide For Jira @@ -743,7 +743,7 @@ Review 
`results_summary.log` file under artifacts dir location. Make sure that o If you are submitting a Jira app, you are required to conduct a Lucene Index timing test. This involves conducting a foreground re-index on a single-node Data Center deployment (with your app installed) and a dataset that has 1M issues. {{% note %}} -Jira 7 index time for 1M issues on a User Guide [recommended configuration](#quick-start-parameters) is about ~100 min, Jira 8 index time is about ~30 min. +Jira 8 index time is about ~30 min. {{% /note %}} {{% note %}} @@ -844,10 +844,10 @@ The same article has instructions on how to increase limit if needed. To receive scalability benchmark results for two-node Jira DC **with** app-specific actions: 1. In the AWS console, go to **CloudFormation** > **Stack details** > **Select your stack**. -1. On the **Update** tab, select **Use current template**, and then click **Next**. -1. Enter `2` in the **Maximum number of cluster nodes** and the **Minimum number of cluster nodes** fields. -1. Click **Next** > **Next** > **Update stack** and wait until stack is updated. -1. Make sure that Jira index successfully synchronized to the second node. To do that, use SSH to connect to the second node via Bastion (where `NODE_IP` is the IP of the second node): +2. On the **Update** tab, select **Use current template**, and then click **Next**. +3. Enter `2` in the **Maximum number of cluster nodes** and the **Minimum number of cluster nodes** fields. +4. Click **Next** > **Next** > **Update stack** and wait until stack is updated. +5. Make sure that Jira index successfully synchronized to the second node. 
To do that, use SSH to connect to the second node via Bastion (where `NODE_IP` is the IP of the second node): ```bash ssh-add path_to_your_private_key_pem @@ -857,25 +857,22 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci export SSH_OPTS2='-o ServerAliveCountMax=30' ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP} ``` -1. Once you're in the second node, download the [index-sync.sh](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/index-sync.sh) file. Then, make it executable and run it: +6. Once you're in the second node, download the [index-sync.sh](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/index-sync.sh) file. Then, make it executable and run it: ```bash wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/index-sync.sh && chmod +x index-sync.sh ./index-sync.sh 2>&1 | tee -a index-sync.log ``` - Index synchronizing time is about 5-10 minutes. When index synchronizing is successfully completed, the following lines will be displayed in console output: - ```bash - Index restore started - indexes - 60% - indexes - 80% - indexes - 100% - Index restore complete - ``` -{{% note %}} -If index synchronization is failed by some reason, you can manually copy index from the first node. To do it, login to the second node (use private browser window and check footer information to see which node is current), go to **System** > **Indexing**. In the **Copy the Search Index from another node**, choose the source node (first node) and the target node (current node). The index will copied from one instance to another. -{{% /note %}} + Index synchronizing time is about 5-10 minutes. -1. 
Run toolkit with docker from the execution environment instance:
+    {{% note %}}
+    Make sure **System** > **Clustering** page has expected number of nodes with node status `ACTIVE` and application status `RUNNING`.
+
+    If index synchronization fails for some reason (e.g. application status is `MAINTENANCE`), you can manually copy index from the first node. To do it, login to the second node (use private browser window and check footer information to see which node is current), go to **System** > **Indexing**. In the **Copy the Search Index from another node**, choose the source node (first node) and the target node (current node). The index will be copied from one instance to another.
+    {{% /note %}}
+
+
+7. Run toolkit with docker from the execution environment instance:
 
 ``` bash
 cd dc-app-performance-toolkit
@@ -963,3 +960,4 @@ Do not forget to attach performance testing results to your DCHELP ticket.
 ## Support
 In case of technical questions, issues or problems with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel.
+
diff --git a/requirements.txt b/requirements.txt
index 1fc426459..8f6148ec0 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,4 @@ selenium==4.1.3
 filelock==3.6.0
 packaging==21.3
 prettytable==3.2.0
-bzt==1.16.4
+bzt==1.16.3 # make sure Dockerfile bzt version is the same