Skip to content

Commit

Permalink
Merge pull request #836 from atlassian/release/6.2.1
Browse files Browse the repository at this point in the history
Release 6.2.1
  • Loading branch information
mariakami committed May 17, 2022
2 parents e5b19a5 + 0cd3455 commit a306099
Show file tree
Hide file tree
Showing 8 changed files with 44 additions and 38 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
# bzt run: docker run --shm-size=4g -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml
# interactive run: docker run -it --entrypoint="/bin/bash" -v "$PWD:/dc-app-performance-toolkit" atlassian/dcapt

FROM blazemeter/taurus:1.16.4
FROM blazemeter/taurus:1.16.3

ENV APT_INSTALL="apt-get -y install --no-install-recommends"

Expand Down
9 changes: 6 additions & 3 deletions app/util/analytics/log_reader.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,9 +132,12 @@ def __init__(self):

def get_results_log(self):
lines = []
with open(self.results_log_path, 'r') as res_file:
for line in csv.DictReader(res_file):
lines.append(line)
if os.path.exists(self.results_log_path) and os.path.getsize(self.results_log_path) > 0:
with open(self.results_log_path, 'r') as res_file:
for line in csv.DictReader(res_file):
lines.append(line)
else:
raise SystemExit(f"ERROR: file {self.results_log_path} does not exist or empty.")
headers_list = list(lines[0].keys())
self.validate_headers(headers_list, self.header_validation)
self.validate_file_not_empty(lines)
Expand Down
4 changes: 2 additions & 2 deletions app/util/bamboo/bamboo_dataset_generator/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
<version>2.12.4</version>
<version>2.12.6.1</version>
</dependency>
<dependency>
<groupId>com.jayway.jsonpath</groupId>
Expand Down Expand Up @@ -115,4 +115,4 @@
<url>https://packages.atlassian.com/mvn/maven-external/</url>
</pluginRepository>
</pluginRepositories>
</project>
</project>
4 changes: 2 additions & 2 deletions app/util/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,8 @@

from util.project_paths import JIRA_YML, CONFLUENCE_YML, BITBUCKET_YML, JSM_YML, CROWD_YML, BAMBOO_YML

TOOLKIT_VERSION = '6.2.0'
UNSUPPORTED_VERSION = '4.2.0'
TOOLKIT_VERSION = '6.2.1'
UNSUPPORTED_VERSION = '5.0.0'


def read_yml_file(file):
Expand Down
6 changes: 1 addition & 5 deletions app/util/jira/index-sync.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,5 @@ function find_word_in_log() {
fi
}

find_word_in_log "Index restore started"
find_word_in_log "indexes - 60%"
find_word_in_log "indexes - 80%"
find_word_in_log "indexes - 100%"
find_word_in_log "Index restore complete"
find_word_in_log "Index restore complete\|Done recovering indexes from snapshot found in shared home"
echo "DCAPT util script execution is finished successfully."
19 changes: 14 additions & 5 deletions app/util/jtl_convertor/jtls-to-csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,11 +64,11 @@ def __reset_file_stream(stream: IO) -> None:

def __convert_jtl_to_csv(input_file_path: Path, output_file_path: Path, default_test_actions: list) -> None:
if not input_file_path.exists():
raise SystemExit(f'Input file {output_file_path} does not exist')
raise SystemExit(f'ERROR: Input file {output_file_path} does not exist')
start = time.time()
convert_to_csv(output_csv=output_file_path, input_jtl=input_file_path, default_test_actions=default_test_actions)
if not output_file_path.exists():
raise SystemExit(f'Something went wrong. Output file {output_file_path} does not exist')
raise SystemExit(f'ERROR: Something went wrong. Output file {output_file_path} does not exist')

print(f'Created file {output_file_path}. Converted from jtl to csv in {time.time() - start} ')

Expand Down Expand Up @@ -99,7 +99,7 @@ def __create_results_csv(csv_list: List[Path], results_file_path: Path) -> None:
__read_csv_without_first_line(results_file_stream, temp_csv_path)

if not results_file_path.exists():
raise SystemExit(f'Something went wrong. Output file {results_file_path} does not exist')
raise SystemExit(f'ERROR: Something went wrong. Output file {results_file_path} does not exist')
print(f'Created file {results_file_path}')


Expand All @@ -108,15 +108,23 @@ def __validate_file_names(file_names: List[str]):

for file_name in file_names:
if '.' not in file_name:
raise SystemExit(f'File name {file_name} does not have extension')
raise SystemExit(f'ERROR: File name {file_name} does not have extension')

file_name_without_extension = __get_file_name_without_extension(file_name)
if file_name_without_extension in file_names_set:
raise SystemExit(f'Duplicated file name {file_name_without_extension}')
raise SystemExit(f'ERROR: Duplicated file name {file_name_without_extension}')

file_names_set.add(file_name_without_extension)


def __validate_file_length(file_names: List[str]):
    """Ensure every result file has content beyond its header line.

    Counts the lines of each file under ENV_TAURUS_ARTIFACT_DIR; a file with
    one line or fewer (header only, or completely empty) aborts the run so a
    broken test does not silently produce empty reports.

    :param file_names: result file names, relative to ENV_TAURUS_ARTIFACT_DIR.
    :raises SystemExit: if any file has no content rows.
    """
    for file_name in file_names:
        file_path = ENV_TAURUS_ARTIFACT_DIR / file_name
        # Use a context manager so the handle is closed deterministically;
        # the original bare open() inside sum() leaked the file object to GC.
        with open(file_path) as result_file:
            lines_count = sum(1 for _ in result_file)
        if lines_count <= 1:
            raise SystemExit(f'ERROR: File {ENV_TAURUS_ARTIFACT_DIR / file_name} does not have content.\n'
                             f'See logs for detailed error: {ENV_TAURUS_ARTIFACT_DIR}')


def __pathname_pattern_expansion(args: List[str]) -> List[str]:
file_names: List[str] = []
for arg in args:
Expand Down Expand Up @@ -177,6 +185,7 @@ def main():
args = sys.argv[1:]
file_names = __pathname_pattern_expansion(args)
__validate_file_names(file_names)
__validate_file_length(file_names)

with tempfile.TemporaryDirectory() as tmp_dir:
temp_csv_list: List[Path] = []
Expand Down
36 changes: 17 additions & 19 deletions docs/dc-apps-performance-toolkit-user-guide-jira.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ platform: platform
product: marketplace
category: devguide
subcategory: build
date: "2022-05-06"
date: "2022-05-16"
---
# Data Center App Performance Toolkit User Guide For Jira

Expand Down Expand Up @@ -743,7 +743,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
If you are submitting a Jira app, you are required to conduct a Lucene Index timing test. This involves conducting a foreground re-index on a single-node Data Center deployment (with your app installed) and a dataset that has 1M issues.

{{% note %}}
Jira 7 index time for 1M issues on a User Guide [recommended configuration](#quick-start-parameters) is about ~100 min, Jira 8 index time is about ~30 min.
Jira 8 index time is about ~30 min.
{{% /note %}}

{{% note %}}
Expand Down Expand Up @@ -844,10 +844,10 @@ The same article has instructions on how to increase limit if needed.
To receive scalability benchmark results for two-node Jira DC **with** app-specific actions:

1. In the AWS console, go to **CloudFormation** > **Stack details** > **Select your stack**.
1. On the **Update** tab, select **Use current template**, and then click **Next**.
1. Enter `2` in the **Maximum number of cluster nodes** and the **Minimum number of cluster nodes** fields.
1. Click **Next** > **Next** > **Update stack** and wait until stack is updated.
1. Make sure that Jira index successfully synchronized to the second node. To do that, use SSH to connect to the second node via Bastion (where `NODE_IP` is the IP of the second node):
2. On the **Update** tab, select **Use current template**, and then click **Next**.
3. Enter `2` in the **Maximum number of cluster nodes** and the **Minimum number of cluster nodes** fields.
4. Click **Next** > **Next** > **Update stack** and wait until stack is updated.
5. Make sure that Jira index successfully synchronized to the second node. To do that, use SSH to connect to the second node via Bastion (where `NODE_IP` is the IP of the second node):

```bash
ssh-add path_to_your_private_key_pem
Expand All @@ -857,25 +857,22 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci
export SSH_OPTS2='-o ServerAliveCountMax=30'
ssh ${SSH_OPTS1} ${SSH_OPTS2} -o "proxycommand ssh -W %h:%p ${SSH_OPTS1} ${SSH_OPTS2} ec2-user@${BASTION_IP}" ec2-user@${NODE_IP}
```
1. Once you're in the second node, download the [index-sync.sh](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/index-sync.sh) file. Then, make it executable and run it:
6. Once you're in the second node, download the [index-sync.sh](https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/index-sync.sh) file. Then, make it executable and run it:

```bash
wget https://raw.githubusercontent.com/atlassian/dc-app-performance-toolkit/master/app/util/jira/index-sync.sh && chmod +x index-sync.sh
./index-sync.sh 2>&1 | tee -a index-sync.log
```
Index synchronizing time is about 5-10 minutes. When index synchronizing is successfully completed, the following lines will be displayed in console output:
```bash
Index restore started
indexes - 60%
indexes - 80%
indexes - 100%
Index restore complete
```
{{% note %}}
If index synchronization is failed by some reason, you can manually copy index from the first node. To do it, login to the second node (use private browser window and check footer information to see which node is current), go to **System** > **Indexing**. In the **Copy the Search Index from another node**, choose the source node (first node) and the target node (current node). The index will copied from one instance to another.
{{% /note %}}
Index synchronizing time is about 5-10 minutes.

1. Run toolkit with docker from the execution environment instance:
{{% note %}}
Make sure **System** > **Clustering** page has expected number of nodes with node status `ACTIVE` and application status `RUNNING`.

If index synchronization fails for some reason (e.g. application status is `MAINTENANCE`), you can manually copy the index from the first node. To do it, log in to the second node (use a private browser window and check the footer information to see which node is current), go to **System** > **Indexing**. In the **Copy the Search Index from another node** section, choose the source node (first node) and the target node (current node). The index will be copied from one instance to another.
{{% /note %}}


7. Run toolkit with docker from the execution environment instance:

``` bash
cd dc-app-performance-toolkit
Expand Down Expand Up @@ -963,3 +960,4 @@ Do not forget to attach performance testing results to your DCHELP ticket.

## <a id="support"></a> Support
In case of technical questions, issues or problems with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel.

2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,4 +9,4 @@ selenium==4.1.3
filelock==3.6.0
packaging==21.3
prettytable==3.2.0
bzt==1.16.4
bzt==1.16.3 # make sure Dockerfile bzt version is the same

0 comments on commit a306099

Please sign in to comment.