Merge pull request #176 from atlassian/release/2.0.0

Release 2.0.0

ometelytsia committed Apr 7, 2020
2 parents 8583203 + d5d5a47 commit 26cba68

Showing 42 changed files with 1,994 additions and 771 deletions.
13 changes: 6 additions & 7 deletions README.md

@@ -3,19 +3,18 @@ The Data Center App Performance Toolkit extends [Taurus](https://gettaurus.org/)
 
 This repository contains Taurus scripts for performance testing of Atlassian Data Center products: Jira, Confluence, and Bitbucket.
 
-At the moment, Jira DC, Confluence DC and Bitbucket DC support is in beta.
-
 ## Supported versions
 * Supported Jira versions:
-    * The latest Platform Release: 8.0.3
-    * The following Jira [Enterprise Releases](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): 7.13.6 and 8.5.0
+    * Jira [Enterprise Releases](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): 7.13.6 and 8.5.0
+    * Jira Platform Release: 8.0.3
 
 * Supported Confluence versions:
-    * The latest Confluence [Enterprise Release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): 6.13.8
-    * The latest Confluence Platform Release: 7.0.4
+    * Confluence [Enterprise Release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): 6.13.8
+    * Confluence Platform Release: 7.0.4
 
 * Supported Bitbucket Server versions:
-    * The latest Bitbucket Server [Enterprise Release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): 6.10.0
+    * Bitbucket Server [Enterprise Release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): 6.10.0
+    * Bitbucket Server Platform Release: 7.0.0
 
 ## Support
 In case of technical questions, issues or problems with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel.
5 changes: 4 additions & 1 deletion app/bitbucket.yml

@@ -19,11 +19,14 @@ services:
   - module: shellexec
     prepare:
       - python util/environment_checker.py
+      - python util/git_client_check.py
       - python util/data_preparation/bitbucket/prepare-data.py
     shutdown:
+      - python util/jmeter_post_check.py
      - python util/jtl_convertor/jtls-to-csv.py kpi.jtl selenium.jtl
     post-process:
       - python util/analytics.py bitbucket
+      - python util/cleanup_results_dir.py
 execution:
   - scenario: jmeter
     concurrency: ${concurrency}
@@ -78,7 +81,7 @@ modules:
       httpsampler.ignore_failed_embedded_resources: "true"
   selenium:
     chromedriver:
-      version: "80.0.3987.16" # Supports Chrome version 80. You can refer to http://chromedriver.chromium.org/downloads
+      version: "80.0.3987.106" # Supports Chrome version 80. You can refer to http://chromedriver.chromium.org/downloads
 reporting:
   - data-source: sample-labels
     module: junit-xml
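
The new `util/git_client_check.py` prepare step is referenced here, but its body is not part of the visible diff. A minimal sketch of what such a check could look like, assuming it only verifies that a `git` client is callable (a plausible prerequisite, since the Bitbucket scenarios drive git operations); the real script's behaviour and messages are unknown:

```python
# Hypothetical sketch of util/git_client_check.py; the actual script is not
# shown in this commit, so the behaviour and messages here are assumptions.
import subprocess
import sys


def main():
    try:
        version = subprocess.run(['git', '--version'], check=True,
                                 capture_output=True, text=True).stdout.strip()
    except (FileNotFoundError, subprocess.CalledProcessError):
        sys.exit('Git client check failed: install git and make sure it is on PATH.')
    print(f'Git client check passed: {version}')


if __name__ == '__main__':
    main()
```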
4 changes: 3 additions & 1 deletion app/confluence.yml

@@ -21,9 +21,11 @@ services:
       - python util/environment_checker.py
       - python util/data_preparation/confluence/prepare-data.py
     shutdown:
+      - python util/jmeter_post_check.py
       - python util/jtl_convertor/jtls-to-csv.py kpi.jtl selenium.jtl
     post-process:
       - python util/analytics.py confluence
+      - python util/cleanup_results_dir.py
 execution:
   - scenario: jmeter
     concurrency: ${concurrency}
@@ -86,7 +88,7 @@ modules:
      httpsampler.ignore_failed_embedded_resources: "true"
   selenium:
     chromedriver:
-      version: "80.0.3987.16" # Supports Chrome version 80. You can refer to http://chromedriver.chromium.org/downloads
+      version: "80.0.3987.106" # Supports Chrome version 80. You can refer to http://chromedriver.chromium.org/downloads
 reporting:
   - data-source: sample-labels
     module: junit-xml
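
`util/jmeter_post_check.py` now runs at shutdown before `jtls-to-csv.py` in all three product configs. A speculative sketch, assuming it simply verifies that JMeter actually produced a non-empty `kpi.jtl` (the file name matches the arguments passed to `jtls-to-csv.py`; the real script may check more):

```python
# Speculative sketch of util/jmeter_post_check.py; the path and behaviour
# are assumptions based only on the jtls-to-csv.py arguments in the yml files.
import sys
from pathlib import Path

KPI_JTL = Path('kpi.jtl')  # assumed location


def main():
    if not KPI_JTL.exists() or KPI_JTL.stat().st_size == 0:
        sys.exit(f'JMeter post check failed: {KPI_JTL} is missing or empty.')
    print(f'JMeter post check passed: {KPI_JTL} is present.')


if __name__ == '__main__':
    main()
```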
16 changes: 6 additions & 10 deletions app/extension/bitbucket/extension_ui.py

@@ -1,29 +1,25 @@
 from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as ec
-
-from selenium_ui.confluence.modules import _wait_until
 from selenium_ui.conftest import print_timing
 from util.conf import BITBUCKET_SETTINGS
-
-APPLICATION_URL = BITBUCKET_SETTINGS.server_url
-timeout = 20
+from selenium_ui.base_page import BasePage
 
 
 def custom_action(webdriver, datasets):
+    page = BasePage(webdriver)
     @print_timing
     def measure(webdriver, interaction):
         @print_timing
         def measure(webdriver, interaction):
-            webdriver.get(f'{APPLICATION_URL}/plugins/servlet/some-app/reporter')
-            _wait_until(webdriver, ec.visibility_of_element_located((By.ID, "plugin-element")), interaction)
+            page.go_to_url(f"{BITBUCKET_SETTINGS.server_url}/plugin/report")
+            page.wait_until_visible((By.ID, 'report_app_element_id'), interaction)
         measure(webdriver, 'selenium_app_custom_action:view_report')
 
         @print_timing
         def measure(webdriver, interaction):
-            webdriver.get(f'{APPLICATION_URL}/plugins/servlet/some-app/administration')
-            _wait_until(webdriver, ec.visibility_of_element_located((By.ID, "plugin-element")), interaction)
+            page.go_to_url(f"{BITBUCKET_SETTINGS.server_url}/plugin/dashboard")
+            page.wait_until_visible((By.ID, 'dashboard_app_element_id'), interaction)
         measure(webdriver, 'selenium_app_custom_action:view_dashboard')
 
     measure(webdriver, 'selenium_app_custom_action')
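
All three `extension_ui.py` rewrites replace direct `webdriver.get()` calls and the `_wait_until` helper with a shared `BasePage`. A rough sketch of the two helpers used here, assuming they wrap standard Selenium explicit waits; the real `selenium_ui/base_page.py` is not included in this diff and may differ:

```python
# Rough sketch of the BasePage helpers called by the custom actions; the
# 20-second timeout mirrors the deleted module-level value and is an assumption.
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec

DEFAULT_TIMEOUT = 20


class BasePage:
    def __init__(self, driver):
        self.driver = driver

    def go_to_url(self, url):
        # Plain navigation, like the webdriver.get() call it replaces.
        self.driver.get(url)

    def wait_until_visible(self, locator, interaction=None):
        # Same effect as the old _wait_until(ec.visibility_of_element_located(...)).
        return WebDriverWait(self.driver, DEFAULT_TIMEOUT).until(
            ec.visibility_of_element_located(locator),
            message=f'{interaction}: element {locator} is not visible')
```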
16 changes: 6 additions & 10 deletions app/extension/confluence/extension_ui.py

@@ -1,29 +1,25 @@
 from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as ec
-
-from selenium_ui.confluence.modules import _wait_until
 from selenium_ui.conftest import print_timing
 from util.conf import CONFLUENCE_SETTINGS
-
-APPLICATION_URL = CONFLUENCE_SETTINGS.server_url
-timeout = 20
+from selenium_ui.base_page import BasePage
 
 
 def custom_action(webdriver, datasets):
+    page = BasePage(webdriver)
     @print_timing
     def measure(webdriver, interaction):
         @print_timing
         def measure(webdriver, interaction):
-            webdriver.get(f'{APPLICATION_URL}/plugins/servlet/some-app/reporter')
-            _wait_until(webdriver, ec.visibility_of_element_located((By.ID, "plugin-element")), interaction)
+            page.go_to_url(f"{CONFLUENCE_SETTINGS.server_url}/plugin/report")
+            page.wait_until_visible((By.ID, 'report_app_element_id'), interaction)
         measure(webdriver, 'selenium_app_custom_action:view_report')
 
         @print_timing
         def measure(webdriver, interaction):
-            webdriver.get(f'{APPLICATION_URL}/plugins/servlet/some-app/administration')
-            _wait_until(webdriver, ec.visibility_of_element_located((By.ID, "plugin-element")), interaction)
+            page.go_to_url(f"{CONFLUENCE_SETTINGS.server_url}/plugin/dashboard")
+            page.wait_until_visible((By.ID, 'dashboard_app_element_id'), interaction)
         measure(webdriver, 'selenium_app_custom_action:view_dashboard')
 
     measure(webdriver, 'selenium_app_custom_action')
10 changes: 6 additions & 4 deletions app/extension/jira/examples/drawio/extension_ui.py

@@ -1,19 +1,21 @@
 from selenium.webdriver.common.by import By
 from selenium.webdriver.support.wait import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
-from jira.selenium_ui.conftest import print_timing, application_url
-
-APPLICATION_URL = application_url()
+from selenium.webdriver.support import expected_conditions as ec
+from app.selenium_ui.conftest import print_timing
+from util.conf import JIRA_SETTINGS
+
+APPLICATION_URL = JIRA_SETTINGS.server_url
 timeout = 20
 
 
 # This should be called after an issue is viewed.
 def custom_action(webdriver, datasets):
     # Click more
     webdriver.find_element_by_id('opsbar-operations_more').click()
 
     @print_timing
     def measure(webdriver, interaction):
         # Click to add a diagram (opens the drawio editor)
         webdriver.find_element_by_id('drawio-add-menu-item').click()
-        WebDriverWait(webdriver, timeout).until(EC.frame_to_be_available_and_switch_to_it((By.ID, 'drawioEditor')))
+        WebDriverWait(webdriver, timeout).until(ec.frame_to_be_available_and_switch_to_it((By.ID, 'drawioEditor')))
     measure(webdriver, 'selenium_open_drawio_editor')
16 changes: 6 additions & 10 deletions app/extension/jira/extension_ui.py

@@ -1,29 +1,25 @@
 from selenium.webdriver.common.by import By
-from selenium.webdriver.support import expected_conditions as ec
-
 from selenium_ui.conftest import print_timing
-from selenium_ui.jira.modules import _wait_until
 from util.conf import JIRA_SETTINGS
-
-APPLICATION_URL = JIRA_SETTINGS.server_url
-timeout = 20
+from selenium_ui.base_page import BasePage
 
 
 def custom_action(webdriver, datasets):
+    page = BasePage(webdriver)
     @print_timing
     def measure(webdriver, interaction):
         @print_timing
         def measure(webdriver, interaction):
-            webdriver.get(f'{APPLICATION_URL}/plugins/servlet/some-app/reporter')
-            _wait_until(webdriver, ec.visibility_of_element_located((By.ID, "plugin-element")), interaction)
+            page.go_to_url(f"{JIRA_SETTINGS.server_url}/plugin/report")
+            page.wait_until_visible((By.ID, 'report_app_element_id'), interaction)
         measure(webdriver, 'selenium_app_custom_action:view_report')
 
         @print_timing
         def measure(webdriver, interaction):
-            webdriver.get(f'{APPLICATION_URL}/plugins/servlet/some-app/administration')
-            _wait_until(webdriver, ec.visibility_of_element_located((By.ID, "plugin-dashboard")), interaction)
+            page.go_to_url(f"{JIRA_SETTINGS.server_url}/plugin/dashboard")
+            page.wait_until_visible((By.ID, 'dashboard_app_element_id'), interaction)
         measure(webdriver, 'selenium_app_custom_action:view_dashboard')
 
     measure(webdriver, 'selenium_app_custom_action')
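
The nested `measure` functions in these custom actions all hang off the `print_timing` decorator imported from `selenium_ui/conftest.py`. A toy illustration of the pattern, assuming the decorator times the wrapped interaction; the toolkit's real implementation also feeds the results pipeline, which this sketch omits:

```python
# Illustrative print_timing look-alike; not the toolkit's actual decorator.
import time
from functools import wraps


def print_timing(func):
    @wraps(func)
    def wrapper(webdriver, interaction):
        start = time.monotonic()
        func(webdriver, interaction)
        print(f'{interaction}: {time.monotonic() - start:.2f} s')
    return wrapper
```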
4 changes: 3 additions & 1 deletion app/jira.yml

@@ -21,9 +21,11 @@ services:
       - python util/environment_checker.py
       - python util/data_preparation/jira/prepare-data.py
     shutdown:
+      - python util/jmeter_post_check.py
       - python util/jtl_convertor/jtls-to-csv.py kpi.jtl selenium.jtl
     post-process:
       - python util/analytics.py jira
+      - python util/cleanup_results_dir.py
 execution:
   - scenario: jmeter
     concurrency: ${concurrency}
@@ -88,7 +90,7 @@ modules:
       httpsampler.ignore_failed_embedded_resources: "true"
   selenium:
     chromedriver:
-      version: "80.0.3987.16" # Supports Chrome version 80. You can refer to http://chromedriver.chromium.org/downloads
+      version: "80.0.3987.106" # Supports Chrome version 80. You can refer to http://chromedriver.chromium.org/downloads
 reporting:
   - data-source: sample-labels
     module: junit-xml
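
`util/cleanup_results_dir.py` is added to the post-process stage of all three product configs; its body is likewise not visible here. A speculative sketch, assuming it prunes bulky raw artifacts once the CSV conversion has run:

```python
# Speculative sketch of util/cleanup_results_dir.py; the real script's
# target directory and retention policy are not shown in this commit.
from pathlib import Path

RESULTS_DIR = Path('results')  # assumed artifacts location


def main():
    # Drop raw .jtl files once jtls-to-csv.py has produced the CSV outputs.
    for jtl in RESULTS_DIR.rglob('*.jtl'):
        jtl.unlink()
        print(f'Removed {jtl}')


if __name__ == '__main__':
    main()
```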
4 changes: 2 additions & 2 deletions app/reports_generation/README.md

@@ -1,12 +1,12 @@
-## Reports generator - a tool that creates an aggregated .csv file and chart from multiple run results.
+## Reports generator - a tool that creates an aggregated .csv file, chart and summary report from multiple run results.
 Before you start, make sure you have installed Python packages from [requirements.txt](../../requirements.txt).
 
 Otherwise, run the `pip install -r requirements.txt` command from DCAPT [root](../..) directory to install necessary packages to your virtual environment.
 
 To create reports, run the <br>
 `python csv_chart_generator.py [performance_profile.yml or scale_profile.yml]` command from the `reports_generation` folder.
 
-The aggregated .csv files and charts are stored in the `results/reports` directory.
+The aggregated .csv files, charts and summary report are stored in the `results/reports` directory.
 Before run, you should edit `performance_profile.yml` or `scale_profile.yml` and set appropriate `fullPath` values.
 
 **Configuration**
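
For reference, a hypothetical profile fragment consistent with the keys the report scripts validate (`column_name`, plus `runs` entries with `runName` and `fullPath`); the names and paths are placeholders, and the shipped profiles may carry additional settings:

```yaml
column_name: "90% Line"   # aggregated KPI column; assumed value
runs:
  - runName: "Jira without app"
    fullPath: "/abs/path/to/app/results/jira/run_1"
  - runName: "Jira with app installed"
    fullPath: "/abs/path/to/app/results/jira/run_2"
```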
9 changes: 5 additions & 4 deletions app/reports_generation/csv_chart_generator.py

@@ -1,16 +1,17 @@
 import datetime
 from pathlib import Path
 
-from scripts import config_provider, csv_aggregator, chart_generator
+from scripts import config_provider, csv_aggregator, chart_generator, summary_aggregator
 
 
 def main():
     results_dir = __get_results_dir()
 
-    csv_aggregator_config = config_provider.get_csv_aggregator_config()
-    agg_csv = csv_aggregator.aggregate(csv_aggregator_config, results_dir)
-    chart_generator_config = config_provider.get_chart_generator_config(csv_aggregator_config, agg_csv)
+    config = config_provider.get_config()
+    agg_csv = csv_aggregator.aggregate(config, results_dir)
+    chart_generator_config = config_provider.get_chart_generator_config(config, agg_csv)
     chart_generator.perform_chart_creation(chart_generator_config, results_dir)
+    summary_aggregator.aggregate(config, results_dir)
 
 
 def __get_results_dir() -> Path:
2 changes: 1 addition & 1 deletion app/reports_generation/scripts/config_provider.py

@@ -4,7 +4,7 @@
 import yaml
 
 
-def get_csv_aggregator_config() -> dict:
+def get_config() -> dict:
     config_path = resolve_file_path(__get_config_file())
     config = __read_config_file(config_path)
     config['profile'] = config_path.stem
76 changes: 76 additions & 0 deletions app/reports_generation/scripts/summary_aggregator.py

@@ -0,0 +1,76 @@
+from pathlib import Path
+from typing import List
+
+from scripts.utils import validate_str_is_not_blank, validate_file_exists, resolve_path
+
+SUMMARY_FILE_NAME = "results_summary.log"
+DELIMITER = ('\n================================================================================'
+             '========================================\n')
+
+
+def __validate_config(config: dict):
+    validate_str_is_not_blank(config, 'column_name')
+    validate_str_is_not_blank(config, 'profile')
+
+    runs = config.get('runs')
+    if not isinstance(runs, list):
+        raise SystemExit('Config key "runs" should be a list')
+
+    for run in runs:
+        if not isinstance(run, dict):
+            raise SystemExit('Config key "run" should be a dictionary')
+
+        validate_str_is_not_blank(run, 'runName')
+        validate_str_is_not_blank(run, 'fullPath')
+
+
+def __get_summary_files(config: dict) -> List[Path]:
+    summary_files = []
+    for run in config['runs']:
+        file_path = resolve_path(run['fullPath']) / SUMMARY_FILE_NAME
+        validate_file_exists(file_path, f"File {file_path} does not exist")
+        summary_files.append(file_path)
+    return summary_files
+
+
+def __get_run_names(config: dict) -> list:
+    run_names = []
+    for run in config['runs']:
+        run_names.append(run['runName'])
+    return run_names
+
+
+def __write_to_summary_report(file_names: List[Path], run_names: List, status: str, output_filename: Path):
+    with output_filename.open('a') as outfile:
+        outfile.write(f"Scenario status: {status}")
+        outfile.write(DELIMITER)
+        for file, run_name in zip(file_names, run_names):
+            with file.open('r') as infile:
+                outfile.write(f"Run name: {run_name}\n\n")
+                outfile.write(infile.read())
+                outfile.write(DELIMITER)
+
+
+def __get_output_file_path(config, results_dir) -> Path:
+    return results_dir / f"{config['profile']}_summary.log"
+
+
+def __get_overall_status(files: List[Path]) -> bool:
+    for file in files:
+        with file.open('r') as f:
+            first_line = f.readline()
+            if 'FAIL' in first_line:
+                return False
+    return True
+
+
+def aggregate(config: dict, results_dir: Path) -> Path:
+    __validate_config(config)
+    output_file_path = __get_output_file_path(config, results_dir)
+    summary_files = __get_summary_files(config)
+    run_names = __get_run_names(config)
+    status_message = 'OK' if __get_overall_status(summary_files) else 'FAIL'
+    __write_to_summary_report(summary_files, run_names, status_message, output_file_path)
+    validate_file_exists(output_file_path, f"Results file {output_file_path} is not created")
+    print(f'Results file {output_file_path.absolute()} is created')
+    return output_file_path
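
A minimal usage sketch, driving the new aggregator the same way `csv_chart_generator.py` now does; the results directory below is a placeholder:

```python
# Usage sketch for summary_aggregator.aggregate(); the directory is hypothetical.
from pathlib import Path

from scripts import config_provider, summary_aggregator

config = config_provider.get_config()  # reads the profile yml passed on the CLI
summary = summary_aggregator.aggregate(config, Path('results/reports/my_report'))
print(summary.read_text())
```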