diff --git a/README.md b/README.md
index 82df5131c..745341aac 100644
--- a/README.md
+++ b/README.md
@@ -5,22 +5,22 @@ This repository contains Taurus scripts for performance testing of Atlassian Dat
 
 ## Supported versions
 * Supported Jira versions:
-  * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.1` and `9.4.14`
+  * Jira [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `9.12.4` and `9.4.17`
 * Supported Jira Service Management versions:
-  * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.1` and `5.4.14`
+  * Jira Service Management [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `5.12.4` and `5.4.17`
 * Supported Confluence versions:
-  * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.5.4` and `7.19.17`
+  * Confluence [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.5.6` and `7.19.19`
 * Supported Bitbucket Server versions:
-  * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.8` and `7.21.20`
+  * Bitbucket Server [Long Term Support release](https://confluence.atlassian.com/enterprise/atlassian-enterprise-releases-948227420.html): `8.9.10` and `7.21.22`
 * Supported Crowd versions:
-  * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.2.2`
+  * Crowd [release notes](https://confluence.atlassian.com/crowd/crowd-release-notes-199094.html): `5.2.3`
 * Supported Bamboo versions:
-  * Bamboo [Long Term Support release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.9`
+  * Bamboo [Long Term Support release](https://confluence.atlassian.com/bamboo/bamboo-release-notes-671089224.html): `9.2.11`
 
 ## Support
 In case of technical questions, issues or problems with DC Apps Performance Toolkit, contact us for support in the [community Slack](http://bit.ly/dcapt_slack) **#data-center-app-performance-toolkit** channel.
diff --git a/app/bamboo.yml b/app/bamboo.yml
index b4dbfb335..3293bd8cb 100644
--- a/app/bamboo.yml
+++ b/app/bamboo.yml
@@ -51,7 +51,7 @@ services:
       - python util/post_run/cleanup_results_dir.py
   - module: pip-install
     packages:
-      - selenium==4.16.0
+      - selenium==4.18.1
 execution:
   - scenario: jmeter
     executor: jmeter
@@ -125,7 +125,7 @@ modules:
     httpsampler.ignore_failed_embedded_resources: "true"
   selenium:
     chromedriver:
-      version: "120.0.6099.109" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+      version: "122.0.6261.128" # Supports Chrome version 122. You can refer to https://googlechromelabs.github.io/chrome-for-testing
 reporting:
 - data-source: sample-labels
   module: junit-xml
diff --git a/app/bitbucket.yml b/app/bitbucket.yml
index 34bc62a23..1536c0c4b 100644
--- a/app/bitbucket.yml
+++ b/app/bitbucket.yml
@@ -37,7 +37,7 @@ services:
       - python util/post_run/cleanup_results_dir.py
   - module: pip-install
     packages:
-      - selenium==4.16.0
+      - selenium==4.18.1
 execution:
   - scenario: ${load_executor}
     concurrency: ${concurrency}
@@ -91,7 +91,7 @@ modules:
    httpsampler.ignore_failed_embedded_resources: "true"
  selenium:
    chromedriver:
-      version: "120.0.6099.109" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+      version: "122.0.6261.128" # Supports Chrome version 122. You can refer to https://googlechromelabs.github.io/chrome-for-testing
 reporting:
 - data-source: sample-labels
   module: junit-xml
diff --git a/app/confluence.yml b/app/confluence.yml
index 0624d14c8..865e6b4af 100644
--- a/app/confluence.yml
+++ b/app/confluence.yml
@@ -25,14 +25,14 @@ settings:
   extended_metrics: False
   # Action percentage for JMeter and Locust load executors
   view_page: 33
-  view_dashboard: 9
+  view_dashboard: 10
   view_blog: 13
-  search_cql: 10
-  create_blog: 4
-  create_and_edit_page: 8
-  comment_page: 7
-  view_attachment: 5
-  upload_attachment: 6
+  search_cql: 4
+  create_blog: 5
+  create_and_edit_page: 9
+  comment_page: 8
+  view_attachment: 6
+  upload_attachment: 7
   like_page: 3
   upload_emoticon: 2 # For Confluence 8.4.x+
   standalone_extension: 0 # By default disabled
@@ -52,7 +52,7 @@ services:
       - python util/post_run/cleanup_results_dir.py
   - module: pip-install
     packages:
-      - selenium==4.16.0
+      - selenium==4.18.1
 execution:
   - scenario: ${load_executor}
     executor: ${load_executor}
@@ -118,7 +118,7 @@ modules:
    httpsampler.ignore_failed_embedded_resources: "true"
  selenium:
    chromedriver:
-      version: "120.0.6099.109" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+      version: "122.0.6261.128" # Supports Chrome version 122. You can refer to https://googlechromelabs.github.io/chrome-for-testing
 reporting:
 - data-source: sample-labels
   module: junit-xml
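A note on the rebalanced Confluence action percentages above: the configured actions are weights that are expected to total 100, and the rebalance keeps that invariant (search_cql drops from 10 to 4 while the create/comment/attachment actions each gain a point). A quick, illustrative sanity check — the values are copied from the new confluence.yml, but the snippet itself is not part of the toolkit:

```python
# Illustrative check that the rebalanced confluence.yml action
# percentages still total 100 (values copied from the diff above).
percentages = {
    "view_page": 33, "view_dashboard": 10, "view_blog": 13,
    "search_cql": 4, "create_blog": 5, "create_and_edit_page": 9,
    "comment_page": 8, "view_attachment": 6, "upload_attachment": 7,
    "like_page": 3, "upload_emoticon": 2, "standalone_extension": 0,
}
assert sum(percentages.values()) == 100, sum(percentages.values())
```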
diff --git a/app/datasets/confluence/static-content/words.csv b/app/datasets/confluence/static-content/words.csv
new file mode 100644
index 000000000..6deadc638
--- /dev/null
+++ b/app/datasets/confluence/static-content/words.csv
@@ -0,0 +1,1004 @@
+jira
+confluence
+bitbucket
+crowd
+bamboo
+hello
+ability
+able
+about
+above
+accept
+according
+account
+across
+act
+action
+activity
+actually
+add
+address
+administration
+admit
+adult
+affect
+after
+again
+against
+age
+agency
+agent
+ago
+agree
+agreement
+ahead
+air
+all
+allow
+almost
+alone
+along
+already
+also
+although
+always
+American
+among
+amount
+analysis
+and
+animal
+another
+answer
+any
+anyone
+anything
+appear
+apply
+approach
+area
+argue
+arm
+around
+arrive
+art
+article
+artist
+as
+ask
+assume
+at
+attack
+attention
+attorney
+audience
+author
+authority
+available
+avoid
+away
+baby
+back
+bad
+bag
+ball
+bank
+bar
+base
+be
+beat
+beautiful
+because
+become
+bed
+before
+begin
+behavior
+behind
+believe
+benefit
+best
+better
+between
+beyond
+big
+bill
+billion
+bit
+black
+blood
+blue
+board
+body
+book
+born
+both
+box
+boy
+break
+bring
+brother
+budget
+build
+building
+business
+but
+buy
+by
+call
+camera
+campaign
+can
+cancer
+candidate
+capital
+car
+card
+care
+career
+carry
+case
+catch
+cause
+cell
+center
+central
+century
+certain
+certainly
+chair
+challenge
+chance
+change
+character
+charge
+check
+child
+choice
+choose
+church
+citizen
+city
+civil
+claim
+class
+clear
+clearly
+close
+coach
+cold
+collection
+college
+color
+come
+commercial
+common
+community
+company
+compare
+computer
+concern
+condition
+conference
+Congress
+consider
+consumer
+contain
+continue
+control
+cost
+could
+country
+couple
+course
+court
+cover
+create
+crime
+cultural
+culture
+cup
+current
+customer
+cut
+dark
+data
+daughter
+day
+dead
+deal
+death
+debate
+decade
+decide
+decision
+deep
+defense
+degree
+Democrat
+democratic
+describe
+design
+despite
+detail
+determine
+develop
+development
+die
+difference
+different
+difficult
+dinner
+direction
+director
+discover
+discuss
+discussion
+disease
+do
+doctor
+dog
+door
+down
+draw
+dream
+drive
+drop
+drug
+during
+each
+early
+east
+easy
+eat
+economic
+economy
+edge
+education
+effect
+effort
+eight
+either
+election
+else
+employee
+end
+energy
+enjoy
+enough
+enter
+entire
+environment
+environmental
+especially
+establish
+even
+evening
+event
+ever
+every
+everybody
+everyone
+everything
+evidence
+exactly
+example
+executive
+exist
+expect
+experience
+expert
+explain
+eye
+face
+fact
+factor
+fail
+fall
+family
+far
+fast
+father
+fear
+federal
+feel
+feeling
+few
+field
+fight
+figure
+fill
+film
+final
+finally
+financial
+find
+fine
+finger
+finish
+fire
+firm
+first
+fish
+five
+floor
+fly
+focus
+follow
+food
+foot
+for
+force
+foreign
+forget
+form
+former
+forward
+four
+free
+friend
+from
+front
+full
+fund
+future
+game
+garden
+gas
+general
+generation
+get
+girl
+give
+glass
+go
+goal
+good
+government
+great
+green
+ground
+group
+grow
+growth
+guess
+gun
+guy
+hair
+half
+hand
+hang
+happen
+happy
+hard
+have
+he
+head
+health
+hear
+heart
+heat
+heavy
+help
+her
+here
+herself
+high
+him
+himself
+his
+history
+hit
+hold
+home
+hope
+hospital
+hot
+hotel
+hour
+house
+how
+however
+huge
+human
+hundred
+husband
+I
+idea
+identify
+if
+image
+imagine
+impact
+important
+improve
+in
+include
+including
+increase
+indeed
+indicate
+individual
+industry
+information
+inside
+instead
+institution
+interest
+interesting
+international
+interview
+into
+investment
+involve
+issue
+it
+item
+its
+itself
+job
+join
+just
+keep
+key
+kid
+kill
+kind
+kitchen
+know
+knowledge
+land
+language
+large
+last
+late
+later
+laugh
+law
+lawyer
+lay
+lead
+leader
+learn
+least
+leave
+left
+leg
+legal
+less
+let
+letter
+level
+lie
+life
+light
+like
+likely
+line
+list
+listen
+little
+live
+local
+long
+look
+lose
+loss
+lot
+love
+low
+machine
+magazine
+main
+maintain
+major
+majority
+make
+man
+manage
+management
+manager
+many
+market
+marriage
+material
+matter
+may
+maybe
+me
+mean
+measure
+media
+medical
+meet
+meeting
+member
+memory
+mention
+message
+method
+middle
+might
+military
+million
+mind
+minute
+miss
+mission
+model
+modern
+moment
+money
+month
+more
+morning
+most
+mother
+mouth
+move
+movement
+movie
+Mr
+Mrs
+much
+music
+must
+my
+myself
+name
+nation
+national
+natural
+nature
+near
+nearly
+necessary
+need
+network
+never
+new
+news
+newspaper
+next
+nice
+night
+no
+none
+nor
+north
+not
+note
+nothing
+notice
+now
+number
+occur
+of
+off
+offer
+office
+officer
+official
+often
+oh
+oil
+ok
+old
+on
+once
+one
+only
+onto
+open
+operation
+opportunity
+option
+or
+order
+organization
+other
+others
+our
+out
+outside
+over
+own
+owner
+page
+pain
+painting
+paper
+parent
+part
+participant
+particular
+particularly
+partner
+party
+pass
+past
+patient
+pattern
+pay
+peace
+people
+per
+perform
+performance
+perhaps
+period
+person
+personal
+phone
+physical
+pick
+picture
+piece
+place
+plan
+plant
+play
+player
+PM
+point
+police
+policy
+political
+politics
+poor
+popular
+population
+position
+positive
+possible
+power
+practice
+prepare
+present
+president
+pressure
+pretty
+prevent
+price
+private
+probably
+problem
+process
+produce
+product
+production
+professional
+professor
+program
+project
+property
+protect
+prove
+provide
+public
+pull
+purpose
+push
+put
+quality
+question
+quickly
+quite
+race
+radio
+raise
+range
+rate
+rather
+reach
+read
+ready
+real
+reality
+realize
+really
+reason
+receive
+recent
+recently
+recognize
+record
+red
+reduce
+reflect
+region
+relate
+relationship
+religious
+remain
+remember
+remove
+report
+represent
+Republican
+require
+research
+resource
+respond
+response
+responsibility
+rest
+result
+return
+reveal
+rich
+right
+rise
+risk
+road
+rock
+role
+room
+rule
+run
+safe
+same
+save
+say
+scene
+school
+science
+scientist
+score
+sea
+season
+seat
+second
+section
+security
+see
+seek
+seem
+sell
+send
+senior
+sense
+series
+serious
+serve
+service
+set
+seven
+several
+sex
+sexual
+shake
+share
+she
+shoot
+short
+shot
+should
+shoulder
+show
+side
+sign
+significant
+similar
+simple
+simply
+since
+sing
+single
+sister
+sit
+site
+situation
+six
+size
+skill
+skin
+small
+smile
+so
+social
+society
+soldier
+some
+somebody
+someone
+something
+sometimes
+son
+song
+soon
+sort
+sound
+source
+south
+southern
+space
+speak
+special
+specific
+speech
+spend
+sport
+spring
+staff
+stage
+stand
+standard
+star
+start
+state
+statement
+station
+stay
+step
+still
+stock
+stop
+store
+story
+strategy
+street
+strong
+structure
+student
+study
+stuff
+style
+subject
+success
+successful
+such
+suddenly
+suffer
+suggest
+summer
+support
+sure
+surface
+system
+table
+take
+talk
+task
+tax
+teach
+teacher
+team
+technology
+television
+tell
+ten
+tend
+term
+test
+than
+thank
+that
+the
+their
+them
+themselves
+then
+theory
+there
+these
+they
+thing
+think
+third
+this
+those
+though
+thought
+thousand
+threat
+three
+through
+throughout
+throw
+thus
+time
+to
+today
+together
+tonight
+too
+top
+total
+tough
+toward
+town
+trade
+traditional
+training
+travel
+treat
+treatment
+tree
+trial
+trip
+trouble
+true
+truth
+try
+turn
+TV
+two
+type
+under
+understand
+unit
+until
+up
+upon
+us
+use
+usually
+value
+various
+very
+victim
+view
+violence
+visit
+voice
+vote
+wait
+walk
+wall
+want
+war
+watch
+water
+way
+we
+weapon
+wear
+week
+weight
+well
+west
+western
+what
+whatever
+when
+where
+whether
+which
+while
+white
+who
+whole
+whom
+whose
+why
+wide
+wife
+will
+win
+wind
+window
+wish
+with
+within
+without
+woman
+wonder
+word
+work
+worker
+world
+worry
+would
+write
+writer
+wrong
+yard
+yeah
+year
+yes
+yet
+you
+young
+your
+yourself
\ No newline at end of file
diff --git a/app/jira.yml b/app/jira.yml
index 10868e875..fda376321 100644
--- a/app/jira.yml
+++ b/app/jira.yml
@@ -52,7 +52,7 @@ services:
       - python util/post_run/cleanup_results_dir.py
   - module: pip-install
     packages:
-      - selenium==4.16.0
+      - selenium==4.18.1
 execution:
   - scenario: ${load_executor}
     executor: ${load_executor}
@@ -119,7 +119,7 @@ modules:
    httpsampler.ignore_failed_embedded_resources: "true"
  selenium:
    chromedriver:
-      version: "120.0.6099.109" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+      version: "122.0.6261.128" # Supports Chrome version 122. You can refer to https://googlechromelabs.github.io/chrome-for-testing
 reporting:
 - data-source: sample-labels
   module: junit-xml
diff --git a/app/jmeter/confluence.jmx b/app/jmeter/confluence.jmx
index 60127d893..2737b73e1 100644
--- a/app/jmeter/confluence.jmx
+++ b/app/jmeter/confluence.jmx
@@ -1,5 +1,5 @@
-
+
 
 This test plan was created by the BlazeMeter converter v.2.4.18. Please contact support@blazemeter.com for further support.
@@ -48,8 +48,8 @@
 true
 6
 ^((?!google|youtube|facebook|pinterest|twimg|doubleclick|lorempixel|extranet|bulldogwiki|jira-dev|jira).)*$
-10000
-10000
+25000
+25000
@@ -401,6 +401,17 @@ log.info("Confluence version: ${confluence-version}")
 page_id,space_key
+
+datasets/confluence/cqls.csv
+UTF-8
+,
+cql_var
+true
+false
+true
+false
+
+
 ,
@@ -847,6 +858,13 @@ log.info("Confluence version: ${confluence-version}")
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
@@ -1333,6 +1351,13 @@ if ( sleep_time > 0 ) {
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
@@ -1489,7 +1514,7 @@ if ( sleep_time > 0 ) {
 true
 cql
-siteSearch~${p_search_term}
+siteSearch~'${cql_var}'
 =
 true
@@ -1555,15 +1580,6 @@
 16
-
-groovy
-
-
-true
-vars.put("p_search_term", (new Random().with {(1..5).collect {(('a'..'z')).join()[ nextInt((('a'..'z')).join().length())]}.join()}).toString());
-
-
-
@@ -1639,6 +1655,13 @@
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
@@ -2136,6 +2159,13 @@
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
@@ -2263,6 +2293,13 @@
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
@@ -2795,6 +2832,13 @@ vars.put("page_title", "jmeter_create_and_edit_page:create_page -
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
@@ -3298,6 +3342,13 @@ vars.put("page_text", "jmeter_create_and_edit_page:edit_page - &q
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
@@ -3666,6 +3717,13 @@ vars.put("page_text", "jmeter_create_and_edit_page:edit_page - &q
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
@@ -4354,6 +4412,13 @@ if ( sleep_time > 0 ) {
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
@@ -4826,6 +4891,13 @@ if ( sleep_time > 0 ) {
 =
 true
+
+false
+${atlassian-token}
+=
+true
+atl_token
+
diff --git a/app/jsm.yml b/app/jsm.yml
index 2c90b5724..8a65cec22 100644
--- a/app/jsm.yml
+++ b/app/jsm.yml
@@ -68,7 +68,7 @@ services:
       - python util/post_run/cleanup_results_dir.py
   - module: pip-install
     packages:
-      - selenium==4.16.0
+      - selenium==4.18.1
 execution:
   - scenario: ${load_executor}_agents
     executor: ${load_executor}
@@ -171,7 +171,7 @@ modules:
    httpsampler.ignore_failed_embedded_resources: "true"
  selenium:
    chromedriver:
-      version: "120.0.6099.109" # Supports Chrome version 120. You can refer to https://googlechromelabs.github.io/chrome-for-testing
+      version: "122.0.6261.128" # Supports Chrome version 122. You can refer to https://googlechromelabs.github.io/chrome-for-testing
 reporting:
 - data-source: sample-labels
   module: junit-xml
diff --git a/app/locustio/confluence/http_actions.py b/app/locustio/confluence/http_actions.py
index b2dd90bb3..bcec88d65 100644
--- a/app/locustio/confluence/http_actions.py
+++ b/app/locustio/confluence/http_actions.py
@@ -205,6 +205,7 @@ def view_page(locust):
                        f'&pageId={parsed_page_id}'
                        f'&spaceKey={space_key}'
                        f'&atl_after_login_redirect=/pages/viewpage.action'
+                       f"&atl_token={locust.session_data_storage['token']}"
                        f'&timeout=12000&_={timestamp_int()}', catch_response=True)
@@ -385,6 +386,7 @@ def view_blog(locust):
                        f'&pageId={blog_id}'
                        f'&spaceKey={space_key}'
                        f'&atl_after_login_redirect=/pages/viewpage.action'
+                       f"&atl_token={locust.session_data_storage['token']}"
                        f'&timeout=12000&_={timestamp_int()}', catch_response=True)
@@ -408,6 +410,7 @@ def view_blog(locust):
 
 def search_cql_and_view_results(locust):
     raise_if_login_failed(locust)
+    cql = random.choice(confluence_dataset["cqls"])[0]
 
     @confluence_measure('locust_search_cql:recently_viewed')
     def search_recently_viewed():
@@ -420,7 +423,7 @@ def search_recently_viewed():
     def search_cql():
         # 530 rest/api/search
         r = locust.get(f"/rest/api/search"
-                       f"?cql=siteSearch~'{generate_random_string(3, only_letters=True)}'"
+                       f"?cql=siteSearch~'{cql}'"
                        f"&start=0"
                        f"&limit=20", catch_response=True)
@@ -452,7 +455,8 @@ def create_blog_editor():
         # 550 pages/createblogpost.action
         r = locust.get(f'/pages/createblogpost.action'
-                       f'?spaceKey={blog_space_key}',
+                       f'?spaceKey={blog_space_key}'
+                       f"&atl_token={locust.session_data_storage['token']}",
                        catch_response=True)
 
         content = r.content.decode('utf-8')
@@ -710,6 +714,7 @@ def create_blog():
                        f'&pageId={content_id}'
                        f'&spaceKey={parsed_space_key}'
                        f'&atl_after_login_redirect=/pages/viewpage.action'
+                       f"&atl_token={locust.session_data_storage['token']}"
                        f'&timeout=12000&_={timestamp_int()}', catch_response=True)
@@ -750,6 +755,7 @@ def create_page_editor():
         r = locust.get(f'/pages/createpage.action'
                        f'?spaceKey={space_key}'
                        f'&fromPageId={page_id}'
+                       f"&atl_token={locust.session_data_storage['token']}"
                        f'&src=quick-create', catch_response=True)
@@ -1022,6 +1028,7 @@ def create_page():
                        f'&pageId={locust.session_data_storage["content_id"]}'
                        f'&spaceKey={space_key}'
                        f'&atl_after_login_redirect=/display/{space_key}/{page_title}'
+                       f"&atl_token={locust.session_data_storage['token']}"
                        f'&timeout=12000&_={timestamp_int()}', catch_response=True)
@@ -1278,6 +1285,7 @@ def edit_page():
                        f'&pageId={locust.session_data_storage["content_id"]}'
                        f'&spaceKey={space_key}'
                        f'&atl_after_login_redirect=/pages/viewpage.action'
+                       f"&atl_token={locust.session_data_storage['token']}"
                        f'&timeout=12000'
                        f'&_={timestamp_int()}', catch_response=True)
@@ -1440,6 +1448,7 @@ def view_attachments(locust):
                        f'&pageId={page_id}'
                        f'&spaceKey={space_key}'
                        f'&atl_after_login_redirect=/pages/viewpage.action'
+                       f"&atl_token={locust.session_data_storage['token']}"
                        f'&timeout=12000'
                        f'&_={timestamp_int()}', catch_response=True)
@@ -1554,6 +1563,7 @@ def upload_attachments(locust):
                        f'&pageId={page_id}'
                        f'&spaceKey={space_key}'
                        f'&atl_after_login_redirect=/pages/viewpage.action'
+                       f"&atl_token={locust.session_data_storage['token']}"
                        f'&timeout=12000'
                        f'&_={timestamp_int()}', catch_response=True)
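The http_actions.py changes above replace the random three-letter search term with a term drawn from the new cqls dataset, mirroring the cql_var CSV Data Set added to the JMeter scenario. A minimal sketch of how a row is consumed, assuming datasets/confluence/cqls.csv holds one search term per row with the term in the first column (the path and row shape are taken from this diff, not verified against the runtime):

```python
import csv
import random

# Load the dataset roughly the way the toolkit's read_input_file does:
# one row per line, comma-separated columns (assumption for this sketch).
with open("datasets/confluence/cqls.csv", newline="") as f:
    cqls = [row for row in csv.reader(f) if row]

# Same shape as random.choice(confluence_dataset["cqls"])[0] in http_actions.py.
term = random.choice(cqls)[0]
# The cql= value sent to /rest/api/search by both JMeter and Locust.
query = f"siteSearch~'{term}'"
print(query)
```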
diff --git a/app/locustio/confluence/requests_params.py b/app/locustio/confluence/requests_params.py
index 21b21b35b..e4d7d26a6 100644
--- a/app/locustio/confluence/requests_params.py
+++ b/app/locustio/confluence/requests_params.py
@@ -1,7 +1,7 @@
 # flake8: noqa
 from locustio.common_utils import read_input_file, BaseResource
-from util.project_paths import CONFLUENCE_PAGES, CONFLUENCE_BLOGS, CONFLUENCE_USERS, CONFLUENCE_STATIC_CONTENT
-import json
+from util.project_paths import (CONFLUENCE_PAGES, CONFLUENCE_BLOGS, CONFLUENCE_USERS, CONFLUENCE_STATIC_CONTENT,
+                                CONFLUENCE_CQLS)
 
 
 def confluence_datasets():
@@ -9,6 +9,7 @@ def confluence_datasets():
     data_sets["pages"] = read_input_file(CONFLUENCE_PAGES)
     data_sets["blogs"] = read_input_file(CONFLUENCE_BLOGS)
     data_sets["users"] = read_input_file(CONFLUENCE_USERS)
+    data_sets["cqls"] = read_input_file(CONFLUENCE_CQLS)
     data_sets['static-content'] = read_input_file(CONFLUENCE_STATIC_CONTENT)
     return data_sets
diff --git a/app/selenium_ui/base_page.py b/app/selenium_ui/base_page.py
index 4bb1210fd..5f6d7a64c 100644
--- a/app/selenium_ui/base_page.py
+++ b/app/selenium_ui/base_page.py
@@ -144,6 +144,13 @@ def return_to_parent_frame(self):
     def execute_js(self, js):
         return self.driver.execute_script(js)
 
+    def rest_api_get(self, url):
+        return self.execute_js(js=f"""
+            return fetch('{url}')
+                .then(response => response.json())
+                .then(data => data);
+            """)
+
     @property
     def app_version(self):
         return self.driver.app_version if 'app_version' in dir(self.driver) else None
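The new BasePage.rest_api_get helper runs fetch() inside the browser via execute_script, so the request carries the logged-in session's cookies and needs no separate authentication. A hedged sketch of the pattern the Jira, JSM and Confluence login flows later in this diff build on it — login_page, base_url and expected_username are stand-ins, not toolkit names:

```python
# Sketch only: validate that the browser session belongs to the expected
# user right after login. rest_api_get() executes fetch() in the browser,
# so the product's session cookies travel with the request.
session = login_page.rest_api_get(url=f"{base_url}/rest/auth/latest/session")
if "name" in session:  # Jira/JSM key; Confluence returns "username" instead
    assert session["name"] == expected_username
```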
diff --git a/app/selenium_ui/bitbucket/modules.py b/app/selenium_ui/bitbucket/modules.py
index ddfdfbfc1..68d0ba121 100644
--- a/app/selenium_ui/bitbucket/modules.py
+++ b/app/selenium_ui/bitbucket/modules.py
@@ -10,15 +10,22 @@
 
 def setup_run_data(datasets):
+    datasets['current_session'] = {}
     user = random.choice(datasets["users"])
     project_with_repo_prs = random.choice(datasets["pull_requests"])
-    datasets['username'] = user[1]
-    datasets['password'] = user[2]
-    datasets['project_key'] = project_with_repo_prs[1]
-    datasets['repo_slug'] = project_with_repo_prs[0]
-    datasets['pull_request_branch_from'] = project_with_repo_prs[3]
-    datasets['pull_request_branch_to'] = project_with_repo_prs[4]
-    datasets['pull_request_id'] = project_with_repo_prs[2]
+    datasets['current_session']['username'] = user[1]
+    datasets['current_session']['password'] = user[2]
+    datasets['current_session']['project_key'] = project_with_repo_prs[1]
+    datasets['current_session']['repo_slug'] = project_with_repo_prs[0]
+    datasets['current_session']['pull_request_branch_from'] = project_with_repo_prs[3]
+    datasets['current_session']['pull_request_branch_to'] = project_with_repo_prs[4]
+    datasets['current_session']['pull_request_id'] = project_with_repo_prs[2]
+
+
+def generate_debug_session_info(webdriver, datasets):
+    debug_data = datasets['current_session']
+    debug_data['current_url'] = webdriver.current_url
+    return debug_data
 
 
 def login(webdriver, datasets):
@@ -29,6 +36,7 @@ def login(webdriver, datasets):
                                  BITBUCKET_SETTINGS.admin_password)
     webdriver.app_version = version.parse(client.get_bitbucket_version())
     login_page = LoginPage(webdriver)
+    webdriver.debug_info = generate_debug_session_info(webdriver, datasets)
 
     @print_timing("selenium_login")
     def measure():
@@ -41,7 +49,7 @@ def sub_measure():
             login_page.go_to()
         sub_measure()
 
-        login_page.set_credentials(datasets['username'], datasets['password'])
+        login_page.set_credentials(datasets['current_session']['username'], datasets['current_session']['password'])
 
         @print_timing("selenium_login:login_get_started")
         def sub_measure():
@@ -79,7 +87,7 @@ def view_project_repos(webdriver, datasets):
 
     @print_timing("selenium_view_project_repositories")
     def measure():
-        project_page = Project(webdriver, project_key=datasets['project_key'])
+        project_page = Project(webdriver, project_key=datasets['current_session']['project_key'])
         project_page.go_to()
         project_page.wait_for_page_loaded()
     measure()
@@ -87,8 +95,8 @@ def measure():
 
 def view_repo(webdriver, datasets):
     repository_page = Repository(webdriver,
-                                 project_key=datasets['project_key'],
-                                 repo_slug=datasets['repo_slug'])
+                                 project_key=datasets['current_session']['project_key'],
+                                 repo_slug=datasets['current_session']['repo_slug'])
 
     @print_timing("selenium_view_repository")
     def measure():
@@ -101,8 +109,8 @@ def measure():
 
 def view_list_pull_requests(webdriver, datasets):
     repo_pull_requests_page = RepoPullRequests(webdriver,
-                                               project_key=datasets['project_key'],
-                                               repo_slug=datasets['repo_slug'])
+                                               project_key=datasets['current_session']['project_key'],
+                                               repo_slug=datasets['current_session']['repo_slug'])
 
     @print_timing("selenium_view_list_pull_requests")
     def measure():
@@ -112,9 +120,9 @@ def measure():
 
 
 def view_pull_request_overview_tab(webdriver, datasets):
-    pull_request_page = PullRequest(webdriver, project_key=datasets['project_key'],
-                                    repo_slug=datasets['repo_slug'],
-                                    pull_request_key=datasets['pull_request_id'])
+    pull_request_page = PullRequest(webdriver, project_key=datasets['current_session']['project_key'],
+                                    repo_slug=datasets['current_session']['repo_slug'],
+                                    pull_request_key=datasets['current_session']['pull_request_id'])
 
     @print_timing("selenium_view_pull_request_overview")
     def measure():
@@ -125,9 +133,9 @@ def measure():
 
 def view_pull_request_diff_tab(webdriver, datasets):
-    pull_request_page = PullRequest(webdriver, project_key=datasets['project_key'],
-                                    repo_slug=datasets['repo_slug'],
-                                    pull_request_key=datasets['pull_request_id'])
+    pull_request_page = PullRequest(webdriver, project_key=datasets['current_session']['project_key'],
+                                    repo_slug=datasets['current_session']['repo_slug'],
+                                    pull_request_key=datasets['current_session']['pull_request_id'])
 
     @print_timing("selenium_view_pull_request_diff")
     def measure():
@@ -138,9 +146,9 @@ def measure():
 
 def view_pull_request_commits_tab(webdriver, datasets):
-    pull_request_page = PullRequest(webdriver, project_key=datasets['project_key'],
-                                    repo_slug=datasets['repo_slug'],
-                                    pull_request_key=datasets['pull_request_id'])
+    pull_request_page = PullRequest(webdriver, project_key=datasets['current_session']['project_key'],
+                                    repo_slug=datasets['current_session']['repo_slug'],
+                                    pull_request_key=datasets['current_session']['pull_request_id'])
 
     @print_timing("selenium_view_pull_request_commits")
     def measure():
@@ -151,9 +159,9 @@ def measure():
 
 def comment_pull_request_diff(webdriver, datasets):
-    pull_request_page = PullRequest(webdriver, project_key=datasets['project_key'],
-                                    repo_slug=datasets['repo_slug'],
-                                    pull_request_key=datasets['pull_request_id'])
+    pull_request_page = PullRequest(webdriver, project_key=datasets['current_session']['project_key'],
+                                    repo_slug=datasets['current_session']['repo_slug'],
+                                    pull_request_key=datasets['current_session']['pull_request_id'])
     pull_request_page.go_to_diff()
 
     @print_timing("selenium_comment_pull_request_file")
@@ -169,9 +177,9 @@ def measure():
 
 def comment_pull_request_overview(webdriver, datasets):
-    pull_request_page = PullRequest(webdriver, project_key=datasets['project_key'],
-                                    repo_slug=datasets['repo_slug'],
-                                    pull_request_key=datasets['pull_request_id'])
+    pull_request_page = PullRequest(webdriver, project_key=datasets['current_session']['project_key'],
+                                    repo_slug=datasets['current_session']['repo_slug'],
+                                    pull_request_key=datasets['current_session']['pull_request_id'])
     pull_request_page.go_to()
 
     @print_timing("selenium_comment_pull_request_overview")
@@ -185,8 +193,8 @@ def measure():
 
 def view_branches(webdriver, datasets):
-    branches_page = RepositoryBranches(webdriver, project_key=datasets['project_key'],
-                                       repo_slug=datasets['repo_slug'])
+    branches_page = RepositoryBranches(webdriver, project_key=datasets['current_session']['project_key'],
+                                       repo_slug=datasets['current_session']['repo_slug'])
 
     @print_timing("selenium_view_branches")
     def measure():
@@ -198,8 +206,8 @@ def measure():
 
 def create_pull_request(webdriver, datasets):
     repository_page = Repository(webdriver,
-                                 project_key=datasets['project_key'],
-                                 repo_slug=datasets['repo_slug'])
+                                 project_key=datasets['current_session']['project_key'],
+                                 repo_slug=datasets['current_session']['repo_slug'])
     repo_pull_requests_page = RepoPullRequests(webdriver, repo_slug=repository_page.repo_slug,
                                                project_key=repository_page.project_key)
     repository_branches_page = RepositoryBranches(webdriver, repo_slug=repository_page.repo_slug,
@@ -212,8 +220,8 @@ def measure():
 
         @print_timing("selenium_create_pull_request:create_pull_request")
         def sub_measure():
-            branch_from = datasets['pull_request_branch_from']
-            branch_to = datasets['pull_request_branch_to']
+            branch_from = datasets['current_session']['pull_request_branch_from']
+            branch_to = datasets['current_session']['pull_request_branch_to']
             repository_branches_page.open_base_branch(base_branch_name=branch_from)
             fork_branch_from = repository_branches_page.create_branch_fork_rnd_name(base_branch_name=branch_from)
             navigation_panel.wait_for_navigation_panel()
@@ -241,8 +249,8 @@ def sub_measure():
 
 def view_commits(webdriver, datasets):
-    repo_commits_page = RepositoryCommits(webdriver, project_key=datasets['project_key'],
-                                          repo_slug=datasets['repo_slug'])
+    repo_commits_page = RepositoryCommits(webdriver, project_key=datasets['current_session']['project_key'],
+                                          repo_slug=datasets['current_session']['repo_slug'])
 
     @print_timing("selenium_view_commits")
     def measure():
diff --git a/app/selenium_ui/confluence/modules.py b/app/selenium_ui/confluence/modules.py
index af532e516..dd9c06e05 100644
--- a/app/selenium_ui/confluence/modules.py
+++ b/app/selenium_ui/confluence/modules.py
@@ -3,17 +3,20 @@
 from selenium_ui.confluence.pages.pages import Login, AllUpdates, PopupManager, Page, Dashboard, TopNavPanel, Editor, \
     Logout
+from selenium_ui.confluence.pages.selectors import PageLocators
 from util.api.confluence_clients import ConfluenceRestClient
 from util.confluence.browser_metrics import browser_metrics
 from util.conf import CONFLUENCE_SETTINGS
 
 USERS = "users"
 PAGES = "pages"
+CQLS = "cqls"
 CUSTOM_PAGES = "custom_pages"
 BLOGS = "blogs"
 
 
 def setup_run_data(datasets):
+    datasets['current_session'] = {}
     user = random.choice(datasets[USERS])
     page = random.choice(datasets[PAGES])
     if CUSTOM_PAGES in datasets:
@@ -21,17 +24,25 @@ def setup_run_data(datasets):
         custom_page = random.choice(datasets[CUSTOM_PAGES])
         datasets['custom_page_id'] = custom_page[0]
     blog = random.choice(datasets[BLOGS])
-    datasets['username'] = user[0]
-    datasets['password'] = user[1]
-    datasets['page_id'] = page[0]
-    datasets['blog_id'] = blog[0]
+    datasets['current_session']['username'] = user[0]
+    datasets['current_session']['password'] = user[1]
+    datasets['current_session']['page_id'] = page[0]
+    datasets['current_session']['blog_id'] = blog[0]
 
-    datasets['view_page'] = None
-    datasets['view_page_cache'] = None
-    datasets['edit_page'] = None
-    datasets['edit_page_click'] = None
-    datasets['create_comment_page'] = None
-    datasets['view_blog'] = None
+    datasets['current_session']['view_page'] = None
+    datasets['current_session']['view_page_cache'] = None
+    datasets['current_session']['edit_page'] = None
+    datasets['current_session']['edit_page_click'] = None
+    datasets['current_session']['create_comment_page'] = None
+    datasets['current_session']['view_blog'] = None
+
+
+def generate_debug_session_info(webdriver, datasets):
+    debug_data = datasets['current_session']
+    if 'current_url' in dir(webdriver):
+        debug_data['current_url'] = webdriver.current_url
+    debug_data['custom_page_id'] = datasets.get('custom_page_id')
+    return debug_data
 
 
 def login(webdriver, datasets):
@@ -43,6 +54,7 @@ def login(webdriver, datasets):
         verify=CONFLUENCE_SETTINGS.secure,
     )
     login_page = Login(webdriver)
+    webdriver.debug_info = generate_debug_session_info(webdriver, datasets)
 
     def measure():
@@ -60,7 +72,8 @@ def sub_measure():
 
         sub_measure()
 
-        login_page.set_credentials(username=datasets['username'], password=datasets['password'])
+        login_page.set_credentials(username=datasets['current_session']['username'],
+                                   password=datasets['current_session']['password'])
 
         def sub_measure():
             login_page.click_login_button()
@@ -73,6 +86,11 @@ def sub_measure():
             measure_browser_navi_metrics(webdriver, datasets, expected_metrics=browser_metrics['selenium_login'])
         sub_measure()
+        current_session_response = login_page.rest_api_get(url=f'{CONFLUENCE_SETTINGS.server_url}'
+                                                               f'/rest/api/user/current')
+        if 'username' in current_session_response:
+            actual_username = current_session_response['username']
+            assert actual_username == datasets['current_session']['username']
     measure()
 
     PopupManager(webdriver).dismiss_default_popup()
@@ -82,8 +100,8 @@ def view_page(webdriver, datasets):
     random_page = random.choice(datasets[PAGES])
     page_id = random_page[0]
     page_description = random_page[2]
-    datasets['view_page'] = random_page
-    datasets['view_page_cache'] = random_page
+    datasets['current_session']['view_page'] = random_page
+    datasets['current_session']['view_page_cache'] = random_page
     page = Page(webdriver, page_id=page_id)
 
     def measure():
@@ -97,10 +115,10 @@ def measure():
 
 def view_page_from_cache(webdriver, datasets):
-    cached_page = datasets['view_page_cache']
+    cached_page = datasets['current_session']['view_page_cache']
     page_id = cached_page[0]
     page_description = cached_page[2]
-    datasets['view_page'] = cached_page
+    datasets['current_session']['view_page'] = cached_page
 
     page = Page(webdriver, page_id=page_id)
 
@@ -120,7 +138,7 @@ def view_blog(webdriver, datasets):
     blog_id = random_blog[0]
     blog_description = random_blog[2]
     blog = Page(webdriver, page_id=blog_id)
-    datasets['view_blog'] = random_blog
+    datasets['current_session']['view_blog'] = random_blog
 
     def measure():
         blog.go_to()
@@ -182,7 +200,7 @@ def edit_confluence_page_by_url(webdriver, datasets):
     random_page = random.choice(datasets[PAGES])
     page_id = random_page[0]
     page_description = random_page[2]
-    datasets['edit_page'] = random_page
+    datasets['current_session']['edit_page'] = random_page
     edit_page = Editor(webdriver, page_id=page_id)
 
     def measure():
@@ -210,9 +228,9 @@ def sub_measure():
 
 def edit_confluence_page_quick_edit(webdriver, datasets):
-    random_page = datasets['edit_page']
+    random_page = datasets['current_session']['edit_page']
     page_description = random_page[2]
-    datasets['edit_page_click'] = random_page
+    datasets['current_session']['edit_page_click'] = random_page
     page = Page(webdriver, page_id=random_page[0])
     edit_page = Editor(webdriver, page_id=random_page[0])
@@ -246,37 +264,51 @@ def sub_measure():
 
 def create_inline_comment(webdriver, datasets):
     page = random.choice(datasets[PAGES])
     page_id = page[0]
-    datasets['create_comment_page'] = page
+    datasets['current_session']['create_comment_page'] = page
     page = Page(webdriver, page_id=page_id)
+    editor_page = Editor(webdriver)
 
     @print_timing("selenium_create_comment")
-    def measure(webdriver):
+    def measure():
         page.go_to()
         page.wait_for_page_loaded()
-        edit_comment = Editor(webdriver)
 
         @print_timing("selenium_create_comment:write_comment")
-        def sub_measure(webdriver):
+        def sub_measure():
             page.click_add_comment()
-            edit_comment.write_content(text='This is selenium comment')
+            editor_page.write_content(text='This is selenium comment')
 
-        sub_measure(webdriver)
+        sub_measure()
 
         @print_timing("selenium_create_comment:save_comment")
-        def sub_measure(webdriver):
-            edit_comment.click_submit()
+        def sub_measure():
+            editor_page.click_submit()
             page.wait_for_comment_field()
 
-        sub_measure(webdriver)
+        sub_measure()
+
+    measure()
+
 
-    measure(webdriver)
+def cql_search(webdriver, datasets):
+    random_cql = random.choice(datasets[CQLS])
+    page = Page(webdriver)
+    page.wait_until_visible(PageLocators.search_box)
+
+    @print_timing("selenium_cql_search")
+    def measure():
+        page.get_element(PageLocators.search_box).send_keys(random_cql)
+        page.wait_until_visible(PageLocators.search_results)
+        page.get_element(PageLocators.close_search_button).click()
+    measure()
 
 
 def log_out(webdriver, datasets):
+    logout_page = Logout(webdriver)
+
     @print_timing("selenium_log_out")
-    def measure(webdriver):
-        logout_page = Logout(webdriver)
+    def measure():
         logout_page.go_to()
         logout_page.wait_for_logout()
-    measure(webdriver)
+    measure()
diff --git a/app/selenium_ui/confluence/pages/pages.py b/app/selenium_ui/confluence/pages/pages.py
index 500136e5c..579736b9a 100644
--- a/app/selenium_ui/confluence/pages/pages.py
+++ b/app/selenium_ui/confluence/pages/pages.py
@@ -3,7 +3,7 @@
 from selenium_ui.base_page import BasePage
 from selenium_ui.confluence.pages.selectors import UrlManager, LoginPageLocators, AllUpdatesLocators, PopupLocators,\
-    PageLocators, DashboardLocators, TopPanelLocators, EditorLocators, LogoutLocators
+    PageLocators, DashboardLocators, TopPanelLocators, EditorLocators, LogoutLocators, XsrfTokenLocators
 
 
 class Login(BasePage):
@@ -118,7 +118,9 @@ class Editor(BasePage):
     def __init__(self, driver, page_id=None):
         BasePage.__init__(self, driver)
         url_manager = UrlManager(page_id=page_id)
-        self.page_url = url_manager.edit_page_url()
+
+        xsrf_token = self.get_element(XsrfTokenLocators.xsrf_token).get_attribute('content')
+        self.page_url = url_manager.edit_page_url() + "&atl_token=" + xsrf_token
 
     def wait_for_create_page_open(self):
         self.wait_until_clickable(EditorLocators.publish_button)
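The Editor change above reads Confluence's anti-XSRF token from the page and appends it to the edit URL. Confluence renders the token as a meta element with id "atlassian-token" and the value in its content attribute, which is what the new XsrfTokenLocators selector (added below) points at. A standalone sketch of the same plumbing — driver is assumed to be an authenticated Selenium session, and server_url/page_id plus the editpage.action path stand in for whatever UrlManager.edit_page_url() actually produces:

```python
from selenium.webdriver.common.by import By

# Read the anti-XSRF token Confluence embeds in every page, then open the
# editor with the token appended as atl_token (mirrors Editor.__init__ above).
token = driver.find_element(By.ID, "atlassian-token").get_attribute("content")
edit_url = f"{server_url}/pages/editpage.action?pageId={page_id}&atl_token={token}"
driver.get(edit_url)
```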
diff --git a/app/selenium_ui/confluence/pages/selectors.py b/app/selenium_ui/confluence/pages/selectors.py
index d09d9988a..2bb706c0a 100644
--- a/app/selenium_ui/confluence/pages/selectors.py
+++ b/app/selenium_ui/confluence/pages/selectors.py
@@ -65,6 +65,9 @@ class PageLocators:
     page_title = (By.ID, "title-text")
     comment_text_field = (By.CSS_SELECTOR, ".quick-comment-prompt")
     edit_page_button = (By.ID, "editPageLink")
+    search_box = (By.ID, "quick-search-query")
+    search_results = (By.ID, "search-result-container")
+    close_search_button = (By.ID, "search-drawer-close")
 
 
 class DashboardLocators:
@@ -90,3 +93,7 @@ class EditorLocators:
 
 class LogoutLocators:
     logout_msg = (By.ID, "logout-message")
+
+
+class XsrfTokenLocators:
+    xsrf_token = (By.ID, "atlassian-token")
diff --git a/app/selenium_ui/confluence_ui.py b/app/selenium_ui/confluence_ui.py
index cbadb140a..49b3e9fee 100644
--- a/app/selenium_ui/confluence_ui.py
+++ b/app/selenium_ui/confluence_ui.py
@@ -39,6 +39,10 @@ def test_1_selenium_create_inline_comment(confluence_webdriver, confluence_datas
     modules.create_inline_comment(confluence_webdriver, confluence_datasets)
 
 
+def test_1_selenium_cql_search(confluence_webdriver, confluence_datasets, confluence_screen_shots):
+    modules.cql_search(confluence_webdriver, confluence_datasets)
+
+
 """
 Add custom actions anywhere between login and log out action. Move this to a different line as needed.
 Write your custom selenium scripts in `app/extension/confluence/extension_ui.py`.
diff --git a/app/selenium_ui/conftest.py b/app/selenium_ui/conftest.py
index e709fa6a5..ac6107289 100644
--- a/app/selenium_ui/conftest.py
+++ b/app/selenium_ui/conftest.py
@@ -16,6 +16,7 @@
 from selenium.webdriver import Chrome
 from selenium.webdriver.chrome.options import Options
 
+from util.common_util import webdriver_pretty_debug
 from util.conf import CONFLUENCE_SETTINGS, JIRA_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS, BAMBOO_SETTINGS
 from util.exceptions import WebDriverExceptionPostpone
 from util.project_paths import JIRA_DATASET_ISSUES, JIRA_DATASET_JQLS, JIRA_DATASET_KANBAN_BOARDS, \
@@ -23,7 +24,7 @@
     BITBUCKET_PROJECTS, BITBUCKET_REPOS, BITBUCKET_PRS, CONFLUENCE_BLOGS, CONFLUENCE_PAGES, CONFLUENCE_CUSTOM_PAGES, \
     CONFLUENCE_USERS, ENV_TAURUS_ARTIFACT_DIR, JSM_DATASET_REQUESTS, JSM_DATASET_CUSTOMERS, JSM_DATASET_AGENTS, \
     JSM_DATASET_SERVICE_DESKS_L, JSM_DATASET_SERVICE_DESKS_M, JSM_DATASET_SERVICE_DESKS_S, JSM_DATASET_CUSTOM_ISSUES, \
-    JSM_DATASET_INSIGHT_SCHEMAS, JSM_DATASET_INSIGHT_ISSUES, BAMBOO_USERS, BAMBOO_BUILD_PLANS
+    JSM_DATASET_INSIGHT_SCHEMAS, JSM_DATASET_INSIGHT_ISSUES, BAMBOO_USERS, BAMBOO_BUILD_PLANS, CONFLUENCE_CQLS
 
 SCREEN_WIDTH = 1920
 SCREEN_HEIGHT = 1080
@@ -46,26 +47,39 @@ def __init__(self):
 
     def jira_dataset(self):
         if not self.dataset:
-            self.dataset["issues"] = self.__read_input_file(JIRA_DATASET_ISSUES)
+            self.dataset["issues"] = self.__read_input_file(
+                JIRA_DATASET_ISSUES)
             self.dataset["users"] = self.__read_input_file(JIRA_DATASET_USERS)
             self.dataset["jqls"] = self.__read_input_file(JIRA_DATASET_JQLS)
-            self.dataset["scrum_boards"] = self.__read_input_file(JIRA_DATASET_SCRUM_BOARDS)
-            self.dataset["kanban_boards"] = self.__read_input_file(JIRA_DATASET_KANBAN_BOARDS)
-            self.dataset["projects"] = self.__read_input_file(JIRA_DATASET_PROJECTS)
-            self.dataset["custom_issues"] = self.__read_input_file(JIRA_DATASET_CUSTOM_ISSUES)
+            self.dataset["scrum_boards"] = self.__read_input_file(
+                JIRA_DATASET_SCRUM_BOARDS)
+            self.dataset["kanban_boards"] = self.__read_input_file(
+                JIRA_DATASET_KANBAN_BOARDS)
+            self.dataset["projects"] = self.__read_input_file(
+                JIRA_DATASET_PROJECTS)
+            self.dataset["custom_issues"] = self.__read_input_file(
+                JIRA_DATASET_CUSTOM_ISSUES)
         return self.dataset
 
     def jsm_dataset(self):
         if not self.dataset:
-            self.dataset["requests"] = self.__read_input_file(JSM_DATASET_REQUESTS)
-            self.dataset["customers"] = self.__read_input_file(JSM_DATASET_CUSTOMERS)
+            self.dataset["requests"] = self.__read_input_file(
+                JSM_DATASET_REQUESTS)
+            self.dataset["customers"] = self.__read_input_file(
+                JSM_DATASET_CUSTOMERS)
             self.dataset["agents"] = self.__read_input_file(JSM_DATASET_AGENTS)
-            self.dataset["service_desks_large"] = self.__read_input_file(JSM_DATASET_SERVICE_DESKS_L)
-            self.dataset["service_desks_small"] = self.__read_input_file(JSM_DATASET_SERVICE_DESKS_S)
-            self.dataset["service_desks_medium"] = self.__read_input_file(JSM_DATASET_SERVICE_DESKS_M)
-            self.dataset["custom_issues"] = self.__read_input_file(JSM_DATASET_CUSTOM_ISSUES)
-            self.dataset["insight_schemas"] = self.__read_input_file(JSM_DATASET_INSIGHT_SCHEMAS)
-            self.dataset["insight_issues"] = self.__read_input_file(JSM_DATASET_INSIGHT_ISSUES)
+            self.dataset["service_desks_large"] = self.__read_input_file(
+                JSM_DATASET_SERVICE_DESKS_L)
+            self.dataset["service_desks_small"] = self.__read_input_file(
+                JSM_DATASET_SERVICE_DESKS_S)
+            self.dataset["service_desks_medium"] = self.__read_input_file(
+                JSM_DATASET_SERVICE_DESKS_M)
+            self.dataset["custom_issues"] = self.__read_input_file(
+                JSM_DATASET_CUSTOM_ISSUES)
+            self.dataset["insight_schemas"] = self.__read_input_file(
+                JSM_DATASET_INSIGHT_SCHEMAS)
+            self.dataset["insight_issues"] = self.__read_input_file(
+                JSM_DATASET_INSIGHT_ISSUES)
         return self.dataset
 
     def confluence_dataset(self):
@@ -73,21 +87,26 @@ def confluence_dataset(self):
             self.dataset["pages"] = self.__read_input_file(CONFLUENCE_PAGES)
             self.dataset["blogs"] = self.__read_input_file(CONFLUENCE_BLOGS)
             self.dataset["users"] = self.__read_input_file(CONFLUENCE_USERS)
-            self.dataset["custom_pages"] = self.__read_input_file(CONFLUENCE_CUSTOM_PAGES)
+            self.dataset["cqls"] = self.__read_input_file(CONFLUENCE_CQLS)
+            self.dataset["custom_pages"] = self.__read_input_file(
+                CONFLUENCE_CUSTOM_PAGES)
         return self.dataset
 
     def bitbucket_dataset(self):
         if not self.dataset:
-            self.dataset["projects"] = self.__read_input_file(BITBUCKET_PROJECTS)
+            self.dataset["projects"] = self.__read_input_file(
+                BITBUCKET_PROJECTS)
             self.dataset["users"] = self.__read_input_file(BITBUCKET_USERS)
             self.dataset["repos"] = self.__read_input_file(BITBUCKET_REPOS)
-            self.dataset["pull_requests"] = self.__read_input_file(BITBUCKET_PRS)
+            self.dataset["pull_requests"] = self.__read_input_file(
+                BITBUCKET_PRS)
         return self.dataset
 
     def bamboo_dataset(self):
         if not self.dataset:
             self.dataset["users"] = self.__read_input_file(BAMBOO_USERS)
-            self.dataset["build_plans"] = self.__read_input_file(BAMBOO_BUILD_PLANS)
+            self.dataset["build_plans"] = self.__read_input_file(
+                BAMBOO_BUILD_PLANS)
         return self.dataset
 
@@ -116,8 +135,8 @@ def datetime_now(prefix):
 def is_docker():
     path = '/proc/self/cgroup'
     return (
-        os.path.exists('/.dockerenv') or
-        os.path.isfile(path) and any('docker' in line for line in open(path))
+        os.path.exists('/.dockerenv') or
+        os.path.isfile(path) and any('docker' in line for line in open(path))
     )
 
@@ -137,7 +156,8 @@ def wrapper(*args, **kwargs):
         full_exception = ''
         if args:
             driver = [arg for arg in args if isinstance(arg, Chrome)]
-            node_ip = "" if not driver else getattr(driver[0], "node_ip", "")
+            node_ip = "" if not driver else getattr(
+                driver[0], "node_ip", "")
         try:
             func(*args, **kwargs)
             success = True
@@ -149,6 +169,10 @@ def wrapper(*args, **kwargs):
             if 'msg' in dir(full_exception):
                 if 'Locator' in full_exception.msg:
                     locator_debug_message = f" - {full_exception.msg.split('Locator:')[-1].strip().replace(',','')}"
+                else:
+                    locator_debug_message = f" - {full_exception.msg.replace(',','')}"
+                locator_debug_message = locator_debug_message.replace(
+                    '\n', ' ')
             error_msg = f"Failed measure: {interaction} - {exc_type.__name__}{locator_debug_message}"
         end = time()
         timing = str(int((end - start) * 1000))
@@ -159,13 +183,16 @@ def wrapper(*args, **kwargs):
         with open(selenium_results_file, "a+") as jtl_file:
             timestamp = round(time() * 1000)
             if explicit_timing:
-                jtl_file.write(f"{timestamp},{explicit_timing*1000},{interaction},,{error_msg},"
-                               f",{success},0,0,0,0,,0\n")
+                jtl_file.write(
+                    f"{timestamp},{explicit_timing*1000},{interaction},,{error_msg},"
+                    f",{success},0,0,0,0,,0\n")
             else:
-                jtl_file.write(f"{timestamp},{timing},{interaction},,{error_msg}"
-                               f",,{success},0,0,0,0,{node_ip},0\n")
+                jtl_file.write(
+                    f"{timestamp},{timing},{interaction},,{error_msg}"
+                    f",,{success},0,0,0,0,{node_ip},0\n")
 
-        print(f"{timestamp},{timing},{interaction},{error_msg},{success},{node_ip}")
+        print(
+            f"{timestamp},{timing},{interaction},{error_msg},{success},{node_ip}")
 
         if not success:
             if LOGIN_ACTION_NAME in interaction:
@@ -181,17 +208,22 @@ def webdriver(app_settings):
     def driver_init():
         chrome_options = Options()
         if app_settings.webdriver_visible and is_docker():
-            raise Exception("ERROR: WEBDRIVER_VISIBLE is True in .yml, but Docker container does not have a display.")
+            raise Exception(
+                "ERROR: WEBDRIVER_VISIBLE is True in .yml, but Docker container does not have a display.")
         if not app_settings.webdriver_visible:
             chrome_options.add_argument("--headless")
         if not app_settings.secure:
             chrome_options.add_argument('--ignore-certificate-errors')
-        chrome_options.add_argument("--window-size={},{}".format(SCREEN_WIDTH, SCREEN_HEIGHT))
+        chrome_options.add_argument(
+            "--window-size={},{}".format(SCREEN_WIDTH, SCREEN_HEIGHT))
         chrome_options.add_argument("--no-sandbox")
         chrome_options.add_argument("--disable-infobars")
         chrome_options.add_argument('lang=en')
-        chrome_options.add_experimental_option('prefs', {'intl.accept_languages': 'en,en_US'})
-        chrome_options.set_capability('goog:loggingPrefs', {'performance': 'ALL'})
+        chrome_options.add_experimental_option(
+            'prefs', {'intl.accept_languages': 'en,en_US'})
+        chrome_options.set_capability(
+            'goog:loggingPrefs', {
+                'performance': 'ALL'})
         driver = Chrome(options=chrome_options)
         driver.app_settings = app_settings
         return driver
@@ -291,7 +323,8 @@ def measure_dom_requests(webdriver, interaction, description=''):
             timestamp = round(time() * 1000)
             jtl_file.write(
                 f"{timestamp},{timing},{interaction},,{error_msg},,{success},0,0,0,0,{webdriver.node_ip},0\n")
-            print(f"{timestamp},{timing},{interaction},,{error_msg},,{success},0,0,0,0,{webdriver.node_ip},0\n")
+            print(
+                f"{timestamp},{timing},{interaction},,{error_msg},,{success},0,0,0,0,{webdriver.node_ip},0\n")
 
 
 def get_mark_from_dataset(page_id: str, dataset: dict) -> str:
@@ -311,35 +344,49 @@ def measure_browser_navi_metrics(webdriver, dataset, expected_metrics):
         if 'browser.metrics.navigation' not in str(request):
             continue
         post_data_str = request[0]['params']['request']['postData']
-        post_data = eval(post_data_str.replace('true', 'True').replace('false', 'False'))
+        post_data = eval(
+            post_data_str.replace(
+                'true',
+                'True').replace(
+                'false',
+                'False'))
         for data in post_data:
             if data['name'] != 'browser.metrics.navigation':
                 continue
             key = data['properties']['key']
             ready_for_user = data['properties']['readyForUser']
-            mark = ''  # mark = '' for key == [confluence.dashboard.view, confluence.page.create.collaborative.view...]
+            # mark = '' for key == [confluence.dashboard.view,
+            # confluence.page.create.collaborative.view...]
+            mark = ''
             if 'blogpost.view' in key:
-                blogpost_template_id = dataset['view_blog'][2]
+                blogpost_template_id = dataset['current_session']['view_blog'][2]
                 mark = f'-view_blog-{blogpost_template_id}'
                 print(f'BLOGPOST_FOUND {mark}')
             if 'page.view' in key:
                 if 'pageID' in post_data_str:
-                    page_id = re.search(r'"pageID":"(.+?)"', post_data_str).group(1)
-                    mark = get_mark_from_dataset(page_id, dataset) or '-create_page'
+                    page_id = re.search(
+                        r'"pageID":"(.+?)"', post_data_str).group(1)
+                    mark = get_mark_from_dataset(
+                        page_id, dataset['current_session']) or '-create_page'
                 elif 'pageID' in str(requests):
                     page_ids = re.findall(r'"pageID":"(.+?)"', str(requests))
-                    print('Cannot find pageID in post data string, searching in request body')
+                    print(
+                        'Cannot find pageID in post data string, searching in request body')
                     print(f'Available pageID: {page_ids}')
-                    print(f'Trying to retrieve mark related to first page_id {page_ids[0]}')
-                    mark = get_mark_from_dataset(page_ids[0], dataset)
+                    print(
+                        f'Trying to retrieve mark related to first page_id {page_ids[0]}')
+                    mark = get_mark_from_dataset(page_ids[0], dataset['current_session'])
             if not mark:
                 # key == page.view and pageID is not related to any template
-                print(f'Hit {key} without mark, '
-                      f'this action will not be saved into {selenium_results_file.name}\n'
-                      f'Current url: {webdriver.current_url}\nrequests dict:')
+                print(
+                    f'Hit {key} without mark, '
+                    f'this action will not be saved into {selenium_results_file.name}\n'
+                    f'Current url: {webdriver.current_url}\nrequests dict:')
                 pprint(requests)
                 continue  # to jump to next element in post_data without appending to metrics
-            ready_for_user_dict = {'key': f'{key}{mark}', 'ready_for_user': ready_for_user}
+            ready_for_user_dict = {
+                'key': f'{key}{mark}',
+                'ready_for_user': ready_for_user}
             metrics.append(ready_for_user_dict)
 
     lockfile = f'{selenium_results_file}.lock'
@@ -356,7 +403,8 @@ def measure_browser_navi_metrics(webdriver, dataset, expected_metrics):
             node_ip = webdriver.node_ip
             jtl_file.write(
                 f"{timestamp},{ready_for_user_timing},{interaction},,{error_msg},,{success},0,0,0,0,{node_ip},0\n")
-            print(f"{timestamp},{ready_for_user_timing},{interaction},{error_msg},{success},{node_ip}")
+            print(
+                f"{timestamp},{ready_for_user_timing},{interaction},{error_msg},{success},{node_ip}")
 
 
 @pytest.fixture(scope="module")
@@ -430,21 +478,26 @@ def get_screen_shots(request, webdriver):
         mode = "w" if not selenium_error_file.exists() else "a+"
         action_name = request.node.rep_call.head_line
         error_text = request.node.rep_call.longreprtext
+        errors_artifacts = ENV_TAURUS_ARTIFACT_DIR / 'errors_artifacts'
+        errors_artifacts.mkdir(parents=True, exist_ok=True)
+        error_artifact_name = errors_artifacts / datetime_now(action_name)
+        pretty_debug = webdriver_pretty_debug(
+            webdriver, additional_field={
+                'screenshot_name': f'{error_artifact_name}.png'})
         with open(selenium_error_file, mode) as err_file:
             timestamp = round(time() * 1000)
             dt = datetime.datetime.now()
             utc_time = dt.replace(tzinfo=timezone.utc)
             str_time = utc_time.strftime("%m-%d-%Y, %H:%M:%S")
             str_time_stamp = f'{str_time}, {timestamp}'
-            err_file.write(f"{str_time_stamp}, Action: {action_name}, Error: {error_text}\n")
-            print(f"Action: {action_name}, Error: {error_text}\n")
-        errors_artifacts = ENV_TAURUS_ARTIFACT_DIR / 'errors_artifacts'
-        errors_artifacts.mkdir(parents=True, exist_ok=True)
-        error_artifact_name = errors_artifacts / datetime_now(action_name)
+            err_file.write(
+                f"{str_time_stamp}, Action: {action_name}, Error: {error_text}\n{pretty_debug}")
+            print(f"Action: {action_name}, Error: {error_text}\n{pretty_debug}")
         webdriver.save_screenshot('{}.png'.format(error_artifact_name))
         with open(f'{error_artifact_name}.html', 'wb') as html_file:
             html_file.write(webdriver.page_source.encode('utf-8'))
-        webdriver.execute_script("window.onbeforeunload = function() {};")  # to prevent alert window (force get link)
+        # to prevent alert window (force get link)
+        webdriver.execute_script("window.onbeforeunload = function() {};")
         webdriver.get(webdriver.app_settings.server_url)
 
@@ -499,7 +552,8 @@ def f_retry(*args, **kwargs):
                     print(f'Retrying: {mtries}')
                     mtries -= 1
                     if mtries == 0:
-                        return f(*args, **kwargs)  # extra try, to avoid except-raise syntax
+                        # extra try, to avoid except-raise syntax
+                        return f(*args, **kwargs)
         return f_retry
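conftest.py now enriches the per-action error log with the session context that each product's login() stores on the driver. A hedged sketch of the flow this enables — webdriver_pretty_debug comes from util.common_util and its exact output format is not shown in this diff, so treat the print as illustrative:

```python
# Illustrative failure-handling flow enabled by the changes above.
# Each product's login() stores the randomly chosen dataset rows on the driver:
webdriver.debug_info = generate_debug_session_info(webdriver, datasets)

try:
    view_issue(webdriver, datasets)
except Exception:
    # Dump the chosen dataset rows, current URL and screenshot name
    # alongside the error, then re-raise so the action is still reported.
    print(webdriver_pretty_debug(webdriver,
                                 additional_field={"screenshot_name": "error.png"}))
    raise
```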
diff --git a/app/selenium_ui/jira/modules.py b/app/selenium_ui/jira/modules.py
index df0ee49d0..826aaa0cc 100644
--- a/app/selenium_ui/jira/modules.py
+++ b/app/selenium_ui/jira/modules.py
@@ -8,7 +8,10 @@
 from util.api.jira_clients import JiraRestClient
 from util.conf import JIRA_SETTINGS
 
-client = JiraRestClient(JIRA_SETTINGS.server_url, JIRA_SETTINGS.admin_login, JIRA_SETTINGS.admin_password)
+client = JiraRestClient(
+    JIRA_SETTINGS.server_url,
+    JIRA_SETTINGS.admin_login,
+    JIRA_SETTINGS.admin_password)
 rte_status = client.check_rte_status()
 
 KANBAN_BOARDS = "kanban_boards"
@@ -21,6 +24,7 @@
 
 def setup_run_data(datasets):
+    datasets['current_session'] = {}
     page_size = 25
     projects_count = len(datasets[PROJECTS])
     user = random.choice(datasets[USERS])
@@ -33,34 +37,46 @@ def setup_run_data(datasets):
     scrum_boards = random.choice(datasets[SCRUM_BOARDS])
     kanban_boards = random.choice(datasets[KANBAN_BOARDS])
     projects = random.choice(datasets[PROJECTS])
-    datasets['username'] = user[0]
-    datasets['password'] = user[1]
-    datasets['issue_key'] = issue[0]
-    datasets['issue_id'] = issue[1]
-    datasets['project_key'] = projects[0]
-    datasets['scrum_board_id'] = scrum_boards[0]
-    datasets['kanban_board_id'] = kanban_boards[0]
-    datasets['jql'] = urllib.parse.quote(random.choice(datasets[JQLS][0]))
-    datasets['project_pages_count'] = projects_count // page_size if projects_count % page_size == 0 \
+    datasets['current_session']['username'] = user[0]
+    datasets['current_session']['password'] = user[1]
+    datasets['current_session']['issue_key'] = issue[0]
+    datasets['current_session']['issue_id'] = issue[1]
+    datasets['current_session']['project_key'] = projects[0]
+    datasets['current_session']['scrum_board_id'] = scrum_boards[0]
+    datasets['current_session']['kanban_board_id'] = kanban_boards[0]
+    datasets['current_session']['jql'] = urllib.parse.quote(
+        random.choice(datasets[JQLS][0]))
+    datasets['current_session']['project_pages_count'] = projects_count // page_size if projects_count % page_size == 0 \
         else projects_count // page_size + 1
 
 
+def generate_debug_session_info(webdriver, datasets):
+    debug_data = datasets['current_session']
+    debug_data['current_url'] = webdriver.current_url
+    debug_data['custom_issue_key'] = datasets.get('custom_issue_key')
+    debug_data['custom_issue_id'] = datasets.get('custom_issue_id')
+    return debug_data
+
+
 def login(webdriver, datasets):
     setup_run_data(datasets)
 
     @print_timing("selenium_login")
     def measure():
         login_page = Login(webdriver)
+        webdriver.base_url = login_page.base_url
+        webdriver.debug_info = generate_debug_session_info(webdriver, datasets)
 
         @print_timing("selenium_login:open_login_page")
         def sub_measure():
             login_page.go_to()
-
         sub_measure()
 
         @print_timing("selenium_login:login_and_view_dashboard")
         def sub_measure():
-            login_page.set_credentials(username=datasets['username'], password=datasets['password'])
+            login_page.set_credentials(
+                username=datasets['current_session']['username'],
+                password=datasets['current_session']['password'])
             if login_page.is_first_login():
                 login_page.first_login_setup()
             if login_page.is_first_login_second_page():
@@ -70,13 +86,20 @@ def sub_measure():
             print(f"node_id:{webdriver.node_id}")
         sub_measure()
+        current_session_response = login_page.rest_api_get(
+            url=f'{webdriver.base_url}/rest/auth/latest/session')
+        if 'name' in current_session_response:
+            actual_username = current_session_response['name']
+            assert actual_username == datasets['current_session']['username']
     measure()
 
     PopupManager(webdriver).dismiss_default_popup()
 
 
 def view_issue(webdriver, datasets):
-    issue_page = Issue(webdriver, issue_key=datasets['issue_key'])
+    issue_page = Issue(
+        webdriver,
+        issue_key=datasets['current_session']['issue_key'])
 
     @print_timing("selenium_view_issue")
     def measure():
@@ -87,7 +110,9 @@ def measure():
 
 def view_project_summary(webdriver, datasets):
-    project_page = Project(webdriver, project_key=datasets['project_key'])
+    project_page = Project(
+        webdriver,
+        project_key=datasets['current_session']['project_key'])
 
     @print_timing("selenium_project_summary")
     def measure():
@@ -111,7 +136,8 @@ def sub_measure():
         @print_timing("selenium_create_issue:fill_and_submit_issue_form")
         def sub_measure():
             issue_modal.fill_summary_create()  # Fill summary field
-            issue_modal.fill_description_create(rte_status)  # Fill description field
+            issue_modal.fill_description_create(
+                rte_status)  # Fill description field
             issue_modal.assign_to_me()  # Click assign to me
             issue_modal.set_resolution()  # Set resolution if there is such field
             issue_modal.set_issue_type()  # Set issue type, use non epic type
@@ -129,7 +155,7 @@ def sub_sub_measure():
 
 def search_jql(webdriver, datasets):
-    search_page = Search(webdriver, jql=datasets['jql'])
+    search_page = Search(webdriver, jql=datasets['current_session']['jql'])
 
     @print_timing("selenium_search_jql")
     def measure():
@@ -140,7 +166,9 @@ def measure():
 
 def edit_issue(webdriver, datasets):
-    issue_page = Issue(webdriver, issue_id=datasets['issue_id'])
+    issue_page = Issue(
+        webdriver,
+        issue_id=datasets['current_session']['issue_id'])
 
     @print_timing("selenium_edit_issue")
     def measure():
@@ -164,7 +192,9 @@ def sub_measure():
 
 def save_comment(webdriver, datasets):
-    issue_page = Issue(webdriver, issue_id=datasets['issue_id'])
+    issue_page = Issue(
+        webdriver,
+        issue_id=datasets['current_session']['issue_id'])
 
     @print_timing("selenium_save_comment")
     def measure():
@@ -188,7 +218,9 @@ def sub_measure():
 def browse_projects_list(webdriver, datasets):
     @print_timing("selenium_browse_projects_list")
     def measure():
-        projects_list_page = ProjectsList(webdriver, projects_list_pages=datasets['project_pages_count'])
+        projects_list_page = ProjectsList(
+            webdriver,
+            projects_list_pages=datasets['current_session']['project_pages_count'])
         projects_list_page.go_to()
         projects_list_page.wait_for_page_loaded()
@@ -207,7 +239,9 @@ def measure():
 
 def view_backlog_for_scrum_board(webdriver, datasets):
-    scrum_board_page = Board(webdriver, board_id=datasets['scrum_board_id'])
+    scrum_board_page = Board(
+        webdriver,
+        board_id=datasets['current_session']['scrum_board_id'])
 
     @print_timing("selenium_view_scrum_board_backlog")
     def measure():
@@ -218,7 +252,9 @@ def measure():
 
 def view_scrum_board(webdriver, datasets):
-    scrum_board_page = Board(webdriver, board_id=datasets['scrum_board_id'])
+    scrum_board_page = Board(
+        webdriver,
+        board_id=datasets['current_session']['scrum_board_id'])
 
     @print_timing("selenium_view_scrum_board")
     def measure():
@@ -229,7 +265,9 @@ def measure():
 
 def view_kanban_board(webdriver, datasets):
-    kanban_board_page = Board(webdriver, board_id=datasets['kanban_board_id'])
+    kanban_board_page = Board(
+        webdriver,
+        board_id=datasets['current_session']['kanban_board_id'])
 
     @print_timing("selenium_view_kanban_board")
     def measure():
diff --git a/app/selenium_ui/jira/pages/pages.py b/app/selenium_ui/jira/pages/pages.py
index 7199943de..2bbeeede3 100644
--- a/app/selenium_ui/jira/pages/pages.py
+++ b/app/selenium_ui/jira/pages/pages.py
@@ -18,6 +18,7 @@ def dismiss_default_popup(self):
 class Login(BasePage):
     page_url = LoginPageLocators.login_url
     page_loaded_selector = LoginPageLocators.system_dashboard
+    base_url = UrlManager().host
 
     def is_first_login(self):
         return True if self.get_elements(LoginPageLocators.continue_button) else False
diff --git a/app/selenium_ui/jsm/modules_agents.py b/app/selenium_ui/jsm/modules_agents.py
index ef5cdb33c..ace28dc37 100644
--- a/app/selenium_ui/jsm/modules_agents.py
+++ b/app/selenium_ui/jsm/modules_agents.py
@@ -24,41 +24,42 @@ def setup_run_data(datasets):
     agent = random.choice(datasets[AGENTS])
     request = random.choice(datasets[REQUESTS])
 
+    datasets['current_session'] = {}
     if datasets[SERVICE_DESKS_LARGE]:
         service_desk_large = random.choice(datasets[SERVICE_DESKS_LARGE])
-        datasets['large_project_id'] = service_desk_large[1]
-        datasets['large_project_key'] = service_desk_large[2]
-        datasets['all_open_queue_id_large'] = service_desk_large[4]
+        datasets['current_session']['large_project_id'] = service_desk_large[1]
+        datasets['current_session']['large_project_key'] = service_desk_large[2]
+        datasets['current_session']['all_open_queue_id_large'] = service_desk_large[4]
 
     if datasets[SERVICE_DESKS_MEDIUM]:
         service_desk_medium = random.choice(datasets[SERVICE_DESKS_MEDIUM])
-        datasets['medium_project_id'] = service_desk_medium[1]
-        datasets['medium_project_key'] = service_desk_medium[2]
-        datasets['all_open_queue_id_medium'] = service_desk_medium[4]
+        datasets['current_session']['medium_project_id'] = service_desk_medium[1]
+        datasets['current_session']['medium_project_key'] = service_desk_medium[2]
+        datasets['current_session']['all_open_queue_id_medium'] = service_desk_medium[4]
         # Medium projects reports
-        datasets['m_report_created_vs_resolved_id'] = service_desk_medium[5]
+        datasets['current_session']['m_report_created_vs_resolved_id'] = service_desk_medium[5]
 
     if datasets[SERVICE_DESKS_SMALL]:
         service_desk_small = random.choice(datasets[SERVICE_DESKS_SMALL])
-        datasets['small_project_id'] = service_desk_small[1]
-        datasets['small_project_key'] = service_desk_small[2]
-        datasets['all_open_queue_id_small'] = service_desk_small[4]
+        datasets['current_session']['small_project_id'] = service_desk_small[1]
+        datasets['current_session']['small_project_key'] = service_desk_small[2]
+        datasets['current_session']['all_open_queue_id_small'] = service_desk_small[4]
         # Small projects reports
-        datasets['s_report_created_vs_resolved_id'] = service_desk_small[5]
+        datasets['current_session']['s_report_created_vs_resolved_id'] = service_desk_small[5]
 
     # Prepare random project key
     service_desk_random = random.choice(datasets[SERVICE_DESKS_SMALL] +
                                         datasets[SERVICE_DESKS_MEDIUM] +
                                         datasets[SERVICE_DESKS_LARGE])
-    datasets['random_project_key'] = service_desk_random[2]
+    datasets['current_session']['random_project_key'] = service_desk_random[2]
 
     # Define users dataset
-    datasets['agent_username'] = agent[0]
-    datasets['agent_password'] = agent[1]
+    datasets['current_session']['agent_username'] = agent[0]
+    datasets['current_session']['agent_password'] = agent[1]
 
     # Define request dataset
-    datasets['request_id'] = request[0]
-    datasets['request_key'] = request[1]
+    datasets['current_session']['request_id'] = request[0]
+    datasets['current_session']['request_key'] = request[1]
 
     if CUSTOM_ISSUES in datasets:
         if len(datasets[CUSTOM_ISSUES]) > 0:
@@ -68,9 +69,15 @@ def setup_run_data(datasets):
 
     if JSM_SETTINGS.insight:
         schema_id = random.choice(datasets[INSIGHT_SCHEMAS])
-        datasets['schema_id'] = schema_id[0]
+        datasets['current_session']['schema_id'] = schema_id[0]
         insight_issues = random.choice(datasets[INSIGHT_ISSUES])
-        datasets['issue_key'] = insight_issues[0]
+        datasets['current_session']['issue_key'] = insight_issues[0]
+
+
+def generate_debug_session_info(webdriver, datasets):
+    debug_data = datasets['current_session']
+    debug_data['current_url'] = webdriver.current_url
+    return debug_data
 
 
 def login(webdriver, datasets):
@@ -79,16 +86,18 @@ def login(webdriver, datasets):
     @print_timing("selenium_agent_login")
     def measure():
         login_page = Login(webdriver)
+        webdriver.base_url = login_page.base_url
+        webdriver.debug_info = generate_debug_session_info(webdriver, datasets)
 
         @print_timing("selenium_agent_login:open_login_page")
         def sub_measure():
             login_page.go_to()
-
        sub_measure()
 
         @print_timing("selenium_agent_login:login_and_view_dashboard")
         def sub_measure():
-            login_page.set_credentials(username=datasets['agent_username'], password=datasets['agent_password'])
+            login_page.set_credentials(username=datasets['current_session']['agent_username'],
+                                       password=datasets['current_session']['agent_password'])
             if login_page.is_first_login():
                 login_page.first_login_setup()
             if login_page.is_first_login_second_page():
@@ -99,12 +108,17 @@ def sub_measure():
 
         sub_measure()
+        current_session_response = login_page.rest_api_get(url=f'{webdriver.base_url}/rest/auth/latest/session')
+        if 'name' in current_session_response:
+            actual_username = current_session_response['name']
+            assert actual_username == datasets['current_session']['agent_username']
     measure()
 
     PopupManager(webdriver).dismiss_default_popup()
 
 
 def view_report_workload_medium(webdriver, datasets):
-    workload_report = Report.view_workload_report(webdriver, project_key=datasets['medium_project_key'])
+    workload_report = Report.view_workload_report(webdriver,
+                                                  project_key=datasets['current_session']['medium_project_key'])
 
     @print_timing('selenium_agent_view_report_workload_medium')
     def measure():
@@ -118,8 +132,8 @@ def measure():
 
 def view_report_created_vs_resolved_medium(webdriver, datasets):
     created_vs_resolved = Report.view_created_vs_resolved_report(
         webdriver,
-        project_key=datasets['medium_project_key'],
-        created_vs_resolved_report_id=datasets['m_report_created_vs_resolved_id']
+        project_key=datasets['current_session']['medium_project_key'],
+        created_vs_resolved_report_id=datasets['current_session']['m_report_created_vs_resolved_id']
     )
 
     @print_timing('selenium_agent_view_report_created_vs_resolved_medium')
@@ -132,7 +146,8 @@ def measure():
 
 def view_report_workload_small(webdriver, datasets):
-    workload_report = Report.view_workload_report(webdriver, project_key=datasets['small_project_key'])
+    workload_report = Report.view_workload_report(webdriver,
+
project_key=datasets['current_session']['small_project_key']) @print_timing('selenium_agent_view_report_workload_small') def measure(): @@ -145,8 +160,8 @@ def measure(): def view_report_created_vs_resolved_small(webdriver, datasets): created_vs_resolved = Report.view_created_vs_resolved_report( - webdriver, project_key=datasets['small_project_key'], - created_vs_resolved_report_id=datasets['s_report_created_vs_resolved_id'] + webdriver, project_key=datasets['current_session']['small_project_key'], + created_vs_resolved_report_id=datasets['current_session']['s_report_created_vs_resolved_id'] ) @print_timing('selenium_agent_view_report_created_vs_resolved_small') @@ -184,7 +199,7 @@ def measure(): def view_customers(webdriver, datasets): - browse_customers_page = BrowseCustomers(webdriver, project_key=datasets['random_project_key']) + browse_customers_page = BrowseCustomers(webdriver, project_key=datasets['current_session']['random_project_key']) @print_timing('selenium_agent_view_customers') def measure(): @@ -196,7 +211,7 @@ def measure(): def view_request(webdriver, datasets): - customer_request_page = ViewCustomerRequest(webdriver, request_key=datasets['request_key']) + customer_request_page = ViewCustomerRequest(webdriver, request_key=datasets['current_session']['request_key']) @print_timing('selenium_agent_view_request') def measure(): @@ -208,7 +223,7 @@ def measure(): def add_comment(webdriver, datasets): - customer_request_page = ViewCustomerRequest(webdriver, request_key=datasets['request_key']) + customer_request_page = ViewCustomerRequest(webdriver, request_key=datasets['current_session']['request_key']) @print_timing('selenium_agent_add_comment') def measure(): @@ -227,15 +242,15 @@ def sub_measure(): def view_queues_medium(webdriver, datasets): - browse_queues_page = ViewQueue(webdriver, project_key=datasets['medium_project_key'], - queue_id=datasets['all_open_queue_id_medium']) + browse_queues_page = ViewQueue(webdriver, project_key=datasets['current_session']['medium_project_key'], + queue_id=datasets['current_session']['all_open_queue_id_medium']) view_queues_form_diff_projects_size(browse_queues_page, project_size='large') PopupManager(webdriver).dismiss_default_popup() def view_queues_small(webdriver, datasets): - browse_queues_page = ViewQueue(webdriver, project_key=datasets['small_project_key'], - queue_id=datasets['all_open_queue_id_small']) + browse_queues_page = ViewQueue(webdriver, project_key=datasets['current_session']['small_project_key'], + queue_id=datasets['current_session']['all_open_queue_id_small']) view_queues_form_diff_projects_size(browse_queues_page, project_size='small') PopupManager(webdriver).dismiss_default_popup() @@ -246,7 +261,8 @@ def insight_main_page(webdriver, datasets): @print_timing("selenium_agent_insight_view_main_page") def measure(): view_insight_main_page.go_to() - view_insight_main_page.submit_login(username=datasets['agent_username'], password=datasets['agent_password']) + view_insight_main_page.submit_login(username=datasets['current_session']['agent_username'], + password=datasets['current_session']['agent_password']) measure() PopupManager(webdriver).dismiss_default_popup() @@ -260,7 +276,7 @@ def measure(): insight_create_schema_page.go_to() insight_create_schema_page.wait_for_page_loaded() PopupManager(webdriver).dismiss_default_popup() - datasets['schema_name'] = insight_create_schema_page.create_new_schema() + datasets['current_session']['schema_name'] = insight_create_schema_page.create_new_schema() measure() @@ -271,7 +287,7 
@@ def insight_create_new_object(webdriver, datasets): @print_timing('selenium_agent_insight_create_new_object') def measure(): insight_new_object_page.wait_for_page_loaded() - insight_new_object_page.go_to_new_schema(datasets['schema_name']) + insight_new_object_page.go_to_new_schema(datasets['current_session']['schema_name']) insight_new_object_page.insight_create_new_objects() measure() @@ -286,13 +302,14 @@ def measure(): insight_delete_schema_page.go_to() insight_delete_schema_page.wait_for_page_loaded() PopupManager(webdriver).dismiss_default_popup() - insight_delete_schema_page.delete_new_schema(datasets['schema_name']) + insight_delete_schema_page.delete_new_schema(datasets['current_session']['schema_name']) measure() def insight_view_queue_insight_column(webdriver, datasets): - insight_random_queue_page = InsightViewQueue(webdriver, project_key=datasets['random_project_key']) + insight_random_queue_page = InsightViewQueue(webdriver, + project_key=datasets['current_session']['random_project_key']) @print_timing('selenium_agent_insight_view_queue_with_insight_column') def measure(): @@ -305,7 +322,7 @@ def measure(): def insight_search_object_by_iql(webdriver, datasets): - search_object_by_iql_page = InsightSearchByIql(webdriver, schema_id=datasets['schema_id']) + search_object_by_iql_page = InsightSearchByIql(webdriver, schema_id=datasets['current_session']['schema_id']) @print_timing('selenium_agent_insight_search_object_by_iql') def measure(): @@ -318,7 +335,8 @@ def measure(): def view_issue_with_insight_objects(webdriver, datasets): - view_issue_with_objects_page = ViewIssueWithObject(webdriver, insight_issues=datasets["issue_key"]) + view_issue_with_objects_page = ViewIssueWithObject(webdriver, + insight_issues=datasets['current_session']["issue_key"]) @print_timing('selenium_agent_insight_view_issue_with_objects') def measure(): diff --git a/app/selenium_ui/jsm/modules_customers.py b/app/selenium_ui/jsm/modules_customers.py index 756e6d422..9789ab86f 100644 --- a/app/selenium_ui/jsm/modules_customers.py +++ b/app/selenium_ui/jsm/modules_customers.py @@ -14,7 +14,8 @@ def __get_random_customer_request(customer): customer_requests = customer[2:] - customer_requests_chunks = [customer_requests[x:x+3] for x in range(0, len(customer_requests), 3)] + customer_requests_chunks = [customer_requests[x:x + 3] + for x in range(0, len(customer_requests), 3)] customer_request = random.choice(customer_requests_chunks) service_desk_id = customer_request[0] request_id = customer_request[1] @@ -25,21 +26,22 @@ def __get_random_customer_request(customer): def setup_run_data(datasets): customer = random.choice(datasets[CUSTOMERS]) request = random.choice(datasets[REQUESTS]) + datasets['current_session'] = {} # Define users dataset - datasets['customer_username'] = customer[0] - datasets['customer_password'] = customer[1] + datasets['current_session']['customer_username'] = customer[0] + datasets['current_session']['customer_password'] = customer[1] customer_request = __get_random_customer_request(customer) - datasets['customer_service_desk_id'] = customer_request[0] - datasets['customer_request_id'] = customer_request[1] - datasets['customer_request_key'] = customer_request[2] + datasets['current_session']['customer_service_desk_id'] = customer_request[0] + datasets['current_session']['customer_request_id'] = customer_request[1] + datasets['current_session']['customer_request_key'] = customer_request[2] # Define request dataset - datasets['request_id'] = request[0] - datasets['request_key'] = 
request[1] - datasets['service_desk_id'] = request[2] - datasets['project_id'] = request[3] - datasets['project_key'] = request[4] + datasets['current_session']['request_id'] = request[0] + datasets['current_session']['request_key'] = request[1] + datasets['current_session']['service_desk_id'] = request[2] + datasets['current_session']['project_id'] = request[3] + datasets['current_session']['project_key'] = request[4] if CUSTOM_ISSUES in datasets: if len(datasets[CUSTOM_ISSUES]) > 0: @@ -49,6 +51,12 @@ def setup_run_data(datasets): datasets['custom_service_desk_id'] = custom_issue[3] +def generate_debug_session_info(webdriver, datasets): + debug_data = datasets['current_session'] + debug_data['current_url'] = webdriver.current_url + return debug_data + + def login(webdriver, datasets): setup_run_data(datasets) @@ -56,6 +64,8 @@ def login(webdriver, datasets): def measure(): login_page = Login(webdriver) customer_portals = CustomerPortals(webdriver) + webdriver.base_url = login_page.base_url + webdriver.debug_info = generate_debug_session_info(webdriver, datasets) @print_timing("selenium_customer_login:open_login_page") def sub_measure(): @@ -69,15 +79,24 @@ def sub_measure(): @print_timing("selenium_customer_login:login_and_view_portal") def sub_measure(): - login_page.set_credentials(username=datasets['customer_username'], password=datasets['customer_password']) + login_page.set_credentials( + username=datasets['current_session']['customer_username'], + password=datasets['current_session']['customer_password']) customer_portals.wait_for_page_loaded() sub_measure() + + current_session_response = login_page.rest_api_get( + url=f'{webdriver.base_url}/rest/auth/latest/session') + if 'name' in current_session_response: + actual_username = current_session_response['name'] + assert actual_username == datasets['current_session']['customer_username'] measure() def create_request(webdriver, datasets): customer_portals = CustomerPortals(webdriver) - customer_portal = CustomerPortal(webdriver, portal_id=datasets['customer_service_desk_id']) + customer_portal = CustomerPortal( + webdriver, portal_id=datasets['current_session']['customer_service_desk_id']) @print_timing("selenium_customer_create_request") def measure(): @@ -106,8 +125,10 @@ def sub_measure(): def view_request(webdriver, datasets): - customer_request = CustomerRequest(webdriver, portal_id=datasets['customer_service_desk_id'], - request_key=datasets['customer_request_key']) + customer_request = CustomerRequest( + webdriver, + portal_id=datasets['current_session']['customer_service_desk_id'], + request_key=datasets['current_session']['customer_request_key']) @print_timing("selenium_customer_view_request") def measure(): @@ -137,8 +158,10 @@ def measure(): def add_comment(webdriver, datasets): - customer_request = CustomerRequest(webdriver, portal_id=datasets['customer_service_desk_id'], - request_key=datasets['customer_request_key']) + customer_request = CustomerRequest( + webdriver, + portal_id=datasets['current_session']['customer_service_desk_id'], + request_key=datasets['current_session']['customer_request_key']) @print_timing("selenium_customer_add_comment") def measure(): @@ -149,8 +172,10 @@ def measure(): def share_request_with_customer(webdriver, datasets): - customer_request = CustomerRequest(webdriver, portal_id=datasets['customer_service_desk_id'], - request_key=datasets['customer_request_key']) + customer_request = CustomerRequest( + webdriver, + portal_id=datasets['current_session']['customer_service_desk_id'], + 
request_key=datasets['current_session']['customer_request_key']) customer_request.go_to() customer_request.wait_for_page_loaded() @@ -160,9 +185,11 @@ def measure(): @print_timing("selenium_customer_share_request_with_customer:search_for_customer_to_share_with") def sub_measure(): if webdriver.app_version >= version.parse('5.12'): - customer_request.search_for_customer_to_share_with_react_ui(customer_name='performance_customer') + customer_request.search_for_customer_to_share_with_react_ui( + customer_name='performance_customer') else: - customer_request.search_for_customer_to_share_with(customer_name='performance_customer') + customer_request.search_for_customer_to_share_with( + customer_name='performance_customer') sub_measure() @print_timing("selenium_customer_share_request:share_request_with_customer") @@ -176,7 +203,8 @@ def sub_measure(): def view_request_with_insight(webdriver, datasets): - view_request_with_insight_field = ViewRequestWithInsight(webdriver, portal_id=datasets['customer_service_desk_id']) + view_request_with_insight_field = ViewRequestWithInsight( + webdriver, portal_id=datasets['current_session']['customer_service_desk_id']) @print_timing("selenium_customer_insight_view_request_with_insight_field") def measure(): diff --git a/app/selenium_ui/jsm/pages/agent_pages.py b/app/selenium_ui/jsm/pages/agent_pages.py index 5e1c8b277..cc6b94b0c 100644 --- a/app/selenium_ui/jsm/pages/agent_pages.py +++ b/app/selenium_ui/jsm/pages/agent_pages.py @@ -16,6 +16,7 @@ def dismiss_default_popup(self): class Login(BasePage): page_url = LoginPageLocators.login_url + base_url = UrlManager().host page_loaded_selector = LoginPageLocators.system_dashboard def is_first_login(self): diff --git a/app/selenium_ui/jsm/pages/customer_pages.py b/app/selenium_ui/jsm/pages/customer_pages.py index faf9806dc..3fdd8ecc1 100644 --- a/app/selenium_ui/jsm/pages/customer_pages.py +++ b/app/selenium_ui/jsm/pages/customer_pages.py @@ -11,6 +11,7 @@ class Login(BasePage): page_url = LoginPageLocators.login_url + base_url = UrlManager().host page_loaded_selector = LoginPageLocators.login_submit_button def set_credentials(self, username, password): diff --git a/app/util/analytics/analytics_utils.py b/app/util/analytics/analytics_utils.py index 842d98d5d..a73b67030 100644 --- a/app/util/analytics/analytics_utils.py +++ b/app/util/analytics/analytics_utils.py @@ -152,6 +152,7 @@ def generate_report_summary(collector): pretty_report = map(lambda x: format_string_summary_report(x, offset_1st), summary_report) write_to_file(pretty_report, summary_report_file) + print(f"Results summary log file created: {summary_report_file}") def get_os(): diff --git a/app/util/analytics/application_info.py b/app/util/analytics/application_info.py index 13de26957..ae9a6bcae 100644 --- a/app/util/analytics/application_info.py +++ b/app/util/analytics/application_info.py @@ -1,6 +1,7 @@ from util.conf import JIRA_SETTINGS, CONFLUENCE_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS, CROWD_SETTINGS, \ BAMBOO_SETTINGS from util.api.jira_clients import JiraRestClient +from util.api.jsm_clients import JsmRestClient from util.api.confluence_clients import ConfluenceRestClient from util.api.bitbucket_clients import BitbucketRestClient from util.api.crowd_clients import CrowdRestClient @@ -230,9 +231,9 @@ def application(self): return Bitbucket(api_client=BitbucketRestClient, config_yml=BITBUCKET_SETTINGS) if self.application_type == JSM: if JSM_SETTINGS.insight: - return Insight(api_client=JiraRestClient, config_yml=JSM_SETTINGS) + return 
Insight(api_client=JsmRestClient, config_yml=JSM_SETTINGS) else: - return Jsm(api_client=JiraRestClient, config_yml=JSM_SETTINGS) + return Jsm(api_client=JsmRestClient, config_yml=JSM_SETTINGS) if self.application_type == CROWD: return Crowd(api_client=CrowdRestClient, config_yml=CROWD_SETTINGS) if self.application_type == BAMBOO: diff --git a/app/util/api/confluence_clients.py b/app/util/api/confluence_clients.py index 19457c894..99eef2d6e 100644 --- a/app/util/api/confluence_clients.py +++ b/app/util/api/confluence_clients.py @@ -26,10 +26,10 @@ def get_content(self, start=0, limit=100, type="page", expand="space"): while loop_count > 0: api_url = ( - self.host + f'/rest/api/content/?type={type}' - f'&start={start}' - f'&limit={limit}' - f'&expand={expand}' + self.host + f'/rest/api/content/?type={type}' + f'&start={start}' + f'&limit={limit}' + f'&expand={expand}' ) request = self.get(api_url, "Could not retrieve content") @@ -63,10 +63,10 @@ def get_content_search(self, start=0, limit=100, cql=None, expand="space"): while loop_count > 0: api_url = ( - self.host + f'/rest/api/content/search?cql={cql}' - f'&start={start}' - f'&limit={limit}' - f'&expand={expand}' + self.host + f'/rest/api/content/search?cql={cql}' + f'&start={start}' + f'&limit={limit}' + f'&expand={expand}' ) request = self.get(api_url, "Could not retrieve content") @@ -123,14 +123,20 @@ def search(self, cql, cqlcontext=None, expand=None, start=0, limit=500): @retry() def is_remote_api_enabled(self): api_url = f'{self.host}/rpc/xmlrpc' - response = self.get(api_url, error_msg='Confluence Remote API (XML-RPC & SOAP) is disabled. ' - 'For further processing enable Remote API via ' - 'General Configuration - Further Configuration - Remote API') + response = self.get( + api_url, error_msg='Confluence Remote API (XML-RPC & SOAP) is disabled. ' + 'For further processing enable Remote API via ' + 'General Configuration - Further Configuration - Remote API') return response.status_code == 200 def get_confluence_nodes(self): - response = self.get(f'{self.host}/rest/zdu/cluster', error_msg='Could not get Confluence nodes count via API', - expected_status_codes=[200, 403, 500]) + response = self.get( + f'{self.host}/rest/zdu/cluster', + error_msg='Could not get Confluence nodes count via API', + expected_status_codes=[ + 200, + 403, + 500]) if response.status_code == 403 and 'clustered installation' in response.text: return 'Server' nodes = [node['id'] for node in response.json()['nodes']] @@ -144,10 +150,13 @@ def get_available_processors(self): node_id = self.get_confluence_nodes()[0] api_url = f'{self.host}/rest/atlassian-cluster-monitoring/cluster/suppliers/data/com.atlassian.cluster' \ f'.monitoring.cluster-monitoring-plugin/runtime-information/{node_id}' - response = self.get(api_url, "Could not get Available Processors information") - processors = response.json()['data']['rows']['availableProcessors'][1] + response = self.get( + api_url, "Could not get Available Processors information") + processors = response.json( + )['data']['rows']['availableProcessors'][1] except Exception as e: - print(f"Warning: Could not get Available Processors information. Error: {e}") + print( + f"Warning: Could not get Available Processors information. 
Error: {e}") return 'N/A' return processors @@ -158,15 +167,19 @@ def get_total_pages_count(self): def get_collaborative_editing_status(self): api_url = f'{self.host}/rest/synchrony-interop/status' - response = self.get(api_url, error_msg='Could not get collaborative editing status') + response = self.get( + api_url, error_msg='Could not get collaborative editing status') return response.json() def get_locale(self): language = None - page = self.get(f"{self.host}/index.action#all-updates", "Could not get page content.") + page = self.get( + f"{self.host}/index.action#all-updates", + "Could not get page content.") tree = html.fromstring(page.content) try: - language = tree.xpath('.//meta[@name="ajs-user-locale"]/@content')[0] + language = tree.xpath( + './/meta[@name="ajs-user-locale"]/@content')[0] except Exception as error: print(f"Warning: Could not get user locale: {error}") return language @@ -206,6 +219,21 @@ def get_node_ip(self, node_id: str) -> str: else: return "" + def create_user(self, username, password): + create_user_url = f'{self.host}/rest/api/admin/user' + payload = { + "userName": username, + "password": password, + "email": f'{username}@test.com', + "notifyViaEmail": False, + "fullName": username.capitalize() + } + r = self.post( + url=create_user_url, + body=payload, + error_msg='ERROR: Could not create user') + return r.json() + class ConfluenceRpcClient(Client): @@ -229,6 +257,10 @@ def create_user(self, username=None, password=None): } proxy.confluence2.addUser(token, user_definition, password) user_definition['password'] = password - return {'user': {'username': user_definition["name"], 'email': user_definition["email"]}} + return { + 'user': { + 'username': user_definition["name"], + 'email': user_definition["email"]}} else: - raise Exception(f"Can't create user {username}: user already exists.") + raise Exception( + f"Can't create user {username}: user already exists.") diff --git a/app/util/api/jira_clients.py b/app/util/api/jira_clients.py index 649f42d4a..71a069aa4 100644 --- a/app/util/api/jira_clients.py +++ b/app/util/api/jira_clients.py @@ -80,6 +80,8 @@ def get_users(self, username='.', start_at=0, max_results=1000, include_active=T return users_list + + @retry() def issues_search(self, jql='order by key', start_at=0, max_results=1000, fields=None): """ Searches for issues using JQL. 
@@ -233,11 +235,6 @@ def get_user_permissions(self):
         app_properties = self.get(api_url, "Could not retrieve user permissions")
         return app_properties.json()
 
-    def get_service_desk_info(self):
-        api_url = f'{self.host}/rest/plugins/applications/1.0/installed/jira-servicedesk'
-        service_desk_info = self.get(api_url, "Could not retrieve JSM info", headers=JSM_EXPERIMENTAL_HEADERS)
-        return service_desk_info.json()
-
     def get_deployment_type(self):
         html_pattern = 'com.atlassian.dcapt.deployment=terraform'
         jira_system_page = self.get_system_info_page()
diff --git a/app/util/api/jsm_clients.py b/app/util/api/jsm_clients.py
index 5299a58e0..873089ed8 100644
--- a/app/util/api/jsm_clients.py
+++ b/app/util/api/jsm_clients.py
@@ -1,11 +1,11 @@
 from util.api.abstract_clients import JSM_EXPERIMENTAL_HEADERS
-from util.api.abstract_clients import RestClient
+from util.api.jira_clients import JiraRestClient
 from selenium_ui.conftest import retry
 
 BATCH_SIZE_USERS = 1000
 
 
-class JsmRestClient(RestClient):
+class JsmRestClient(JiraRestClient):
 
     def get_agent(self, username='.', start_at=0, max_results=1000, include_active=True, include_inactive=False):
         """
@@ -368,6 +368,11 @@ def get_all_schemas(self):
         objectschemas = []
         api_url = self.host + "/rest/insight/1.0/objectschema/list?"
         r = self.get(api_url,
-                     f"Could not get objectSchemas id").json()
+                     "Could not get objectSchemas id").json()
         objectschemas.extend(r['objectschemas'])
         return objectschemas
+
+    def get_service_desk_info(self):
+        api_url = f'{self.host}/rest/servicedeskapi/info'
+        service_desk_info = self.get(api_url, "Could not retrieve JSM info", headers=JSM_EXPERIMENTAL_HEADERS)
+        return service_desk_info.json()
diff --git a/app/util/bamboo/bamboo_dataset_generator/pom.xml b/app/util/bamboo/bamboo_dataset_generator/pom.xml
index 29a72b122..32b0988b6 100644
--- a/app/util/bamboo/bamboo_dataset_generator/pom.xml
+++ b/app/util/bamboo/bamboo_dataset_generator/pom.xml
@@ -5,7 +5,7 @@
         <groupId>com.atlassian.bamboo</groupId>
         <artifactId>bamboo-specs-parent</artifactId>
-        <version>9.2.9</version>
+        <version>9.2.11</version>
@@ -18,7 +18,7 @@
                 <groupId>org.codehaus.mojo</groupId>
                 <artifactId>exec-maven-plugin</artifactId>
-                <version>3.1.1</version>
+                <version>3.2.0</version>
@@ -77,28 +77,37 @@
             <groupId>com.jayway.jsonpath</groupId>
             <artifactId>json-path</artifactId>
-            <version>2.7.0</version>
+            <version>2.9.0</version>
+        </dependency>
+        <dependency>
+            <groupId>net.minidev</groupId>
+            <artifactId>json-smart</artifactId>
+            <version>2.5.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.slf4j</groupId>
+            <artifactId>slf4j-api</artifactId>
+            <version>1.7.36</version>
         </dependency>
         <dependency>
             <groupId>commons-codec</groupId>
             <artifactId>commons-codec</artifactId>
-            <version>1.16.0</version>
+            <version>1.16.1</version>
         </dependency>
-
         <dependency>
             <groupId>org.apache.logging.log4j</groupId>
             <artifactId>log4j-api</artifactId>
-            <version>2.22.1</version>
+            <version>2.23.1</version>
         </dependency>
         <dependency>
             <groupId>org.apache.logging.log4j</groupId>
             <artifactId>log4j-core</artifactId>
-            <version>2.22.1</version>
+            <version>2.23.1</version>
         </dependency>
         <dependency>
             <groupId>org.apache.logging.log4j</groupId>
             <artifactId>log4j-slf4j-impl</artifactId>
-            <version>2.22.1</version>
+            <version>2.23.1</version>
         </dependency>
diff --git a/app/util/common_util.py b/app/util/common_util.py
index 07c5cd48c..6d3f9c39a 100644
--- a/app/util/common_util.py
+++ b/app/util/common_util.py
@@ -22,7 +22,8 @@ def get_latest_version(supported=True):
         r.raise_for_status()
         conf = r.text.splitlines()
         version_line = next((line for line in conf if VERSION_STR in line))
-        latest_version_str = version_line.split('=')[1].replace("'", "").replace('"', "").strip()
+        latest_version_str = version_line.split(
+            '=')[1].replace("'", "").replace('"', "").strip()
         latest_version = version.parse(latest_version_str)
         return latest_version
     except requests.exceptions.RequestException as e:
@@ -69,3 +70,19 @@ def wrapper(*args, **kwargs):
             return wrapper
     return deco_wrapper
+
+
+def webdriver_pretty_debug(webdriver, additional_field):
+    debug_message = {}
+    for key, value in additional_field.items():
+        debug_message[key] = value
+
+    if 'debug_info' in dir(webdriver):
+        webdriver.debug_info['current_url'] = webdriver.current_url
+        webdriver.debug_info['session_id'] = webdriver.session_id
+        debug_message.update(webdriver.debug_info)
+    list_to_print = 
'\n'.join( + [f'{key}: {value}' for key, value in debug_message.items()]) + pretty_formatted_string = f"""=============== WEBDRIVER DEBUG INFORMATION ===============""" + \ + f'\n{list_to_print}' + """\n===========================================================\n""" + return pretty_formatted_string diff --git a/app/util/conf.py b/app/util/conf.py index d2477cbe2..023cca033 100644 --- a/app/util/conf.py +++ b/app/util/conf.py @@ -2,8 +2,8 @@ from util.project_paths import JIRA_YML, CONFLUENCE_YML, BITBUCKET_YML, JSM_YML, CROWD_YML, BAMBOO_YML -TOOLKIT_VERSION = '8.0.0' -UNSUPPORTED_VERSION = '7.3.0' +TOOLKIT_VERSION = '8.1.0' +UNSUPPORTED_VERSION = '7.4.1' def read_yml_file(file): @@ -14,9 +14,9 @@ def read_yml_file(file): class BaseAppSettings: def __init__(self, config_yml): - obj = read_yml_file(config_yml) - self.settings = obj['settings'] - self.env_settings = obj['settings']['env'] + self.obj = read_yml_file(config_yml) + self.settings = self.obj['settings'] + self.env_settings = self.obj['settings']['env'] self.hostname = self.get_property('application_hostname') self.protocol = self.get_property('application_protocol') self.port = self.get_property('application_port') @@ -28,6 +28,8 @@ def __init__(self, config_yml): self.load_executor = self.get_property('load_executor') self.secure = self.get_property('secure') self.environment_compliance_check = self.get_property('environment_compliance_check') + self.chromedriver_version = ( + self.obj.get('modules', {}).get('selenium', {}).get('chromedriver', {}).get('version', None)) @property def server_url(self): diff --git a/app/util/data_preparation/confluence_prepare_data.py b/app/util/data_preparation/confluence_prepare_data.py index 02de03902..7b926f7e5 100644 --- a/app/util/data_preparation/confluence_prepare_data.py +++ b/app/util/data_preparation/confluence_prepare_data.py @@ -1,11 +1,13 @@ import random +from packaging import version from multiprocessing.pool import ThreadPool -from prepare_data_common import __generate_random_string, __write_to_file, __warnings_filter +from prepare_data_common import __generate_random_string, __write_to_file, __warnings_filter, __read_file from util.api.confluence_clients import ConfluenceRpcClient, ConfluenceRestClient from util.common_util import print_timing from util.conf import CONFLUENCE_SETTINGS -from util.project_paths import CONFLUENCE_USERS, CONFLUENCE_PAGES, CONFLUENCE_BLOGS, CONFLUENCE_CUSTOM_PAGES +from util.project_paths import (CONFLUENCE_USERS, CONFLUENCE_PAGES, CONFLUENCE_BLOGS, CONFLUENCE_CQLS, + CONFLUENCE_CUSTOM_PAGES, CONFLUENCE_WORDS) __warnings_filter() @@ -13,9 +15,24 @@ PAGES = "pages" CUSTOM_PAGES = "custom_pages" BLOGS = "blogs" +CQLS = "cqls" DEFAULT_USER_PREFIX = 'performance_' DEFAULT_USER_PASSWORD = 'password' ERROR_LIMIT = 10 +CQL_WORDS_COUNT = 3 + +PAGE_CQL = ('type=page' + ' and title !~ JMeter' # filter out pages created by JMeter + ' and title !~ Selenium' # filter out pages created by Selenium + ' and title !~ locust' # filter out pages created by locust + ' and title !~ Home' # filter out space Home pages + ) + +BLOG_CQL = ('type=blogpost' + ' and title !~ Performance' # filter out blogs with Performance in title + ) + + DATASET_PAGES_TEMPLATES = {'big_attachments_1': ['PAGE_1', 'PAGE_2'], 'small_attachments_3': ['PAGE_3', 'PAGE_4', 'PAGE_5', 'PAGE_6'], 'small_text_7': ['PAGE_7', 'PAGE_8', 'PAGE_9', 'PAGE_10', 'PAGE_11', @@ -25,8 +42,8 @@ 'text_formatting_21': ['PAGE_21', 'PAGE_22', 'PAGE_25', 'PAGE_26', 'PAGE_27', 'PAGE_28', 'PAGE_29', 'PAGE_30'] } 
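Editorial note on the `util/conf.py` hunk above: the new `chromedriver_version` property walks the parsed product YAML with chained `.get()` calls, so a missing key at any level yields `None` rather than a `KeyError`. A minimal sketch of the YAML shape it reads, matching the `modules.selenium.chromedriver.version` key the product `.yml` files in this change already define:

``` yaml
# Product .yml fragment (shape only) read by BaseAppSettings.chromedriver_version
modules:
  selenium:
    chromedriver:
      version: "122.0.6261.128"  # absent at any level -> property returns None
```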
-DATASET_BLOGS_TEMPLATES = {1: ['BLOG_1'], #, 'BLOG_2'], # TODO Investigate how to group similar blogs - 3: ['BLOG_3'], #'BLOG_4', 'BLOG_5'], +DATASET_BLOGS_TEMPLATES = {1: ['BLOG_1'], # , 'BLOG_2'], # TODO Investigate how to group similar blogs + 3: ['BLOG_3'], # 'BLOG_4', 'BLOG_5'], 6: ['BLOG_6'] # 'BLOG_7', 'BLOG_8', 'BLOG_9', 'BLOG_10'] } @@ -45,7 +62,9 @@ def __create_data_set(rest_client, rpc_client): pool = ThreadPool(processes=2) - dcapt_dataset = bool(perf_user_api.search(limit=1, cql='type=page and text ~ PAGE_1')) + dcapt_dataset = (len(perf_user_api.search(limit=5, cql='type=page and text ~ PAGE_7')) + + len(perf_user_api.search(limit=5, cql='type=blogpost and text ~ BLOG_7')) == 10) + print(f"DCAPT dataset: {dcapt_dataset}") async_pages = pool.apply_async(__get_pages, (perf_user_api, 5000, dcapt_dataset)) async_blogs = pool.apply_async(__get_blogs, (perf_user_api, 5000, dcapt_dataset)) @@ -55,6 +74,8 @@ def __create_data_set(rest_client, rpc_client): dataset[PAGES] = async_pages.get() dataset[BLOGS] = async_blogs.get() + dataset[CQLS] = __generate_cqls(words_count=CQL_WORDS_COUNT) + dataset[CUSTOM_PAGES] = __get_custom_pages(perf_user_api, 5000, CONFLUENCE_SETTINGS.custom_dataset_query) print(f'Users count: {len(dataset[USERS])}') print(f'Pages count: {len(dataset[PAGES])}') @@ -66,6 +87,12 @@ def __create_data_set(rest_client, rpc_client): @print_timing('Getting users') def __get_users(confluence_api, rpc_api, count): + # TODO Remove RPC Client after Confluence 7.X.X. EOL + confluence_version = confluence_api.get_confluence_version().split('-')[0] + if version.parse(confluence_version) > version.parse('8.5'): + create_user = confluence_api.create_user + else: + create_user = rpc_api.create_user errors_count = 0 cur_perf_users = confluence_api.get_users(DEFAULT_USER_PREFIX, count) if len(cur_perf_users) >= count: @@ -77,10 +104,10 @@ def __get_users(confluence_api, rpc_api, count): f'Please check the errors in bzt.log') username = f"{DEFAULT_USER_PREFIX}{__generate_random_string(10)}" try: - user = rpc_api.create_user(username=username, password=DEFAULT_USER_PASSWORD) - print(f"User {user['user']['username']} is created, number of users to create is " + create_user(username=username, password=DEFAULT_USER_PASSWORD) + print(f"User {username} is created, number of users to create is " f"{count - len(cur_perf_users)}") - cur_perf_users.append(user) + cur_perf_users.append({'user': {'username': username}}) # To avoid rate limit error from server. Execution should not be stopped after catch error from server. except Exception as error: print(f"Warning: Create confluence user error: {error}. 
Retry limits {errors_count}/{ERROR_LIMIT}") @@ -100,23 +127,14 @@ def __get_pages(confluence_api, count, dcapt_dataset): for template_id, pages_marks in DATASET_PAGES_TEMPLATES.items(): for mark in pages_marks: pages = confluence_api.get_content_search( - 0, pages_per_template, cql='type=page' - ' and title !~ JMeter' # filter out pages created by JMeter - ' and title !~ Selenium' # filter out pages created by Selenium - ' and title !~ locust' # filter out pages created by locust - ' and title !~ Home' # filter out space Home pages - f' and text ~ {mark}') + 0, pages_per_template, cql=PAGE_CQL + f' and text ~ {mark}') for page in pages: page['template_id'] = template_id total_pages.extend(pages) else: total_pages = confluence_api.get_content_search( - 0, count, cql='type=page' - ' and title !~ JMeter' # filter out pages created by JMeter - ' and title !~ Selenium' # filter out pages created by Selenium - ' and title !~ locust' # filter out pages created by locust - ' and title !~ Home') # filter out space Home pages + 0, count, cql=PAGE_CQL) for page in total_pages: page['template_id'] = DEFAULT_TEMPLATE_ID if not total_pages: @@ -137,6 +155,17 @@ def __get_custom_pages(confluence_api, count, cql): return pages +@print_timing('Generate CQLs') +def __generate_cqls(words_count, total=5000): + cqls = [] + words = __read_file(CONFLUENCE_WORDS) + for i in range(total): + random_words = random.sample(words, words_count) + cql = ' '.join(random_words) + cqls.append(cql) + return cqls + + @print_timing('Getting blogs') def __get_blogs(confluence_api, count, dcapt_dataset): blogs_templates = [i for sublist in DATASET_BLOGS_TEMPLATES.values() for i in sublist] @@ -148,16 +177,13 @@ def __get_blogs(confluence_api, count, dcapt_dataset): for template_id, blogs_marks in DATASET_BLOGS_TEMPLATES.items(): for mark in blogs_marks: blogs = confluence_api.get_content_search( - 0, blogs_per_template, cql='type=blogpost' - ' and title !~ Performance' - f' and text ~ {mark}') + 0, blogs_per_template, cql=BLOG_CQL + f' and text ~ {mark}') for blog in blogs: blog['template_id'] = template_id total_blogs.extend(blogs) else: total_blogs = confluence_api.get_content_search( - 0, count, cql='type=blogpost' - ' and title !~ Performance') + 0, count, cql=BLOG_CQL) for blog in total_blogs: blog['template_id'] = DEFAULT_TEMPLATE_ID @@ -183,6 +209,8 @@ def write_test_data_to_files(dataset): users = [f"{user['user']['username']},{DEFAULT_USER_PASSWORD}" for user in dataset[USERS]] __write_to_file(CONFLUENCE_USERS, users) + __write_to_file(CONFLUENCE_CQLS, dataset[CQLS]) + custom_pages = [f"{page['id']},{page['space']['key']}" for page in dataset[CUSTOM_PAGES]] __write_to_file(CONFLUENCE_CUSTOM_PAGES, custom_pages) diff --git a/app/util/data_preparation/prepare_data_common.py b/app/util/data_preparation/prepare_data_common.py index 577dd00ab..279068f84 100644 --- a/app/util/data_preparation/prepare_data_common.py +++ b/app/util/data_preparation/prepare_data_common.py @@ -18,3 +18,9 @@ def __write_to_file(file_path, items): with open(file_path, 'w') as f: for item in items: f.write(f"{item}\n") + + +def __read_file(file): + with file.open('r') as f: + lines = f.read().splitlines() + return lines diff --git a/app/util/default_test_actions.json b/app/util/default_test_actions.json index 85d26504c..13aa26414 100644 --- a/app/util/default_test_actions.json +++ b/app/util/default_test_actions.json @@ -79,6 +79,7 @@ "selenium_create_comment:write_comment", "selenium_create_comment:save_comment", "selenium_create_comment", + 
"selenium_cql_search", "selenium_log_out" ], "jmeter": [ diff --git a/app/util/k8s/README.MD b/app/util/k8s/README.MD index f612d687a..20cd882e4 100644 --- a/app/util/k8s/README.MD +++ b/app/util/k8s/README.MD @@ -1,58 +1,87 @@ +# Table of content: +- [Development environment](#development-environment) + - [Create development environment](#create-development-environment) + - [Terminate development environment](#terminate-development-environment) +- [Enterprise-scale environment](#enterprise-scale-environment) + - [Create enterprise-scale environment](#create-enterprise-scale-environment) + - [Terminate enterprise-scale environment](#terminate-enterprise-scale-environment) +- [Collect detailed k8s logs](#collect-detailed-k8s-logs) +- [Force terminate cluster](#force-terminate-cluster) +- [Connect to a product pod](#connect-to-a-product-pod) +- [Connect to the execution environment pod](#connect-to-the-execution-environment-pod) +- [Connect to the RDS database](#connect-to-the-rds-database) +- [Enable detailed resources monitoring](#enable-detailed-resources-monitoring) +- [Rebuild atlassian/dcapt docker image on the fly](#rebuild-atlassiandcapt-docker-image-on-the-fly) +- [Run tests locally from docker container](#run-tests-locally-from-docker-container) +- [Run tests from execution environment pod](#run-tests-from-execution-environment-pod) + # Development environment -## Create development environment -* set AWS credential in [aws_envs](./aws_envs) file -* set correct values in [dcapt-small.tfvars](./dcapt-small.tfvars) file: - * `environment_name` - * `products` - * `license` -* run install development environment command: +### Create development environment +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder +2. Set AWS credential in [aws_envs](./aws_envs) file +3. set correct values in [dcapt-small.tfvars](./dcapt-small.tfvars) file: + * `environment_name` + * `products` + * `license` +4. Run install development environment command from : ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars +-it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars ``` -## Terminate development environment +### Terminate development environment Note: install and uninstall commands have to use the same `atlassianlabs/terraform:TAG` image tag. -Set AWS credential in [aws_envs](./aws_envs) file and run command: +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder +2. Set AWS credential in [aws_envs](./aws_envs) file +3. Run command: ``` bash docker run --pull=always --env-file aws_envs \ -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ --it atlassianlabs/terraform:2.7.1 ./uninstall.sh -c conf.tfvars +-it atlassianlabs/terraform:2.7.4 ./uninstall.sh -c conf.tfvars ``` # Enterprise-scale environment -## Create enterprise-scale environment -* set AWS credential in [aws_envs](./aws_envs) file -* set correct values in [dcapt.tfvars](./dcapt.tfvars) file: - * `environment_name` - * `products` - * `license` -* run install enterprise-scale environment command: +### Create enterprise-scale environment +1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder +2. 
Set AWS credentials in [aws_envs](./aws_envs) file
+3. Set correct values in [dcapt.tfvars](./dcapt.tfvars) file:
+   * `environment_name`
+   * `products`
+   * `license`
+4. Run the install enterprise-scale environment command:
 ``` bash
 docker run --pull=always --env-file aws_envs \
 -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
 -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
 -v "/$PWD/logs:/data-center-terraform/logs" \
--it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars
+-it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars
 ```
 
-## Terminate enterprise-scale environment
+### Terminate enterprise-scale environment
 Note: install and uninstall commands have to use the same `atlassianlabs/terraform:TAG` image tag.
-Set AWS credential in [aws_envs](./aws_envs) file and run command:
+1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder
+2. Set AWS credentials in [aws_envs](./aws_envs) file
+3. Run command:
 ``` bash
 docker run --pull=always --env-file aws_envs \
 -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
 -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
 -v "/$PWD/logs:/data-center-terraform/logs" \
--it atlassianlabs/terraform:2.7.1 ./uninstall.sh -c conf.tfvars
+-it atlassianlabs/terraform:2.7.4 ./uninstall.sh -c conf.tfvars
 ```
 
 # Collect detailed k8s logs
-Set AWS credential in [aws_envs](./aws_envs) file and run command:
+Note: On unsuccessful deployment, detailed logs are generated automatically
+in the `dc-app-performance-toolkit/app/util/logs/k8s_logs` folder.
+
+To generate detailed k8s logs:
+1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder
+2. Set AWS credentials in [aws_envs](./aws_envs) file
+3. Run command:
 ``` bash
 export ENVIRONMENT_NAME=your_environment_name
 export REGION=us-east-2
@@ -62,11 +91,13 @@ export REGION=us-east-2
 
 docker run --pull=always --env-file aws_envs \
 -v "/$PWD/k8s_logs:/data-center-terraform/k8s_logs" \
 -v "/$PWD/logs:/data-center-terraform/logs" \
--it atlassianlabs/terraform:2.7.1 ./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs
+-it atlassianlabs/terraform:2.7.4 ./scripts/collect_k8s_logs.sh atlas-$ENVIRONMENT_NAME-cluster $REGION k8s_logs
 ```
 
 # Force terminate cluster
-Set AWS credential in [aws_envs](./aws_envs) file and run command:
+1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder
+2. Set AWS credentials in [aws_envs](./aws_envs) file
+3. Run command:
 ``` bash
 export ENVIRONMENT_NAME=your_environment_name
 export REGION=us-east-2
@@ -80,24 +111,134 @@ docker run --pull=always --env-file aws_envs \
 atlassian/dcapt terminate_cluster.py --cluster_name atlas-$ENVIRONMENT_NAME-cluster --aws_region $REGION
 ```
 
-# Connect to product pod
-Set your environment name:
-``` bash
-export ENVIRONMENT_NAME=your_environment_name
-export REGION=us-east-2
-```
+# Connect to a product pod
+1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder
+2. Set AWS credentials in [aws_envs](./aws_envs) file
+3. Set your environment name:
+   ``` bash
+   export ENVIRONMENT_NAME=your_environment_name
+   export REGION=us-east-2
+   ```
+4. 
SSH to terraform container:
+   ``` bash
+   docker run --pull=always --env-file aws_envs \
+   -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
+   -e REGION=$REGION \
+   -it atlassianlabs/terraform:2.7.4 bash
+   ```
 
-SSH to terraform container:
-``` bash
-docker run --pull=always --env-file aws_envs \
--e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
--e REGION=$REGION \
--it atlassianlabs/terraform:2.7.1 bash
-```
+5. Connect to the product pod. The example below is for the jira pod with number 0. For another product or pod number, change `PRODUCT_POD` accordingly.
+   ``` bash
+   export PRODUCT_POD=jira-0
+   aws eks update-kubeconfig --name atlas-$ENVIRONMENT_NAME-cluster --region $REGION
+   kubectl exec -it $PRODUCT_POD -n atlassian -- bash
+   ```
 
-Connect to the product pod. Example below for jira pod with number 0. For other product or pod number change `PRODUCT_POD` accordingly.
-``` bash
-export PRODUCT_POD=jira-0
-aws eks update-kubeconfig --name atlas-$ENVIRONMENT_NAME-cluster --region $REGION
-kubectl exec -it $PRODUCT_POD -n atlassian -- bash
-```
\ No newline at end of file
+# Connect to the execution environment pod
+1. Navigate to `dc-app-performance-toolkit` folder
+2. Set AWS credentials in [aws_envs](./aws_envs) file
+3. Set your environment name:
+   ``` bash
+   export ENVIRONMENT_NAME=your_environment_name
+   export REGION=us-east-2
+   ```
+4. SSH to terraform container:
+   ``` bash
+   docker run --pull=always --env-file ./app/util/k8s/aws_envs \
+   -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
+   -e REGION=$REGION \
+   -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
+   -it atlassianlabs/terraform:2.7.4 bash
+   ```
+5. Copy the code base and connect to the execution environment pod:
+   ``` bash
+   aws eks update-kubeconfig --name atlas-$ENVIRONMENT_NAME-cluster --region $REGION
+   exec_pod_name=$(kubectl get pods -n atlassian -l=exec=true --no-headers -o custom-columns=":metadata.name")
+   kubectl exec -it "$exec_pod_name" -n atlassian -- rm -rf /dc-app-performance-toolkit
+   kubectl cp --retries 10 dc-app-performance-toolkit atlassian/"$exec_pod_name":/dc-app-performance-toolkit
+   kubectl exec -it "$exec_pod_name" -n atlassian -- bash
+   ```
+
+# Connect to the RDS database
+1. Navigate to `dc-app-performance-toolkit/app/util/k8s` folder
+2. Set AWS credentials in [aws_envs](./aws_envs) file
+3. Export environment variables for environment name, region and product:
+   ``` bash
+   export ENVIRONMENT_NAME=your_environment_name
+   export REGION=us-east-2
+   export PRODUCT=jira
+   # PRODUCT options: jira/confluence/bitbucket. For jsm use jira as well.
+   ```
+4. Start and SSH to the `atlassianlabs/terraform` docker container:
+   ``` bash
+   docker run --pull=always --env-file aws_envs \
+   -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
+   -e REGION=$REGION \
+   -e PRODUCT=$PRODUCT \
+   -v "/$PWD/script-runner.yml:/data-center-terraform/script-runner.yml" \
+   -it atlassianlabs/terraform:2.7.4 bash
+   ```
+5. Run the following commands one by one inside the docker container:
+   ``` bash
+   aws eks update-kubeconfig --name atlas-$ENVIRONMENT_NAME-cluster --region $REGION
+   kubectl apply -f script-runner.yml
+   rds_endpoint=$(aws rds --region $REGION describe-db-instances --filters "Name=db-instance-id,Values=atlas-${ENVIRONMENT_NAME}-${PRODUCT}-db" --query "DBInstances[].Endpoint.Address" --output text)
+   kubectl exec -it script-runner -- psql -h $rds_endpoint -d $PRODUCT -U atl$PRODUCT
+   ```
+6. Default DB password: `Password1!`
+
+# Enable detailed resources monitoring
+To enable detailed CPU/Memory monitoring and Grafana dashboards for visualisation:
+1. 
Navigate to `dc-app-performance-toolkit/app/util/k8s` folder
+2. Set AWS credentials in [aws_envs](./aws_envs) file
+3. Go to `dcapt.tfvars` file -> Monitoring section
+4. Uncomment and set to `true` the following required variables: `monitoring_enabled` and `monitoring_grafana_expose_lb`
+5. Modify other optional variables if needed
+6. Run `install.sh` as described in [Create enterprise-scale environment](#create-enterprise-scale-environment)
+7. Get Grafana URL:
+   ``` bash
+   export ENVIRONMENT_NAME=your_environment_name
+   export REGION=us-east-2
+   ```
+   ``` bash
+   docker run --pull=always --env-file aws_envs \
+   -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
+   -e REGION=$REGION \
+   -it atlassianlabs/terraform:2.7.4 bash
+   ```
+   ``` bash
+   aws eks update-kubeconfig --name atlas-$ENVIRONMENT_NAME-cluster --region $REGION
+   kubectl get svc -n kube-monitoring | grep grafana
+   ```
+8. Open Grafana URL in the browser. Default Grafana creds: `admin/prom-operator`.
+9. Go to Dashboards -> General -> select one of the available dashboards.
+
+# Rebuild atlassian/dcapt docker image on the fly
+In case any changes are needed in the `atlassian/dcapt` image:
+1. Modify the local `dc-app-performance-toolkit/Dockerfile` file
+2. [Run tests from execution environment pod](#run-tests-from-execution-environment-pod) with the extra flag `--docker_image_rebuild` at the end
+
+# Run tests locally from docker container
+Note: this option is **not** suitable for full-scale performance runs as the local network is a bottleneck.
+1. Navigate to `dc-app-performance-toolkit` folder
+2. Select the needed product and run the command below (the example is for jira):
+   ``` bash
+   docker run --pull=always --shm-size=4g -v "/$PWD:/dc-app-performance-toolkit" atlassian/dcapt jira.yml
+   ```
+
+# Run tests from execution environment pod
+1. Navigate to `dc-app-performance-toolkit` folder
+2. Set AWS credentials in [aws_envs](./aws_envs) file
+3. Set environment name:
+   ``` bash
+   export ENVIRONMENT_NAME=your_environment_name
+   ```
+4. Select the needed product and run the command below (the example is for jira):
+   ``` bash
+   docker run --pull=always --env-file ./app/util/k8s/aws_envs \
+   -e REGION=us-east-2 \
+   -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
+   -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
+   -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
+   -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jira.yml
+   ```
diff --git a/app/util/k8s/bzt_on_pod.sh b/app/util/k8s/bzt_on_pod.sh
index fdca91b70..9c4019afc 100644
--- a/app/util/k8s/bzt_on_pod.sh
+++ b/app/util/k8s/bzt_on_pod.sh
@@ -16,26 +16,55 @@ fi
 echo "INFO: AWS REGION: $REGION"
 
 if [ $# -eq 0 ]; then
-  echo "ERROR: No arguments supplied. Product .yml file need to be passed as argument. E.g. jira.yml"
+  echo "ERROR: No arguments supplied. Product .yml file needs to be passed as the first argument. E.g. jira.yml"
   exit 1
 fi
-echo "INFO: Product .yml: $1"
+
+if [[ $1 =~ "yml" ]]; then
+  echo "INFO: Product .yml: $1"
+else
+  echo "ERROR: First argument should be a product .yml file, e.g. jira.yml"
+  echo "ERROR: Provided first argument: $1"
+  exit 1
+fi
+
 echo "INFO: Update kubeconfig"
 aws eks update-kubeconfig --name atlas-"$ENVIRONMENT_NAME"-cluster --region "$REGION"
 
 echo "INFO: Get execution environment pod name"
 exec_pod_name=$(kubectl get pods -n atlassian -l=exec=true --no-headers -o custom-columns=":metadata.name")
+
+if [[ -z "$exec_pod_name" ]]; then
+  echo "ERROR: Current cluster does not have an execution environment pod. Check which environment type is used. 
+  The development environment does not have an execution environment pod by default because it is dedicated to local app-specific actions development only."
+exit 1
+fi
+
 echo "INFO: Execution environment pod name: $exec_pod_name"
 
 echo "INFO: Cleanup dc-app-performance-toolkit folder on the exec env pod"
 kubectl exec -it "$exec_pod_name" -n atlassian -- rm -rf /dc-app-performance-toolkit
 
 echo "INFO: Copy latest dc-app-performance-toolkit folder to the exec env pod"
-kubectl cp --retries 10 dc-app-performance-toolkit atlassian/"$exec_pod_name":/dc-app-performance-toolkit
+start=$(date +%s)
+# tar only the app folder; exclude the results and util/k8s folders
+tar -czf dcapt.tar.gz -C dc-app-performance-toolkit --exclude results --exclude util/k8s app Dockerfile requirements.txt
+kubectl cp --retries 10 dcapt.tar.gz atlassian/"$exec_pod_name":/dcapt.tar.gz
+kubectl exec -it "$exec_pod_name" -n atlassian -- mkdir /dc-app-performance-toolkit
+kubectl exec -it "$exec_pod_name" -n atlassian -- tar -xf /dcapt.tar.gz -C /dc-app-performance-toolkit
+rm -rf dcapt.tar.gz
+end=$(date +%s)
+runtime=$((end-start))
+echo "INFO: Copy finished in $runtime seconds"
+
+if [[ $2 == "--docker_image_rebuild" ]]; then
+  echo "INFO: Rebuild docker image"
+  kubectl exec -it "$exec_pod_name" -n atlassian -- docker build -t $DCAPT_DOCKER_IMAGE dc-app-performance-toolkit
+fi
 
 echo "INFO: Run bzt on the exec env pod"
-kubectl exec -it "$exec_pod_name" -n atlassian -- docker run --pull=always --shm-size=4g -v "/dc-app-performance-toolkit:/dc-app-performance-toolkit" $DCAPT_DOCKER_IMAGE "$1"
+kubectl exec -it "$exec_pod_name" -n atlassian -- docker run --shm-size=4g -v "/dc-app-performance-toolkit:/dc-app-performance-toolkit" $DCAPT_DOCKER_IMAGE "$1"
 
 sleep 10
 echo "INFO: Copy results folder from the exec env pod to local"
diff --git a/app/util/k8s/dcapt-small.tfvars b/app/util/k8s/dcapt-small.tfvars
index 08f058722..d0cfed4e3 100644
--- a/app/util/k8s/dcapt-small.tfvars
+++ b/app/util/k8s/dcapt-small.tfvars
@@ -29,11 +29,19 @@ jira_license = "jira-license"
 confluence_license = "confluence-license"
 bitbucket_license = "bitbucket-license"
 
+# (Optional) Domain name used by the ingress controller.
+# The final ingress domain is a subdomain within this domain. (eg.: environment.domain.com)
+# You can also provide a subdomain and the final ingress domain will be .
+# When commented out, the ingress controller is not provisioned and the application is accessible over HTTP protocol (not HTTPS).
+#
+#domain = ""
+
 ################################################################################
 # Common Settings
 ################################################################################
 
-# Default AWS region for DCAPT snapshots. Supported regions are us-east-1, us-east-2, us-west-1, us-west-2.
+# Default AWS region for DCAPT snapshots. Supported regions: us-east-2, us-east-1.
+# If any other specific region is required, please contact support via the community Slack channel.
 region = "us-east-2"
 
 # List of IP ranges that are allowed to access the running applications over the World Wide Web.
@@ -63,13 +71,6 @@ max_cluster_capacity = 2
 # This can be used in case you hit the limit which can happen if 30+ whitelist_cidrs are provided.
 #enable_https_ingress = false
 
-# (Optional) Domain name used by the ingress controller.
-# The final ingress domain is a subdomain within this domain. (eg.: environment.domain.com)
-# You can also provide a subdomain and the final ingress domain will be . 
-# When commented out, the ingress controller is not provisioned and the application is accessible over HTTP protocol (not HTTPS). -# -#domain = "" - ################################################################################ # Jira/JSM Settings ################################################################################ @@ -87,10 +88,10 @@ jira_image_repository = "atlassian/jira-software" # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions # Jira version. -jira_version_tag = "9.12.1" +jira_version_tag = "9.12.4" # JSM version # ! REQUIRED for JSM ! -# jira_version_tag = "5.12.1" +# jira_version_tag = "5.12.4" # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large jira_dataset_size = "small" @@ -150,12 +151,16 @@ jira_db_master_password = "Password1!" # are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. # jira_custom_values_file = "/path/to/values.yaml" +# A list of JVM arguments to be passed to the server. Defaults to an empty list. +# Example: ["-Dproperty=value", "-Dproperty1=value1"] +jira_additional_jvm_args = ["-Dupm.plugin.upload.enabled=true"] + ################################################################################ # Confluence Settings ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -confluence_version_tag = "8.5.4" +confluence_version_tag = "8.5.6" # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large confluence_dataset_size = "small" @@ -223,12 +228,15 @@ confluence_collaborative_editing_enabled = true # are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. # confluence_custom_values_file = "/path/to/values.yaml" +# A list of JVM arguments to be passed to the server. Defaults to an empty list. +# confluence_additional_jvm_args = ["-Dproperty=value", "-Dproperty1=value1"] + ################################################################################ # Bitbucket Settings ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "8.9.8" +bitbucket_version_tag = "8.9.10" # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large bitbucket_dataset_size = "small" @@ -314,3 +322,6 @@ bitbucket_db_master_password = "Password1!" # Custom values file location. Defaults to an empty string which means only values from config.tfvars # are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. # bitbucket_custom_values_file = "/path/to/values.yaml" + +# A list of JVM arguments to be passed to the server. Defaults to an empty list. 
+# bitbucket_additional_jvm_args = ["-Dproperty=value", "-Dproperty1=value1"] diff --git a/app/util/k8s/dcapt-snapshots.json b/app/util/k8s/dcapt-snapshots.json index 0b282324b..7c81223a0 100644 --- a/app/util/k8s/dcapt-snapshots.json +++ b/app/util/k8s/dcapt-snapshots.json @@ -2,15 +2,25 @@ "jira": { "versions": [ { - "version": "9.4.14", + "version": "9.12.4", "data": [ + { + "type": "local-home", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-01942e6924d6094d3", + "us-east-1": "snap-0b77a63ff601d4480" + } + ] + }, { "type": "ebs", "size": "large", "snapshots": [ { - "us-east-2": "snap-084abf5dfca234b9d", - "us-east-1": "snap-0934c1aa5c62be5dc" + "us-east-2": "snap-0800247b9bad8a16d", + "us-east-1": "snap-018690e2d4e8a8393" } ] }, @@ -19,8 +29,8 @@ "size": "large", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-14", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-14" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-12-4", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-12-4" } ] }, @@ -29,8 +39,8 @@ "size": "small", "snapshots": [ { - "us-east-2": "snap-00af725c87690569d", - "us-east-1": "snap-04417460cb27d17cb" + "us-east-2": "snap-0067577f23ce694f1", + "us-east-1": "snap-0ff264d124d02af3a" } ] }, @@ -39,23 +49,33 @@ "size": "small", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-14", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-4-14" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-12-4", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-12-4" } ] } ] }, { - "version": "9.12.1", + "version": "9.4.17", "data": [ + { + "type": "local-home", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-01ebb87caac609507", + "us-east-1": "snap-04ba555d376baa19a" + } + ] + }, { "type": "ebs", "size": "large", "snapshots": [ { - "us-east-2": "snap-0e413a90c64812130", - "us-east-1": "snap-0dedc16a22652e0f1" + "us-east-2": "snap-05655fde5263939cb", + "us-east-1": "snap-01e52802d576b3243" } ] }, @@ -64,8 +84,8 @@ "size": "large", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-12-1", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-12-1" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-9-4-17", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-9-4-17" } ] }, @@ -74,8 +94,8 @@ "size": "small", "snapshots": [ { - "us-east-2": "snap-0a7b4d27c09013274", - "us-east-1": "snap-07d85f52da6a564ed" + "us-east-2": "snap-0e9f7412296d3cb5c", + "us-east-1": "snap-0c22d9c03f3f7b075" } ] }, @@ -84,8 +104,8 @@ "size": "small", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-12-1", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-12-1" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jira-small-9-4-17", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jira-small-9-4-17" } ] } @@ -96,15 +116,25 @@ "jsm": { "versions": [ { - "version": "5.4.14", + "version": "5.12.4", "data": [ + { + "type": "local-home", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-01f8e5dda7e44c94b", + "us-east-1": "snap-084c8edeae71b8dfd" + } + ] + }, { "type": "ebs", "size": "large", "snapshots": [ { - "us-east-2": 
"snap-02757b69de7aeb3f8", - "us-east-1": "snap-0be0df5470e3a312d" + "us-east-2": "snap-0098dceccb1e60b46", + "us-east-1": "snap-02f1c88e526bca8a2" } ] }, @@ -113,8 +143,8 @@ "size": "large", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-14", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-14" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-12-4", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-12-4" } ] }, @@ -123,8 +153,8 @@ "size": "small", "snapshots": [ { - "us-east-2": "snap-007de06d38fcd95c6", - "us-east-1": "snap-075e397f427e6d6c8" + "us-east-2": "snap-0d36a36efae6704c7", + "us-east-1": "snap-0052678aa291f0c0b" } ] }, @@ -133,23 +163,33 @@ "size": "small", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-14", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-4-14" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-12-4", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-12-4" } ] } ] }, { - "version": "5.12.1", + "version": "5.4.17", "data": [ + { + "type": "local-home", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-0b0914139687f8fd3", + "us-east-1": "snap-0d12a2fddf1f54173" + } + ] + }, { "type": "ebs", "size": "large", "snapshots": [ { - "us-east-2": "snap-011d04a19c6b93529", - "us-east-1": "snap-0edfff503a2605803" + "us-east-2": "snap-0fe2f2d7fe239f9fd", + "us-east-1": "snap-0a764e69e95c35f38" } ] }, @@ -158,8 +198,8 @@ "size": "large", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-12-1", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-12-1" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-5-4-17", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-5-4-17" } ] }, @@ -168,8 +208,8 @@ "size": "small", "snapshots": [ { - "us-east-2": "snap-053193245de30778c", - "us-east-1": "snap-0c18a374ecc344221" + "us-east-2": "snap-0eafdaae32b290fa9", + "us-east-1": "snap-026b0562c39192007" } ] }, @@ -178,8 +218,8 @@ "size": "small", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-12-1", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-12-1" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-jsm-small-5-4-17", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-jsm-small-5-4-17" } ] } @@ -190,15 +230,25 @@ "confluence": { "versions": [ { - "version": "8.5.4", + "version": "7.19.19", "data": [ + { + "type": "local-home", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-051ca1f3060f748a9", + "us-east-1": "snap-05e7f0f83a095335b" + } + ] + }, { "type": "ebs", "size": "small", "snapshots": [ { - "us-east-2": "snap-021574360a781464f", - "us-east-1": "snap-00d1f3a18d176ceca" + "us-east-2": "snap-019e918febb9e96ff", + "us-east-1": "snap-01ba6c04e19e3b53a" } ] }, @@ -207,8 +257,8 @@ "size": "small", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-8-5-4", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-8-5-4" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-19", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-19-19" } ] }, @@ -217,8 +267,8 @@ "size": "large", 
"snapshots": [ { - "us-east-2": "snap-09802dd4106f2686a", - "us-east-1": "snap-08d42b48214eaf3bf" + "us-east-2": "snap-00a8fab739b46f2b7", + "us-east-1": "snap-09ad5c22668c501b5" } ] }, @@ -227,24 +277,34 @@ "size": "large", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-8-5-4", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-8-5-4" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-19", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-19" } ] } ], - "build_number": "9012" + "build_number": "8804" }, { - "version": "7.19.17", + "version": "8.5.6", "data": [ + { + "type": "local-home", + "size": "large", + "snapshots": [ + { + "us-east-2": "snap-0e4f68027cc6d5252", + "us-east-1": "snap-03adba820e61eafcd" + } + ] + }, { "type": "ebs", "size": "large", "snapshots": [ { - "us-east-2": "snap-0b8723cc5a8f8becc", - "us-east-1": "snap-030f0bb7870b60c73" + "us-east-2": "snap-0df1efecadbd5efe5", + "us-east-1": "snap-0cdf264bb30754151" } ] }, @@ -253,8 +313,8 @@ "size": "large", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-7-19-17", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-7-19-17" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-8-5-6", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-8-5-6" } ] }, @@ -263,8 +323,8 @@ "size": "small", "snapshots": [ { - "us-east-2": "snap-062952d964320477f", - "us-east-1": "snap-0720b5df2ed27b435" + "us-east-2": "snap-00f8b2e81378f57b4", + "us-east-1": "snap-097851c90fadc39d9" } ] }, @@ -273,103 +333,103 @@ "size": "small", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-7-19-17", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-7-19-17" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-confluence-small-8-5-6", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-confluence-small-8-5-6" } ] } ], - "build_number": "8804" + "build_number": "9012" } ] }, "bitbucket": { "versions": [ { - "version": "8.9.8", + "version": "7.21.22", "data": [ { "type": "ebs", - "size": "small", + "size": "large", "snapshots": [ { - "us-east-2": "snap-04b1409ae2afa2d65", - "us-east-1": "snap-02a3125029b85438b" + "us-east-2": "snap-019e03768c88ea9d2", + "us-east-1": "snap-034148029deb1efda" } ] }, { "type": "rds", - "size": "small", + "size": "large", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-8", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-8" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-22", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-21-22" } ] }, { "type": "ebs", - "size": "large", + "size": "small", "snapshots": [ { - "us-east-2": "snap-06d634d448d684fba", - "us-east-1": "snap-0759f03d54c2138cc" + "us-east-2": "snap-00a0e6b2e113a4a1c", + "us-east-1": "snap-0f6ca67e95425659c" } ] }, { "type": "rds", - "size": "large", + "size": "small", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-8", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-8-9-8" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-22", + "us-east-1": 
"arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-22" } ] } ] }, { - "version": "7.21.20", + "version": "8.9.10", "data": [ { "type": "ebs", - "size": "small", + "size": "large", "snapshots": [ { - "us-east-2": "snap-01e565f1a0c5d3f2c", - "us-east-1": "snap-046e472e93ae1ad2b" + "us-east-2": "snap-0d577b3651ca9d903", + "us-east-1": "snap-0866a52c1f7d22c37" } ] }, { "type": "rds", - "size": "small", + "size": "large", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-7-21-20", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-7-21-20" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-8-9-10", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-8-9-10" } ] }, { "type": "ebs", - "size": "large", + "size": "small", "snapshots": [ { - "us-east-2": "snap-0de936ce723f9582c", - "us-east-1": "snap-02f3a73aef1b80ffe" + "us-east-2": "snap-0f0340834c14ce8b7", + "us-east-1": "snap-098baaef0883d6831" } ] }, { "type": "rds", - "size": "large", + "size": "small", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-7-21-20", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-7-21-20" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-bitbucket-small-8-9-10", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-bitbucket-small-8-9-10" } ] } @@ -380,15 +440,15 @@ "crowd": { "versions": [ { - "version": "5.2.2", + "version": "5.2.3", "data": [ { "type": "ebs", "size": "large", "snapshots": [ { - "us-east-2": "snap-01d7f772d9d5f1ea3", - "us-east-1": "snap-09a3ae3234cb7dbfe" + "us-east-2": "snap-0824995529fb96ba3", + "us-east-1": "snap-02205f6bb80eb7d0e" } ] }, @@ -397,13 +457,13 @@ "size": "large", "snapshots": [ { - "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-2-2", - "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-2-2" + "us-east-2": "arn:aws:rds:us-east-2:585036043680:snapshot:dcapt-crowd-5-2-3", + "us-east-1": "arn:aws:rds:us-east-1:585036043680:snapshot:dcapt-crowd-5-2-3" } ] } ], - "build_number": "1943" + "build_number": "1944" } ] } diff --git a/app/util/k8s/dcapt.tfvars b/app/util/k8s/dcapt.tfvars index 12c83da8a..31d8c8bee 100644 --- a/app/util/k8s/dcapt.tfvars +++ b/app/util/k8s/dcapt.tfvars @@ -38,11 +38,19 @@ confluence_replica_count = 1 bitbucket_replica_count = 1 crowd_replica_count = 1 +# (Optional) Domain name used by the ingress controller. +# The final ingress domain is a subdomain within this domain. (eg.: environment.domain.com) +# You can also provide a subdomain and the final ingress domain will be . +# When commented out, the ingress controller is not provisioned and the application is accessible over HTTP protocol (not HTTPS). +# +#domain = "" + ################################################################################ # Common Settings ################################################################################ -# Default AWS region for DCAPT snapshots. Supported regions are us-east-1, us-east-2, us-west-1, us-west-2. +# Default AWS region for DCAPT snapshots. Supported regions: us-east-2, us-east-1. +# If any other specific region is required, please contact support via community slack channel. region = "us-east-2" # List of IP ranges that are allowed to access the running applications over the World Wide Web. 
@@ -64,7 +72,7 @@ instance_disk_size = 200 # Cluster-autoscaler is installed in the EKS cluster that will manage the requested capacity # and increase/decrease the number of nodes accordingly. This ensures there is always enough resources for the workloads # and removes the need to change this value. -min_cluster_capacity = 1 +min_cluster_capacity = 2 max_cluster_capacity = 6 # By default, Ingress controller listens on 443 and 80. You can enable only http port 80 by @@ -72,13 +80,6 @@ max_cluster_capacity = 6 # This can be used in case you hit the limit which can happen if 30+ whitelist_cidrs are provided. #enable_https_ingress = false -# (Optional) Domain name used by the ingress controller. -# The final ingress domain is a subdomain within this domain. (eg.: environment.domain.com) -# You can also provide a subdomain and the final ingress domain will be . -# When commented out, the ingress controller is not provisioned and the application is accessible over HTTP protocol (not HTTPS). -# -#domain = "" - ################################################################################ # Execution Environment Settings ################################################################################ @@ -108,11 +109,11 @@ jira_image_repository = "atlassian/jira-software" # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions # Jira version -jira_version_tag = "9.12.1" +jira_version_tag = "9.12.4" # JSM version # ! REQUIRED for JSM ! -# jira_version_tag = "5.12.1" +# jira_version_tag = "5.12.4" # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large. jira_dataset_size = "large" @@ -161,12 +162,16 @@ jira_db_master_password = "Password1!" # are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. # jira_custom_values_file = "/path/to/values.yaml" +# A list of JVM arguments to be passed to the server. Defaults to an empty list. +# Example: ["-Dproperty=value", "-Dproperty1=value1"] +jira_additional_jvm_args = ["-Dupm.plugin.upload.enabled=true"] + ################################################################################ # Confluence Settings ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -confluence_version_tag = "8.5.4" +confluence_version_tag = "8.5.6" # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large confluence_dataset_size = "large" @@ -226,12 +231,15 @@ confluence_collaborative_editing_enabled = true # are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. # confluence_custom_values_file = "/path/to/values.yaml" +# A list of JVM arguments to be passed to the server. Defaults to an empty list. +# confluence_additional_jvm_args = ["-Dproperty=value", "-Dproperty1=value1"] + ################################################################################ # Bitbucket Settings ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -bitbucket_version_tag = "8.9.8" +bitbucket_version_tag = "8.9.10" # Dataset size. Used only when snapshots_json_file_path is defined. Defaults to large bitbucket_dataset_size = "large" @@ -309,12 +317,16 @@ bitbucket_db_master_password = "Password1!" 
# are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. # bitbucket_custom_values_file = "/path/to/values.yaml" +# A list of JVM arguments to be passed to the server. Defaults to an empty list. +# Example: ["-Dproperty=value", "-Dproperty1=value1"] +bitbucket_additional_jvm_args = ["-Dupm.plugin.upload.enabled=true"] + ################################################################################ # Crowd Settings ################################################################################ # Supported versions by DCAPT: https://github.com/atlassian/dc-app-performance-toolkit#supported-versions -crowd_version_tag = "5.2.2" +crowd_version_tag = "5.2.3" # Helm chart version of Crowd and Crowd agent instances. By default the latest version is installed. # crowd_helm_chart_version = "" @@ -367,6 +379,9 @@ crowd_db_master_password = "Password1!" # are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. # crowd_custom_values_file = "/path/to/values.yaml" +# A list of JVM arguments to be passed to the server. Defaults to an empty list. +# crowd_additional_jvm_args = ["-Dproperty=value", "-Dproperty1=value1"] + ################################################################################ # Bamboo Settings ################################################################################ @@ -374,8 +389,8 @@ crowd_db_master_password = "Password1!" # By default, latest supported by DCAPT version is set. # https://hub.docker.com/r/atlassian/bamboo/tags # https://hub.docker.com/r/atlassian/bamboo-agent-base/tags -bamboo_version_tag = "9.2.9" -bamboo_agent_version_tag = "9.2.9" +bamboo_version_tag = "9.2.11" +bamboo_agent_version_tag = "9.2.11" # Helm chart version of Bamboo and Bamboo agent instances # bamboo_helm_chart_version = "" @@ -436,7 +451,7 @@ bamboo_nfs_limits_memory = "2Gi" # Documentation can be found via: # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html # https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Storage.html#USER_PIOPS -bamboo_db_major_engine_version = "13" +bamboo_db_major_engine_version = "14" bamboo_db_instance_class = "db.t3.medium" bamboo_db_allocated_storage = 100 bamboo_db_iops = 1000 @@ -452,26 +467,26 @@ bamboo_dataset_url = "https://centaurus-datasets.s3.amazonaws.com/bamboo/dcapt-b # are passed to Helm chart. Variables from config.tfvars take precedence over those defined in a custom values.yaml. # bamboo_custom_values_file = "/path/to/values.yaml" +# A list of JVM arguments to be passed to the server. Defaults to an empty list. +# Example: ["-Dproperty=value", "-Dproperty1=value1"] +bamboo_additional_jvm_args = ["-Dupm.plugin.upload.enabled=true"] + ################################################################################ # Monitoring settings ################################################################################ # Deploy https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack Helm chart # to kube-monitoring namespace. Defaults to false. +# # monitoring_enabled = true # Create Grafana service of LoadBalancer type. Defaults to false. To restrict access to LB URL # the list of CIRDs from whitelist_cidr will be automatically applied. - +# +# To get Grafana URL see README.MD instructions. 
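+# Alternatively, Grafana can usually be reached without a LoadBalancer via a port-forward
+# (a sketch; <grafana-service-name> is a placeholder, take it from `kubectl get svc -n kube-monitoring`):
+# kubectl port-forward svc/<grafana-service-name> 3000:80 -n kube-monitoring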
+# # monitoring_grafana_expose_lb = true -# Command to select cluster: -# export ENVIRONMENT_NAME=your_environment_name -# aws eks update-kubeconfig --region us-east-2 --name atlas-$ENVIRONMENT_NAME-cluster - -# Command to get grafana ulr: kubectl get svc -n kube-monitoring | grep grafana -# Default grafana creds: admin/prom-operator - # Prometheus Persistent Volume Claim size. Defaults to 10Gi. # Out of the box EKS cluster is created with gp2 storage class which does not allow volume expansion, # i.e. if you expect a high volume of metrics or metrics with high cardinality it is recommended diff --git a/app/util/k8s/script-runner.yml b/app/util/k8s/script-runner.yml new file mode 100644 index 000000000..ec07cc814 --- /dev/null +++ b/app/util/k8s/script-runner.yml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Pod +metadata: + name: script-runner + labels: + app: script-runner +spec: + containers: + - name: script-runner + image: ubuntu:latest + command: + - /bin/sh + - -c + - | + echo "running below scripts" + apt update; + apt install postgresql-client curl wget -y; + /bin/sleep 3650d; + imagePullPolicy: IfNotPresent + restartPolicy: Always \ No newline at end of file diff --git a/app/util/k8s/terminate_cluster.py b/app/util/k8s/terminate_cluster.py index 0e389bf18..c83632661 100644 --- a/app/util/k8s/terminate_cluster.py +++ b/app/util/k8s/terminate_cluster.py @@ -653,7 +653,7 @@ def delete_ebs_volumes_by_id(aws_region, volumes): def get_clusters_to_terminate(): - clusters_to_terminate = [] + clusters_to_terminate = dict() for rgn in REGIONS: eks_client = boto3.client('eks', region_name=rgn) clusters = eks_client.list_clusters()['clusters'] @@ -670,7 +670,7 @@ def get_clusters_to_terminate(): logging.info(f"Cluster {cluster} is not EOL yet, skipping...") else: logging.info(f"Cluster {cluster} is EOL and should be deleted.") - clusters_to_terminate.append(cluster) + clusters_to_terminate[rgn]=cluster return clusters_to_terminate @@ -892,9 +892,9 @@ def delete_unused_volumes(): f"| Name tag {name}: skipping") -def delete_s3_bucket_tf_state(cluster_name): +def delete_s3_bucket_tf_state(cluster_name, aws_region): environment_name = retrieve_environment_name(cluster_name=cluster_name) - s3_client = boto3.client('s3') + s3_client = boto3.client('s3', region_name=aws_region) bucket_name_template = f'atl-dc-{environment_name}' response = s3_client.list_buckets() matching_buckets = [bucket['Name'] for bucket in response['Buckets'] if bucket_name_template in bucket['Name']] @@ -966,21 +966,21 @@ def main(): delete_open_identities_for_cluster(open_identities) remove_cluster_specific_roles_and_policies(cluster_name=args.cluster_name, aws_region=args.aws_region) delete_ebs_volumes_by_id(aws_region=args.aws_region, volumes=volumes) - delete_s3_bucket_tf_state(cluster_name=args.cluster_name) + delete_s3_bucket_tf_state(cluster_name=args.cluster_name, aws_region=args.aws_region) delete_dynamo_bucket_tf_state(cluster_name=args.cluster_name, aws_region=args.aws_region) return logging.info("--cluster_name parameter was not specified.") logging.info("Searching for clusters to remove.") clusters = get_clusters_to_terminate() - for cluster_name in clusters: + for region, cluster_name in clusters.items(): logging.info(f"Delete all resources and VPC for cluster {cluster_name}.") terminate_cluster(cluster_name=cluster_name) vpc_name = f'{cluster_name.replace("-cluster", "-vpc")}' terminate_vpc(vpc_name=vpc_name) terminate_open_id_providers(cluster_name=cluster_name) - 
delete_s3_bucket_tf_state(cluster_name=cluster_name) - delete_dynamo_bucket_tf_state(cluster_name=cluster_name, aws_region=args.aws_region) + delete_s3_bucket_tf_state(cluster_name=cluster_name, aws_region=region) + delete_dynamo_bucket_tf_state(cluster_name=cluster_name, aws_region=region) vpcs = get_vpcs_to_terminate() for vpc_name in vpcs: logging.info(f"Delete all resources for vpc {vpc_name}.") diff --git a/app/util/pre_run/environment_compliance_check.py b/app/util/pre_run/environment_compliance_check.py index c3153ac29..208da00e5 100644 --- a/app/util/pre_run/environment_compliance_check.py +++ b/app/util/pre_run/environment_compliance_check.py @@ -1,4 +1,6 @@ import sys +from selenium import webdriver +from packaging import version from util.common_util import get_latest_version, get_current_version, get_unsupported_version from util.analytics.application_info import ApplicationSelector @@ -6,10 +8,11 @@ from util.conf import JIRA_SETTINGS, CONFLUENCE_SETTINGS, BITBUCKET_SETTINGS, JSM_SETTINGS, BAMBOO_SETTINGS, \ CROWD_SETTINGS + APPS_SETTINGS = { "JIRA": JIRA_SETTINGS, "CONFLUENCE": CONFLUENCE_SETTINGS, - "BITBUCKET": BITBUCKET_SETTINGS, + "BITBUCKET": BITBUCKET_SETTINGS, "JSM": JSM_SETTINGS, "BAMBOO": BAMBOO_SETTINGS, "CROWD": CROWD_SETTINGS, @@ -22,26 +25,33 @@ def check_dcapt_version(): unsupported_version = get_unsupported_version() if latest_version is None: - print('Warning: failed to get the latest version') + print('WARNING: failed to get the latest version') elif unsupported_version is None: - print('Warning: failed to get the unsupported version') + print('WARNING: failed to get the unsupported version') elif current_version <= unsupported_version: - raise SystemExit(f"DCAPT version {current_version} is no longer supported. " - f"Consider an upgrade to the latest version: {latest_version}") + raise SystemExit( + f"DCAPT version {current_version} is no longer supported. " + f"Consider an upgrade to the latest version: {latest_version}") elif current_version < latest_version: - print(f"Warning: DCAPT version {current_version} is outdated. " + print(f"WARNING: DCAPT version {current_version} is outdated. " f"Consider upgrade to the latest version: {latest_version}.") elif current_version == latest_version: - print(f"Info: DCAPT version {current_version} is the latest.") + print(f"INFO: DCAPT version {current_version} is the latest.") else: - print(f"Info: DCAPT version {current_version} " + print(f"INFO: DCAPT version {current_version} " f"is ahead of the latest production version: {latest_version}.") -def validate_application_config(processors, app_name_upper, app_settings, min_defaults): +def validate_application_config( + processors, + app_name_upper, + app_settings, + min_defaults): is_jsm = app_name_upper == "JSM" if is_jsm: - current_concurrency = (app_settings.customers_concurrency, app_settings.agents_concurrency) + current_concurrency = ( + app_settings.customers_concurrency, + app_settings.agents_concurrency) else: current_concurrency = app_settings.concurrency @@ -50,30 +60,61 @@ def validate_application_config(processors, app_name_upper, app_settings, min_de (is_jsm and current_concurrency >= (min_defaults['customer_concurrency'], min_defaults['agent_concurrency'])) ): - # If the number of processors is less than 4, raise a SystemExit with a warning message. + # If the number of processors is less than 4, raise a SystemExit with a + # warning message. 
if processors < 4:
             raise SystemExit(
-                f"ERROR: You are trying to run an enterprise-scale load test with concurrency: {current_concurrency} against the "
-                f"instance with a weaker configuration than recommended.\n"
-                f"Kindly consider decreasing the `concurrency`/`total_actions_per_hour` in your {app_name_upper.lower()}.yml file if this development environment.\n"
+                f"ERROR: You are trying to run an enterprise-scale load test with concurrency: "
+                f"{current_concurrency} against the instance with a weaker configuration than recommended.\n"
+                f"Kindly consider decreasing the `concurrency`/`total_actions_per_hour` in your "
+                f"{app_name_upper.lower()}.yml file if this is a development environment.\n"
                 f"For enterprise-scale load make sure environment has a compliant configuration.\n"
-                f"To skip environment compliance check set `environment_compliance_check` variable to False in your {app_name_upper.lower()}.yml file.")
+                f"To skip environment compliance check set `environment_compliance_check` variable to False in your "
+                f"{app_name_upper.lower()}.yml file.")
+
+
+def validate_chromedriver_version(app_name, app_settings):
+    options = webdriver.ChromeOptions()
+    options.add_argument("--headless")
+    options.add_argument("--no-sandbox")
+    driver = webdriver.Chrome(options=options)
+    current_chrome_version = version.parse(driver.capabilities['browserVersion'])
+    if app_settings.chromedriver_version:
+        current_chromedriver_version = version.parse(app_settings.chromedriver_version)
+    else:
+        print(f"WARNING: Chromedriver version was not found in the {app_name}.yml. Skipping Chrome/chromedriver check.")
+        return
+    if current_chromedriver_version.major == current_chrome_version.major:
+        print(f"INFO: Chrome version: {current_chrome_version}")
+        print(f"INFO: Chromedriver version in {app_name}.yml: {current_chromedriver_version}")
+    else:
+        raise SystemExit(
+            f'ERROR: Your Chromedriver version {current_chromedriver_version} does '
+            f'not correspond to your Chrome browser version {current_chrome_version}.
'
+            f'Please change `chromedriver` version in your {app_name}.yml.')
 
 
 def analyze_application_configuration(app_name):
     app_name_upper = app_name.upper()
     app = ApplicationSelector(app_name).application
     processors = app.processors
+    app_settings = APPS_SETTINGS[app_name_upper]
 
     try:
         processors = int(processors)
+        min_defaults = MIN_DEFAULTS.get(app_name.lower())
+        validate_application_config(
+            processors,
+            app_name_upper,
+            app_settings,
+            min_defaults)
     except ValueError:
-        print("Warning: You are using a server instance for running enterprise-scale load tests.")
-        return
+        print("WARNING: Skipping processor count validation because the processors value is not a number.")
 
-    app_settings = APPS_SETTINGS[app_name_upper]
-    min_defaults = MIN_DEFAULTS.get(app_name.lower())
-    validate_application_config(processors, app_name_upper, app_settings, min_defaults)
+    if app_name.upper() == "CROWD":
+        print("INFO: Skipping Chromedriver check for Crowd.")
+    else:
+        validate_chromedriver_version(app_name, app_settings)
 
 
 def main():
@@ -83,15 +124,17 @@ def main():
     except IndexError:
         raise SystemExit("ERROR: execution_compliance_check.py expects application name as argument")
 
-    # TODO: Add a check for CROWD configuration once the feature with processors is implemented in the product
+    # TODO: Add a check for CROWD configuration once the feature with
+    # processors is implemented in the product
     if app_name.upper() != "CROWD":
         if app_name.upper() in APPS_SETTINGS:
             app_settings = APPS_SETTINGS[app_name.upper()]
             if app_settings.environment_compliance_check:
                 analyze_application_configuration(app_name)
         else:
-            raise SystemExit(f'ERROR: Unknown application: {app_name.upper()}. '
-                             f'Supported applications are {list(APPS_SETTINGS.keys())}')
+            raise SystemExit(
+                f'ERROR: Unknown application: {app_name.upper()}. '
+                f'Supported applications are {list(APPS_SETTINGS.keys())}')
 
 
 if __name__ == "__main__":
diff --git a/app/util/project_paths.py b/app/util/project_paths.py
index f977c238e..a439fab3b 100644
--- a/app/util/project_paths.py
+++ b/app/util/project_paths.py
@@ -121,8 +121,10 @@ def __get_default_test_actions():
 CONFLUENCE_USERS = __get_confluence_dataset('users.csv')
 CONFLUENCE_PAGES = __get_confluence_dataset('pages.csv')
 CONFLUENCE_BLOGS = __get_confluence_dataset('blogs.csv')
+CONFLUENCE_CQLS = __get_confluence_dataset('cqls.csv')
 CONFLUENCE_STATIC_CONTENT = __get_confluence_dataset('static-content/files_upload.csv')
 CONFLUENCE_CUSTOM_PAGES = __get_confluence_dataset('custom_pages.csv')
+CONFLUENCE_WORDS = __get_confluence_dataset('static-content/words.csv')
 
 BITBUCKET_YML = __get_bitbucket_yml()
 BITBUCKET_DATASETS = __get_bitbucket_datasets()
diff --git a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md
index b82dec709..20f06e1f2 100644
--- a/docs/dc-apps-performance-toolkit-user-guide-bamboo.md
+++ b/docs/dc-apps-performance-toolkit-user-guide-bamboo.md
@@ -4,7 +4,7 @@ platform: platform
 product: marketplace
 category: devguide
 subcategory: build
-date: "2024-01-05"
+date: "2024-03-19"
 ---
 
 # Data Center App Performance Toolkit User Guide For Bamboo
@@ -71,7 +71,7 @@ specifically for performance testing during the DC app review process.
    -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
    -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
    -v "/$PWD/logs:/data-center-terraform/logs" \
-   -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars
+   -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars
    ```
7.
Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bamboo`. 8. Wait for all remote agents to be started and connected. It can take up to 10 minutes. Agents can be checked in `Settings` > `Agents`. @@ -257,7 +257,7 @@ To receive performance baseline results **without** an app installed and **witho * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. * `standalone_extension` set to 0. App-specific actions are not needed for Run1 and Run2. * `standalone_extension_locust` set to 0. - * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` - `AWS_SESSION_TOKEN` (only for temporary creds) @@ -272,7 +272,7 @@ To receive performance baseline results **without** an app installed and **witho -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bamboo.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh bamboo.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/bamboo/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary @@ -303,7 +303,7 @@ To receive performance results with an app installed (still use master branch): -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bamboo.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh bamboo.yml ``` {{% note %}} @@ -322,7 +322,7 @@ To receive results for Bamboo DC **with app** and **with app-specific actions**: * `standalone_extension` set to non 0 and .jmx file has standalone actions implementation in case of JMeter app-specific actions. * `standalone_extension_locust` set to 1 and Locust app-specific actions code base applied in case of Locust app-specific actions. * [test_1_selenium_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/bamboo_ui.py#L51-L52) is uncommented and has implementation in case of Selenium app-specific actions. 
- * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` - `AWS_SESSION_TOKEN` (only for temporary creds) @@ -337,7 +337,7 @@ To receive results for Bamboo DC **with app** and **with app-specific actions**: -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bamboo.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh bamboo.yml ``` {{% note %}} diff --git a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md index 504a17f93..3b25658cc 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md +++ b/docs/dc-apps-performance-toolkit-user-guide-bitbucket.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2024-01-05" +date: "2024-03-19" --- # Data Center App Performance Toolkit User Guide For Bitbucket @@ -84,7 +84,7 @@ Below process describes how to install low-tier Bitbucket DC with "small" datase -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. @@ -248,7 +248,7 @@ Below process describes how to install enterprise-scale Bitbucket DC with "large -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/bitbucket`. @@ -308,7 +308,7 @@ To receive performance baseline results **without** an app installed: * App-specific actions code base is not needed for Run1 and Run2. * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. - * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` - `AWS_SESSION_TOKEN` (only for temporary creds) @@ -323,7 +323,7 @@ To receive performance baseline results **without** an app installed: -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh bitbucket.yml ``` 1. 
View the following main results of the run in the `dc-app-performance-toolkit/app/results/bitbucket/YY-MM-DD-hh-mm-ss` folder: @@ -354,7 +354,7 @@ To receive performance results with an app installed (still use master branch): -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh bitbucket.yml ``` {{% note %}} @@ -389,7 +389,7 @@ To receive scalability benchmark results for one-node Bitbucket DC **with** app- * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). * [test_1_selenium_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/bitbucket_ui.py#L67-L68) is uncommented and has implementation in case of Selenium app-specific actions. - * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` - `AWS_SESSION_TOKEN` (only for temporary creds) @@ -404,7 +404,7 @@ To receive scalability benchmark results for one-node Bitbucket DC **with** app- -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh bitbucket.yml ``` {{% note %}} @@ -415,7 +415,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~1 hour) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. -Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see current limit. [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} @@ -429,7 +429,7 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars ``` 1. 
Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash @@ -442,7 +442,7 @@ To receive scalability benchmark results for two-node Bitbucket DC **with** app- -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh bitbucket.yml ``` {{% note %}} @@ -453,7 +453,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~1 hour) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. -Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see current limit. [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} @@ -471,7 +471,7 @@ To receive scalability benchmark results for four-node Bitbucket DC with app-spe -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh bitbucket.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh bitbucket.yml ``` {{% note %}} @@ -483,7 +483,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o To generate a scalability report: -1. Edit the `./app/reports_generation/performance_profile.yml` file: +1. Edit the `./app/reports_generation/scale_profile.yml` file: - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). diff --git a/docs/dc-apps-performance-toolkit-user-guide-confluence.md b/docs/dc-apps-performance-toolkit-user-guide-confluence.md index 1f34efd3a..95bc395d0 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-confluence.md +++ b/docs/dc-apps-performance-toolkit-user-guide-confluence.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2024-01-05" +date: "2024-03-19" --- # Data Center App Performance Toolkit User Guide For Confluence @@ -83,7 +83,7 @@ Below process describes how to install low-tier Confluence DC with "small" datas -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. 
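If the console output with the product URL gets lost, the load balancer hostname can usually be recovered from the cluster itself. A minimal sketch, assuming the `atlas-<environment>-cluster` naming and the `atlassian` namespace used elsewhere in this toolkit:

``` bash
# Sketch: recover the product URL after installation (names follow this guide's conventions).
export ENVIRONMENT_NAME=your_environment_name
export REGION=us-east-2
aws eks update-kubeconfig --name atlas-$ENVIRONMENT_NAME-cluster --region $REGION
# The ELB hostname appears in the service/ingress listing of the product namespace.
kubectl get svc,ingress -n atlassian
```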
@@ -328,7 +328,7 @@ Below process describes how to install enterprise-scale Confluence DC with "larg -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars ``` 8. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/confluence`. @@ -384,11 +384,11 @@ This scenario helps to identify basic performance issues without a need to spin To receive performance baseline results **without** an app installed: 1. Before run: - * Make sure `jira.yml` and toolkit code base has default configuration from the `master` branch. + * Make sure `confluence.yml` and toolkit code base has default configuration from the `master` branch. * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. * `standalone_extension` set to 0. App-specific actions are not needed for Run1 and Run2. - * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` - `AWS_SESSION_TOKEN` (only for temporary creds) @@ -403,7 +403,7 @@ To receive performance baseline results **without** an app installed: -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh confluence.yml ``` 1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/confluence/YY-MM-DD-hh-mm-ss` folder: - `results_summary.log`: detailed run summary @@ -433,7 +433,7 @@ To receive performance results with an app installed (still use master branch): -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh confluence.yml ``` {{% note %}} @@ -479,7 +479,7 @@ To receive scalability benchmark results for one-node Confluence DC **with** app * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). * `standalone_extension` set to non 0 and .jmx file has standalone actions implementation in case of JMeter app-specific actions. * [test_1_selenium_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/confluence_ui.py#L47-L48) is uncommented and has implementation in case of Selenium app-specific actions. 
- * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` - `AWS_SESSION_TOKEN` (only for temporary creds) @@ -494,7 +494,7 @@ To receive scalability benchmark results for one-node Confluence DC **with** app -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh confluence.yml ``` {{% note %}} @@ -505,7 +505,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 4 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. -Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see current limit. [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. {{% /note %}} @@ -519,7 +519,7 @@ To receive scalability benchmark results for two-node Confluence DC **with** app -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars ``` 1. Navigate to `dc-app-performance-toolkit` folder and start tests execution: ``` bash @@ -532,7 +532,7 @@ To receive scalability benchmark results for two-node Confluence DC **with** app -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh confluence.yml ``` {{% note %}} @@ -543,7 +543,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o ##### Run 5 (~50 min) {{% note %}} Before scaling your DC make sure that AWS vCPU limit is not lower than needed number. Minimum recommended value is 50. -Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit. +Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see current limit. [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase limit if needed. 
{{% /note %}} @@ -561,7 +561,7 @@ To receive scalability benchmark results for four-node Confluence DC with app-sp -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh confluence.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh confluence.yml ``` {{% note %}} @@ -573,7 +573,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o To generate a scalability report: -1. Edit the `./app/reports_generation/performance_profile.yml` file: +1. Edit the `./app/reports_generation/scale_profile.yml` file: - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to results directory of [Run 3](#run3). - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 4](#run4). - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to results directory of [Run 5](#run5). diff --git a/docs/dc-apps-performance-toolkit-user-guide-crowd.md b/docs/dc-apps-performance-toolkit-user-guide-crowd.md index 4f2747322..c236981ec 100644 --- a/docs/dc-apps-performance-toolkit-user-guide-crowd.md +++ b/docs/dc-apps-performance-toolkit-user-guide-crowd.md @@ -4,7 +4,7 @@ platform: platform product: marketplace category: devguide subcategory: build -date: "2024-01-05" +date: "2024-03-19" --- # Data Center App Performance Toolkit User Guide For Crowd @@ -66,7 +66,7 @@ specifically for performance testing during the DC app review process. -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \ -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \ -v "/$PWD/logs:/data-center-terraform/logs" \ - -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars + -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars ``` 7. Copy product URL from the console output. Product url should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/crowd`. @@ -162,7 +162,7 @@ To receive performance baseline results **without** an app installed and **witho * Make sure `crowd.yml` and toolkit code base has default configuration from the `master` branch. No app-specific actions code applied. * Check load configuration parameters needed for enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration). * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in .yml file. - * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file: + * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file: - `AWS_ACCESS_KEY_ID` - `AWS_SECRET_ACCESS_KEY` - `AWS_SESSION_TOKEN` (only for temporary creds) @@ -177,7 +177,7 @@ To receive performance baseline results **without** an app installed and **witho -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \ -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \ -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \ - -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml + -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh crowd.yml ``` 1. 
1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/crowd/YY-MM-DD-hh-mm-ss` folder:
   - `results_summary.log`: detailed run summary
@@ -206,7 +206,7 @@ To receive performance results with an app installed (still use master branch):
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh crowd.yml
   ```
{{% note %}}
@@ -250,7 +250,7 @@ To receive scalability benchmark results for one-node Crowd DC **with** app-spec
   * Make sure `crowd.yml` and the toolkit code base contain your developed app-specific actions code.
   * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in the .yml file.
   * Check load configuration parameters needed for an enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration).
-   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file:
+   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file:
      - `AWS_ACCESS_KEY_ID`
      - `AWS_SECRET_ACCESS_KEY`
      - `AWS_SESSION_TOKEN` (only for temporary creds)
@@ -265,7 +265,7 @@ To receive scalability benchmark results for one-node Crowd DC **with** app-spec
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh crowd.yml
   ```
{{% note %}}
@@ -275,7 +275,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
##### Run 4 (~50 min)
{{% note %}}
Before scaling your DC, make sure that the AWS vCPU limit is not lower than the needed number. The minimum recommended value is 30.
-Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit.
+Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see the current limit.
The [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase the limit if needed.
{{% /note %}}
@@ -289,7 +289,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec
       -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
       -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
       -v "/$PWD/logs:/data-center-terraform/logs" \
-      -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars
+      -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars
   ```
1. Edit **run parameters** for the 2-node run. To do this, leave only the 2-node scenario parameters uncommented in the `crowd.yml` file.
   ```
@@ -316,7 +316,7 @@ To receive scalability benchmark results for two-node Crowd DC **with** app-spec
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh crowd.yml
   ```
{{% note %}}
@@ -327,7 +327,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
##### Run 5 (~50 min)
{{% note %}}
Before scaling your DC, make sure that the AWS vCPU limit is not lower than the needed number. The minimum recommended value is 30.
-Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit.
+Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see the current limit.
The [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase the limit if needed.
{{% /note %}}
@@ -359,7 +359,7 @@ To receive scalability benchmark results for four-node Crowd DC with app-specifi
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh crowd.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh crowd.yml
   ```
{{% note %}}
@@ -371,7 +371,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
To generate a scalability report:
-1. Edit the `./app/reports_generation/performance_profile.yml` file:
+1. Edit the `./app/reports_generation/scale_profile.yml` file:
    - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to the results directory of [Run 3](#run3).
    - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to the results directory of [Run 4](#run4).
    - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to the results directory of [Run 5](#run5).
diff --git a/docs/dc-apps-performance-toolkit-user-guide-jira.md b/docs/dc-apps-performance-toolkit-user-guide-jira.md
index 7c964cdfa..e3e4cc790 100644
--- a/docs/dc-apps-performance-toolkit-user-guide-jira.md
+++ b/docs/dc-apps-performance-toolkit-user-guide-jira.md
@@ -4,7 +4,7 @@ platform: platform
product: marketplace
category: devguide
subcategory: build
-date: "2024-01-05"
+date: "2024-03-19"
---
# Data Center App Performance Toolkit User Guide For Jira
@@ -95,7 +95,7 @@ Below process describes how to install low-tier Jira DC with "small" dataset inc
       -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \
       -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
       -v "/$PWD/logs:/data-center-terraform/logs" \
-      -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars
+      -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars
   ```
8. Copy the product URL from the console output. The product URL should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`.
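+Before moving on, it may help to confirm the copied URL actually responds. The following is a minimal sketch, not part of the official toolkit instructions; it uses the example hostname above, which should be replaced with your real product URL, and relies on Jira's `/status` endpoint:
+``` bash
+# Expect a JSON body such as {"state":"RUNNING"} once the node is up
+curl -s "http://a1234-54321.us-east-2.elb.amazonaws.com/jira/status"
+```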
@@ -356,7 +356,7 @@ Below process describes how to install enterprise-scale Jira DC with "large" dat
       -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
       -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
       -v "/$PWD/logs:/data-center-terraform/logs" \
-      -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars
+      -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars
   ```
8. Copy the product URL from the console output. The product URL should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`.
@@ -416,7 +416,7 @@ To receive performance baseline results **without** an app installed:
   * Check load configuration parameters needed for an enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration).
   * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in the .yml file.
   * `standalone_extension` set to 0. App-specific actions are not needed for Run 1 and Run 2.
-   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file:
+   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file:
      - `AWS_ACCESS_KEY_ID`
      - `AWS_SECRET_ACCESS_KEY`
      - `AWS_SESSION_TOKEN` (only for temporary creds)
@@ -431,7 +431,7 @@ To receive performance baseline results **without** an app installed:
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jira.yml
   ```
1. View the result files of the run in the local `dc-app-performance-toolkit/app/results/jira/YY-MM-DD-hh-mm-ss` folder:
   - `results_summary.log`: detailed run summary
@@ -483,7 +483,7 @@ Re-index information window is displayed on the **Indexing page**. If the window
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jira.yml
   ```
{{% note %}}
@@ -529,7 +529,7 @@ To receive scalability benchmark results for one-node Jira DC **with** app-speci
   * Check load configuration parameters needed for an enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration).
   * `standalone_extension` set to a non-zero value, and the .jmx file has a standalone actions implementation in the case of JMeter app-specific actions.
   * [test_1_selenium_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/jira_ui.py#L65-L66) is uncommented and has an implementation in the case of Selenium app-specific actions.
-   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file:
+   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file:
      - `AWS_ACCESS_KEY_ID`
      - `AWS_SECRET_ACCESS_KEY`
      - `AWS_SESSION_TOKEN` (only for temporary creds)
@@ -544,7 +544,7 @@ To receive scalability benchmark results for one-node Jira DC **with** app-speci
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jira.yml
   ```
{{% note %}}
@@ -555,7 +555,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
##### Run 4 (~50 min)
{{% note %}}
Before scaling your DC, make sure that the AWS vCPU limit is not lower than the needed number. The minimum recommended value is 50.
-Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit.
+Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see the current limit.
The [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase the limit if needed.
{{% /note %}}
@@ -569,7 +569,7 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci
       -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
       -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
       -v "/$PWD/logs:/data-center-terraform/logs" \
-      -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars
+      -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars
   ```
1. Navigate to the `dc-app-performance-toolkit` folder and start test execution:
   ``` bash
@@ -582,7 +582,7 @@ To receive scalability benchmark results for two-node Jira DC **with** app-speci
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jira.yml
   ```
{{% note %}}
@@ -593,7 +593,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
##### Run 5 (~50 min)
{{% note %}}
Before scaling your DC, make sure that the AWS vCPU limit is not lower than the needed number. The minimum recommended value is 50.
-Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit.
+Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see the current limit.
The [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jira/#ec2-cpu-limit) section has instructions on how to increase the limit if needed.
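+If the current quota turns out to be too low, an increase can also be requested from the command line rather than the console. A minimal sketch, not part of the official toolkit instructions, assuming the AWS CLI v2; the desired value of 64 and the region are purely illustrative:
+``` bash
+# Submit a quota increase request for Running On-Demand Standard instance vCPUs
+aws service-quotas request-service-quota-increase \
+  --service-code ec2 \
+  --quota-code L-1216C47A \
+  --desired-value 64 \
+  --region us-east-2
+```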
{{% /note %}}
@@ -611,7 +611,7 @@ To receive scalability benchmark results for four-node Jira DC with app-specific
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jira.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jira.yml
   ```
{{% note %}}
@@ -623,7 +623,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
To generate a scalability report:
-1. Edit the `./app/reports_generation/performance_profile.yml` file:
+1. Edit the `./app/reports_generation/scale_profile.yml` file:
    - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to the results directory of [Run 3](#run3).
    - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to the results directory of [Run 4](#run4).
    - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to the results directory of [Run 5](#run5).
diff --git a/docs/dc-apps-performance-toolkit-user-guide-jsm.md b/docs/dc-apps-performance-toolkit-user-guide-jsm.md
index 2243e4add..067e8ff9f 100644
--- a/docs/dc-apps-performance-toolkit-user-guide-jsm.md
+++ b/docs/dc-apps-performance-toolkit-user-guide-jsm.md
@@ -4,7 +4,7 @@ platform: platform
product: marketplace
category: devguide
subcategory: build
-date: "2024-01-05"
+date: "2024-03-19"
---
# Data Center App Performance Toolkit User Guide For Jira Service Management
@@ -97,7 +97,7 @@ Below process describes how to install low-tier Jira Service Management DC with
       -v "/$PWD/dcapt-small.tfvars:/data-center-terraform/conf.tfvars" \
       -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
       -v "/$PWD/logs:/data-center-terraform/logs" \
-      -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars
+      -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars
   ```
8. Copy the product URL from the console output. The product URL should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`.
@@ -392,7 +392,7 @@ Below process describes how to install enterprise-scale Jira Service Management
       -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
       -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
       -v "/$PWD/logs:/data-center-terraform/logs" \
-      -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars
+      -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars
   ```
8. Copy the product URL from the console output. The product URL should look like `http://a1234-54321.us-east-2.elb.amazonaws.com/jira`.
@@ -457,7 +457,7 @@ To receive performance baseline results **without** an app installed:
   * Check load configuration parameters needed for an enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration).
   * Check correctness of `application_hostname`, `application_protocol`, `application_port` and `application_postfix` in the .yml file.
   * `standalone_extension` set to 0. App-specific actions are not needed for Run 1 and Run 2.
-   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file:
+   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file:
      - `AWS_ACCESS_KEY_ID`
      - `AWS_SECRET_ACCESS_KEY`
      - `AWS_SESSION_TOKEN` (only for temporary creds)
@@ -472,7 +472,7 @@ To receive performance baseline results **without** an app installed:
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jsm.yml
   ```
1. View the following main results of the run in the `dc-app-performance-toolkit/app/results/jsm/YY-MM-DD-hh-mm-ss` folder:
@@ -527,7 +527,7 @@ Re-index information window is displayed on the **Indexing page**. If the window
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jsm.yml
   ```
{{% note %}}
@@ -572,7 +572,7 @@ To receive scalability benchmark results for one-node Jira Service Management DC
   * Check load configuration parameters needed for an enterprise-scale run: [Setting up load configuration for Enterprise-scale runs](#loadconfiguration).
   * `standalone_extension` set to a non-zero value, and the .jmx file has a standalone actions implementation in the case of JMeter app-specific actions.
   * [test_1_selenium_customer_custom_action](https://github.com/atlassian/dc-app-performance-toolkit/blob/master/app/selenium_ui/jsm_ui_customers.py#L43C9-L44) is uncommented and has an implementation in the case of Selenium app-specific actions.
-   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs/aws_envs` file:
+   * AWS access keys set in `./dc-app-performance-toolkit/app/util/k8s/aws_envs` file:
      - `AWS_ACCESS_KEY_ID`
      - `AWS_SECRET_ACCESS_KEY`
      - `AWS_SESSION_TOKEN` (only for temporary creds)
@@ -587,7 +587,7 @@ To receive scalability benchmark results for one-node Jira Service Management DC
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jsm.yml
   ```
{{% note %}}
@@ -598,7 +598,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
##### Run 4 (~50 min)
{{% note %}}
Before scaling your DC, make sure that the AWS vCPU limit is not lower than the needed number. The minimum recommended value is 50.
-Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit.
+Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see the current limit.
The [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jsm/#ec2-cpu-limit) section has instructions on how to increase the limit if needed.
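+Before and after scaling, the worker nodes backing the deployment can be inspected with `kubectl`. The following is a minimal sketch, not part of the official toolkit instructions; the cluster name pattern and region are assumptions, so replace them with the actual EKS cluster name and region of your environment:
+``` bash
+# Point kubectl at the environment's EKS cluster (cluster name is illustrative)
+aws eks update-kubeconfig --region us-east-2 --name atlas-$ENVIRONMENT_NAME-cluster
+# List the EC2 worker nodes currently registered with the cluster
+kubectl get nodes
+```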
{{% /note %}}
@@ -612,7 +612,7 @@ To receive scalability benchmark results for two-node Jira Service Management DC
       -v "/$PWD/dcapt.tfvars:/data-center-terraform/conf.tfvars" \
       -v "/$PWD/dcapt-snapshots.json:/data-center-terraform/dcapt-snapshots.json" \
       -v "/$PWD/logs:/data-center-terraform/logs" \
-      -it atlassianlabs/terraform:2.7.1 ./install.sh -c conf.tfvars
+      -it atlassianlabs/terraform:2.7.4 ./install.sh -c conf.tfvars
   ```
1. Navigate to the `dc-app-performance-toolkit` folder and start test execution:
   ``` bash
@@ -625,7 +625,7 @@ To receive scalability benchmark results for two-node Jira Service Management DC
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jsm.yml
   ```
{{% note %}}
@@ -636,7 +636,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
##### Run 5 (~50 min)
{{% note %}}
Before scaling your DC, make sure that the AWS vCPU limit is not lower than the needed number. The minimum recommended value is 50.
-Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-34B43A08) to see current limit.
+Use [AWS Service Quotas service](https://console.aws.amazon.com/servicequotas/home/services/ec2/quotas/L-1216C47A) to see the current limit.
The [EC2 CPU Limit](https://developer.atlassian.com/platform/marketplace/dc-apps-performance-toolkit-user-guide-jsm/#ec2-cpu-limit) section has instructions on how to increase the limit if needed.
{{% /note %}}
@@ -654,7 +654,7 @@ To receive scalability benchmark results for four-node Jira Service Management D
       -e ENVIRONMENT_NAME=$ENVIRONMENT_NAME \
       -v "/$PWD:/data-center-terraform/dc-app-performance-toolkit" \
       -v "/$PWD/app/util/k8s/bzt_on_pod.sh:/data-center-terraform/bzt_on_pod.sh" \
-      -it atlassianlabs/terraform:2.7.1 bash bzt_on_pod.sh jsm.yml
+      -it atlassianlabs/terraform:2.7.4 bash bzt_on_pod.sh jsm.yml
   ```
{{% note %}}
@@ -666,7 +666,7 @@ Review `results_summary.log` file under artifacts dir location. Make sure that o
To generate a scalability report:
-1. Edit the `./app/reports_generation/performance_profile.yml` file:
+1. Edit the `./app/reports_generation/scale_profile.yml` file:
    - For `runName: "1 Node"`, in the `relativePath` key, insert the relative path to the results directory of [Run 3](#run3).
    - For `runName: "2 Nodes"`, in the `relativePath` key, insert the relative path to the results directory of [Run 4](#run4).
    - For `runName: "4 Nodes"`, in the `relativePath` key, insert the relative path to the results directory of [Run 5](#run5).
diff --git a/requirements.txt b/requirements.txt
index 5996d2880..513d5cee2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,13 +1,13 @@
-matplotlib==3.8.2
-pandas==2.1.4
-numpy==1.26.3
-scipy==1.11.4
-pytest==7.4.4
-locust==2.20.1
-selenium==4.16.0
+matplotlib==3.8.3
+pandas==2.2.1
+numpy==1.26.4
+scipy==1.12.0
+pytest==8.0.2
+locust==2.24.0
+selenium==4.18.1
filelock==3.13.1
packaging==23.2
-prettytable==3.9.0
-bzt==1.16.27
-boto3==1.34.14
+prettytable==3.10.0
+bzt==1.16.29
+boto3==1.34.58
retry==0.9.2
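As a quick sanity check of the dependency bumps above, the pinned set can be installed into a throwaway virtual environment. A minimal sketch, assuming Python 3 and a POSIX shell:
``` bash
# Create an isolated environment and install the updated pins
python3 -m venv .venv && source .venv/bin/activate
pip install -r requirements.txt
# Spot-check a few of the bumped packages
python -c "import selenium, locust, boto3; print(selenium.__version__, locust.__version__, boto3.__version__)"
```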