diff --git a/CHANGELOG.md b/CHANGELOG.md
index 756ca18058d..584e0437ff1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,45 @@
 # Changelog

-## v1.159.0 (11/06/2024)
+## v1.160.0 (01/08/2024)
+
+### Features:
+- [#5657](https://github.com/telstra/open-kilda/pull/5657) GUI fix for gui switch-datatable and switch-detail components [**gui**]
+
+### Bug Fixes:
+- [#5704](https://github.com/telstra/open-kilda/pull/5704) fix QR code image display issue in 2FA screen (Issue: [#5703](https://github.com/telstra/open-kilda/issues/5703)) [**gui**]
+
+### Improvements:
+- [#5696](https://github.com/telstra/open-kilda/pull/5696) [TEST]: Regular Flow: New interaction approach(switches) [**tests**]
+- [#5697](https://github.com/telstra/open-kilda/pull/5697) [TEST]: 5694: Improvement: Error verification [**tests**]
+- [#5698](https://github.com/telstra/open-kilda/pull/5698) [TEST]: Improvement: Flaky test: Path: toString() throws NPE [**tests**]
+- [#5706](https://github.com/telstra/open-kilda/pull/5706) [TEST]: Improvement: Fix test discrepancies [**tests**]
+- [#5707](https://github.com/telstra/open-kilda/pull/5707) [TEST]: Improvement: Server42: Flakiness during parallel execution [**tests**]
+- [#5708](https://github.com/telstra/open-kilda/pull/5708) [TEST]: Improvement: Flaky tests(global timeout/single switchTriplet) [**tests**]
+- [#5579](https://github.com/telstra/open-kilda/pull/5579) [TEST]: 5569: Y-Flow: Server42: Adding TCs [**tests**]
+- [#5712](https://github.com/telstra/open-kilda/pull/5712) [TEST]: Improvement: Refactoring tests to be applicable to hardware env [**tests**]
+- [#5714](https://github.com/telstra/open-kilda/pull/5714) [TEST]: Improvement: Cleanup: ISL recovering: Flaky tests [**tests**]
+- [#5717](https://github.com/telstra/open-kilda/pull/5717) Add API to flush reroute for pending flow (fix response)
+- [#5674](https://github.com/telstra/open-kilda/pull/5674) [TEST]: Regular Flows: MaxLatency, ThrottlingReroute, PinnedFlow specs are refactored with new approach [**tests**]
+- [#5681](https://github.com/telstra/open-kilda/pull/5681) [TEST]: Regular Flows: MirrorEndpoints, IntentionalReroute, MultiReroute and FlowValidationNegative specs were refactored with new approach [**tests**]
+- [#5686](https://github.com/telstra/open-kilda/pull/5686) [TEST]: Regular Flows: PartialUpdate, QinQFlow, VxlanFlow specs were refactored with new approach [**tests**]
+- [#5687](https://github.com/telstra/open-kilda/pull/5687) [TEST]: Regular Flow: Autoreroute, Protected(v1/v2), Swap: New approach for flow interaction [**tests**]
+- [#5688](https://github.com/telstra/open-kilda/pull/5688) [TEST]: Cleanup: Updating calls to be with autocleanup [**tests**]
+- [#5690](https://github.com/telstra/open-kilda/pull/5690) [TEST]: Regular Flow: New interaction approach(additional specs) [**tests**]
+- [#5692](https://github.com/telstra/open-kilda/pull/5692) [TEST]: Refactor regular flows in Switches specs: pt1: [**tests**]
+- [#5693](https://github.com/telstra/open-kilda/pull/5693) [TEST]: Improvement: Y-Flow: Stats: Flaky test [**tests**]
+
+### Other changes:
+- [#5700](https://github.com/telstra/open-kilda/pull/5700) [TEST]: 5390: Fix some broken / flaky tests after refactoring [**tests**]
+- [#5702](https://github.com/telstra/open-kilda/pull/5702) Flush reroute queue with stuck flow
+
+For the complete list of changes, check out [the commit log](https://github.com/telstra/open-kilda/compare/v1.159.0...v1.160.0).
+ +### Affected Components: +gui + +--- + +## v1.159.0 (17/06/2024) ### Improvements: - [#5637](https://github.com/telstra/open-kilda/pull/5637) 5560: [TEST]: Eliminate cleanup sections part 1 (Issue: [#5560](https://github.com/telstra/open-kilda/issues/5560)) [**tests**] diff --git a/src-gui/.gitignore b/src-gui/.gitignore index 0aaad2e083a..e9bdd277e97 100644 --- a/src-gui/.gitignore +++ b/src-gui/.gitignore @@ -1,7 +1,7 @@ /logs/ tmp/ .deps/ - +.angular/ # Gradle specific @@ -30,6 +30,7 @@ src/main/webapp/ui/*.js src/main/webapp/ui/*.js.map src/main/webapp/ui/*.js.LICENSE.txt ui/node_modules +ui/.angular ui/package-lock.json src/main/webapp/ui/index.html src/main/webapp/ui/icon-*.*.png @@ -51,6 +52,8 @@ src/main/webapp/ui/runtime.js.map src/main/webapp/ui/scripts.js src/main/webapp/ui/scripts.js.map src/main/webapp/ui/styles.js +src/main/webapp/ui/styles.css +src/main/webapp/ui/assets-images-*.png src/main/webapp/ui/styles.js.map src/main/webapp/ui/vendor.js src/main/webapp/ui/vendor.js.map diff --git a/src-gui/Makefile b/src-gui/Makefile index 73b868f9e32..9ced6c68393 100644 --- a/src-gui/Makefile +++ b/src-gui/Makefile @@ -18,6 +18,7 @@ build/libs/${APP}.war: .deps/node .deps/resources [ -e src/main/webapp/lib/css/bootstrap.min.css ] || wget -O src/main/webapp/lib/css/bootstrap.min.css https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css [ -e src/main/webapp/lib/javascript/bootstrap.min.js ] || wget -O src/main/webapp/lib/javascript/bootstrap.min.js https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js [ -e src/main/webapp/lib/javascript/jquery-3.5.1.min.js ] || wget -O src/main/webapp/lib/javascript/jquery-3.5.1.min.js https://cdnjs.cloudflare.com/ajax/libs/jquery/3.5.1/jquery.js + [ -e src/main/webapp/lib/javascript/qrcode.min.js ] || wget -O src/main/webapp/lib/javascript/qrcode.min.js https://cdnjs.cloudflare.com/ajax/libs/qrcodejs/1.0.0/qrcode.min.js .deps: mkdir -p .deps diff --git a/src-gui/src/checkstyle/checkstyle.xml b/src-gui/src/checkstyle/checkstyle.xml index d95e487836b..db2a1c91588 100644 --- a/src-gui/src/checkstyle/checkstyle.xml +++ b/src-gui/src/checkstyle/checkstyle.xml @@ -232,7 +232,7 @@ + value="Override, Bean, Test, Given, When, Then, And, Before, BeforeEach, After, BeforeClass, AfterClass"/> diff --git a/src-gui/src/main/java/org/openkilda/config/DatabaseConfigurator.java b/src-gui/src/main/java/org/openkilda/config/DatabaseConfigurator.java index 29bf35f7f8f..4d4ed50883f 100644 --- a/src-gui/src/main/java/org/openkilda/config/DatabaseConfigurator.java +++ b/src-gui/src/main/java/org/openkilda/config/DatabaseConfigurator.java @@ -19,7 +19,7 @@ import org.openkilda.dao.repository.VersionRepository; import com.ibatis.common.jdbc.ScriptRunner; - +import org.apache.commons.collections4.CollectionUtils; import org.apache.log4j.Logger; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.core.io.Resource; @@ -32,9 +32,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; - import java.math.BigInteger; - import java.sql.Connection; import java.sql.Timestamp; import java.util.ArrayList; @@ -42,7 +40,6 @@ import java.util.Collections; import java.util.List; import java.util.stream.Collectors; - import javax.persistence.EntityManager; import javax.persistence.PersistenceContext; import javax.sql.DataSource; @@ -52,21 +49,21 @@ public class DatabaseConfigurator { @PersistenceContext private EntityManager entityManager; - + private static final Logger LOGGER = 
Logger.getLogger(DatabaseConfigurator.class); - + private static final String SCRIPT_FILE_PREFIX = "import-script_"; private static final String SCRIPT_FILE_SUFFIX = ".sql"; private static final String SCRIPT_LOCATION = "db"; - - private ResourceLoader resourceLoader; - private VersionRepository versionEntityRepository; + private final ResourceLoader resourceLoader; + + private final VersionRepository versionEntityRepository; + + private final DataSource dataSource; - private DataSource dataSource; - public DatabaseConfigurator(@Autowired final VersionRepository versionRepository, final DataSource dataSource, - final ResourceLoader resourceLoader, EntityManager em) { + final ResourceLoader resourceLoader, EntityManager em) { this.versionEntityRepository = versionRepository; this.dataSource = dataSource; this.resourceLoader = resourceLoader; @@ -81,16 +78,16 @@ public void init() { private void loadInitialData() { List versionNumberList = versionEntityRepository.findAllVersionNumber(); - - if (versionNumberList.size() == 0) { + + if (CollectionUtils.isEmpty(versionNumberList)) { try { List list = new ArrayList(); List newVersionList = new ArrayList(); List results = entityManager.createNativeQuery("SELECT v.version_id ," + "v.version_deployment_date, v.version_number FROM version v").getResultList(); - - for (Object[] perTestEntity :results) { + + for (Object[] perTestEntity : results) { VersionEntity versionEntity = new VersionEntity(); versionEntity.setVersionId(BigInteger.valueOf(Long.valueOf( (perTestEntity[0].toString()))).longValue()); @@ -108,25 +105,25 @@ private void loadInitialData() { } InputStream inputStream = null; try { - ClassLoader loader = getClass().getClassLoader(); + ClassLoader loader = getClass().getClassLoader(); PathMatchingResourcePatternResolver resolver = new PathMatchingResourcePatternResolver(loader); Resource[] resources = resolver.getResources("classpath:" + SCRIPT_LOCATION + "/*"); List dbScripts = Arrays.stream(resources) .map(Resource::getFilename) .collect(Collectors.toList()); ArrayList sortedList = new ArrayList(); - for (String scriptFile : dbScripts) { + for (String scriptFile : dbScripts) { String scriptFileName = scriptFile.replaceFirst("[.][^.]+$", ""); String[] scriptNumber = scriptFileName.split("_"); Long scriptVersionNumber = Long.valueOf(scriptNumber[1]); sortedList.add(scriptVersionNumber); } Collections.sort(sortedList); - for (Long scriptFileNumber : sortedList) { + for (Long scriptFileNumber : sortedList) { if (!versionNumberList.isEmpty()) { if (!versionNumberList.contains(scriptFileNumber)) { - inputStream = resourceLoader.getResource("classpath:" + SCRIPT_LOCATION + "/" - + SCRIPT_FILE_PREFIX + scriptFileNumber + SCRIPT_FILE_SUFFIX).getInputStream(); + inputStream = resourceLoader.getResource("classpath:" + SCRIPT_LOCATION + "/" + + SCRIPT_FILE_PREFIX + scriptFileNumber + SCRIPT_FILE_SUFFIX).getInputStream(); if (inputStream != null) { runScript(inputStream); } else { @@ -134,8 +131,8 @@ private void loadInitialData() { } } } else { - inputStream = resourceLoader.getResource("classpath:" + SCRIPT_LOCATION + "/" - + SCRIPT_FILE_PREFIX + scriptFileNumber + SCRIPT_FILE_SUFFIX).getInputStream(); + inputStream = resourceLoader.getResource("classpath:" + SCRIPT_LOCATION + "/" + + SCRIPT_FILE_PREFIX + scriptFileNumber + SCRIPT_FILE_SUFFIX).getInputStream(); if (inputStream != null) { runScript(inputStream); } else { diff --git a/src-gui/src/main/java/org/openkilda/controller/SwitchController.java 
b/src-gui/src/main/java/org/openkilda/controller/SwitchController.java index bca05945419..9a0a7f77a75 100644 --- a/src-gui/src/main/java/org/openkilda/controller/SwitchController.java +++ b/src-gui/src/main/java/org/openkilda/controller/SwitchController.java @@ -31,6 +31,7 @@ import org.openkilda.model.LinkParametersDto; import org.openkilda.model.LinkProps; import org.openkilda.model.LinkUnderMaintenanceDto; +import org.openkilda.model.SwitchDetail; import org.openkilda.model.SwitchFlowsInfoPerPort; import org.openkilda.model.SwitchInfo; import org.openkilda.model.SwitchLocation; @@ -61,7 +62,6 @@ * The Class SwitchController. * * @author sumitpal.singh - * */ @RestController @@ -88,42 +88,43 @@ public class SwitchController { @RequestMapping(value = "/list") @ResponseStatus(HttpStatus.OK) - @Permissions(values = { IConstants.Permission.MENU_SWITCHES }) - public @ResponseBody List getSwitchesDetail( - @RequestParam(value = "storeConfigurationStatus", required = false) - final boolean storeConfigurationStatus, - @RequestParam(value = "controller", required = false) - final boolean controller) { - return serviceSwitch.getSwitches(storeConfigurationStatus, controller); + @Permissions(values = {IConstants.Permission.MENU_SWITCHES}) + public @ResponseBody List getSwitchInfos( + @RequestParam(value = "storeConfigurationStatus", required = false) final boolean storeConfigurationStatus, + @RequestParam(value = "controller", required = false) final boolean controller) { + return serviceSwitch.getSwitchInfos(storeConfigurationStatus, controller); } /** * Gets the switches detail. * - * @return the switches detail + * @param switchId an optional parameter specifying the ID of the switch for + * which details are to be retrieved. If not provided, details + * for all switches will be retrieved. + * @param controller an optional boolean parameter indicating if the details + * should include information related to the controller. + * @return a {@link List} of {@link SwitchDetail} objects containing the details of the switches. */ - @RequestMapping(value = "/{switchId}") + @RequestMapping(value = "/details", method = RequestMethod.GET) @ResponseStatus(HttpStatus.OK) - @Permissions(values = { IConstants.Permission.MENU_SWITCHES }) - public @ResponseBody SwitchInfo getSwitchDetail(@PathVariable final String switchId, - @RequestParam(value = "controller", required = false) - final boolean controller) { - return serviceSwitch.getSwitch(switchId, controller); + @Permissions(values = {IConstants.Permission.MENU_SWITCHES}) + public @ResponseBody List getSwitchDetails( + @RequestParam(value = "switchId", required = false) final String switchId, + @RequestParam(value = "controller", required = false) final boolean controller) { + return serviceSwitch.getSwitchDetails(switchId, controller); } /** * Save or update switch name. * - * @param switchId the switch id - * @param switchName the switch name * @return the SwitchInfo */ @RequestMapping(value = "/name/{switchId}", method = RequestMethod.PATCH) @ResponseStatus(HttpStatus.OK) @Permissions(values = {IConstants.Permission.SW_SWITCH_UPDATE_NAME}) public @ResponseBody SwitchInfo saveOrUpdateSwitchName(@PathVariable final String switchId, - @RequestBody final String switchName) { + @RequestBody final String switchName) { if (StringUtil.isNullOrEmpty(switchName)) { throw new RequestValidationException(messageUtil.getAttributeNotNull("switch_name")); } @@ -134,15 +135,11 @@ public class SwitchController { /** * Gets the links detail. 
* - * @param srcSwitch the src switch - * @param srcPort the src port - * @param dstSwitch the dst switch - * @param dstPort the dst port * @return the links detail */ @RequestMapping(value = "/links", method = RequestMethod.GET) @ResponseStatus(HttpStatus.OK) - @Permissions(values = { IConstants.Permission.MENU_ISL }) + @Permissions(values = {IConstants.Permission.MENU_ISL}) public @ResponseBody List getLinksDetail(@RequestParam(value = "src_switch", required = false) final String srcSwitch, @RequestParam(value = "src_port", required = false) final String srcPort, @RequestParam(value = "dst_switch", @@ -154,13 +151,12 @@ public class SwitchController { /** * Delete Isl. * - * @param linkParametersDto - * the link parameters + * @param linkParametersDto the link parameters * @return the IslLinkInfo */ @RequestMapping(value = "/links", method = RequestMethod.DELETE) @ResponseStatus(HttpStatus.OK) - @Permissions(values = { IConstants.Permission.ISL_DELETE_LINK }) + @Permissions(values = {IConstants.Permission.ISL_DELETE_LINK}) public @ResponseBody List deleteIsl(@RequestBody final LinkParametersDto linkParametersDto) { Long userId = null; if (serverContext.getRequestContext() != null) { @@ -188,10 +184,6 @@ public class SwitchController { /** * Gets the link props. * - * @param srcSwitch the src switch - * @param srcPort the src port - * @param dstSwitch the dst switch - * @param dstPort the dst port * @return the link props */ @RequestMapping(path = "/link/props", method = RequestMethod.GET) @@ -199,18 +191,14 @@ public class SwitchController { public @ResponseBody LinkProps getLinkProps( @RequestParam(value = "src_switch", required = true) final String srcSwitch, @RequestParam(value = "src_port", required = true) final String srcPort, @RequestParam( - value = "dst_switch", required = true) final String dstSwitch, @RequestParam( - value = "dst_port", required = true) final String dstPort) { + value = "dst_switch", required = true) final String dstSwitch, @RequestParam( + value = "dst_port", required = true) final String dstPort) { return serviceSwitch.getLinkProps(srcSwitch, srcPort, dstSwitch, dstPort); } /** * Updates the link max bandwidth. * - * @param srcSwitch the src switch - * @param srcPort the src port - * @param dstSwitch the dst switch - * @param dstPort the dst port * @return the link max bandwidth */ @RequestMapping(path = "/link/bandwidth", method = RequestMethod.PATCH) @@ -219,9 +207,9 @@ public class SwitchController { public @ResponseBody LinkMaxBandwidth updateMaxBandwidth( @RequestParam(value = "src_switch", required = true) final String srcSwitch, @RequestParam(value = "src_port", required = true) final String srcPort, @RequestParam( - value = "dst_switch", required = true) final String dstSwitch, @RequestParam( - value = "dst_port", required = true) final String dstPort, - @RequestBody LinkMaxBandwidth linkMaxBandwidth) { + value = "dst_switch", required = true) final String dstSwitch, @RequestParam( + value = "dst_port", required = true) final String dstPort, + @RequestBody LinkMaxBandwidth linkMaxBandwidth) { activityLogger.log(ActivityType.UPDATE_ISL_BANDWIDTH, linkMaxBandwidth.toString()); return serviceSwitch.updateLinkBandwidth(srcSwitch, srcPort, dstSwitch, dstPort, linkMaxBandwidth); } @@ -229,8 +217,7 @@ public class SwitchController { /** * Update isl bfd-flag. 
* - * @param linkParametersDto - * the link parameters + * @param linkParametersDto the link parameters * @return the IslLinkInfo */ @RequestMapping(path = "/link/enable-bfd", method = RequestMethod.PATCH) @@ -262,7 +249,6 @@ public class SwitchController { /** * Get Switch Rules. * - * @param switchId the switch id * @return the switch rules */ @RequestMapping(path = "/{switchId}/rules", method = RequestMethod.GET) @@ -276,9 +262,6 @@ public class SwitchController { /** * Configure switch port. * - * @param configuration the configuration - * @param switchId the switch id - * @param port the port * @return the configuredPort */ @RequestMapping(path = "/{switchId}/{port}/config", method = RequestMethod.PUT) @@ -294,14 +277,13 @@ public class SwitchController { /** * Gets Port flows. * - * @param switchId the switch id - * @param port the port * @return the customers detail * @throws AccessDeniedException the access denied exception */ @RequestMapping(path = "/{switchId}/flows", method = RequestMethod.GET) @ResponseStatus(HttpStatus.OK) - public @ResponseBody ResponseEntity> getPortFlows(@PathVariable final String switchId, + public @ResponseBody ResponseEntity> getPortFlows( + @PathVariable final String switchId, @RequestParam(value = "port", required = false) final String port, @RequestParam(value = "inventory", required = false) final boolean inventory) throws AccessDeniedException { return serviceSwitch.getPortFlows(switchId, port, inventory); @@ -310,7 +292,6 @@ public class SwitchController { /** * Gets flows by ports. * - * @param switchId the switch id * @param portIds the ports list * @return the customers detail */ @@ -325,10 +306,6 @@ public class SwitchController { /** * Gets Isl flows. * - * @param srcSwitch the source switch - * @param srcPort the source port - * @param dstSwitch the destination switch - * @param dstPort the destination port * @return isl flows exists in the system. */ @RequestMapping(value = "/links/flows", method = RequestMethod.GET) @@ -356,8 +333,6 @@ public class SwitchController { /** * Switch under maintenance. * - * @param switchId the switch id - * @param switchInfo the switch info * @return the SwitchInfo */ @RequestMapping(value = "/under-maintenance/{switchId}", method = RequestMethod.POST) @@ -373,15 +348,14 @@ public class SwitchController { /** * Delete Switch. 
* - * @param switchId the switch id * @param force the force delete * @return the SwitchInfo */ @RequestMapping(value = "/{switchId}", method = RequestMethod.DELETE) @ResponseStatus(HttpStatus.OK) - @Permissions(values = { IConstants.Permission.SW_SWITCH_DELETE }) + @Permissions(values = {IConstants.Permission.SW_SWITCH_DELETE}) public @ResponseBody SwitchInfo deleteSwitch(@PathVariable final String switchId, - @RequestParam(name = "force", required = false) boolean force) { + @RequestParam(name = "force", required = false) boolean force) { activityLogger.log(ActivityType.DELETE_SWITCH, switchId); return serviceSwitch.deleteSwitch(switchId, force); } @@ -393,8 +367,9 @@ public class SwitchController { */ @RequestMapping(value = "/{switchId}/ports/{port}/properties", method = RequestMethod.PUT) @ResponseStatus(HttpStatus.OK) - @Permissions(values = { IConstants.Permission.SW_UPDATE_PORT_PROPERTIES }) - public @ResponseBody SwitchProperty updateSwitchPortProperty(@PathVariable final String switchId, + @Permissions(values = {IConstants.Permission.SW_UPDATE_PORT_PROPERTIES}) + public @ResponseBody SwitchProperty updateSwitchPortProperty( + @PathVariable final String switchId, @PathVariable final String port, @RequestBody SwitchProperty switchProperty) { activityLogger.log(ActivityType.UPDATE_SW_PORT_PROPERTIES, switchId); return serviceSwitch.updateSwitchPortProperty(switchId, port, switchProperty); @@ -404,12 +379,11 @@ public class SwitchController { * Gets the switch port properties. * * @return the SwitchProperty - * */ @RequestMapping(value = "/{switchId}/ports/{port}/properties", method = RequestMethod.GET) @ResponseStatus(HttpStatus.OK) public @ResponseBody SwitchProperty getSwitchPortProperty(@PathVariable final String switchId, - @PathVariable final String port) { + @PathVariable final String port) { return serviceSwitch.getSwitchPortProperty(switchId, port); } @@ -417,15 +391,13 @@ public class SwitchController { /** * Updates switch location. * - * @param switchId the switch id - * @param switchLocation the switch location * @return the SwitchInfo */ @RequestMapping(path = "/location/{switchId}", method = RequestMethod.PATCH) @ResponseStatus(HttpStatus.OK) @Permissions(values = {IConstants.Permission.SW_SWITCH_LOCATION_UPDATE}) public @ResponseBody SwitchInfo updateSwitchLocation(@PathVariable final String switchId, - @RequestBody final SwitchLocation switchLocation) { + @RequestBody final SwitchLocation switchLocation) { activityLogger.log(ActivityType.UPDATE_SWITCH_LOCATION, switchId); return serviceSwitch.updateSwitchLocation(switchId, switchLocation); } @@ -433,10 +405,6 @@ public class SwitchController { /** * Gets the link BFD properties. * - * @param srcSwitch the src switch - * @param srcPort the src port - * @param dstSwitch the dst switch - * @param dstPort the dst port * @return the link Bfd properties */ @RequestMapping(value = "/links/bfd", method = RequestMethod.GET) @@ -452,10 +420,6 @@ public class SwitchController { /** * Updates the link BFD properties. * - * @param srcSwitch the src switch - * @param srcPort the src port - * @param dstSwitch the dst switch - * @param dstPort the dst port * @return the link Bfd properties */ @RequestMapping(value = "/links/bfd", method = RequestMethod.PUT) @@ -474,11 +438,6 @@ public class SwitchController { /** * Delete link BFD. 
- * - * @param srcSwitch the src switch - * @param srcPort the src port - * @param dstSwitch the dst switch - * @param dstPort the dst port */ @RequestMapping(value = "/links/bfd", method = RequestMethod.DELETE) @ResponseStatus(HttpStatus.OK) @@ -492,35 +451,35 @@ public class SwitchController { + srcPort + "\nDst_SW_" + dstSwitch + "\nDst_PORT_" + dstPort); return serviceSwitch.deleteLinkBfd(srcSwitch, srcPort, dstSwitch, dstPort); } - + /** * Creates switch logical port. * - * @param switchId the switch id * @param switchLogicalPort the switch logical port * @return the SwitchLogicalPort */ @RequestMapping(value = "/{switch_id}/lags", method = RequestMethod.POST) @ResponseStatus(HttpStatus.OK) @Permissions(values = IConstants.Permission.SW_CREATE_LOGICAL_PORT) - public @ResponseBody SwitchLogicalPort createLogicalPort(@PathVariable("switch_id") final String switchId, + public @ResponseBody SwitchLogicalPort createLogicalPort( + @PathVariable("switch_id") final String switchId, @RequestBody(required = true) SwitchLogicalPort switchLogicalPort) { activityLogger.log(ActivityType.CREATE_LOGICAL_PORT, "SW_" + switchId + ", " + "PORT_" + switchLogicalPort.getPortNumbers()); return serviceSwitch.createLogicalPort(switchId, switchLogicalPort); } - + /** * Deletes switch logical port. * - * @param switchId the switch id * @param logicalPortNumber the switch logical port * @return the SwitchLogicalPort */ @RequestMapping(value = "/{switch_id}/lags/{logical_port_number}", method = RequestMethod.DELETE) @ResponseStatus(HttpStatus.OK) @Permissions(values = IConstants.Permission.SW_DELETE_LOGICAL_PORT) - public @ResponseBody SwitchLogicalPort deleteLogicalPort(@PathVariable("switch_id") final String switchId, + public @ResponseBody SwitchLogicalPort deleteLogicalPort( + @PathVariable("switch_id") final String switchId, @PathVariable("logical_port_number") final String logicalPortNumber) { activityLogger.log(ActivityType.DELETE_LOGICAL_PORT, "SW_" + switchId + ", " + "L-PORT_" + logicalPortNumber); diff --git a/src-gui/src/main/java/org/openkilda/integration/service/SwitchIntegrationService.java b/src-gui/src/main/java/org/openkilda/integration/service/SwitchIntegrationService.java index 622214ab20c..5ba468a1f59 100644 --- a/src-gui/src/main/java/org/openkilda/integration/service/SwitchIntegrationService.java +++ b/src-gui/src/main/java/org/openkilda/integration/service/SwitchIntegrationService.java @@ -60,6 +60,7 @@ import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpMethod; +import org.springframework.http.HttpStatus; import org.springframework.stereotype.Service; import org.springframework.util.StringUtils; import org.springframework.web.util.UriComponentsBuilder; @@ -135,6 +136,9 @@ public SwitchInfo getSwitchesById(final String switchId) { applicationProperties.getNbBaseUrl() + IConstants.NorthBoundUrl.GET_SWITCH.replace("{switch_id}", switchId), HttpMethod.GET, "", "", applicationService.getAuthHeader()); + if (response.getStatusLine().getStatusCode() == HttpStatus.NOT_FOUND.value()) { + return null; + } if (RestClientManager.isValidResponse(response)) { SwitchInfo switchInfo = restClientManager.getResponse(response, SwitchInfo.class); if (switchInfo != null) { @@ -879,11 +883,11 @@ public String deleteLinkBfd(String srcSwitch, String srcPort, String dstSwitch, } return null; } - + /** * Creates switch logical port. 
* - * @param switchId the switch id + * @param switchId the switch id * @param switchLogicalPort the switch logical port * @return the SwitchLogicalPort */ @@ -891,8 +895,8 @@ public SwitchLogicalPort createLogicalPort(String switchId, SwitchLogicalPort sw try { HttpResponse response = restClientManager.invoke( applicationProperties.getNbBaseUrl() + IConstants.NorthBoundUrl - .SWITCH_LOGICAL_PORT.replace("{switch_id}", switchId), - HttpMethod.POST, objectMapper.writeValueAsString(switchLogicalPort), "application/json", + .SWITCH_LOGICAL_PORT.replace("{switch_id}", switchId), + HttpMethod.POST, objectMapper.writeValueAsString(switchLogicalPort), "application/json", applicationService.getAuthHeader()); if (RestClientManager.isValidResponse(response)) { return restClientManager.getResponse(response, SwitchLogicalPort.class); @@ -906,11 +910,11 @@ public SwitchLogicalPort createLogicalPort(String switchId, SwitchLogicalPort sw } return null; } - + /** * Deletes switch logical port. * - * @param switchId the switch id + * @param switchId the switch id * @param logicalPortNumber the switch logical port number * @return the SwitchLogicalPort */ @@ -918,9 +922,9 @@ public SwitchLogicalPort deleteLogicalPort(String switchId, String logicalPortNu try { HttpResponse response = restClientManager.invoke( applicationProperties.getNbBaseUrl() + IConstants.NorthBoundUrl - .DELETE_SWITCH_LOGICAL_PORT.replace("{switch_id}", switchId) - .replace("{logical_port_number}", logicalPortNumber), - HttpMethod.DELETE, "", "application/json", + .DELETE_SWITCH_LOGICAL_PORT.replace("{switch_id}", switchId) + .replace("{logical_port_number}", logicalPortNumber), + HttpMethod.DELETE, "", "application/json", applicationService.getAuthHeader()); if (RestClientManager.isValidResponse(response)) { return restClientManager.getResponse(response, SwitchLogicalPort.class); @@ -931,7 +935,7 @@ public SwitchLogicalPort deleteLogicalPort(String switchId, String logicalPortNu } return null; } - + /** * Gets switch logical ports. * @@ -942,8 +946,8 @@ public List getLogicalPort(String switchId) { try { HttpResponse response = restClientManager.invoke( applicationProperties.getNbBaseUrl() + IConstants.NorthBoundUrl - .SWITCH_LOGICAL_PORT.replace("{switch_id}", switchId), - HttpMethod.GET, "", "application/json", + .SWITCH_LOGICAL_PORT.replace("{switch_id}", switchId), + HttpMethod.GET, "", "application/json", applicationService.getAuthHeader()); if (RestClientManager.isValidResponse(response)) { return restClientManager.getResponseList(response, SwitchLogicalPort.class); diff --git a/src-gui/src/main/java/org/openkilda/integration/source/store/SwitchStoreService.java b/src-gui/src/main/java/org/openkilda/integration/source/store/SwitchInventoryService.java similarity index 98% rename from src-gui/src/main/java/org/openkilda/integration/source/store/SwitchStoreService.java rename to src-gui/src/main/java/org/openkilda/integration/source/store/SwitchInventoryService.java index c78fa32570a..535561668c6 100644 --- a/src-gui/src/main/java/org/openkilda/integration/source/store/SwitchStoreService.java +++ b/src-gui/src/main/java/org/openkilda/integration/source/store/SwitchInventoryService.java @@ -39,7 +39,7 @@ import java.util.Map; @Service -public class SwitchStoreService { +public class SwitchInventoryService { private static final Logger LOGGER = LoggerFactory.getLogger(FlowStoreService.class); @@ -65,7 +65,7 @@ public List getSwitches() { throw new StoreIntegrationException(e); } } - + /** * Gets the customer flows. 
* @@ -88,7 +88,7 @@ public List getPortFlows(String switchId, String port) { throw new StoreIntegrationException(e); } } - + /** * Gets the switch port. * @@ -99,12 +99,12 @@ public List getSwitchPort(final String switchId) { try { UrlDto urlDto = storeService.getUrl(StoreType.SWITCH_STORE, Url.GET_SWITCH_PORTS); AuthConfigDto authDto = authService.getAuth(StoreType.SWITCH_STORE); - + Map params = new HashMap(); params.put(RequestParams.SWITCH_ID.getName(), switchId); urlDto.setParams(params); - + IAuthService authService = IAuthService.getService(authDto.getAuthType()); return authService.getResponseList(urlDto, authDto, Port.class); } catch (Exception e) { @@ -112,8 +112,8 @@ public List getSwitchPort(final String switchId) { throw new StoreIntegrationException(e); } } - - + + /** * Gets the switch with params. * @@ -122,7 +122,7 @@ public List getSwitchPort(final String switchId) { public InventorySwitch getSwitch(String switchId) { try { UrlDto urlDto = storeService.getUrl(StoreType.SWITCH_STORE, Url.GET_SWITCH); - + Map params = new HashMap(); params.put(RequestParams.SWITCH_ID.getName(), switchId); diff --git a/src-gui/src/main/java/org/openkilda/integration/source/store/dto/InventorySwitch.java b/src-gui/src/main/java/org/openkilda/integration/source/store/dto/InventorySwitch.java index d2d7e0db820..38daf49b678 100644 --- a/src-gui/src/main/java/org/openkilda/integration/source/store/dto/InventorySwitch.java +++ b/src-gui/src/main/java/org/openkilda/integration/source/store/dto/InventorySwitch.java @@ -17,66 +17,37 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; -import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonPropertyOrder; - +import com.fasterxml.jackson.databind.PropertyNamingStrategies; +import com.fasterxml.jackson.databind.annotation.JsonNaming; import lombok.Data; import java.io.Serializable; @JsonInclude(JsonInclude.Include.NON_NULL) +@JsonNaming(PropertyNamingStrategies.KebabCaseStrategy.class) @JsonIgnoreProperties(ignoreUnknown = true) -@JsonPropertyOrder({ "uuid", "switch-id", "description", "state", "name", "common-name", "pop-location", "model", +@JsonPropertyOrder({"uuid", "switch-id", "description", "name", "common-name", "pop-location", "model", "status", "rack-location", "reference-url", "serial-number", "rack-number", "software-version", - "manufacturer" }) + "manufacturer"}) @Data public class InventorySwitch implements Serializable { private static final long serialVersionUID = 8314830507932457367L; - @JsonProperty("uuid") private String uuid; - - @JsonProperty("switch-id") private String switchId; - - @JsonProperty("description") private String description; - - @JsonProperty("state") - private String state; - - @JsonProperty("name") private String name; - - @JsonProperty("common-name") private String commonName; - - @JsonProperty("pop-location") private PopLocation popLocation; - - @JsonProperty("model") private String model; - - @JsonProperty("status") private String status; - - @JsonProperty("rack-location") private String rackLocation; - - @JsonProperty("reference-url") private String referenceUrl; - - @JsonProperty("serial-number") private String serialNumber; - - @JsonProperty("rack-number") private String rackNumber; - - @JsonProperty("software-version") private String softwareVersion; - - @JsonProperty("manufacturer") private String manufacturer; - + private boolean hasDuplicate; // the value for this field is dynamically generated } 
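The InventorySwitch change above swaps the per-field `@JsonProperty("kebab-case")` annotations for a single class-level `@JsonNaming(PropertyNamingStrategies.KebabCaseStrategy.class)`, which derives the same kebab-case JSON names from the camelCase fields. A minimal sketch of that mapping (the demo class and values are illustrative, not part of the PR; `PropertyNamingStrategies` requires Jackson 2.12+):

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategies;
import com.fasterxml.jackson.databind.annotation.JsonNaming;

public class KebabCaseNamingDemo {

    // Stand-in for InventorySwitch: the class-level strategy replaces the removed
    // per-field annotations such as @JsonProperty("switch-id").
    @JsonNaming(PropertyNamingStrategies.KebabCaseStrategy.class)
    static class SwitchDto {
        public String switchId;      // serialized as "switch-id"
        public String commonName;    // serialized as "common-name"
        public String serialNumber;  // serialized as "serial-number"
    }

    public static void main(String[] args) throws Exception {
        SwitchDto dto = new SwitchDto();
        dto.switchId = "00:00:00:00:00:00:00:01";
        dto.commonName = "edge-switch-1";
        dto.serialNumber = "SN-0001";
        // Prints: {"switch-id":"00:00:00:00:00:00:00:01","common-name":"edge-switch-1","serial-number":"SN-0001"}
        System.out.println(new ObjectMapper().writeValueAsString(dto));
    }
}
```

Deserialization follows the same mapping, so payloads using the kebab-case keys listed in the `@JsonPropertyOrder` above still bind to the same fields.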
diff --git a/src-gui/src/main/java/org/openkilda/model/SwitchDetail.java b/src-gui/src/main/java/org/openkilda/model/SwitchDetail.java new file mode 100644 index 00000000000..a7d7040f636 --- /dev/null +++ b/src-gui/src/main/java/org/openkilda/model/SwitchDetail.java @@ -0,0 +1,54 @@ +/* Copyright 2024 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.openkilda.model; + +import org.openkilda.integration.source.store.dto.InventorySwitch; + +import com.fasterxml.jackson.annotation.JsonIgnoreProperties; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.databind.PropertyNamingStrategies; +import com.fasterxml.jackson.databind.annotation.JsonNaming; +import lombok.Builder; +import lombok.Data; + +import java.io.Serializable; + +@JsonInclude(JsonInclude.Include.NON_NULL) +@JsonNaming(PropertyNamingStrategies.SnakeCaseStrategy.class) +@JsonIgnoreProperties(ignoreUnknown = true) +@Builder +@Data +public class SwitchDetail implements Serializable { + + private static final long serialVersionUID = 6763064864461521069L; + private String switchId; + private String name; + private String address; + private String port; + private String hostname; + private String description; + private String state; + private boolean underMaintenance; + private String ofVersion; + private String manufacturer; + private String hardware; + private String software; + private String serialNumber; + private String pop; + private Location location; + private InventorySwitch inventorySwitchDetail; + +} diff --git a/src-gui/src/main/java/org/openkilda/service/StatsService.java b/src-gui/src/main/java/org/openkilda/service/StatsService.java index b09b7cb6225..50151b20359 100644 --- a/src-gui/src/main/java/org/openkilda/service/StatsService.java +++ b/src-gui/src/main/java/org/openkilda/service/StatsService.java @@ -32,7 +32,7 @@ import org.openkilda.integration.model.response.IslPath; import org.openkilda.integration.service.StatsIntegrationService; import org.openkilda.integration.service.SwitchIntegrationService; -import org.openkilda.integration.source.store.SwitchStoreService; +import org.openkilda.integration.source.store.SwitchInventoryService; import org.openkilda.integration.source.store.dto.Port; import org.openkilda.model.PortDiscrepancy; import org.openkilda.model.PortInfo; @@ -78,7 +78,7 @@ public class StatsService { private final StoreService storeService; - private final SwitchStoreService switchStoreService; + private final SwitchInventoryService switchInventoryService; private final ApplicationProperties appProps; @@ -86,12 +86,12 @@ public class StatsService { public StatsService(StatsIntegrationService statsIntegrationService, SwitchIntegrationService switchIntegrationService, StoreService storeService, - SwitchStoreService switchStoreService, + SwitchInventoryService switchInventoryService, ApplicationProperties appProps) { this.statsIntegrationService = statsIntegrationService; this.switchIntegrationService = 
switchIntegrationService; this.storeService = storeService; - this.switchStoreService = switchStoreService; + this.switchInventoryService = switchInventoryService; this.appProps = appProps; } @@ -208,7 +208,7 @@ public List getSwitchPortsStats(VictoriaStatsReq statsReq) throws Inva List portStats = getSwitchPortStatsReport(victoriaDataList, switchId); if (!storeService.getSwitchStoreConfig().getUrls().isEmpty()) { try { - List inventoryPorts = switchStoreService + List inventoryPorts = switchInventoryService .getSwitchPort(IoUtil.switchCodeToSwitchId(switchId)); processInventoryPorts(portStats, inventoryPorts); } catch (Exception ex) { diff --git a/src-gui/src/main/java/org/openkilda/service/SwitchService.java b/src-gui/src/main/java/org/openkilda/service/SwitchService.java index 5603b7e247e..f476086637a 100644 --- a/src-gui/src/main/java/org/openkilda/service/SwitchService.java +++ b/src-gui/src/main/java/org/openkilda/service/SwitchService.java @@ -1,4 +1,4 @@ -/* Copyright 2018 Telstra Open Source +/* Copyright 2024 Telstra Open Source * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -26,7 +26,7 @@ import org.openkilda.integration.model.PortConfiguration; import org.openkilda.integration.model.response.ConfiguredPort; import org.openkilda.integration.service.SwitchIntegrationService; -import org.openkilda.integration.source.store.SwitchStoreService; +import org.openkilda.integration.source.store.SwitchInventoryService; import org.openkilda.integration.source.store.dto.InventorySwitch; import org.openkilda.model.BfdProperties; import org.openkilda.model.FlowInfo; @@ -37,6 +37,7 @@ import org.openkilda.model.LinkProps; import org.openkilda.model.LinkUnderMaintenanceDto; import org.openkilda.model.PopLocation; +import org.openkilda.model.SwitchDetail; import org.openkilda.model.SwitchDiscrepancy; import org.openkilda.model.SwitchFlowsInfoPerPort; import org.openkilda.model.SwitchInfo; @@ -49,6 +50,9 @@ import org.openkilda.store.service.StoreService; import org.openkilda.utility.StringUtil; +import org.apache.commons.collections4.CollectionUtils; +import org.apache.commons.collections4.MapUtils; +import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.HttpStatus; @@ -60,8 +64,15 @@ import java.nio.file.AccessDeniedException; import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.Date; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.stream.Collectors; /** * The Class ServiceSwitchImpl. @@ -77,7 +88,7 @@ public class SwitchService { private SwitchIntegrationService switchIntegrationService; @Autowired - private SwitchStoreService switchStoreService; + private SwitchInventoryService switchInventoryService; @Autowired private UserService userService; @@ -91,13 +102,40 @@ public class SwitchService { @Autowired private ApplicationSettingService applicationSettingService; + + /** + * Retrieves the details of switches based on the provided switch ID and controller flag. + * + *

This method first fetches switch information from the controller. If {@code switchId} is blank, + * it retrieves all switches from the controller. If {@code switchId} is provided, it fetches the + * specific switch by its ID. Depending on the {@code controller} flag, it either returns the controller + * switch details or continues to fetch switch inventory information. If the logged-in user has the + * necessary permissions, it retrieves inventory switch details from the store.

+ * + * @param switchId The ID of the switch to retrieve details for. If blank, retrieves all switches. + * @param controller A flag indicating whether to return only controller switch details. + * @return A list of {@link SwitchDetail} objects containing the switch details. + * @throws IntegrationException if an error occurs while fetching switch details from the controller. + */ + public List getSwitchDetails(final String switchId, boolean controller) + throws IntegrationException { + List controllerSwitches = getControllerSwitches(switchId); + + if (controller) { + return adaptToSwitchDetailsAndGet(controllerSwitches, null); + } + + List inventorySwitches = getInventorySwitches(switchId); + return adaptToSwitchDetailsAndGet(controllerSwitches, inventorySwitches); + } + /** * get All SwitchList. * * @return SwitchRelationData the switch info * @throws IntegrationException the integration exception */ - public List getSwitches(boolean storeConfigurationStatus, boolean controller) + public List getSwitchInfos(boolean storeConfigurationStatus, boolean controller) throws IntegrationException { List switchInfo = switchIntegrationService.getSwitches(); if (switchInfo == null) { @@ -108,8 +146,7 @@ public List getSwitches(boolean storeConfigurationStatus, boolean co UserInfo userInfo = userService.getLoggedInUserInfo(); if (userInfo.getPermissions().contains(IConstants.Permission.SW_SWITCH_INVENTORY)) { if (storeConfigurationStatus && storeService.getSwitchStoreConfig().getUrls().size() > 0) { - List inventorySwitches = new ArrayList(); - inventorySwitches = switchStoreService.getSwitches(); + List inventorySwitches = switchInventoryService.getSwitches(); processInventorySwitch(switchInfo, inventorySwitches); } } @@ -120,72 +157,111 @@ public List getSwitches(boolean storeConfigurationStatus, boolean co return switchInfo; } - /** - * get All SwitchList. 
- * - * @return SwitchRelationData the switch info - * @throws IntegrationException the integration exception - */ - public SwitchInfo getSwitch(final String switchId, boolean controller) throws IntegrationException { - SwitchInfo switchInfo = null; - try { - switchInfo = switchIntegrationService.getSwitchesById(switchId); - } catch (InvalidResponseException ex) { - LOGGER.error("Error occurred while retrieving switches from controller", ex); - } - if (!controller) { + private List getControllerSwitches(String switchId) { + List controllerSwitches; + //get switch controller info + if (StringUtils.isBlank(switchId)) { + controllerSwitches = Optional.ofNullable(switchIntegrationService.getSwitches()).orElse(new ArrayList<>()); + } else { + SwitchInfo sw = null; try { - UserInfo userInfo = userService.getLoggedInUserInfo(); - if (userInfo.getPermissions().contains(IConstants.Permission.SW_SWITCH_INVENTORY)) { - if (storeService.getSwitchStoreConfig().getUrls().size() > 0) { - InventorySwitch inventorySwitch = switchStoreService.getSwitch(switchId); - if (inventorySwitch.getSwitchId() != null) { - switchInfo = processInventorySwitch(switchInfo, inventorySwitch); - } else { - SwitchDiscrepancy discrepancy = new SwitchDiscrepancy(); - discrepancy.setControllerDiscrepancy(false); - discrepancy.setStatus(true); - discrepancy.setInventoryDiscrepancy(true); - - SwitchStatus switchState = new SwitchStatus(); - switchState.setControllerStatus(switchInfo.getState()); - discrepancy.setStatusValue(switchState); - switchInfo.setDiscrepancy(discrepancy); - } + sw = switchIntegrationService.getSwitchesById(switchId); + } catch (InvalidResponseException e) { + LOGGER.error("Error occurred while retrieving switches from controller", e); + } + controllerSwitches = sw == null ? 
new ArrayList<>() : Collections.singletonList(sw); + } + return controllerSwitches; + } + + private List getInventorySwitches(String switchId) { + List inventorySwitches = null; + try { + UserInfo userInfo = userService.getLoggedInUserInfo(); + if (userInfo.getPermissions().contains(IConstants.Permission.SW_SWITCH_INVENTORY) + && MapUtils.isNotEmpty(storeService.getSwitchStoreConfig().getUrls())) { + if (switchId == null) { + inventorySwitches = switchInventoryService.getSwitches(); + } else { + InventorySwitch inventorySwitch = switchInventoryService.getSwitch(switchId); + if (inventorySwitch != null && inventorySwitch.getSwitchId() != null) { + inventorySwitches = Collections.singletonList(inventorySwitch); + } else { + inventorySwitches = Collections.emptyList(); } } - } catch (Exception ex) { - LOGGER.error("Error occurred while retrieving switches from store", ex); - throw new StoreIntegrationException("Error occurred while retrieving switches from store"); } + } catch (AccessDeniedException | StoreIntegrationException e) { + LOGGER.error("Error occurred while retrieving switches from store", e); + inventorySwitches = Collections.emptyList(); } - return switchInfo; + return inventorySwitches; } - private SwitchInfo processInventorySwitch(SwitchInfo switchInfo, final InventorySwitch inventorySwitch) { - if (switchInfo == null) { - switchInfo = new SwitchInfo(); - toSwitchInfo(switchInfo, inventorySwitch); - } else { - appendInventoryInfo(switchInfo, inventorySwitch); - SwitchDiscrepancy discrepancy = new SwitchDiscrepancy(); - discrepancy.setControllerDiscrepancy(false); - if (!((switchInfo.getState()).equalsIgnoreCase(inventorySwitch.getStatus()))) { - discrepancy.setStatus(true); - discrepancy.setInventoryDiscrepancy(true); - - SwitchStatus switchState = new SwitchStatus(); - switchState.setControllerStatus(switchInfo.getState()); - switchState.setInventoryStatus(inventorySwitch.getStatus()); - discrepancy.setStatusValue(switchState); + private List adaptToSwitchDetailsAndGet(List controllerSwitches, + List inventorySwitches) { + //inventory switches could contain switches with the same switchId. 
+ final Map> switchIdInUpperCaseToInventorySwitch = new HashMap<>(); + + if (CollectionUtils.isNotEmpty(inventorySwitches)) { + inventorySwitches = inventorySwitches.stream().filter(Objects::nonNull).collect(Collectors.toList()); + inventorySwitches.forEach(inventorySw -> { + String switchIdUpperCase = inventorySw.getSwitchId().toUpperCase(); + if (switchIdInUpperCaseToInventorySwitch.containsKey(switchIdUpperCase)) { + inventorySw.setHasDuplicate(true); + switchIdInUpperCaseToInventorySwitch.get(switchIdUpperCase).get(0).setHasDuplicate(true); + switchIdInUpperCaseToInventorySwitch.get(switchIdUpperCase).add(inventorySw); + } else { + List list = new ArrayList<>(); + list.add(inventorySw); + switchIdInUpperCaseToInventorySwitch.put(switchIdUpperCase, list); + } + }); + } - switchInfo.setDiscrepancy(discrepancy); + List switchDetailsResult; + + switchDetailsResult = controllerSwitches.stream().map(contrlSw -> { + String switchIdUpperCase = contrlSw.getSwitchId().toUpperCase(); + SwitchDetail.SwitchDetailBuilder swDetailBuilder = SwitchDetail.builder() + .switchId(contrlSw.getSwitchId()) + .name(contrlSw.getName()) + .address(contrlSw.getAddress()) + .port(contrlSw.getPort()) + .hostname(contrlSw.getHostname()) + .description(contrlSw.getDescription()) + .state(contrlSw.getState()) + .underMaintenance(contrlSw.isUnderMaintenance()) + .ofVersion(contrlSw.getOfVersion()) + .manufacturer(contrlSw.getManufacturer()) + .hardware(contrlSw.getHardware()) + .software(contrlSw.getSoftware()) + .serialNumber(contrlSw.getSerialNumber()) + .pop(contrlSw.getPop()) + .location(contrlSw.getLocation()); + //add inventory info if exists, null otherwise + List invSwitches = switchIdInUpperCaseToInventorySwitch.get(switchIdUpperCase); + if (CollectionUtils.isNotEmpty(invSwitches)) { + swDetailBuilder.inventorySwitchDetail(invSwitches.remove(invSwitches.size() - 1)); + if (invSwitches.isEmpty()) { + switchIdInUpperCaseToInventorySwitch.remove(switchIdUpperCase); + } } - switchInfo.setInventorySwitch(true); + return swDetailBuilder.build(); + }).collect(Collectors.toList()); + + //add inventory switches that does not exist in controller list. + if (!switchIdInUpperCaseToInventorySwitch.isEmpty()) { + switchDetailsResult.addAll(switchIdInUpperCaseToInventorySwitch.values() + .stream().flatMap(Collection::stream) + .map(inventorySw -> + SwitchDetail.builder().inventorySwitchDetail(inventorySw).build()) + .collect(Collectors.toList())); } - return switchInfo; + return switchDetailsResult; } + /** * Process inventory switch. 
* @@ -416,7 +492,7 @@ public ResponseEntity> getPortFlows(String switchId, String port, boolea List customers = new ArrayList(); if (storeService.getSwitchStoreConfig().getUrls().size() > 0) { try { - customers = switchStoreService.getPortFlows(switchId, port); + customers = switchInventoryService.getPortFlows(switchId, port); } catch (Exception ex) { LOGGER.warn("Error occured while retreiving port flows.", ex); throw new StoreIntegrationException("Error occured while retreiving port flows.", ex); @@ -526,8 +602,7 @@ public List updateLinkMaintenanceStatus(LinkUnderMaintenanceDto lin */ public List deleteLink(LinkParametersDto linkParametersDto, Long userId) { if (userService.validateOtp(userId, linkParametersDto.getCode())) { - List status = switchIntegrationService.deleteLink(linkParametersDto); - return status; + return switchIntegrationService.deleteLink(linkParametersDto); } else { return null; } @@ -545,9 +620,8 @@ public List deleteLink(LinkParametersDto linkParametersDto, Long us */ public LinkMaxBandwidth updateLinkBandwidth(String srcSwitch, String srcPort, String dstSwitch, String dstPort, LinkMaxBandwidth linkMaxBandwidth) { - LinkMaxBandwidth linkBandwidthUpdate = switchIntegrationService + return switchIntegrationService .updateLinkBandwidth(srcSwitch, srcPort, dstSwitch, dstPort, linkMaxBandwidth); - return linkBandwidthUpdate; } /** @@ -557,8 +631,7 @@ public LinkMaxBandwidth updateLinkBandwidth(String srcSwitch, String srcPort, St * @return the SwitchInfo */ public SwitchInfo deleteSwitch(String switchId, boolean force) { - SwitchInfo switchInfo = switchIntegrationService.deleteSwitch(switchId, force); - return switchInfo; + return switchIntegrationService.deleteSwitch(switchId, force); } @@ -569,8 +642,7 @@ public SwitchInfo deleteSwitch(String switchId, boolean force) { * @return the IslLinkInfo */ public List updateLinkBfdFlag(LinkParametersDto linkParametersDto) { - List islLinkInfo = switchIntegrationService.updateIslBfdFlag(linkParametersDto); - return islLinkInfo; + return switchIntegrationService.updateIslBfdFlag(linkParametersDto); } /** @@ -582,9 +654,8 @@ public List updateLinkBfdFlag(LinkParametersDto linkParametersDto) * @return the SwitchProperty */ public SwitchProperty updateSwitchPortProperty(String switchId, String port, SwitchProperty switchProperty) { - SwitchProperty switchPropertyInfo = switchIntegrationService + return switchIntegrationService .updateSwitchPortProperty(switchId, port, switchProperty); - return switchPropertyInfo; } /** @@ -595,15 +666,15 @@ public SwitchProperty updateSwitchPortProperty(String switchId, String port, Swi * @return the SwitchProperty */ public SwitchProperty getSwitchPortProperty(String switchId, String port) { - SwitchProperty switchProperty = switchIntegrationService.getSwitchPortProperty(switchId, port); - return switchProperty; + return switchIntegrationService.getSwitchPortProperty(switchId, port); } public SwitchInfo updateSwitchLocation(String switchId, SwitchLocation switchLocation) { return switchIntegrationService.updateSwitchLocation(switchId, switchLocation); } - public LinkBfdProperties getLinkBfdProperties(String srcSwitch, String srcPort, String dstSwitch, String dstPort) { + public LinkBfdProperties getLinkBfdProperties(String srcSwitch, String srcPort, String dstSwitch, String + dstPort) { return switchIntegrationService.getLinkBfdProperties(srcSwitch, srcPort, dstSwitch, dstPort); } @@ -615,13 +686,12 @@ public LinkBfdProperties updateLinkBfdProperties(String srcSwitch, String srcPor public String 
deleteLinkBfd(String srcSwitch, String srcPort, String dstSwitch, String dstPort) { return switchIntegrationService.deleteLinkBfd(srcSwitch, srcPort, dstSwitch, dstPort); } - + public SwitchLogicalPort createLogicalPort(String switchId, SwitchLogicalPort switchLogicalPort) { return switchIntegrationService.createLogicalPort(switchId, switchLogicalPort); } - + public SwitchLogicalPort deleteLogicalPort(String switchId, String logicalPortNumber) { return switchIntegrationService.deleteLogicalPort(switchId, logicalPortNumber); } - } diff --git a/src-gui/src/main/java/org/openkilda/store/common/helper/PrepareRequest.java b/src-gui/src/main/java/org/openkilda/store/common/helper/PrepareRequest.java index 502ee288d22..ddac915ee23 100644 --- a/src-gui/src/main/java/org/openkilda/store/common/helper/PrepareRequest.java +++ b/src-gui/src/main/java/org/openkilda/store/common/helper/PrepareRequest.java @@ -1,4 +1,4 @@ -/* Copyright 2018 Telstra Open Source +/* Copyright 2024 Telstra Open Source * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/src-gui/src/main/resources/application.properties.example b/src-gui/src/main/resources/application.properties.example index eb0298649a1..a2883db0595 100644 --- a/src-gui/src/main/resources/application.properties.example +++ b/src-gui/src/main/resources/application.properties.example @@ -59,9 +59,11 @@ spring.velocity.enabled = false log.duration=7 error.code.prefix=100 -spring.mvc.view.prefix= /views/ +spring.mvc.view.prefix= /META-INF/jsp/ spring.mvc.view.suffix= .jsp +spring.resources.static-locations=file:src/main/webapp/ + status.cron.time=43200000 #Currently working for 2FA app name diff --git a/src-gui/src/main/webapp/META-INF/jsp/twofa.jsp b/src-gui/src/main/webapp/META-INF/jsp/twofa.jsp index 3d115517621..929d9b628c9 100644 --- a/src-gui/src/main/webapp/META-INF/jsp/twofa.jsp +++ b/src-gui/src/main/webapp/META-INF/jsp/twofa.jsp @@ -6,6 +6,7 @@ OPEN KILDA + @@ -37,7 +38,7 @@

Scan below code with the authenticator app on your mobile device and follow instructions to verify your identity.

- QrCode +
@@ -78,10 +79,14 @@ var username= $('#uname').text(); var appName = $('#appName').text(); $('#qr_code_text').text(key); - $('#qrCode').attr('src', 'https://chart.googleapis.com/chart?chs=200x200&cht=qr&chl=200x200&chld=M|0&cht=qr&' - + 'chl=' + encodeURIComponent('otpauth://totp/' + encodeURIComponent(appName) + ":" + username + '?secret=' + key + '&issuer=' + encodeURIComponent(appName))); - }); + new QRCode(document.getElementById("QrCode"), { + text: 'otpauth://totp/' + encodeURIComponent(appName) + ":" + username + '?secret=' + key + '&issuer=' + encodeURIComponent(appName), + width: 200, + height: 200 + }); + }); + function focusNextInput(){ var container = document.getElementsByClassName("otp-container")[0]; var input = container.getElementsByTagName('input')[0]; diff --git a/src-gui/src/test/java/org/openkilda/controller/SwitchControllerTest.java b/src-gui/src/test/java/org/openkilda/controller/SwitchControllerTest.java index 7da76c8b580..9407087a6c8 100644 --- a/src-gui/src/test/java/org/openkilda/controller/SwitchControllerTest.java +++ b/src-gui/src/test/java/org/openkilda/controller/SwitchControllerTest.java @@ -1,9 +1,9 @@ -/* Copyright 2018 Telstra Open Source +/* Copyright 2024 Telstra Open Source * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * + * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software @@ -17,11 +17,13 @@ import static org.junit.Assert.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.when; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.delete; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; import org.openkilda.auth.context.ServerContext; @@ -30,6 +32,7 @@ import org.openkilda.model.IslLinkInfo; import org.openkilda.model.LinkParametersDto; import org.openkilda.model.LinkUnderMaintenanceDto; +import org.openkilda.model.SwitchDetail; import org.openkilda.model.SwitchInfo; import org.openkilda.model.SwitchProperty; import org.openkilda.service.SwitchService; @@ -40,9 +43,8 @@ import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.ObjectMapper; - -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.InjectMocks; import org.mockito.Mock; @@ -69,7 +71,7 @@ public class SwitchControllerTest { @Mock private SwitchService serviceSwitch; - + @Mock private ServerContext serverContext; @@ -82,9 +84,9 @@ public class SwitchControllerTest { @SuppressWarnings("unused") private static final String switchUuid = "00:00:00:00:00:00:00:01"; - @Before + @BeforeEach public void init() { - MockitoAnnotations.initMocks(this); + MockitoAnnotations.openMocks(this); mockMvc = MockMvcBuilders.standaloneSetup(switchController).build(); RequestContext requestContext = new 
RequestContext(); requestContext.setUserId(TestIslMock.USER_ID); @@ -95,15 +97,16 @@ public void init() { public void testGetAllSwitchesDetails() { List switchesInfo = new ArrayList<>(); try { - when(serviceSwitch.getSwitches(false, TestFlowMock.CONTROLLER_FLAG)).thenReturn(switchesInfo); + when(serviceSwitch.getSwitchInfos(false, TestFlowMock.CONTROLLER_FLAG)) + .thenReturn(switchesInfo); mockMvc.perform(get("/api/switch/list").contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()); assertTrue(true); } catch (Exception exception) { - assertTrue(false); + fail(); } } - + @Test public void testGetAllSwitchFlows() { ResponseEntity> responseList = new ResponseEntity>(HttpStatus.OK); @@ -114,16 +117,32 @@ public void testGetAllSwitchFlows() { .contentType(MediaType.APPLICATION_JSON)).andExpect(status().isOk()); assertTrue(true); } catch (Exception e) { - assertTrue(false); + fail(); } } - + @Test - public void testGetSwitchById() throws Exception { - SwitchInfo switchInfo = new SwitchInfo(); - when(serviceSwitch.getSwitch(TestSwitchMock.SWITCH_ID, TestFlowMock.CONTROLLER_FLAG)).thenReturn(switchInfo); - mockMvc.perform(get("/api/switch/{switchId}", TestSwitchMock.SWITCH_ID).contentType(MediaType.APPLICATION_JSON)) - .andExpect(status().isOk()); + public void getSwitchDetails() throws Exception { + List switchDetails = new ArrayList<>(); + switchDetails.add(SwitchDetail.builder().build()); + when(serviceSwitch.getSwitchDetails(TestSwitchMock.SWITCH_ID, TestFlowMock.CONTROLLER_FLAG)) + .thenReturn(switchDetails); + mockMvc.perform(get("/api/switch/details") + .param("switchId", TestSwitchMock.SWITCH_ID) + .param("controller", "true") + .contentType(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(content().string("[{\"under_maintenance\":false}]")); + + + when(serviceSwitch.getSwitchDetails(null, TestFlowMock.CONTROLLER_FLAG)) + .thenReturn(switchDetails); + mockMvc.perform(get("/api/switch/details") + .param("controller", "true") + .contentType(MediaType.APPLICATION_JSON)) + .andExpect(status().isOk()) + .andExpect(content().string("[{\"under_maintenance\":false}]")); + assertTrue(true); } @@ -131,40 +150,41 @@ public void testGetSwitchById() throws Exception { public void testGetSwichLinkDetails() { List switchesInfo = new ArrayList<>(); try { - when(serviceSwitch.getSwitches(false, TestFlowMock.CONTROLLER_FLAG)).thenReturn(switchesInfo); + when(serviceSwitch.getSwitchInfos(false, TestFlowMock.CONTROLLER_FLAG)) + .thenReturn(switchesInfo); mockMvc.perform(get("/api/switch/links").contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()); assertTrue(true); } catch (Exception e) { - assertTrue(false); + fail(); } } - + @Test public void testDeleteSwitch() { SwitchInfo switcheInfo = new SwitchInfo(); try { when(serviceSwitch.deleteSwitch(TestSwitchMock.SWITCH_ID, false)).thenReturn(switcheInfo); mockMvc.perform(delete("/api/switch/{switchId}", TestSwitchMock.SWITCH_ID, true) - .contentType(MediaType.APPLICATION_JSON)) + .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()); assertTrue(true); } catch (Exception e) { System.out.println("exception: " + e.getMessage()); - assertTrue(false); + fail(); } } - + @Test public void testDeleteSwitchIfSwitchIdNotPassed() { try { mockMvc.perform(delete("/api/switch/{switchId}", TestSwitchMock.SWITCH_ID_NULL, true) - .contentType(MediaType.APPLICATION_JSON)) + .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isNotFound()); assertTrue(true); } catch (Exception e) { 
System.out.println("exception: " + e.getMessage()); - assertTrue(false); + fail(); } } @@ -176,32 +196,32 @@ public void testSwitchMaintenance() throws Exception { switchInfo.setEvacuate(TestSwitchMock.EVACUATE_STATUS); String inputJson = mapToJson(switchInfo); mockMvc.perform( - post("/api/switch/under-maintenance/{switchId}", TestSwitchMock.SWITCH_ID) - .content(inputJson).contentType(MediaType.APPLICATION_JSON)).andExpect( - status().isOk()); + post("/api/switch/under-maintenance/{switchId}", TestSwitchMock.SWITCH_ID) + .content(inputJson).contentType(MediaType.APPLICATION_JSON)).andExpect( + status().isOk()); } - + @Test public void testIslMaintenance() throws Exception { LinkUnderMaintenanceDto linkUnderMaintenanceDto = new LinkUnderMaintenanceDto(); - linkUnderMaintenanceDto.setSrcPort(Integer.valueOf(TestIslMock.SRC_PORT)); + linkUnderMaintenanceDto.setSrcPort(TestIslMock.SRC_PORT); linkUnderMaintenanceDto.setSrcSwitch(TestIslMock.SRC_SWITCH); - linkUnderMaintenanceDto.setDstPort(Integer.valueOf(TestIslMock.DST_PORT)); + linkUnderMaintenanceDto.setDstPort(TestIslMock.DST_PORT); linkUnderMaintenanceDto.setDstSwitch(TestIslMock.DST_SWITCH); linkUnderMaintenanceDto.setUnderMaintenance(TestIslMock.UNDER_MAINTENANE_FLAG); String inputJson = mapToJson(linkUnderMaintenanceDto); MvcResult mvcResult = mockMvc.perform(MockMvcRequestBuilders - .patch("/api/switch/links/under-maintenance") - .content(inputJson) - .contentType(MediaType.APPLICATION_JSON) - .accept(MediaType.APPLICATION_JSON)) + .patch("/api/switch/links/under-maintenance") + .content(inputJson) + .contentType(MediaType.APPLICATION_JSON) + .accept(MediaType.APPLICATION_JSON)) .andReturn(); int status = mvcResult.getResponse().getStatus(); assertEquals(200, status); } - + @Test public void testDeleteIsl() throws Exception { LinkParametersDto linkParametersDto = new LinkParametersDto(); @@ -214,15 +234,15 @@ public void testDeleteIsl() throws Exception { String inputJson = mapToJson(linkParametersDto); when(serviceSwitch.deleteLink(linkParametersDto, TestIslMock.USER_ID)).thenReturn(islLinkInfo); MvcResult mvcResult = mockMvc.perform(MockMvcRequestBuilders - .delete("/api/switch/links") - .content(inputJson) - .contentType(MediaType.APPLICATION_JSON) - .accept(MediaType.APPLICATION_JSON)) + .delete("/api/switch/links") + .content(inputJson) + .contentType(MediaType.APPLICATION_JSON) + .accept(MediaType.APPLICATION_JSON)) .andReturn(); int status = mvcResult.getResponse().getStatus(); assertEquals(200, status); } - + @Test public void testUpdateIslBfdFlag() throws Exception { LinkParametersDto linkParametersDto = new LinkParametersDto(); @@ -232,31 +252,31 @@ public void testUpdateIslBfdFlag() throws Exception { linkParametersDto.setDstSwitch(TestIslMock.DST_SWITCH); linkParametersDto.setEnableBfd(TestIslMock.ENABLE_BFD_FLAG); String inputJson = mapToJson(linkParametersDto); - + MvcResult mvcResult = mockMvc.perform(MockMvcRequestBuilders - .patch("/api/switch/link/enable-bfd") - .content(inputJson) - .contentType(MediaType.APPLICATION_JSON) - .accept(MediaType.APPLICATION_JSON)) + .patch("/api/switch/link/enable-bfd") + .content(inputJson) + .contentType(MediaType.APPLICATION_JSON) + .accept(MediaType.APPLICATION_JSON)) .andReturn(); int status = mvcResult.getResponse().getStatus(); assertEquals(200, status); } - + @Test public void testUpdateSwitchPortProperty() throws Exception { try { SwitchProperty switchProperty = new SwitchProperty(); switchProperty.setDiscoveryEnabled(true); String inputJson = mapToJson(switchProperty); - + 
MvcResult mvcResult = mockMvc.perform(MockMvcRequestBuilders - .put("/api/switch/{switchId}/ports/{port}/properties", - TestSwitchMock.SWITCH_ID, TestSwitchMock.PORT) - .content(inputJson) - .contentType(MediaType.APPLICATION_JSON) - .accept(MediaType.APPLICATION_JSON)) + .put("/api/switch/{switchId}/ports/{port}/properties", + TestSwitchMock.SWITCH_ID, TestSwitchMock.PORT) + .content(inputJson) + .contentType(MediaType.APPLICATION_JSON) + .accept(MediaType.APPLICATION_JSON)) .andReturn(); int status = mvcResult.getResponse().getStatus(); assertEquals(200, status); @@ -264,7 +284,7 @@ public void testUpdateSwitchPortProperty() throws Exception { System.out.println("Exception is: " + e); } } - + @Test public void testUpdateSwitchPortPropertyIfSwitchIdNotPassed() throws Exception { try { @@ -272,31 +292,31 @@ public void testUpdateSwitchPortPropertyIfSwitchIdNotPassed() throws Exception { switchProperty.setDiscoveryEnabled(true); String inputJson = mapToJson(switchProperty); mockMvc.perform( - put("/api/switch/{switchId}/ports/{port}/properties", - TestSwitchMock.SWITCH_ID_NULL, TestSwitchMock.PORT) - .content(inputJson).contentType(MediaType.APPLICATION_JSON)).andExpect( + put("/api/switch/{switchId}/ports/{port}/properties", + TestSwitchMock.SWITCH_ID_NULL, TestSwitchMock.PORT) + .content(inputJson).contentType(MediaType.APPLICATION_JSON)).andExpect( status().isNotFound()); assertTrue(true); } catch (Exception e) { System.out.println("Exception is: " + e); - assertTrue(false); + fail(); } } - + @Test public void testUpdateSwitchPortPropertyIfPortNotPassed() throws Exception { try { SwitchProperty switchProperty = new SwitchProperty(); switchProperty.setDiscoveryEnabled(true); String inputJson = mapToJson(switchProperty); - mockMvc.perform(put("/api/switch/{switchId}/ports/{port}/properties", - TestSwitchMock.SWITCH_ID, TestSwitchMock.SWITCH_ID_NULL) - .content(inputJson).contentType(MediaType.APPLICATION_JSON)) - .andExpect(status().isNotFound()); + mockMvc.perform(put("/api/switch/{switchId}/ports/{port}/properties", + TestSwitchMock.SWITCH_ID, TestSwitchMock.SWITCH_ID_NULL) + .content(inputJson).contentType(MediaType.APPLICATION_JSON)) + .andExpect(status().isNotFound()); assertTrue(true); } catch (Exception ex) { System.out.println("Exception is: " + ex); - assertTrue(false); + fail(); } } @@ -304,56 +324,56 @@ public void testUpdateSwitchPortPropertyIfPortNotPassed() throws Exception { public void testUpdateSwitchPortPropertyIfSwitchPropertyNotPassed() throws Exception { try { mockMvc.perform( - put("/api/switch/{switchId}/ports/{port}/properties", - TestSwitchMock.SWITCH_ID, TestSwitchMock.PORT) - .contentType(MediaType.APPLICATION_JSON)).andExpect( + put("/api/switch/{switchId}/ports/{port}/properties", + TestSwitchMock.SWITCH_ID, TestSwitchMock.PORT) + .contentType(MediaType.APPLICATION_JSON)).andExpect( status().isBadRequest()); - assertTrue(true); + assertTrue(true); } catch (Exception e) { System.out.println("Exception is: " + e); - assertTrue(false); + fail(); } } - + @Test public void testGetSwitchPortProperties() { SwitchProperty switchProperty = new SwitchProperty(); try { when(serviceSwitch.getSwitchPortProperty(TestSwitchMock.SWITCH_ID, TestSwitchMock.PORT)) - .thenReturn(switchProperty); - mockMvc.perform(get("/api/switch/{switchId}/ports/{port}/properties", TestSwitchMock.SWITCH_ID, - TestSwitchMock.SWITCH_PORT).contentType(MediaType.APPLICATION_JSON)) + .thenReturn(switchProperty); + mockMvc.perform(get("/api/switch/{switchId}/ports/{port}/properties", 
TestSwitchMock.SWITCH_ID, + TestSwitchMock.SWITCH_PORT).contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isOk()); assertTrue(true); } catch (Exception e) { - assertTrue(false); + fail(); } - } - + } + @Test public void testGetSwitchPortPropertiesIfSwitchIdNotPassed() { try { mockMvc.perform(get("/api/switch/ports/{port}/properties", TestSwitchMock.PORT) - .contentType(MediaType.APPLICATION_JSON)) + .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isNotFound()); assertTrue(true); } catch (Exception e) { - assertTrue(false); + fail(); } } - + @Test public void testGetSwitchPortPropertiesIfPortNotPassed() { try { mockMvc.perform(get("/api/switch/{switchId}/ports/properties", TestSwitchMock.SWITCH_ID) - .contentType(MediaType.APPLICATION_JSON)) + .contentType(MediaType.APPLICATION_JSON)) .andExpect(status().isNotFound()); assertTrue(true); } catch (Exception e) { - assertTrue(false); + fail(); } } - + protected String mapToJson(Object obj) throws JsonProcessingException { ObjectMapper objectMapper = new ObjectMapper(); return objectMapper.writeValueAsString(obj); diff --git a/src-gui/src/test/java/org/openkilda/service/StatsServiceTest.java b/src-gui/src/test/java/org/openkilda/service/StatsServiceTest.java index fc0cbe2a4ca..c6f7643e96b 100644 --- a/src-gui/src/test/java/org/openkilda/service/StatsServiceTest.java +++ b/src-gui/src/test/java/org/openkilda/service/StatsServiceTest.java @@ -28,7 +28,7 @@ import org.openkilda.exception.InvalidRequestException; import org.openkilda.integration.service.StatsIntegrationService; import org.openkilda.integration.service.SwitchIntegrationService; -import org.openkilda.integration.source.store.SwitchStoreService; +import org.openkilda.integration.source.store.SwitchInventoryService; import org.openkilda.model.victoria.MetricValues; import org.openkilda.model.victoria.RangeQueryParams; import org.openkilda.model.victoria.Status; @@ -61,14 +61,14 @@ void setUp() { statsIntegrationService = mock(StatsIntegrationService.class); SwitchIntegrationService switchIntegrationService = mock(SwitchIntegrationService.class); StoreService storeService = mock(StoreService.class); - SwitchStoreService switchStoreService = mock(SwitchStoreService.class); + SwitchInventoryService switchInventoryService = mock(SwitchInventoryService.class); ApplicationProperties applicationProperties = mock(ApplicationProperties.class); when(applicationProperties.getMetricPrefix()).thenReturn("kilda."); // Initialize the service to be tested with the mock dependency statsService = new StatsService(statsIntegrationService, switchIntegrationService, - storeService, switchStoreService, applicationProperties); + storeService, switchInventoryService, applicationProperties); } @Test diff --git a/src-gui/src/test/java/org/openkilda/service/SwitchServiceTest.java b/src-gui/src/test/java/org/openkilda/service/SwitchServiceTest.java new file mode 100644 index 00000000000..2d4dbc35bcb --- /dev/null +++ b/src-gui/src/test/java/org/openkilda/service/SwitchServiceTest.java @@ -0,0 +1,403 @@ +/* Copyright 2024 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.openkilda.service; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import org.openkilda.constants.IConstants; +import org.openkilda.integration.exception.InvalidResponseException; +import org.openkilda.integration.service.SwitchIntegrationService; +import org.openkilda.integration.source.store.SwitchInventoryService; +import org.openkilda.integration.source.store.dto.InventorySwitch; +import org.openkilda.integration.source.store.dto.PopLocation; +import org.openkilda.model.Location; +import org.openkilda.model.SwitchDetail; +import org.openkilda.model.SwitchInfo; +import org.openkilda.store.model.SwitchStoreConfigDto; +import org.openkilda.store.model.UrlDto; +import org.openkilda.store.service.StoreService; +import org.openkilda.test.MockitoExtension; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.usermanagement.model.UserInfo; +import org.usermanagement.service.UserService; + +import java.nio.file.AccessDeniedException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +@ExtendWith(MockitoExtension.class) +class SwitchServiceTest { + + private static final String SW_TEST_1 = "sw_test_1"; + private static final String SW_TEST_2 = "sw_test_2"; + private static final String WRONG_SWITCH_ID = "WRONG_SWITCH_ID"; + + @InjectMocks + private SwitchService switchService; + @Mock + private SwitchIntegrationService switchIntegrationService; + @Mock + private SwitchInventoryService switchInventoryService; + @Mock + private UserService userService; + @Mock + private StoreService storeService; + + @BeforeEach + void setUp() { + MockitoAnnotations.openMocks(this); + } + + + @Test + void switchDetailsOnlyForController() throws AccessDeniedException { + when(switchIntegrationService.getSwitchesById(SW_TEST_1)).thenReturn(getSwitchInfo1(SW_TEST_1)); + + List actual = switchService.getSwitchDetails(SW_TEST_1, true); + Assertions.assertNotNull(actual); + Assertions.assertEquals(1, actual.size()); + Assertions.assertEquals(getSwitchDetail1(SW_TEST_1, null), actual.get(0)); + } + + /** + * SwitchId exist in controller and inventory switch with the same switch_id exist in inventory service + * should be returned only one switch. 
+ */ + @Test + void switchDetails() throws AccessDeniedException { + when(switchIntegrationService.getSwitchesById(SW_TEST_1)).thenReturn(getSwitchInfo1(SW_TEST_1)); + when(userService.getLoggedInUserInfo()).thenReturn(getUserInfoWithPermission()); + SwitchStoreConfigDto switchStoreConfig = mock(SwitchStoreConfigDto.class); + when(storeService.getSwitchStoreConfig()).thenReturn(switchStoreConfig); + Map map = new HashMap<>(); + map.put("someKey", new UrlDto()); + when(storeService.getSwitchStoreConfig().getUrls()).thenReturn(map); + when(switchInventoryService.getSwitch(SW_TEST_1)).thenReturn(getInventorySwitch(SW_TEST_1)); + + List actual = switchService.getSwitchDetails(SW_TEST_1, false); + Assertions.assertNotNull(actual); + Assertions.assertEquals(1, actual.size()); + SwitchDetail expectedWithInventory = getSwitchDetail1(SW_TEST_1, getInventorySwitch(SW_TEST_1)); + Assertions.assertEquals(expectedWithInventory, actual.get(0)); + } + + /** + * Get inventory switch by id with wrong switchId, northbound causing InvalidResponseException. + * But inventory switch exist. + */ + @Test + void switchDetailsInvalidResponse() throws AccessDeniedException { + when(switchIntegrationService.getSwitchesById(WRONG_SWITCH_ID)) + .thenThrow(InvalidResponseException.class); + + when(userService.getLoggedInUserInfo()).thenReturn(getUserInfoWithPermission()); + SwitchStoreConfigDto switchStoreConfig = mock(SwitchStoreConfigDto.class); + when(storeService.getSwitchStoreConfig()).thenReturn(switchStoreConfig); + Map map = new HashMap<>(); + map.put("someKey", new UrlDto()); + when(storeService.getSwitchStoreConfig().getUrls()).thenReturn(map); + when(switchInventoryService.getSwitch(WRONG_SWITCH_ID)) + .thenReturn(getInventorySwitch(WRONG_SWITCH_ID)); + + List actual = switchService.getSwitchDetails(WRONG_SWITCH_ID, false); + Assertions.assertNotNull(actual); + Assertions.assertEquals(1, actual.size()); + SwitchDetail expectedWithInventory = getSwitchDetail1(null, getInventorySwitch(WRONG_SWITCH_ID)); + Assertions.assertEquals(expectedWithInventory, actual.get(0)); + } + + /** + * SwitchId exist in controller, but does not exist in Inventory, should be returned only one switch. + */ + @Test + void switchDetailsEmptyInventory() throws AccessDeniedException { + when(switchIntegrationService.getSwitchesById(SW_TEST_1)).thenReturn(getSwitchInfo1(SW_TEST_1)); + when(userService.getLoggedInUserInfo()).thenReturn(getUserInfoWithPermission()); + SwitchStoreConfigDto switchStoreConfig = mock(SwitchStoreConfigDto.class); + when(storeService.getSwitchStoreConfig()).thenReturn(switchStoreConfig); + Map map = new HashMap<>(); + map.put("someKey", new UrlDto()); + when(storeService.getSwitchStoreConfig().getUrls()).thenReturn(map); + when(switchInventoryService.getSwitch(SW_TEST_1)).thenReturn(null); + + List actual1 = switchService.getSwitchDetails(SW_TEST_1, false); + Assertions.assertNotNull(actual1); + Assertions.assertEquals(1, actual1.size()); + SwitchDetail expectedWithoutInventory = getSwitchDetail1(SW_TEST_1, null); + Assertions.assertEquals(expectedWithoutInventory, actual1.get(0)); + } + + /** + * One switch in controller and different switch in Inventory should be returned. 
+ */ + @Test + void switchDetailsDifferentSwitches() throws AccessDeniedException { + when(switchIntegrationService.getSwitches()).thenReturn(Collections.singletonList(getSwitchInfo1(SW_TEST_1))); + when(userService.getLoggedInUserInfo()).thenReturn(getUserInfoWithPermission()); + SwitchStoreConfigDto switchStoreConfig = mock(SwitchStoreConfigDto.class); + when(storeService.getSwitchStoreConfig()).thenReturn(switchStoreConfig); + Map map = new HashMap<>(); + map.put("someKey", new UrlDto()); + when(storeService.getSwitchStoreConfig().getUrls()).thenReturn(map); + when(switchInventoryService.getSwitches()).thenReturn(Collections.singletonList(getInventorySwitch(SW_TEST_2))); + + List actual = switchService.getSwitchDetails(null, false); + Assertions.assertNotNull(actual); + Assertions.assertEquals(2, actual.size()); + List expected = Lists.newArrayList(getSwitchDetail1(SW_TEST_1, null), + getSwitchDetailOnlyWithInventory(getInventorySwitch(SW_TEST_2))); + + Assertions.assertEquals(expected.get(0), actual.get(0)); + Assertions.assertEquals(expected.get(1), actual.get(1)); + } + + /** + * Only inventory switches exist. + */ + @Test + void switchDetailsOnlyInventoryExist() throws AccessDeniedException { + when(switchIntegrationService.getSwitches()).thenReturn(Collections.emptyList()); + when(userService.getLoggedInUserInfo()).thenReturn(getUserInfoWithPermission()); + SwitchStoreConfigDto switchStoreConfig = mock(SwitchStoreConfigDto.class); + when(storeService.getSwitchStoreConfig()).thenReturn(switchStoreConfig); + Map map = new HashMap<>(); + map.put("someKey", new UrlDto()); + when(storeService.getSwitchStoreConfig().getUrls()).thenReturn(map); + when(switchInventoryService.getSwitches()).thenReturn(Collections.singletonList(getInventorySwitch(SW_TEST_2))); + + List actual = switchService.getSwitchDetails(null, false); + Assertions.assertNotNull(actual); + Assertions.assertEquals(1, actual.size()); + List expected + = Lists.newArrayList(getSwitchDetailOnlyWithInventory(getInventorySwitch(SW_TEST_2))); + + Assertions.assertEquals(expected.get(0), actual.get(0)); + } + + /** + * No switches exist at all. + */ + @Test + void switchDetailsNoSwitchesExist() throws AccessDeniedException { + when(switchIntegrationService.getSwitches()).thenReturn(Collections.emptyList()); + when(userService.getLoggedInUserInfo()).thenReturn(getUserInfoWithPermission()); + SwitchStoreConfigDto switchStoreConfig = mock(SwitchStoreConfigDto.class); + when(storeService.getSwitchStoreConfig()).thenReturn(switchStoreConfig); + Map map = new HashMap<>(); + map.put("someKey", new UrlDto()); + when(storeService.getSwitchStoreConfig().getUrls()).thenReturn(map); + when(switchInventoryService.getSwitches()).thenReturn(Collections.emptyList()); + + List actual = switchService.getSwitchDetails(null, false); + Assertions.assertNotNull(actual); + Assertions.assertEquals(0, actual.size()); + List expected = Lists.newArrayList(Collections.emptyList()); + + Assertions.assertEquals(expected, actual); + } + + /** + * SwitchDetails for all switches, controller false. + * One switch in controller and three switches in inventory exist, two of them with the same switchId. + * Should return 3 switches in total. 
+ */ + @Test + void switchDetailsWithOneConrollerAndDifferentInventory() throws AccessDeniedException { + when(switchIntegrationService.getSwitches()).thenReturn(Collections.singletonList(getSwitchInfo1(SW_TEST_2))); + when(userService.getLoggedInUserInfo()).thenReturn(getUserInfoWithPermission()); + SwitchStoreConfigDto switchStoreConfig = mock(SwitchStoreConfigDto.class); + when(storeService.getSwitchStoreConfig()).thenReturn(switchStoreConfig); + Map map = new HashMap<>(); + map.put("someKey", new UrlDto()); + when(storeService.getSwitchStoreConfig().getUrls()).thenReturn(map); + List inventorySwitches = Lists.newArrayList(); + inventorySwitches.add(getInventorySwitch(SW_TEST_1)); + inventorySwitches.add(getInventorySwitch(SW_TEST_2)); + inventorySwitches.add(getInventorySwitch(SW_TEST_2)); + when(switchInventoryService.getSwitches()).thenReturn(inventorySwitches); + + List actual = switchService.getSwitchDetails(null, false); + Assertions.assertNotNull(actual); + Assertions.assertEquals(3, actual.size()); + + List expected = + Lists.newArrayList(getSwitchDetail1(SW_TEST_2, getInventorySwitch(SW_TEST_2, true)), + getSwitchDetailOnlyWithInventory(getInventorySwitch(SW_TEST_2, true)), + getSwitchDetailOnlyWithInventory(getInventorySwitch(SW_TEST_1))); + + Assertions.assertEquals(expected.get(0), actual.get(0)); + Assertions.assertEquals(expected.get(1), actual.get(1)); + Assertions.assertEquals(expected.get(2), actual.get(2)); + } + + /** + * One switch in controller and the same switch in inventory exist, but switchId have the different letter case. + * One switch it total should be returned. + */ + @Test + void switchDetailsSameSwitchesDifferentSwitchIdLetterCase() throws AccessDeniedException { + when(switchIntegrationService.getSwitches()).thenReturn(Collections.singletonList(getSwitchInfo1(SW_TEST_1))); + when(userService.getLoggedInUserInfo()).thenReturn(getUserInfoWithPermission()); + SwitchStoreConfigDto switchStoreConfig = mock(SwitchStoreConfigDto.class); + when(storeService.getSwitchStoreConfig()).thenReturn(switchStoreConfig); + Map map = new HashMap<>(); + map.put("someKey", new UrlDto()); + when(storeService.getSwitchStoreConfig().getUrls()).thenReturn(map); + when(switchInventoryService.getSwitches()) + .thenReturn(Collections.singletonList(getInventorySwitch(SW_TEST_1.toUpperCase(), "f3"))); + + List actual = switchService.getSwitchDetails(null, false); + Assertions.assertNotNull(actual); + Assertions.assertEquals(1, actual.size()); + List expected = Lists.newArrayList(getSwitchDetail1(SW_TEST_1, + getInventorySwitch(SW_TEST_1.toUpperCase(), "f3"))); + Assertions.assertEquals(expected.get(0), actual.get(0)); + + + when(switchInventoryService.getSwitches()) + .thenReturn(Arrays.asList(getInventorySwitch(SW_TEST_1.toUpperCase(), "f1"), + getInventorySwitch(SW_TEST_1.toLowerCase(), "f2"), + getInventorySwitch(SW_TEST_1.toUpperCase(), "f3"))); + + + actual = switchService.getSwitchDetails(null, false); + expected.set(0, getSwitchDetail1(SW_TEST_1, getInventorySwitch(SW_TEST_1.toUpperCase(), true, "f3"))); + expected.add(getSwitchDetailOnlyWithInventory(getInventorySwitch(SW_TEST_1.toUpperCase(), true, "f1"))); + expected.add(getSwitchDetailOnlyWithInventory(getInventorySwitch(SW_TEST_1.toLowerCase(), true, "f2"))); + Assertions.assertNotNull(actual); + Assertions.assertEquals(3, actual.size()); + Assertions.assertEquals(expected.get(0), actual.get(0)); + Assertions.assertEquals(expected.get(1), actual.get(1)); + Assertions.assertEquals(expected.get(2), actual.get(2)); + } + + 
private SwitchDetail getSwitchDetail1(String switchId, InventorySwitch inventorySwitch) { + SwitchDetail.SwitchDetailBuilder builder = SwitchDetail.builder(); + if (switchId != null) { + builder.switchId(switchId) + .name(switchId) + .address("address") + .port("123") + .hostname("qwerty") + .description("qwerty") + .state("qwerty") + .underMaintenance(true) + .ofVersion("qwerty") + .manufacturer("qwerty") + .hardware("qwerty") + .software("qwerty") + .serialNumber("qwerty") + .pop("qwerty") + .location(getLocation1()); + } + builder.inventorySwitchDetail(inventorySwitch); + return builder.build(); + } + + private SwitchDetail getSwitchDetailOnlyWithInventory(InventorySwitch inventorySwitch) { + return SwitchDetail.builder() + .inventorySwitchDetail(inventorySwitch) + .build(); + } + + private SwitchInfo getSwitchInfo1(String switchId) { + SwitchInfo switchInfo = new SwitchInfo(); + switchInfo.setSwitchId(switchId); + switchInfo.setAddress("address"); + switchInfo.setPort("123"); + switchInfo.setHostname("qwerty"); + switchInfo.setDescription("qwerty"); + switchInfo.setState("qwerty"); + switchInfo.setUnderMaintenance(true); + switchInfo.setOfVersion("qwerty"); + switchInfo.setManufacturer("qwerty"); + switchInfo.setHardware("qwerty"); + switchInfo.setSoftware("qwerty"); + switchInfo.setSerialNumber("qwerty"); + switchInfo.setPop("qwerty"); + switchInfo.setLocation(getLocation1()); + switchInfo.setName(switchId); + return switchInfo; + } + + private Location getLocation1() { + Location location = new Location(); + location.setCity("Sydney"); + location.setCountry("Australia"); + location.setStreet("P. Sherman 42 Wallaby Way"); + return location; + } + + + private InventorySwitch getInventorySwitch(String switchId) { + return getInventorySwitch(switchId, false, null); + } + + private InventorySwitch getInventorySwitch(String switchId, String description) { + return getInventorySwitch(switchId, false, description); + } + + private InventorySwitch getInventorySwitch(String switchId, boolean duplicate) { + return getInventorySwitch(switchId, duplicate, null); + } + + private InventorySwitch getInventorySwitch(String switchId, boolean duplicate, String description) { + InventorySwitch inventorySwitch = new InventorySwitch(); + inventorySwitch.setUuid("randomUUID"); + inventorySwitch.setSwitchId(switchId); + inventorySwitch.setDescription(description != null ? 
description : "Sample switch description"); + inventorySwitch.setName("Switch1"); + inventorySwitch.setCommonName("CommonSwitch"); + inventorySwitch.setModel("ModelX"); + inventorySwitch.setStatus("Active"); + inventorySwitch.setRackLocation("Rack1"); + inventorySwitch.setReferenceUrl("http://example.com"); + inventorySwitch.setSerialNumber("123456789"); + inventorySwitch.setRackNumber("Rack01"); + inventorySwitch.setSoftwareVersion("1.0"); + inventorySwitch.setManufacturer("ManufacturerXYZ"); + + PopLocation popLocation = new PopLocation(); + popLocation.setStateCode("NY"); + popLocation.setCountryCode("US"); + popLocation.setPopUuid("POP_UUID"); + popLocation.setPopName("Sample POP"); + popLocation.setPopCode("POP123"); + inventorySwitch.setPopLocation(popLocation); + + inventorySwitch.setHasDuplicate(duplicate); + return inventorySwitch; + } + + private UserInfo getUserInfoWithPermission() { + UserInfo userInfo = new UserInfo(); + userInfo.setPermissions(Sets.newHashSet(IConstants.Permission.SW_SWITCH_INVENTORY)); + return userInfo; + } +} diff --git a/src-gui/src/test/java/org/openkilda/test/BaseTest.java b/src-gui/src/test/java/org/openkilda/test/BaseTest.java index b19964e6dd9..1a862b16e11 100644 --- a/src-gui/src/test/java/org/openkilda/test/BaseTest.java +++ b/src-gui/src/test/java/org/openkilda/test/BaseTest.java @@ -15,13 +15,15 @@ package org.openkilda.test; +import static org.junit.jupiter.api.Assertions.assertTrue; + import org.openkilda.util.IConstantsTest; import org.openkilda.utility.IoUtil; import org.apache.log4j.Logger; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import java.io.BufferedInputStream; import java.io.BufferedReader; @@ -39,6 +41,7 @@ * * @author Gaurav Chugh */ +@ExtendWith(MockitoExtension.class) public class BaseTest { private static final Logger LOGGER = Logger.getLogger(BaseTest.class); @@ -68,27 +71,27 @@ public void executeKildaFiles() { if (url.contains(".css")) { downloadFiles(url, IConstantsTest.CLASSPATH + IConstantsTest.CSS_PATH + fileName); - Assert.assertTrue(true); + assertTrue(true); } if (url.contains(".js")) { if (fileName.contains(IConstantsTest.JQUERY_FILE)) { fileName = IConstantsTest.JQUERY_MIN_FILE; } downloadFiles(url, IConstantsTest.CLASSPATH + IConstantsTest.JAVASCRIPT_PATH + fileName); - Assert.assertTrue(true); + assertTrue(true); } if (url.contains("ttf") || url.contains("woff2") || url.contains("woff")) { downloadFiles(url, IConstantsTest.CLASSPATH + IConstantsTest.FONTS_PATH + fileName); - Assert.assertTrue(true); + assertTrue(true); } if (url.contains("Roboto")) { downloadFiles(url, IConstantsTest.CLASSPATH + IConstantsTest.CSS_PATH + "roboto.css"); - Assert.assertTrue(true); + assertTrue(true); } } catch (Exception e) { LOGGER.error("exception occurred Inside method executeKildaFiles.", e); - Assert.assertTrue(false); + assertTrue(false); } } LOGGER.info("executeKildaFiles has been successfully executed"); diff --git a/src-gui/src/test/java/org/openkilda/util/TestSwitchMock.java b/src-gui/src/test/java/org/openkilda/util/TestSwitchMock.java index 75a00e83441..e6b234dd42f 100644 --- a/src-gui/src/test/java/org/openkilda/util/TestSwitchMock.java +++ b/src-gui/src/test/java/org/openkilda/util/TestSwitchMock.java @@ -1,4 +1,4 @@ -/* Copyright 2018 Telstra Open Source +/* Copyright 2024 Telstra Open Source * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -17,10 +17,10 @@ public interface TestSwitchMock { - static final String SWITCH_ID = "00:00:00:22:3d:5a:04:87"; - static final String SWITCH_ID_NULL = null; - static final String PORT = "27"; - static final int SWITCH_PORT = 27; - static final boolean MAINTENANCE_STATUS = false; - static final boolean EVACUATE_STATUS = false; + String SWITCH_ID = "00:00:00:22:3d:5a:04:87"; + String SWITCH_ID_NULL = null; + String PORT = "27"; + int SWITCH_PORT = 27; + boolean MAINTENANCE_STATUS = false; + boolean EVACUATE_STATUS = false; } diff --git a/src-gui/src/test/resources/config b/src-gui/src/test/resources/config index 04e988de485..714f83518b9 100644 --- a/src-gui/src/test/resources/config +++ b/src-gui/src/test/resources/config @@ -1,3 +1,4 @@ https://fonts.googleapis.com/css?family=Roboto:100,100i,300,300i,400,400i,500,500i,700,700i https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css -https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.min.js \ No newline at end of file +https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.min.js +https://cdnjs.cloudflare.com/ajax/libs/qrcodejs/1.0.0/qrcode.min.js \ No newline at end of file diff --git a/src-gui/ui/src/app/common/services/switch.service.ts b/src-gui/ui/src/app/common/services/switch.service.ts index cfe75160a7e..7af6b99f9e2 100644 --- a/src-gui/ui/src/app/common/services/switch.service.ts +++ b/src-gui/ui/src/app/common/services/switch.service.ts @@ -1,119 +1,151 @@ -import { Injectable } from '@angular/core'; +import {Injectable} from '@angular/core'; import {HttpClient, HttpParams} from '@angular/common/http'; -import { environment } from '../../../environments/environment'; -import { Observable } from 'rxjs'; -import { Switch } from '../data-models/switch'; -import { catchError } from 'rxjs/operators'; -import { CookieManagerService } from './cookie-manager.service'; +import {environment} from '../../../environments/environment'; +import {Observable} from 'rxjs'; +import {Switch} from '../data-models/switch'; +import {catchError} from 'rxjs/operators'; +import {CookieManagerService} from './cookie-manager.service'; @Injectable({ - providedIn: 'root' + providedIn: 'root' }) export class SwitchService { - constructor(private httpClient: HttpClient, private cookieManager: CookieManagerService) {} + constructor(private httpClient: HttpClient, private cookieManager: CookieManagerService) { + } - getSwitchList(query?: any): Observable { - return this.httpClient.get(`${environment.apiEndPoint}/switch/list`, {params: query}); - } + getSwitchList(query?: any): Observable { + return this.httpClient.get(`${environment.apiEndPoint}/switch/list`, {params: query}); + } - getSwitchLinks(): Observable { - return this.httpClient.get(`${environment.apiEndPoint}/switch/links`); - } + getSwitchLinks(): Observable { + return this.httpClient.get(`${environment.apiEndPoint}/switch/links`); + } - getSwitchRulesList(switchId): Observable { - const timestamp = new Date().getTime(); - return this.httpClient.get(`${environment.apiEndPoint}/switch/${switchId}/rules?_=${timestamp}`, {responseType: 'text'}); - } + getSwitchRulesList(switchId): Observable { + const timestamp = new Date().getTime(); + return this.httpClient.get(`${environment.apiEndPoint}/switch/${switchId}/rules?_=${timestamp}`, {responseType: 'text'}); + } -getNetworkPath(source_switch, target_switch, strategy, max_latency) { - const timestamp = new Date().getTime(); - return 
this.httpClient.get(`${environment.apiEndPoint}/network/paths?src_switch=${source_switch}&dst_switch=${target_switch}&strategy=${strategy}&max_latency=${max_latency}&_=${timestamp}`); -} + getNetworkPath(source_switch, target_switch, strategy, max_latency) { + const timestamp = new Date().getTime(); + return this.httpClient.get(`${environment.apiEndPoint}/network/paths?src_switch=${source_switch}&dst_switch=${target_switch}&strategy=${strategy}&max_latency=${max_latency}&_=${timestamp}`); + } - configurePort(switchId, portNumber, status): Observable<{}> { + configurePort(switchId, portNumber, status): Observable<{}> { const url = `${environment.apiEndPoint}/switch/${switchId}/${portNumber}/config`; return this.httpClient.put(url, {'status': status}); - } - - getSwitchPortFlows(switchId, portNumber, filter): Observable { - const url = `${environment.apiEndPoint}/switch/${switchId}/${portNumber}/flows`; - return this.httpClient.get(url); - } - - getSwitchDetail(switchId, filter): Observable<{}> { - const query: any = {controller: filter == 'controller'}; - return this.httpClient.get(`${environment.apiEndPoint}/switch/${switchId}`, {params: query}); - } - - getSwitchFlows(switchId, filter, port): Observable<{}> { - let url = `${environment.apiEndPoint}/switch/${switchId}/flows?inventory=` + filter; - if (port) { - url = url + '&port=' + port; - } - return this.httpClient.get(url); - } - - getSwitchFlowsForPorts(switchId, ports: Array): Observable<{}> { - let queryParams = new HttpParams(); - ports.forEach(port => queryParams = queryParams.append('ports', String(port))); - const url = `${environment.apiEndPoint}/switch/${switchId}/flows-by-port`; - console.log('calling the API, generated url: ' + url + ', params: ' + queryParams.getAll('ports')); - return this.httpClient.get(url, {params: queryParams}); - } - - getSwitchMetersList(switchId): Observable { - const timestamp = new Date().getTime(); - return this.httpClient.get(`${environment.apiEndPoint}/switch/meters/${switchId}?_=${timestamp}`); - } - - saveSwitcName(name, switchid) { - return this.httpClient.patch(`${environment.apiEndPoint}/switch/name/${switchid}`, name); - } - - switchMaintenance(data, switchid) { - return this.httpClient.post(`${environment.apiEndPoint}/switch/under-maintenance/${switchid}`, data); - } - updatediscoveryPackets(switchId, portNumber, value) { - const url = `${environment.apiEndPoint}/switch/${switchId}/ports/${portNumber}/properties`; - return this.httpClient.put(url, {'discovery_enabled': value}); - } - getdiscoveryPackets(switchId, portNumber) { - const timestamp = new Date().getTime(); - return this.httpClient.get(`${environment.apiEndPoint}/switch/${switchId}/ports/${portNumber}/properties?_=${timestamp}`); - } - - updateSwitch(data, switch_id) { - return this.httpClient.patch(`${environment.apiEndPoint}/switch/location/${switch_id}`, data); - } - - deleteSwitch(switchId, data, successCb, errorCb): void { - const requestBody = JSON.stringify(data); - const token = this.cookieManager.get('XSRF-TOKEN') as string; - const xhr = new XMLHttpRequest(); - xhr.withCredentials = false; - xhr.addEventListener('readystatechange', function () { - if (this.readyState == 4 && this.status == 200) { - successCb(JSON.parse(this.responseText)); - } else if (this.readyState == 4 && this.status >= 300) { - errorCb(JSON.parse(this.responseText)); - } - }); - - xhr.open('DELETE', `${environment.apiEndPoint}/switch/${switchId}`); - xhr.setRequestHeader('Content-Type', 'application/json'); - if (token !== null) { - 
xhr.setRequestHeader( 'X-XSRF-TOKEN' , token); - } - xhr.send(requestBody); - } - createLagLogicalPort(data: CreateLagPortModel, switchid) { - return this.httpClient.post(`${environment.apiEndPoint}/switch/${switchid}/lags`, data); - } - - deleteLagLogicalPort(switchid, logical_port_number ) { - return this.httpClient.delete(`${environment.apiEndPoint}/switch/${switchid}/lags/${logical_port_number }`); - } + } + + getSwitchPortFlows(switchId, portNumber, filter): Observable { + const url = `${environment.apiEndPoint}/switch/${switchId}/${portNumber}/flows`; + return this.httpClient.get(url); + } + + getSwitchDetails(switchId, filter): Observable { + const reqParams: any = {}; + if (switchId) { + reqParams.switchId = switchId; + } + reqParams.controller = filter === 'controller'; + return this.httpClient.get(`${environment.apiEndPoint}/switch/details`, {params: reqParams}); + } + + getSwitchFlows(switchId, filter, port): Observable<{}> { + let url = `${environment.apiEndPoint}/switch/${switchId}/flows?inventory=` + filter; + if (port) { + url = url + '&port=' + port; + } + return this.httpClient.get(url); + } + + getSwitchFlowsForPorts(switchId, ports: Array): Observable<{}> { + let queryParams = new HttpParams(); + ports.forEach(port => queryParams = queryParams.append('ports', String(port))); + const url = `${environment.apiEndPoint}/switch/${switchId}/flows-by-port`; + return this.httpClient.get(url, {params: queryParams}); + } + + getSwitchMetersList(switchId): Observable { + const timestamp = new Date().getTime(); + return this.httpClient.get(`${environment.apiEndPoint}/switch/meters/${switchId}?_=${timestamp}`); + } + + saveSwitcName(name, switchid) { + return this.httpClient.patch(`${environment.apiEndPoint}/switch/name/${switchid}`, name); + } + + switchMaintenance(data, switchid) { + return this.httpClient.post(`${environment.apiEndPoint}/switch/under-maintenance/${switchid}`, data); + } + updatediscoveryPackets(switchId, portNumber, value) { + const url = `${environment.apiEndPoint}/switch/${switchId}/ports/${portNumber}/properties`; + return this.httpClient.put(url, {'discovery_enabled': value}); + } + + getdiscoveryPackets(switchId, portNumber) { + const timestamp = new Date().getTime(); + return this.httpClient.get(`${environment.apiEndPoint}/switch/${switchId}/ports/${portNumber}/properties?_=${timestamp}`); + } + + updateSwitch(data, switch_id) { + return this.httpClient.patch(`${environment.apiEndPoint}/switch/location/${switch_id}`, data); + } + + deleteSwitch(switchId, data, successCb, errorCb): void { + const requestBody = JSON.stringify(data); + const token = this.cookieManager.get('XSRF-TOKEN') as string; + const xhr = new XMLHttpRequest(); + xhr.withCredentials = false; + xhr.addEventListener('readystatechange', function () { + if (this.readyState == 4 && this.status == 200) { + successCb(JSON.parse(this.responseText)); + } else if (this.readyState == 4 && this.status >= 300) { + errorCb(JSON.parse(this.responseText)); + } + }); + + xhr.open('DELETE', `${environment.apiEndPoint}/switch/${switchId}`); + xhr.setRequestHeader('Content-Type', 'application/json'); + if (token !== null) { + xhr.setRequestHeader('X-XSRF-TOKEN', token); + } + xhr.send(requestBody); + } + + createLagLogicalPort(data: CreateLagPortModel, switchid) { + return this.httpClient.post(`${environment.apiEndPoint}/switch/${switchid}/lags`, data); + } + + deleteLagLogicalPort(switchid, logical_port_number) { + return 
this.httpClient.delete(`${environment.apiEndPoint}/switch/${switchid}/lags/${logical_port_number}`); + } + + hasDiscrepancy(switchDetail: any): boolean { + if (!switchDetail.inventory_switch_detail) { + return true; + } + if (switchDetail.switch_id == null) { + return true; + } + return switchDetail.state !== switchDetail.inventory_switch_detail.status; + } + + extractSwitchId(switchDetail: any): string { + return switchDetail.switch_id || switchDetail.inventory_switch_detail['switch-id']; + } + + isControllerSwitch(switchDetail: any): boolean { + return !!switchDetail.switch_id; + } + + isInventorySwitch(switchDetail: any): boolean { + return !!switchDetail.inventory_switch_detail; + } + + extractState(switchDetail: any): string { + return switchDetail.state || switchDetail.inventory_switch_detail.status; + } } diff --git a/src-gui/ui/src/app/common/services/user.service.ts b/src-gui/ui/src/app/common/services/user.service.ts index 78b180c1d98..851bda2922b 100644 --- a/src-gui/ui/src/app/common/services/user.service.ts +++ b/src-gui/ui/src/app/common/services/user.service.ts @@ -33,8 +33,8 @@ const httpOptions = { } /** POST: add user to the server */ - addUser(user: User): Observable { - return this.http.post(this.configUrl + '/user', user); + addUser(user: User): Observable { + return this.http.post(this.configUrl + '/user', user); } getUserById(id: number): Observable { diff --git a/src-gui/ui/src/app/modules/settings/switch-store/switch-store.component.ts b/src-gui/ui/src/app/modules/settings/switch-store/switch-store.component.ts index 8886dcaea40..971c89e6cad 100644 --- a/src-gui/ui/src/app/modules/settings/switch-store/switch-store.component.ts +++ b/src-gui/ui/src/app/modules/settings/switch-store/switch-store.component.ts @@ -324,7 +324,7 @@ export class SwitchStoreComponent implements OnInit { setTimeout(function() { localStorage.removeItem('hasSwtStoreSetting'); localStorage.removeItem('switchStoreSetting'); - localStorage.removeItem('switchFilterFlag'); + sessionStorage.removeItem('switchFilterFlag'); location.reload(); }, 500); }, (error) => { diff --git a/src-gui/ui/src/app/modules/switches/port-list/port-list.component.ts b/src-gui/ui/src/app/modules/switches/port-list/port-list.component.ts index 4dbbfd44bce..eb3b115f897 100644 --- a/src-gui/ui/src/app/modules/switches/port-list/port-list.component.ts +++ b/src-gui/ui/src/app/modules/switches/port-list/port-list.component.ts @@ -37,7 +37,7 @@ export class PortListComponent implements OnInit, AfterViewInit, OnDestroy, OnCh portListSubscriber = null; portFlowSubscription: Subscription[] = []; loadPorts = false; - switchFilterFlag: string = localStorage.getItem('switchFilterFlag') || 'controller'; + switchFilterFlag: string = sessionStorage.getItem('switchFilterFlag') || 'controller'; hasStoreSetting; constructor(private switchService: SwitchService, diff --git a/src-gui/ui/src/app/modules/switches/switch-datatable/switch-datatable.component.html b/src-gui/ui/src/app/modules/switches/switch-datatable/switch-datatable.component.html index 9071ac6dc83..bd385b2ef7e 100644 --- a/src-gui/ui/src/app/modules/switches/switch-datatable/switch-datatable.component.html +++ b/src-gui/ui/src/app/modules/switches/switch-datatable/switch-datatable.component.html @@ -1,118 +1,140 @@ -
[switch-datatable.component.html hunk — the HTML markup was lost in extraction; only text fragments remain. The recoverable pieces show the table column headers (Switch ID, Name, Address, Hostname, Pop Location, Sum(Bandwidth) of Flows(Mbps), No Of Flows, Description, State, type) and the cell bindings rewritten from the old flat fields (row.switch_id, row['common-name'], row['controller-switch'], row['pop-location']['pop-code'], row.state) to the new helpers and nested inventory data: extractSwitchId(row), isControllerSwitch(row), extractState(row), row['inventory_switch_detail']['common-name'], row['inventory_switch_detail']?.['pop-location']?.['pop-code'], row.description || row.inventory_switch_detail?.description, and descrepancyString(row).]
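To make the rewritten datatable bindings easier to follow, here is a hedged TypeScript sketch of how a row returned by GET /api/switch/details is classified. The interface and the sample literal are illustrative assumptions; the helper logic is a simplified mirror of the extractSwitchId and hasDiscrepancy methods added to switch.service.ts earlier in this patch.

```typescript
// Illustrative sketch: a row from GET /api/switch/details may carry controller data,
// inventory data (inventory_switch_detail), or both; the datatable renders all three cases.
interface SwitchDetailRow {
    switch_id?: string;
    state?: string;
    inventory_switch_detail?: { 'switch-id'?: string; status?: string; description?: string };
}

// Simplified mirror of the helpers added to SwitchService (assumption: no Angular wiring).
function extractSwitchId(row: SwitchDetailRow): string {
    return row.switch_id || row.inventory_switch_detail?.['switch-id'] || '';
}

function hasDiscrepancy(row: SwitchDetailRow): boolean {
    // A row is flagged when either side is missing or the controller and inventory states differ.
    if (!row.inventory_switch_detail || row.switch_id == null) {
        return true;
    }
    return row.state !== row.inventory_switch_detail.status;
}

// Hypothetical inventory-only row: a switch known to the inventory store but not the controller.
const inventoryOnlyRow: SwitchDetailRow = {
    inventory_switch_detail: {'switch-id': 'sw_test_2', status: 'Active'},
};
console.log(extractSwitchId(inventoryOnlyRow)); // "sw_test_2"
console.log(hasDiscrepancy(inventoryOnlyRow));  // true (no controller record)
```

Under these assumptions, a switch that exists only in the inventory store still yields a usable identifier and is reported as a discrepancy, which lines up with the descrepancyString(...) column kept in the rewritten template.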
+ diff --git a/src-gui/ui/src/app/modules/switches/switch-datatable/switch-datatable.component.ts b/src-gui/ui/src/app/modules/switches/switch-datatable/switch-datatable.component.ts index 13196eb4bc5..4060d282e4f 100644 --- a/src-gui/ui/src/app/modules/switches/switch-datatable/switch-datatable.component.ts +++ b/src-gui/ui/src/app/modules/switches/switch-datatable/switch-datatable.component.ts @@ -1,344 +1,366 @@ -import { Component, OnInit, Input, ViewChild, Output, EventEmitter, OnChanges, OnDestroy, AfterViewInit, SimpleChanges, Renderer2 } from '@angular/core'; -import { Subject, Subscription } from 'rxjs'; -import { DataTableDirective } from 'angular-datatables'; -import { LoaderService } from 'src/app/common/services/loader.service'; -import { Router } from '@angular/router'; -import { Switch } from 'src/app/common/data-models/switch'; -import { StoreSettingtService } from 'src/app/common/services/store-setting.service'; -import { ClipboardService } from 'ngx-clipboard'; -import { SwitchService } from 'src/app/common/services/switch.service'; -import { CommonService } from 'src/app/common/services/common.service'; -import { ToastrService } from 'ngx-toastr'; -import { MessageObj } from 'src/app/common/constants/constants'; -declare var jQuery: any; +import { + AfterViewInit, + Component, + Input, + OnChanges, + OnDestroy, + OnInit, + Renderer2, + SimpleChanges, + ViewChild +} from '@angular/core'; +import {Subject, Subscription} from 'rxjs'; +import {DataTableDirective} from 'angular-datatables'; +import {LoaderService} from 'src/app/common/services/loader.service'; +import {Router} from '@angular/router'; +import {Switch} from 'src/app/common/data-models/switch'; +import {StoreSettingtService} from 'src/app/common/services/store-setting.service'; +import {ClipboardService} from 'ngx-clipboard'; +import {SwitchService} from 'src/app/common/services/switch.service'; +import {CommonService} from 'src/app/common/services/common.service'; +import {ToastrService} from 'ngx-toastr'; +import {MessageObj} from 'src/app/common/constants/constants'; + +declare let jQuery: any; @Component({ - selector: 'app-switch-datatable', - templateUrl: './switch-datatable.component.html', - styleUrls: ['./switch-datatable.component.css'] + selector: 'app-switch-datatable', + templateUrl: './switch-datatable.component.html', + styleUrls: ['./switch-datatable.component.css'] }) export class SwitchDatatableComponent implements OnInit, OnChanges, OnDestroy, AfterViewInit { - @ViewChild(DataTableDirective, { static: true }) datatableElement: DataTableDirective; - @Input() data = []; - @Input() switchFilterFlag: string; - @Input() textSearch: any; - dtOptions: any = {}; - dtTrigger: Subject = new Subject(); - wrapperHide = false; - hasStoreSetting = false; - flowSubscription: Subscription[] = []; - switch_id = false; - commonname = false; - name = false; - address = false; - hostname = false; - poplocation = false; - description = false; - sumofflows = false; - noofflows = false; - state = false; - enableExportBtn = false; - clipBoardItems = []; - flowDataOfSwitch: any = {}; - constructor(private loaderService: LoaderService, - private renderer: Renderer2, - private router: Router, - private commonService: CommonService, - private toastr: ToastrService, - private storeSwitchService: StoreSettingtService, - private clipboardService: ClipboardService, - private switchService: SwitchService - ) { - if (!this.commonService.hasPermission('menu_switches')) { - this.toastr.error(MessageObj.unauthorised); - 
this.router.navigate(['/home']); - } - } - - ngOnInit() { - this.wrapperHide = false; - const ref = this; - this.dtOptions = { - pageLength: 10, - retrieve: true, - autoWidth: false, - dom: 'tpli', - colResize: false, - 'aLengthMenu': [[10, 20, 35, 50, -1], [10, 20, 35, 50, 'All']], - 'responsive': true, - drawCallback: function() { - if (jQuery('#switchDataTable tbody tr').length < 10) { - jQuery('#switchDataTable_next').addClass('disabled'); - } else { - jQuery('#switchDataTable_next').removeClass('disabled'); - } - }, - 'aoColumns': [ - { sWidth: '10%' }, - { sWidth: '10%', 'sType': 'name', 'bSortable': true }, - { sWidth: '10%' }, - { sWidth: '10%' }, - { sWidth: '10%' }, - { sWidth: '10%' }, - { sWidth: '15%' }, - { sWidth: '25%' }, - { sWidth: '10%' } - ], - language: { - searchPlaceholder: 'Search' - }, - initComplete: function( settings, json ) { - setTimeout(function() { - ref.loaderService.hide(); - ref.wrapperHide = true; - }, this.data.length / 2); - }, - columnDefs: [ - { targets: [4], visible: false}, - { targets: [9], visible: false} - ] - }; - - this.fetchSwitchFlowDataObj(); - - } - - async fetchSwitchFlowDataObj() { - - if (this.data && this.data.length) { - let i = 0; - let processComplete = 1; - this.data.forEach((d) => { - this.flowSubscription[i] = this.switchService.getSwitchFlows(d.switch_id, d['inventory-switch'], null).subscribe(data => { - const flowsData: any = data; - d['sumofbandwidth'] = 0; - d['noofflows'] = 0; - if (flowsData && flowsData.length) { - for (const flow of flowsData) { - d['sumofbandwidth'] = d['sumofbandwidth'] + (flow.maximum_bandwidth / 1000); - } - if (d['sumofbandwidth'] ) { - d['sumofbandwidth'] = d['sumofbandwidth'].toFixed(3); - } - d['noofflows'] = flowsData.length; - } - }, error => { - d['sumofbandwidth'] = 0; - d['noofflows'] = 0; - }, () => { - processComplete = processComplete + 1; - if (this.data.length == processComplete) { - this.enableExportBtn = true; - } - }); - i++; - }); + @ViewChild(DataTableDirective, {static: true}) datatableElement: DataTableDirective; + @Input() data = []; + @Input() switchFilterFlag: string; + @Input() textSearch: any; + dtOptions: any = {}; + dtTrigger: Subject = new Subject(); + wrapperHide = false; + hasStoreSetting = false; + flowSubscription: Subscription[] = []; + switch_id = false; + commonname = false; + name = false; + address = false; + hostname = false; + poplocation = false; + description = false; + sumofflows = false; + noofflows = false; + state = false; + enableExportBtn = false; + clipBoardItems = []; + constructor(private loaderService: LoaderService, + private renderer: Renderer2, + private router: Router, + private commonService: CommonService, + private toastr: ToastrService, + private storeSwitchService: StoreSettingtService, + private clipboardService: ClipboardService, + public switchService: SwitchService + ) { + if (!this.commonService.hasPermission('menu_switches')) { + this.toastr.error(MessageObj.unauthorised); + this.router.navigate(['/home']); + } } - } - - exportCsv(val) { - let headings = ['Switch ID', 'Name', 'Address', 'Hostname', 'Pop Location', 'Sum(Bandwidth) of Flows(Mbps)', 'No Of Flows', 'Description', 'State', 'Evacuate', 'Hardware', 'Location', 'Manufacturer', 'Version', 'Port', 'Serial Number', 'Software', 'Under Maintenance']; - - if (val) { - headings = ['Switch ID', 'Name', 'Address', 'Hostname', 'Pop Location', 'Sum(Bandwidth) of Flows(Mbps)', 'No Of Flows', 'Description', 'State']; - } - const lineArray = []; - lineArray.push(headings); - 
this.data.forEach(function(d) { - const line = []; - line.push('"' + ((d.switch_id) ? d.switch_id : '-') + '"'); - line.push('"' + ((d.name) ? d.name : '-') + '"'); - line.push('"' + ((d.address) ? d.address : '-') + '"'); - line.push('"' + ((d.hostname) ? d.hostname : '-') + '"'); - line.push('"' + (d['pop-location'] || '-') + '"'); - line.push('"' + ((d.sumofbandwidth || d.sumofbandwidth == 0) ? d.sumofbandwidth : '-') + '"'); - line.push('"' + ((d.noofflows || d.noofflows == 0 ) ? d.noofflows : '-') + '"'); - line.push('"' + ((d.description) ? d.description : '-') + '"'); - line.push('"' + ((d.state) ? d.state : '-') + '"'); - if (!val) { - const locationString = 'longitude:' + ((d.location.longitude) ? d.location.longitude : '-') + ', latitude:' + ((d.location.latitude) ? d.location.latitude : '-') + ', city:' + ((d.location.city) ? d.location.city : '-') + ', street:' + ((d.location.street) ? d.location.street : '-') + ', Country:' + ((d.location.country) ? d.location.country : '-'); - line.push('"' + ((d.evacuate) ? d.evacuate : 'false') + '"'); - line.push('"' + ((d.hardware) ? d.hardware : '-') + '"'); - line.push('"' + locationString + '"'); - line.push('"' + ((d.manufacturer) ? d.manufacturer : '-') + '"'); - line.push('"' + ((d.of_version) ? d.of_version : '-') + '"'); - line.push('"' + ((d.port) ? d.port : '-') + '"'); - line.push('"' + ((d.serial_number) ? d.serial_number : '-') + '"'); - line.push('"' + ((d.software) ? d.software : '-') + '"'); - line.push('"' + ((d.under_maintenance) ? d.under_maintenance : 'false') + '"'); - } - - const csvLine = line.join(','); - lineArray.push(csvLine); - }); - const fileName = 'OPEN KILDA - Switches'; - const csvContent = lineArray.join('\n'); - const blob = new Blob(['\ufeff' + csvContent], { - type: 'text/csv;charset=utf-8;' - }); - const dwldLink = document.createElement('a'); - const url = URL.createObjectURL(blob); - const isSafariBrowser = navigator.userAgent.indexOf('Safari') != -1; - navigator.userAgent.indexOf('Chrome') == -1; - if (isSafariBrowser) { - dwldLink.setAttribute('target', '_blank'); + + ngOnInit() { + this.wrapperHide = false; + const ref = this; + this.dtOptions = { + pageLength: 10, + retrieve: true, + autoWidth: false, + dom: 'tpli', + colResize: false, + 'aLengthMenu': [[10, 20, 35, 50, -1], [10, 20, 35, 50, 'All']], + 'responsive': true, + drawCallback: function () { + if (jQuery('#switchDataTable tbody tr').length < 10) { + jQuery('#switchDataTable_next').addClass('disabled'); + } else { + jQuery('#switchDataTable_next').removeClass('disabled'); + } + }, + 'aoColumns': [ + {sWidth: '10%'}, + {sWidth: '10%', 'sType': 'name', 'bSortable': true}, + {sWidth: '10%'}, + {sWidth: '10%'}, + {sWidth: '10%'}, + {sWidth: '10%'}, + {sWidth: '15%'}, + {sWidth: '25%'}, + {sWidth: '10%'} + ], + language: { + searchPlaceholder: 'Search' + }, + initComplete: function (settings, json) { + setTimeout(function () { + ref.loaderService.hide(); + ref.wrapperHide = true; + }, this.data.length / 2); + }, + columnDefs: [ + {targets: [4], visible: false}, + {targets: [9], visible: false} + ] + }; + + this.fetchSwitchFlowDataObj(); + } - dwldLink.setAttribute('href', url); - dwldLink.setAttribute('download', fileName + '.csv'); - dwldLink.style.visibility = 'hidden'; - document.body.appendChild(dwldLink); - dwldLink.click(); - document.body.removeChild(dwldLink); - } - - - ngAfterViewInit(): void { - this.dtTrigger.next(); - this.datatableElement.dtInstance.then((dtInstance: DataTables.Api) => { - dtInstance.columns().every(function() { 
- const that = this; - $('input', this.header()).on('keyup change', function() { - if (that.search() !== this['value']) { - that.search(this['value']).draw(); - } + + async fetchSwitchFlowDataObj() { + if (!this.data || this.data.length === 0) { + return; + } + let processComplete = 1; + this.data.forEach((switchDetail, i) => { + this.flowSubscription[i] = this.switchService.getSwitchFlows( + this.switchService.extractSwitchId(switchDetail), + !!switchDetail['inventory_switch_detail'], null) + .subscribe(flows => { + const flowsData: any = flows; + switchDetail['sumofbandwidth'] = 0; + switchDetail['noofflows'] = 0; + if (flowsData && flowsData.length) { + for (const flow of flowsData) { + switchDetail['sumofbandwidth'] = switchDetail['sumofbandwidth'] + (flow.maximum_bandwidth / 1000); + } + if (switchDetail['sumofbandwidth']) { + switchDetail['sumofbandwidth'] = switchDetail['sumofbandwidth'].toFixed(3); + } + switchDetail['noofflows'] = flowsData.length; + } + }, error => { + switchDetail['sumofbandwidth'] = 0; + switchDetail['noofflows'] = 0; + }, () => { + processComplete = processComplete + 1; + if (this.data.length === processComplete) { + this.enableExportBtn = true; + } + }); }); - }); - }); - this.checkSwitchSettings(); - } - - ngOnDestroy(): void { - this.dtTrigger.unsubscribe(); - if (this.flowSubscription && this.flowSubscription.length) { - this.flowSubscription.forEach((subscription) => subscription.unsubscribe()); - this.flowSubscription = []; } - } + exportCsv(val) { + let headings = ['Switch ID', 'Name', 'Address', 'Hostname', 'Pop Location', 'Sum(Bandwidth) of Flows(Mbps)', 'No Of Flows', 'Description', 'State', 'Evacuate', 'Hardware', 'Location', 'Manufacturer', 'Version', 'Port', 'Serial Number', 'Software', 'Under Maintenance']; - fulltextSearch(value: any) { - if (this.dtTrigger) { - this.datatableElement.dtInstance.then((dtInstance: DataTables.Api) => { - dtInstance.search(value) - .draw(); + if (val) { + headings = ['Switch ID', 'Name', 'Address', 'Hostname', 'Pop Location', 'Sum(Bandwidth) of Flows(Mbps)', 'No Of Flows', 'Description', 'State']; + } + const switchService = this.switchService; + const lineArray = []; + lineArray.push(headings); + this.data.forEach(function (switchDetail) { + const line = []; + + line.push('"' + (switchService.extractSwitchId(switchDetail) || '-') + '"'); + line.push('"' + ((switchDetail.name) ? switchDetail.name : '-') + '"'); + line.push('"' + ((switchDetail.address) ? switchDetail.address : '-') + '"'); + line.push('"' + ((switchDetail.hostname) ? switchDetail.hostname : '-') + '"'); + line.push('"' + (switchDetail['inventory_switch_detail']?.['pop-location'] || '-') + '"'); + line.push('"' + ((switchDetail.sumofbandwidth || switchDetail.sumofbandwidth === 0) ? switchDetail.sumofbandwidth : '-') + '"'); + line.push('"' + ((switchDetail.noofflows || switchDetail.noofflows === 0) ? switchDetail.noofflows : '-') + '"'); + line.push('"' + (switchDetail.description || switchDetail.inventory_switch_detail?.description || '-') + '"'); + line.push('"' + (switchService.extractState(switchDetail) || '-') + '"'); + if (!val) { + const locationString = 'longitude:' + ((switchDetail.location.longitude) ? switchDetail.location.longitude : '-') + + ', latitude:' + ((switchDetail.location.latitude) ? switchDetail.location.latitude : '-') + + ', city:' + ((switchDetail.location.city) ? switchDetail.location.city : '-') + ', street:' + + ((switchDetail.location.street) ? 
switchDetail.location.street : '-') + ', Country:' + + ((switchDetail.location.country) ? switchDetail.location.country : '-'); + + line.push('"' + ((switchDetail.evacuate) ? switchDetail.evacuate : 'false') + '"'); + line.push('"' + ((switchDetail.hardware) ? switchDetail.hardware : '-') + '"'); + line.push('"' + locationString + '"'); + line.push('"' + ((switchDetail.manufacturer || switchDetail.inventory_switch_detail?.manufacturer || '-') + '"')); + line.push('"' + ((switchDetail.of_version) ? switchDetail.of_version : '-') + '"'); + line.push('"' + ((switchDetail.port) ? switchDetail.port : '-') + '"'); + line.push('"' + ((switchDetail.serial_number) ? switchDetail.serial_number : '-') + '"'); + line.push('"' + ((switchDetail.software) ? switchDetail.software : '-') + '"'); + line.push('"' + ((switchDetail.under_maintenance) ? switchDetail.under_maintenance : 'false') + '"'); + } + + const csvLine = line.join(','); + lineArray.push(csvLine); + }); + const fileName = 'OPEN KILDA - Switches'; + const csvContent = lineArray.join('\n'); + const blob = new Blob(['\ufeff' + csvContent], { + type: 'text/csv;charset=utf-8;' }); + const dwldLink = document.createElement('a'); + const url = URL.createObjectURL(blob); + const isSafariBrowser = navigator.userAgent.indexOf('Safari') !== -1; + if (isSafariBrowser) { + dwldLink.setAttribute('target', '_blank'); + } + dwldLink.setAttribute('href', url); + dwldLink.setAttribute('download', fileName + '.csv'); + dwldLink.style.visibility = 'hidden'; + document.body.appendChild(dwldLink); + dwldLink.click(); + document.body.removeChild(dwldLink); } - } - - ngOnChanges(change: SimpleChanges) { - if (change.data) { - if (change.data.currentValue) { - this.data = change.data.currentValue; - this.clipBoardItems = this.data; - } + + + ngAfterViewInit(): void { + this.dtTrigger.next(); + this.datatableElement.dtInstance.then((dtInstance: DataTables.Api) => { + dtInstance.columns().every(function () { + const that = this; + $('input', this.header()).on('keyup change', function () { + if (that.search() !== this['value']) { + that.search(this['value']).draw(); + } + }); + }); + }); + this.checkSwitchSettings(); } - if (typeof(change.textSearch) !== 'undefined' && change.textSearch.currentValue) { - this.fulltextSearch(change.textSearch.currentValue); + + ngOnDestroy(): void { + this.dtTrigger.unsubscribe(); + if (this.flowSubscription && this.flowSubscription.length) { + this.flowSubscription.forEach((subscription) => subscription.unsubscribe()); + this.flowSubscription = []; + } + } - } - - showSwitch(switchObj: Switch) { - const switchDetailsJSON = { - 'switch_id': switchObj.switch_id, - 'name': switchObj.name, - 'common-name': switchObj['common-name'], - 'address': switchObj.address, - 'hostname': switchObj.hostname, - 'description': switchObj.description, - 'state': switchObj.state - }; - const switchDetailsKey = 'switchDetailsKey_' + switchObj.switch_id; - localStorage.setItem( - switchDetailsKey, - JSON.stringify(switchDetailsJSON) - ); - localStorage.setItem('switchFilterFlag', this.switchFilterFlag); - this.router.navigate(['/switches/details/' + switchObj.switch_id]); - } - - checkValue(value) { - if (value === '' || value == undefined) { - return '-'; - } else { - return value; + + fulltextSearch(value: any) { + if (this.dtTrigger) { + this.datatableElement.dtInstance.then((dtInstance: DataTables.Api) => { + dtInstance.search(value) + .draw(); + }); + } } - } - - toggleSearch(e, inputContainer) { - event.stopPropagation(); - this[inputContainer] = 
this[inputContainer] ? false : true; - if (this[inputContainer]) { - setTimeout(() => { - this.renderer.selectRootElement('#' + inputContainer).focus(); - }); - } else { - setTimeout(() => { - this.renderer.selectRootElement('#' + inputContainer).value = ''; - jQuery('#' + inputContainer).trigger('change'); - }); + + ngOnChanges(change: SimpleChanges) { + if (change.data) { + if (change.data.currentValue) { + this.data = change.data.currentValue; + this.clipBoardItems = this.data; + } + } + if (typeof (change.textSearch) !== 'undefined' && change.textSearch.currentValue) { + this.fulltextSearch(change.textSearch.currentValue); + } } - } - stopPropagationmethod(e) { - event.stopPropagation(); + showSwitch(switchObj: Switch) { + const swId = switchObj.switch_id || switchObj['inventory_switch_detail']?.['switch-id']; + const switchDetailsJSON = { + 'switch_id': swId, + 'name': switchObj.name || switchObj['inventory_switch_detail']?.name, + 'common-name': switchObj['inventory_switch_detail']?.['common-name'], + 'address': switchObj.address, + 'hostname': switchObj.hostname, + 'description': switchObj.description || switchObj['inventory_switch_detail']?.['description'], + 'state': switchObj.state || switchObj['inventory_switch_detail']?.['status'] + }; + const switchDetailsKey = 'switchDetailsKey_' + swId; + localStorage.setItem(switchDetailsKey, JSON.stringify(switchDetailsJSON)); + sessionStorage.setItem('switchFilterFlag', this.switchFilterFlag); + this.router.navigate(['/switches/details/' + swId]); + } - if (e.key === 'Enter') { - return false; + checkValue(value) { + if (value === '' || value === undefined) { + return '-'; + } else { + return value; + } } - } - descrepancyString(row) { - const text = []; - if (row.hasOwnProperty('controller-switch')) { - if (row['controller-switch']) { - text.push('controller:true'); + + toggleSearch(e, inputContainer) { + event.stopPropagation(); + this[inputContainer] = !this[inputContainer]; + if (this[inputContainer]) { + setTimeout(() => { + this.renderer.selectRootElement('#' + inputContainer).focus(); + }); } else { - text.push('controller:false'); + setTimeout(() => { + this.renderer.selectRootElement('#' + inputContainer).value = ''; + jQuery('#' + inputContainer).trigger('change'); + }); } - } else { - text.push('controller:false'); } - if (row.hasOwnProperty('inventory-switch')) { - if (row['inventory-switch']) { - text.push('inventory:true'); - } else { - text.push('inventory:false'); - } - } else { - text.push('inventory:false'); + stopPropagationmethod(e) { + event.stopPropagation(); + + if (e.key === 'Enter') { + return false; + } + } + + descrepancyString(row) { + const text = []; + if (this.switchService.isControllerSwitch(row)) { + text.push('controller:true'); + } else { + text.push('controller:false'); + } + + if (this.switchService.isInventorySwitch(row)) { + text.push('inventory:true'); + } else { + text.push('inventory:false'); + } + return text.join(', '); } - return text.join(', '); - } + checkSwitchSettings() { - checkSwitchSettings() { + this.hasStoreSetting = localStorage.getItem('hasSwtStoreSetting') == '1'; + if (this.hasStoreSetting) { + this.datatableElement.dtInstance.then((dtInstance: DataTables.Api) => { + dtInstance.columns([4]).visible(true); + }); + } else { + this.datatableElement.dtInstance.then((dtInstance: DataTables.Api) => { + dtInstance.columns([4]).visible(false); + }); - this.hasStoreSetting = localStorage.getItem('hasSwtStoreSetting') == '1' ? 
true : false; - if (this.hasStoreSetting) { - this.datatableElement.dtInstance.then((dtInstance: DataTables.Api) => { - dtInstance.columns( [4] ).visible( true ); - }); - } else { - this.datatableElement.dtInstance.then((dtInstance: DataTables.Api) => { - dtInstance.columns( [4] ).visible( false ); - }); + } + } + copyToClip(event: any, copyItem: string, index: string | number) { + let copyData; + if (copyItem == 'name') { + copyData = (this.clipBoardItems[index]?.inventory_switch_detail?.['common-name']) ? + this.checkValue(this.clipBoardItems[index].inventory_switch_detail['common-name']) : + this.checkValue(this.clipBoardItems[index][copyItem]); + } else { + copyData = this.checkValue(this.clipBoardItems[index][copyItem]); + } + + this.clipboardService.copyFromContent(copyData); } - } - - copyToClip(event, copyItem, index) { - let copyData; - if (copyItem == 'name') { - copyData = (this.clipBoardItems[index]['common-name']) ? this.checkValue(this.clipBoardItems[index]['common-name']) : this.checkValue(this.clipBoardItems[index][copyItem]); - } else { - copyData = this.checkValue(this.clipBoardItems[index][copyItem]); + + extractState(switchDetail: any): string { + return this.switchService.extractState(switchDetail); } - this.clipboardService.copyFromContent(copyData); - } + hasDiscrepancy(switchDetail: any): boolean { + return this.switchService.hasDiscrepancy(switchDetail); + } - + extractSwitchId(switchDetail: any): string { + return this.switchService.extractSwitchId(switchDetail); + } + isControllerSwitch(switchDetail: any): boolean { + return this.switchService.isControllerSwitch(switchDetail); + } } diff --git a/src-gui/ui/src/app/modules/switches/switch-detail/switch-detail.component.html b/src-gui/ui/src/app/modules/switches/switch-detail/switch-detail.component.html index 2a0b10fc62a..96af9adcb39 100644 --- a/src-gui/ui/src/app/modules/switches/switch-detail/switch-detail.component.html +++ b/src-gui/ui/src/app/modules/switches/switch-detail/switch-detail.component.html @@ -1,256 +1,394 @@
[switch-detail.component.html hunk — the template markup is not recoverable here; only the interpolated text survived. The reworked template splits the view into a controller-switch block (name, switch id, hostname, address, description, state, manufacturer, hardware, OF version, software, serial number, location/pop fields, flow count and bandwidth sum) and an inventory-switch block (name, common-name, switch-id, uuid, description, status, manufacturer, software-version, serial-number, rack-location, model, pop-location fields, rack-number), with copy-to-clipboard controls and the notes "Switch doesn't exist in the controller", "Switch doesn't exist in the inventory", and "There is discrepancy in switch status".]
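The refactored list and detail components read switch attributes through SwitchService helpers (extractSwitchId, extractState, isControllerSwitch, isInventorySwitch, hasDiscrepancy) that fall back from the controller record to inventory_switch_detail. Those helpers are not part of this hunk, so the sketch below only illustrates the fallback pattern they appear to follow; any shape or name beyond what the diff itself shows is an assumption.

```typescript
// Hypothetical sketch of the SwitchService fallback helpers used by the
// refactored components. The real implementations live in switch.service.ts
// and may differ.
export interface SwitchRecord {
    switch_id?: string;
    state?: string;
    inventory_switch_detail?: { 'switch-id'?: string; status?: string };
}

export function extractSwitchId(sw: SwitchRecord): string {
    // Prefer the controller-reported id, fall back to the inventory record.
    return sw.switch_id ?? sw.inventory_switch_detail?.['switch-id'] ?? '';
}

export function extractState(sw: SwitchRecord): string {
    // Controller state wins; otherwise use the inventory status.
    return sw.state ?? sw.inventory_switch_detail?.status ?? '';
}

export function isControllerSwitch(sw: SwitchRecord): boolean {
    return sw.switch_id != null;
}

export function isInventorySwitch(sw: SwitchRecord): boolean {
    return sw.inventory_switch_detail != null;
}
```

Centralising the fallback in the service keeps the templates and components free of repeated `a || b['inventory_switch_detail']` chains, which is the pattern visible throughout the hunks above.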
- - - Copy to Clipboard - + + Copy to Clipboard + + + + + + Copy to Clipboard + - - Copy to Clipboard - + + Copy to Clipboard + - - Copy to Clipboard - + + Copy to Clipboard + diff --git a/src-gui/ui/src/app/modules/switches/switch-detail/switch-detail.component.ts b/src-gui/ui/src/app/modules/switches/switch-detail/switch-detail.component.ts index 3232f942000..33c86bd03dd 100644 --- a/src-gui/ui/src/app/modules/switches/switch-detail/switch-detail.component.ts +++ b/src-gui/ui/src/app/modules/switches/switch-detail/switch-detail.component.ts @@ -1,476 +1,480 @@ -import { Component, OnInit, AfterViewInit, OnDestroy, NgModuleRef } from '@angular/core'; -import { SwitchService } from '../../../common/services/switch.service'; -import { SwitchidmaskPipe } from '../../../common/pipes/switchidmask.pipe'; -import { ToastrService } from 'ngx-toastr'; -import { Router, NavigationEnd, ActivatedRoute} from '@angular/router'; -import { filter } from 'rxjs/operators'; -import { LoaderService } from '../../../common/services/loader.service'; -import { ClipboardService } from 'ngx-clipboard'; -import { Title } from '@angular/platform-browser'; -import { CommonService } from '../../../common/services/common.service'; -import { StoreSettingtService } from 'src/app/common/services/store-setting.service'; -import { FormGroup, FormBuilder } from '@angular/forms'; -import { NgbModal } from '@ng-bootstrap/ng-bootstrap'; -import { ModalconfirmationComponent } from 'src/app/common/components/modalconfirmation/modalconfirmation.component'; -import { IslmaintenancemodalComponent } from 'src/app/common/components/islmaintenancemodal/islmaintenancemodal.component'; -import { ModalComponent } from '../../../common/components/modal/modal.component'; -import { OtpComponent } from '../../../common/components/otp/otp.component'; -import { MessageObj } from 'src/app/common/constants/constants'; -import { SwitchupdatemodalComponent } from 'src/app/common/components/switchupdatemodal/switchupdatemodal.component'; +import {AfterViewInit, Component, OnDestroy, OnInit} from '@angular/core'; +import {SwitchService} from '../../../common/services/switch.service'; +import {SwitchidmaskPipe} from '../../../common/pipes/switchidmask.pipe'; +import {ToastrService} from 'ngx-toastr'; +import {ActivatedRoute, NavigationEnd, Router} from '@angular/router'; +import {filter} from 'rxjs/operators'; +import {LoaderService} from '../../../common/services/loader.service'; +import {ClipboardService} from 'ngx-clipboard'; +import {Title} from '@angular/platform-browser'; +import {CommonService} from '../../../common/services/common.service'; +import {StoreSettingtService} from 'src/app/common/services/store-setting.service'; +import {FormBuilder, FormGroup} from '@angular/forms'; +import {NgbModal} from '@ng-bootstrap/ng-bootstrap'; +import {ModalconfirmationComponent} from 'src/app/common/components/modalconfirmation/modalconfirmation.component'; +import {IslmaintenancemodalComponent} from 'src/app/common/components/islmaintenancemodal/islmaintenancemodal.component'; +import {ModalComponent} from '../../../common/components/modal/modal.component'; +import {OtpComponent} from '../../../common/components/otp/otp.component'; +import {MessageObj} from 'src/app/common/constants/constants'; +import {SwitchupdatemodalComponent} from 'src/app/common/components/switchupdatemodal/switchupdatemodal.component'; @Component({ - selector: 'app-switch-detail', - templateUrl: './switch-detail.component.html', - styleUrls: ['./switch-detail.component.css'] + 
selector: 'app-switch-detail', + templateUrl: './switch-detail.component.html', + styleUrls: ['./switch-detail.component.css'] }) export class SwitchDetailComponent implements OnInit, AfterViewInit, OnDestroy { - switchDetail: any = {}; - switch_id: string; - switchNameForm: FormGroup; - loadswitchFlows = false; - name: string; - address: string; - hostname: string; - description: string; - state: string; - switchFlows: any = []; - openedTab = 'port'; - isSwitchNameEdit = false; - evacuate = false; - underMaintenance: boolean; - flowBandwidthSum: any = 0; - flowBandwidthFlag = false; - currentRoute = 'switch-details'; - switchFlowFlag: any = 'controller'; - clipBoardItems = { - sourceSwitchName: '', - sourceSwitch: '', - targetSwitchName: '' - }; - switchId = null; - hasStoreSetting; - settingSubscriber = null; - - descrepancyData = { - status: { - controller: '-', - inventory: '-' + switchDetail: any = {}; + inventorySwitch: any = {}; + switchId = null; + switchNameForm: FormGroup; + loadswitchFlows = false; + name: string; + switchFlows: any = []; + openedTab = 'port'; + isSwitchNameEdit = false; + evacuate = false; + underMaintenance: boolean; + flowBandwidthSum: any = 0; + flowBandwidthFlag = false; + currentRoute = 'switch-details'; + switchFlowFlag: any = 'controller'; + clipBoardItems = { + sourceSwitchName: '', + sourceSwitch: '', + inventorySourceSwitch: '', + targetSwitchName: '' + }; + hasStoreSetting; + settingSubscriber = null; + + isLoaderActive = true; + + statusDiscrepancy = false; + discrepancyData = { + status: { + controller: '-', + inventory: '-' + } + }; + + + constructor(private switchService: SwitchService, + private maskPipe: SwitchidmaskPipe, + private toastr: ToastrService, + private router: Router, + private route: ActivatedRoute, + private loaderService: LoaderService, + private clipboardService: ClipboardService, + private titleService: Title, + private commonService: CommonService, + private storeSwitchService: StoreSettingtService, + private formBuilder: FormBuilder, + private modalService: NgbModal, + ) { + + this.hasStoreSetting = localStorage.getItem('hasSwtStoreSetting') == '1'; } - }; - - isLoaderActive = true; - statusDescrepancy = false; + ngOnInit() { + this.titleService.setTitle('OPEN KILDA - View Switch'); + this.route.params.subscribe(params => { + this.switchId = params['id']; + this.switchFlowFlag = filter; + localStorage.removeItem('portLoaderEnabled'); + this.getSwitchDetail(params['id'], 'inventory'); + }); - constructor(private switchService: SwitchService, - private maskPipe: SwitchidmaskPipe, - private toastr: ToastrService, - private router: Router, - private route: ActivatedRoute, - private loaderService: LoaderService, - private clipboardService: ClipboardService, - private titleService: Title, - private commonService: CommonService, - private storeSwitchService: StoreSettingtService, - private formBuilder: FormBuilder, - private modalService: NgbModal, - ) { - - this.hasStoreSetting = localStorage.getItem('hasSwtStoreSetting') == '1' ? 
true : false; + if (this.router.url.includes('/port')) { + this.router.navigated = false; + this.router.navigate([this.router.url]); + } + this.router.events + .pipe(filter(event => event instanceof NavigationEnd)).pipe(filter(event => event instanceof NavigationEnd)) + .subscribe(event => { + const tempRoute: any = event; + if (tempRoute.url.includes('/port')) { + this.currentRoute = 'port-details'; + } else { + this.currentRoute = 'switch-details'; + } + }); + this.switchNameForm = this.formBuilder.group({ + name: [''] + }); } - ngOnInit() { - this.titleService.setTitle('OPEN KILDA - View Switch'); - - this.route.params.subscribe(params => { - this.switchId = params['id']; - const filter = localStorage.getItem('switchFilterFlag'); - this.switchFlowFlag = filter; - localStorage.removeItem('portLoaderEnabled'); - this.getSwitchDetail(params['id'], filter); - }); - - if (this.router.url.includes('/port')) { - this.router.navigated = false; - this.router.navigate([this.router.url]); + maskControllerSwitchId(switchType, e) { + if (e.target.checked) { + this.switchDetail.switch_id = this.maskPipe.transform(this.switchDetail.switch_id, 'legacy'); + } else { + this.switchDetail.switch_id = this.maskPipe.transform(this.switchDetail.switch_id, 'kilda'); + } + this.clipBoardItems.sourceSwitch = this.switchDetail.switch_id; } - this.router.events - .pipe(filter(event => event instanceof NavigationEnd)).pipe(filter(event => event instanceof NavigationEnd)) - .subscribe(event => { - const tempRoute: any = event; - if (tempRoute.url.includes('/port')) { - this.currentRoute = 'port-details'; + + maskInventorySwitchId(switchType, e) { + if (e.target.checked) { + this.switchDetail.inventory_switch_detail['switch-id'] = this.maskPipe.transform(this.switchDetail.inventory_switch_detail['switch-id'], 'legacy'); } else { - this.currentRoute = 'switch-details'; + this.switchDetail.inventory_switch_detail['switch-id'] = this.maskPipe.transform(this.switchDetail.inventory_switch_detail['switch-id'], 'kilda'); } - }); - this.switchNameForm = this.formBuilder.group({ - name: [''] - }); - } - - maskSwitchId(switchType, e) { - if (e.target.checked) { - this.switchDetail.switch_id = this.maskPipe.transform(this.switchDetail.switch_id, 'legacy'); - } else { - this.switchDetail.switch_id = this.maskPipe.transform(this.switchDetail.switch_id, 'kilda'); + this.clipBoardItems.inventorySourceSwitch = this.switchDetail.inventory_switch_detail['switch-id']; } - this.clipBoardItems.sourceSwitch = this.switchDetail.switch_id; - } - - deleteSwitch() { - const is2FaEnabled = localStorage.getItem('is2FaEnabled'); - const self = this; - const modalReff = this.modalService.open(ModalconfirmationComponent); - modalReff.componentInstance.title = 'Delete Switch'; - modalReff.componentInstance.content = 'Are you sure you want to perform delete action ?'; - modalReff.result.then((response) => { - if (response && response == true) { - if (is2FaEnabled == 'true') { - const modalRef = this.modalService.open(OtpComponent); - modalRef.componentInstance.emitService.subscribe( - otp => { - if (otp) { - this.loaderService.show(MessageObj.deleting_switch); - this.switchService.deleteSwitch( - this.switchId, - { code: otp }, - response => { - modalRef.close(); - this.toastr.success(MessageObj.switch_deleted, 'Success!'); - this.loaderService.hide(); - const switchDetailsKey = 'switchDetailsKey_' + this.switchId; - localStorage.removeItem(switchDetailsKey); - localStorage.removeItem('SWITCHES_LIST'); - localStorage.removeItem('switchPortDetail'); 
- this.router.navigate(['/switches']); - }, - error => { - this.loaderService.hide(); - this.toastr.error( - error['error-auxiliary-message'], - 'Error!' + + deleteSwitch() { + const is2FaEnabled = localStorage.getItem('is2FaEnabled'); + const self = this; + const modalReff = this.modalService.open(ModalconfirmationComponent); + modalReff.componentInstance.title = 'Delete Switch'; + modalReff.componentInstance.content = 'Are you sure you want to perform delete action ?'; + modalReff.result.then((response) => { + if (response && response == true) { + if (is2FaEnabled == 'true') { + const modalRef = this.modalService.open(OtpComponent); + modalRef.componentInstance.emitService.subscribe( + otp => { + if (otp) { + this.loaderService.show(MessageObj.deleting_switch); + this.switchService.deleteSwitch( + this.switchId, + {code: otp}, + response => { + modalRef.close(); + this.toastr.success(MessageObj.switch_deleted, 'Success!'); + this.loaderService.hide(); + const switchDetailsKey = 'switchDetailsKey_' + this.switchId; + localStorage.removeItem(switchDetailsKey); + localStorage.removeItem('SWITCHES_LIST'); + localStorage.removeItem('switchPortDetail'); + this.router.navigate(['/switches']); + }, + error => { + this.loaderService.hide(); + this.toastr.error( + error['error-auxiliary-message'], + 'Error!' + ); + + } + ); + } else { + this.toastr.error(MessageObj.otp_not_detected, 'Error!'); + } + }, + error => { + } ); + } else { + const modalRef2 = this.modalService.open(ModalComponent); + modalRef2.componentInstance.title = 'Warning'; + modalRef2.componentInstance.content = MessageObj.not_authorised_to_delete_switch; + } + } + }); + } - } - ); - } else { - this.toastr.error(MessageObj.otp_not_detected, 'Error!'); - } - }, - error => { + toggleTab(tab, enableLoader = false) { + this.openedTab = tab; + if (tab == 'flows') { + if (this.switchFlows && this.switchFlows.length) { + } else { + this.loadSwitchFlows(this.switchDetail.switch_id, true); } - ); + } else if (enableLoader) { + this.isLoaderActive = true; } else { - const modalRef2 = this.modalService.open(ModalComponent); - modalRef2.componentInstance.title = 'Warning'; - modalRef2.componentInstance.content = MessageObj.not_authorised_to_delete_switch; + this.isLoaderActive = false; } - } - }); - } - - toggleTab(tab, enableLoader = false) { - this.openedTab = tab; - if (tab == 'flows') { - if (this.switchFlows && this.switchFlows.length) { - - } else { - this.loadSwitchFlows(this.switchDetail.switch_id, true); - } - } else if (enableLoader) { - this.isLoaderActive = true; - } else { - this.isLoaderActive = false; } - } - refreshSwitchFlows() { - this.loadSwitchFlows(this.switchDetail.switch_id, true); - } - loadSwitchFlows(switchId, loader) { - if (loader) { - this.loaderService.show('Loading Flows..'); - } - const filter = this.switchFlowFlag == 'inventory' ; - this.loadswitchFlows = false; - this.flowBandwidthFlag = true; - this.flowBandwidthSum = 0; - this.switchService.getSwitchFlows(switchId, filter, null).subscribe(data => { - this.switchFlows = data; - if (this.switchFlows && this.switchFlows.length) { - for (const flow of this.switchFlows) { - this.flowBandwidthSum = parseFloat(this.flowBandwidthSum) + (flow.maximum_bandwidth / 1000); + + refreshSwitchFlows() { + if (this.isControllerSwitch()) { + this.loadSwitchFlows(this.switchDetail.switch_id, true); } - } else { - if (this.switchFlows == null) { - this.switchFlows = []; + } + + loadSwitchFlows(switchId, loader) { + if (loader) { + this.loaderService.show('Loading Flows..'); } - 
} - if (this.flowBandwidthSum && parseFloat(this.flowBandwidthSum)) { - this.flowBandwidthSum = parseFloat(this.flowBandwidthSum).toFixed(3); - } - this.loadswitchFlows = true; - this.loaderService.hide(); - this.flowBandwidthFlag = false; - }, error => { - this.loaderService.hide(); - this.switchFlows = []; - this.flowBandwidthFlag = false; - this.loadswitchFlows = true; - }); - } - - copyToClip(event, copyItem) { - this.clipboardService.copyFromContent(this.clipBoardItems[copyItem]); - } - - editSwitchName() { - this.isSwitchNameEdit = true; - } - - cancelSwitchName() { - this.isSwitchNameEdit = false; - } - - saveSwitchName() { - const modalReff = this.modalService.open(ModalconfirmationComponent); - modalReff.componentInstance.title = 'Confirmation'; - modalReff.componentInstance.content = 'Are you sure you want to update switch name ?'; - modalReff.result.then((response) => { - if (response && response == true) { - this.isSwitchNameEdit = false; - const self = this; - this.loaderService.show(MessageObj.saving_switchname); - const name = this.switchNameForm.controls['name'].value; - const switchId = this.switch_id; - this.switchService.saveSwitcName(name, switchId).subscribe((response) => { - self.loaderService.hide(); - self.name = response.name; - self.switchDetail.name = response.name; - const switchDetailsKey = 'switchDetailsKey_' + this.switch_id; - const retrievedSwitchObject = JSON.parse(localStorage.getItem(switchDetailsKey)); - localStorage.removeItem(switchDetailsKey); - retrievedSwitchObject.name = response.name; - localStorage.setItem(switchDetailsKey, JSON.stringify(retrievedSwitchObject)); - localStorage.removeItem('SWITCHES_LIST'); - }, (error) => { - this.toastr.error(error.error['error-message']); + const filter = this.switchFlowFlag == 'inventory'; + this.loadswitchFlows = false; + this.flowBandwidthFlag = true; + this.flowBandwidthSum = 0; + this.switchService.getSwitchFlows(switchId, filter, null).subscribe(data => { + this.switchFlows = data; + if (this.switchFlows && this.switchFlows.length) { + for (const flow of this.switchFlows) { + this.flowBandwidthSum = parseFloat(this.flowBandwidthSum) + (flow.maximum_bandwidth / 1000); + } + } else { + if (this.switchFlows == null) { + this.switchFlows = []; + } + } + if (this.flowBandwidthSum && parseFloat(this.flowBandwidthSum)) { + this.flowBandwidthSum = parseFloat(this.flowBandwidthSum).toFixed(3); + } + this.loadswitchFlows = true; this.loaderService.hide(); - }); - } - }); - } + this.flowBandwidthFlag = false; + }, error => { + this.loaderService.hide(); + this.switchFlows = []; + this.flowBandwidthFlag = false; + this.loadswitchFlows = true; + }); + } - ngAfterViewInit() { + copyToClip(event, copyItem) { + this.clipboardService.copyFromContent(this.clipBoardItems[copyItem]); + } - } + editSwitchName() { + this.isSwitchNameEdit = true; + } - getSwitchDetail(switchId, filter) { + cancelSwitchName() { + this.isSwitchNameEdit = false; + } - this.loaderService.show(MessageObj.loading_switch_detail); + saveSwitchName() { + const modalReff = this.modalService.open(ModalconfirmationComponent); + modalReff.componentInstance.title = 'Confirmation'; + modalReff.componentInstance.content = 'Are you sure you want to update switch name ?'; + modalReff.result.then((response) => { + if (response && response == true) { + this.isSwitchNameEdit = false; + const self = this; + this.loaderService.show(MessageObj.saving_switchname); + const name = this.switchNameForm.controls['name'].value; + this.switchService.saveSwitcName(name, 
this.switchId).subscribe((response) => { + self.loaderService.hide(); + self.name = response.name; + self.switchDetail.name = response.name; + const switchDetailsKey = 'switchDetailsKey_' + this.switchId; + const retrievedSwitchObject = JSON.parse(localStorage.getItem(switchDetailsKey)); + localStorage.removeItem(switchDetailsKey); + retrievedSwitchObject.name = response.name; + localStorage.setItem(switchDetailsKey, JSON.stringify(retrievedSwitchObject)); + localStorage.removeItem('SWITCHES_LIST'); + }, (error) => { + this.toastr.error(error.error['error-message']); + this.loaderService.hide(); + }); + } + }); + } - this.settingSubscriber = this.storeSwitchService.switchSettingReceiver.subscribe(setting => { - this.hasStoreSetting = localStorage.getItem('hasSwtStoreSetting') == '1' ? true : false; + ngAfterViewInit() { - let switchDetail = null; - if (filter == 'controller') { - const switchData = JSON.parse(localStorage.getItem('SWITCHES_LIST')) || {}; - const switchList = typeof(switchData.list_data) != 'undefined' ? switchData.list_data : []; - if (switchList && switchList.length) { - switchList.forEach(element => { - if (element.switch_id == switchId) { - switchDetail = element; - return; - } - }); - } - } else { - const switchData = JSON.parse(localStorage.getItem('SWITCHES_LIST_ALL')) || {}; - const switchList = typeof(switchData.list_data) != 'undefined' ? switchData.list_data : []; - if (switchList && switchList.length) { - switchList.forEach(element => { - if (element.switch_id == switchId) { - switchDetail = element; - return; + } + + getSwitchDetail(switchId, filter) { + this.loaderService.show(MessageObj.loading_switch_detail); + + this.settingSubscriber = this.storeSwitchService.switchSettingReceiver.subscribe(setting => { + this.hasStoreSetting = localStorage.getItem('hasSwtStoreSetting') == '1'; + let switchDetail = null; + let inventorySwitchDetail = null; + + const switchData = JSON.parse(localStorage.getItem('SWITCHES_LIST_ALL')) || {}; + const switchList = typeof (switchData.list_data) != 'undefined' ? switchData.list_data : []; + if (switchList && switchList.length) { + switchList.forEach(swDetail => { + if (swDetail.switch_id == switchId || swDetail?.inventory_switch_detail?.['switch-id'] == switchId) { + if (swDetail.switch_id != null || switchDetail == null) { + switchDetail = swDetail; + } + inventorySwitchDetail = swDetail.inventory_switch_detail; + return; + } + }); + } + + if (switchDetail && (switchDetail.switch_id || inventorySwitchDetail['switch-id'])) { + this.setSwitchDetails(switchDetail, inventorySwitchDetail); + } else { + this.switchService.getSwitchDetails(switchId, filter).subscribe((retrievedSwitchObject: any) => { + if (retrievedSwitchObject == null || retrievedSwitchObject.length == 0) { + this.handleEmptySwitchRes(); + } else { + this.setSwitchDetails(retrievedSwitchObject[0], retrievedSwitchObject[0].inventory_switch_detail); + } + }, err => { + this.handleEmptySwitchRes(); + } + ); + } } - }); - } - } - if (switchDetail && switchDetail.switch_id) { + ); + const query = {_: new Date().getTime()}; + this.storeSwitchService.checkSwitchStoreDetails(query); + } + + private handleEmptySwitchRes(message?: string) { + this.loaderService.hide(); + const errorMessage = message ? 
`Error: ${message}` : 'Error'; + this.toastr.error(MessageObj.no_switch_found, errorMessage); + this.router.navigate(['/switches']); + } + + + private setSwitchDetails(switchDetail: any, inventorySwitch: any) { this.switchDetail = switchDetail; - this.switch_id = switchDetail.switch_id; - this.switchNameForm.controls['name'].setValue(switchDetail.name); - this.name = switchDetail.name; - this.address = switchDetail.address; - this.hostname = switchDetail.hostname; - this.description = switchDetail.description; - this.state = switchDetail.state; + this.inventorySwitch = inventorySwitch; + const sw_id = switchDetail.switch_id || inventorySwitch['switch-id']; + this.name = switchDetail.name || inventorySwitch.name; + this.switchNameForm.controls['name'].setValue(this.name); + this.underMaintenance = switchDetail['under_maintenance']; this.clipBoardItems = Object.assign(this.clipBoardItems, { - - sourceSwitchName: switchDetail.name, - sourceSwitch: this.switch_id, + sourceSwitchName: this.name, + sourceSwitch: sw_id, + inventorySourceSwitch: inventorySwitch?.['switch-id'], targetSwitchName: switchDetail.hostname + }); + this.loaderService.hide(); + this.loadSwitchFlows(sw_id, false); - }); - this.loaderService.hide(); - if (switchDetail['discrepancy'] && (switchDetail['discrepancy']['status'])) { - if (switchDetail['discrepancy']['status']) { - this.statusDescrepancy = true; - this.descrepancyData.status.controller = (typeof(switchDetail['discrepancy']['status-value']['controller-status']) != 'undefined') ? switchDetail['discrepancy']['status-value']['controller-status'] : '-'; - this.descrepancyData.status.inventory = (typeof(switchDetail['discrepancy']['status-value']['inventory-status']) != 'undefined') ? switchDetail['discrepancy']['status-value']['inventory-status'] : '-'; + if (this.hasStoreSetting) { + if (switchDetail.state == inventorySwitch?.status) { + return; } - - } - this.loadSwitchFlows(this.switchDetail.switch_id, false); - } else { - this.switchService.getSwitchDetail(switchId, filter).subscribe((retrievedSwitchObject: any) => { - if (!retrievedSwitchObject) { - this.loaderService.hide(); - this.toastr.error(MessageObj.no_switch_found, 'Error'); - this.router.navigate([ - '/switches' - ]); - } else { - this.switchDetail = retrievedSwitchObject; - this.switch_id = retrievedSwitchObject.switch_id; - this.switchNameForm.controls['name'].setValue(retrievedSwitchObject.name); - this.name = retrievedSwitchObject.name; - this.address = retrievedSwitchObject.address; - this.hostname = retrievedSwitchObject.hostname; - this.description = retrievedSwitchObject.description; - this.state = retrievedSwitchObject.state; - this.underMaintenance = retrievedSwitchObject['under_maintenance']; - this.clipBoardItems = Object.assign(this.clipBoardItems, { - sourceSwitchName: retrievedSwitchObject.name, - sourceSwitch: this.switch_id, - targetSwitchName: retrievedSwitchObject.hostname - }); - this.loaderService.hide(); - if (retrievedSwitchObject['discrepancy'] && (retrievedSwitchObject['discrepancy']['status'])) { - if (retrievedSwitchObject['discrepancy']['status']) { - this.statusDescrepancy = true; - this.descrepancyData.status.controller = (typeof(retrievedSwitchObject['discrepancy']['status-value']['controller-status']) != 'undefined') ? retrievedSwitchObject['discrepancy']['status-value']['controller-status'] : '-'; - this.descrepancyData.status.inventory = (typeof(retrievedSwitchObject['discrepancy']['status-value']['inventory-status']) != 'undefined') ? 
retrievedSwitchObject['discrepancy']['status-value']['inventory-status'] : '-'; - } + this.statusDiscrepancy = true; + if (switchDetail.state != null) { + this.discrepancyData.status.controller = switchDetail.state; } - } - this.loadSwitchFlows(this.switchDetail.switch_id, false); - }, err => { - this.loaderService.hide(); - this.toastr.error(MessageObj.no_switch_found, 'Error'); - this.router.navigate(['/switches']); + if (inventorySwitch?.status != null) { + this.discrepancyData.status.inventory = inventorySwitch.status; + } + } + } + editSwitchLocation() { + const self = this; + const locationData = this.switchDetail.location; + locationData['pop'] = this.switchDetail.pop; + const modalRef = this.modalService.open(SwitchupdatemodalComponent); + modalRef.componentInstance.title = 'Update Switch Location'; + modalRef.componentInstance.data = locationData; + modalRef.result.then((response) => { + }, error => { }); - } - - }); - const query = {_: new Date().getTime()}; - this.storeSwitchService.checkSwitchStoreDetails(query); - - } - - editSwitchLocation() { - const self = this; - const locationData = this.switchDetail.location; - locationData['pop'] = this.switchDetail.pop; - const modalRef = this.modalService.open(SwitchupdatemodalComponent); - modalRef.componentInstance.title = 'Update Switch Location'; - modalRef.componentInstance.data = locationData ; - modalRef.result.then((response) => { - }, error => { - }); - modalRef.componentInstance.emitService.subscribe( - data => { - this.loaderService.show(MessageObj.apply_changes); - this.switchService.updateSwitch(data, this.switchId).subscribe((response) => { - this.toastr.success(MessageObj.switch_updated_success, 'Success'); - this.loaderService.hide(); - modalRef.componentInstance.activeModal.close(true); - this.switchDetail.pop = response.pop; - this.switchDetail.location = response.location; + modalRef.componentInstance.emitService.subscribe( + data => { + this.loaderService.show(MessageObj.apply_changes); + this.switchService.updateSwitch(data, this.switchId).subscribe((response) => { + this.toastr.success(MessageObj.switch_updated_success, 'Success'); + this.loaderService.hide(); + modalRef.componentInstance.activeModal.close(true); + this.switchDetail.pop = response.pop; + this.switchDetail.location = response.location; - }, error => { - this.loaderService.hide(); - const message = (error && error.error && typeof error.error['error-auxiliary-message'] != 'undefined') ? 
error.error['error-auxiliary-message'] : MessageObj.switch_updated_error; - this.toastr.error(message, 'Error'); - }); - }, - error => { - } - ); - } - switchMaintenance(e) { - const modalRef = this.modalService.open(IslmaintenancemodalComponent); - modalRef.componentInstance.title = 'Confirmation'; - modalRef.componentInstance.isMaintenance = !this.underMaintenance; - modalRef.componentInstance.content = 'Are you sure ?'; - this.underMaintenance = e.target.checked; - modalRef.result.then((response) => { - if (!response) { - this.underMaintenance = false; - } - }, error => { - this.underMaintenance = false; - }); - modalRef.componentInstance.emitService.subscribe( - evacuate => { - const data = {'under_maintenance': e.target.checked, 'evacuate': evacuate}; - this.loaderService.show(MessageObj.apply_changes); - this.switchService.switchMaintenance(data, this.switchId).subscribe((response) => { - this.toastr.success(MessageObj.maintenance_mode_changed, 'Success'); - this.loaderService.hide(); - this.underMaintenance = e.target.checked; - if (evacuate) { - location.reload(); + }, error => { + this.loaderService.hide(); + const message = (error && error.error && typeof error.error['error-auxiliary-message'] != 'undefined') ? error.error['error-auxiliary-message'] : MessageObj.switch_updated_error; + this.toastr.error(message, 'Error'); + }); + }, + error => { } - }, error => { - this.loaderService.hide(); - this.toastr.error(MessageObj.error_im_maintenance_mode, 'Error'); - }); - }, - error => { - } - ); - - } - - - evacuateSwitch(e) { - const modalRef = this.modalService.open(ModalconfirmationComponent); - modalRef.componentInstance.title = 'Confirmation'; - this.evacuate = e.target.checked; - if (this.evacuate) { - modalRef.componentInstance.content = 'Are you sure you want to evacuate all flows?'; - } else { - modalRef.componentInstance.content = 'Are you sure ?'; + ); + } + + switchMaintenance(e) { + const modalRef = this.modalService.open(IslmaintenancemodalComponent); + modalRef.componentInstance.title = 'Confirmation'; + modalRef.componentInstance.isMaintenance = !this.underMaintenance; + modalRef.componentInstance.content = 'Are you sure ?'; + this.underMaintenance = e.target.checked; + modalRef.result.then((response) => { + if (!response) { + this.underMaintenance = false; + } + }, error => { + this.underMaintenance = false; + }); + modalRef.componentInstance.emitService.subscribe( + evacuate => { + const data = {'under_maintenance': e.target.checked, 'evacuate': evacuate}; + this.loaderService.show(MessageObj.apply_changes); + this.switchService.switchMaintenance(data, this.switchId).subscribe((response) => { + this.toastr.success(MessageObj.maintenance_mode_changed, 'Success'); + this.loaderService.hide(); + this.underMaintenance = e.target.checked; + if (evacuate) { + location.reload(); + } + }, error => { + this.loaderService.hide(); + this.toastr.error(MessageObj.error_im_maintenance_mode, 'Error'); + }); + }, + error => { + } + ); + + } + + isControllerSwitch() { + return this.switchDetail?.switch_id != null; + } + + isInventorySwitch() { + return this.switchDetail.inventory_switch_detail != null; } - modalRef.result.then((response) => { - if (response && response == true) { - const data = {'under_maintenance': this.underMaintenance, 'evacuate': e.target.checked}; - this.switchService.switchMaintenance(data, this.switchId).subscribe((serverResponse) => { - this.toastr.success(MessageObj.flows_evacuated, 'Success'); - location.reload(); + + canChangeSwitchName() { + return 
!this.isSwitchNameEdit + && this.commonService.hasPermission('sw_switch_update_name') + && this.isControllerSwitch(); + } + + canEvacuate() { + return this.commonService.hasPermission('sw_switch_maintenance') && !this.isInventorySwitch(); + } + + hasPermission(permissionName: string): boolean { + return this.commonService.hasPermission(permissionName); + } + + evacuateSwitch(e) { + const modalRef = this.modalService.open(ModalconfirmationComponent); + modalRef.componentInstance.title = 'Confirmation'; + this.evacuate = e.target.checked; + if (this.evacuate) { + modalRef.componentInstance.content = 'Are you sure you want to evacuate all flows?'; + } else { + modalRef.componentInstance.content = 'Are you sure ?'; + } + modalRef.result.then((response) => { + if (response && response == true) { + const data = {'under_maintenance': this.underMaintenance, 'evacuate': e.target.checked}; + this.switchService.switchMaintenance(data, this.switchId).subscribe((serverResponse) => { + this.toastr.success(MessageObj.flows_evacuated, 'Success'); + location.reload(); + }, error => { + this.toastr.error(MessageObj.error_flows_evacuated, 'Error'); + }); + } else { + this.evacuate = false; + } }, error => { - this.toastr.error(MessageObj.error_flows_evacuated, 'Error'); + this.evacuate = false; }); - } else { - this.evacuate = false; - } - }, error => { - this.evacuate = false; - }); - } + } - ngOnDestroy() { - if (this.settingSubscriber) { - this.settingSubscriber.unsubscribe(); - this.settingSubscriber = null; + ngOnDestroy() { + if (this.settingSubscriber) { + this.settingSubscriber.unsubscribe(); + this.settingSubscriber = null; + } } - } } diff --git a/src-gui/ui/src/app/modules/switches/switch-list/switch-list.component.ts b/src-gui/ui/src/app/modules/switches/switch-list/switch-list.component.ts index 622ad556c7c..e46c9cc3ef0 100644 --- a/src-gui/ui/src/app/modules/switches/switch-list/switch-list.component.ts +++ b/src-gui/ui/src/app/modules/switches/switch-list/switch-list.component.ts @@ -30,7 +30,7 @@ export class SwitchListComponent implements OnDestroy, OnInit, AfterViewInit { hasStoreSetting = false; settingSubscriber = null; textSearch: any; - switchFilterFlag: string = localStorage.getItem('switchFilterFlag') || 'controller'; + switchFilterFlag: string = sessionStorage.getItem('switchFilterFlag') || 'controller'; constructor( private router: Router, @@ -76,7 +76,7 @@ export class SwitchListComponent implements OnDestroy, OnInit, AfterViewInit { this.dataSet = switchList; if (this.switchFilterFlag == 'inventory') { this.dataSet = this.dataSet.filter((d) => { - return d['inventory-switch']; + return d['inventory_switch_detail']; }); } setTimeout(() => { @@ -100,8 +100,7 @@ export class SwitchListComponent implements OnDestroy, OnInit, AfterViewInit { } this.loadingData = true; this.loaderService.show(MessageObj.loading_switches); - const query = {controller: this.switchFilterFlag == 'controller', _: new Date().getTime(), storeConfigurationStatus: this.hasStoreSetting }; - this.switchService.getSwitchList(query).subscribe( + this.switchService.getSwitchDetails(null,this.switchFilterFlag).subscribe( (data: any) => { const switchListData = JSON.stringify({'timeStamp': new Date().getTime(), 'list_data': data}); if (this.switchFilterFlag == 'controller') { @@ -117,7 +116,7 @@ export class SwitchListComponent implements OnDestroy, OnInit, AfterViewInit { this.dataSet = data; if (this.switchFilterFlag == 'inventory') { this.dataSet = this.dataSet.filter((d) => { - return d['inventory-switch']; + 
return d['inventory_switch_detail']; }); } } @@ -135,7 +134,7 @@ export class SwitchListComponent implements OnDestroy, OnInit, AfterViewInit { getStoreSwitchSettings() { const query = {_: new Date().getTime()}; this.settingSubscriber = this.storeSwitchService.switchSettingReceiver.subscribe(setting => { - this.hasStoreSetting = localStorage.getItem('hasSwtStoreSetting') == '1' ? true : false; + this.hasStoreSetting = localStorage.getItem('hasSwtStoreSetting') == '1'; this.loadSwitchList(this.switchFilterFlag); }); diff --git a/src-java/flowhs-topology/flowhs-messaging/build.gradle b/src-java/flowhs-topology/flowhs-messaging/build.gradle index bd72572cde3..8702dea6610 100644 --- a/src-java/flowhs-topology/flowhs-messaging/build.gradle +++ b/src-java/flowhs-topology/flowhs-messaging/build.gradle @@ -13,6 +13,8 @@ dependencies { implementation 'com.fasterxml.jackson.core:jackson-annotations' implementation('com.fasterxml.jackson.core:jackson-databind') + implementation project(':reroute-messaging') + implementation 'com.google.guava:guava' testImplementation 'org.junit.jupiter:junit-jupiter-api' testImplementation 'org.junit.jupiter:junit-jupiter-engine' diff --git a/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/command/flow/FlowRerouteFlushRequest.java b/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/command/flow/FlowRerouteFlushRequest.java new file mode 100644 index 00000000000..dc6b6b80417 --- /dev/null +++ b/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/command/flow/FlowRerouteFlushRequest.java @@ -0,0 +1,51 @@ +/* Copyright 2017 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.openkilda.messaging.command.flow; + +import org.openkilda.messaging.command.CommandData; +import org.openkilda.messaging.info.reroute.FlowType; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.EqualsAndHashCode; +import lombok.NonNull; +import lombok.ToString; +import lombok.Value; + +@Value +@EqualsAndHashCode(callSuper = true) +@ToString +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlowRerouteFlushRequest extends CommandData { + private static final long serialVersionUID = 1L; + + @JsonProperty("flow_id") + String flowId; + + FlowType flowType; + + String reason; + + @JsonCreator + public FlowRerouteFlushRequest(@NonNull @JsonProperty("flow_id") String flowId, + @JsonProperty("flow_type") FlowType flowType, + @JsonProperty("reason") String reason) { + this.flowId = flowId; + this.flowType = flowType; + this.reason = reason; + } +} diff --git a/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/info/flow/FlowRerouteFlushResponse.java b/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/info/flow/FlowRerouteFlushResponse.java new file mode 100644 index 00000000000..62ec9cf503c --- /dev/null +++ b/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/info/flow/FlowRerouteFlushResponse.java @@ -0,0 +1,56 @@ +/* Copyright 2017 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.openkilda.messaging.info.flow; + +import static org.openkilda.messaging.Utils.CORRELATION_ID; +import static org.openkilda.messaging.Utils.TIMESTAMP; + +import org.openkilda.messaging.info.InfoMessage; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import lombok.EqualsAndHashCode; +import lombok.ToString; +import lombok.Value; + + + +/** + * Represents a flow reroute northbound response. 
+ */ +@JsonSerialize +@JsonInclude(JsonInclude.Include.NON_NULL) +@Value +@EqualsAndHashCode(callSuper = false) +@ToString +public class FlowRerouteFlushResponse extends InfoMessage { + + private static final long serialVersionUID = 1L; + + @JsonProperty("flushed") + private boolean flushed; + + @JsonCreator + public FlowRerouteFlushResponse( + @JsonProperty(TIMESTAMP) final long timestamp, + @JsonProperty(CORRELATION_ID) final String correlationId, + @JsonProperty("flow_data") FlowResponse flowData) { + super(flowData, timestamp, correlationId); + this.flushed = true; + } +} diff --git a/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/payload/flow/FlowFlushReroutePayload.java b/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/payload/flow/FlowFlushReroutePayload.java new file mode 100644 index 00000000000..70d9d378a19 --- /dev/null +++ b/src-java/flowhs-topology/flowhs-messaging/src/main/java/org/openkilda/messaging/payload/flow/FlowFlushReroutePayload.java @@ -0,0 +1,54 @@ +/* Copyright 2017 Telstra Open Source + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.openkilda.messaging.payload.flow; + +import org.openkilda.messaging.Utils; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.io.Serializable; + +/** + * Flow reroute representation class. + */ +@Data +@NoArgsConstructor +@JsonInclude(JsonInclude.Include.NON_NULL) +public class FlowFlushReroutePayload implements Serializable { + + private static final long serialVersionUID = 1L; + + /** + * The id of the flow. 
+ */ + @JsonProperty(Utils.FLOW_ID) + protected String id; + + @JsonProperty("rerouted") + private boolean rerouted; + + @JsonCreator + public FlowFlushReroutePayload( + @JsonProperty(value = Utils.FLOW_ID) String id, + @JsonProperty(value = "rerouted") boolean rerouted) { + this.id = id; + this.rerouted = rerouted; + } +} diff --git a/src-java/northbound-service/northbound/build.gradle b/src-java/northbound-service/northbound/build.gradle index 6cb208daac3..7527d2d2d33 100644 --- a/src-java/northbound-service/northbound/build.gradle +++ b/src-java/northbound-service/northbound/build.gradle @@ -14,6 +14,7 @@ dependencies { implementation project(':northbound-api') implementation project(':flowhs-messaging') + implementation project(':reroute-messaging') implementation project(':nbworker-messaging') implementation project(':swmanager-messaging') implementation project(':ping-messaging') diff --git a/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/controller/v1/FlowController.java b/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/controller/v1/FlowController.java index 0d6f4d93eef..a818337e429 100644 --- a/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/controller/v1/FlowController.java +++ b/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/controller/v1/FlowController.java @@ -20,7 +20,9 @@ import org.openkilda.messaging.error.ErrorType; import org.openkilda.messaging.error.MessageException; import org.openkilda.messaging.info.meter.FlowMeterEntries; +import org.openkilda.messaging.info.reroute.FlowType; import org.openkilda.messaging.payload.flow.FlowCreatePayload; +import org.openkilda.messaging.payload.flow.FlowFlushReroutePayload; import org.openkilda.messaging.payload.flow.FlowIdStatusPayload; import org.openkilda.messaging.payload.flow.FlowPathPayload; import org.openkilda.messaging.payload.flow.FlowReroutePayload; @@ -244,6 +246,21 @@ public CompletableFuture rerouteFlow(@PathVariable("flow_id" return flowService.rerouteFlow(flowId); } + /** + * Flush rerouting when stuck in the progress queue. For Internal Use + * + * @param flowId id of flow to be flushed. + * @return flow payload with updated path. + */ + @ApiOperation(value = "Reroute flow of specified type", response = FlowFlushReroutePayload.class) + @PatchMapping(path = "/{flow_id}/reroute/flush") + @ResponseStatus(HttpStatus.OK) + public CompletableFuture flushRerouteFlow( + @PathVariable("flow_id") String flowId, + @RequestParam(value = "flow_type", defaultValue = "FLOW") FlowType flowType) { + return flowService.flushRerouteFlow(flowId, flowType); + } + /** * Initiates flow paths swapping for flow with protected path. 
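For context on the endpoint introduced in FlowController above, here is a minimal client-side sketch of invoking PATCH /v1/flows/{flow_id}/reroute/flush. The base URL, credentials, and the correlation_id header name are assumptions for illustration and depend on the deployment; flow_type is an optional query parameter that defaults to FLOW.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.Base64;
import java.util.UUID;

public class FlushRerouteExample {
    public static void main(String[] args) throws Exception {
        String flowId = "example-flow-id";                         // hypothetical flow id
        String baseUrl = "http://localhost:8080/api/v1";           // assumed Northbound base URL
        String auth = Base64.getEncoder()
                .encodeToString("kilda:kilda".getBytes());         // placeholder credentials

        // PATCH {base}/flows/{flow_id}/reroute/flush?flow_type=FLOW
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(baseUrl + "/flows/" + flowId + "/reroute/flush?flow_type=FLOW"))
                .method("PATCH", HttpRequest.BodyPublishers.noBody())
                .header("Authorization", "Basic " + auth)
                .header("correlation_id", UUID.randomUUID().toString()) // header name assumed
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        // The body maps to FlowFlushReroutePayload: the flow id plus a boolean "rerouted" flag.
        System.out.println(response.statusCode() + " " + response.body());
    }
}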
* diff --git a/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/converter/FlowMapper.java b/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/converter/FlowMapper.java index 91957d5160f..ade0dacb1a7 100644 --- a/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/converter/FlowMapper.java +++ b/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/converter/FlowMapper.java @@ -37,6 +37,7 @@ import org.openkilda.messaging.payload.flow.DetectConnectedDevicesPayload; import org.openkilda.messaging.payload.flow.FlowCreatePayload; import org.openkilda.messaging.payload.flow.FlowEndpointPayload; +import org.openkilda.messaging.payload.flow.FlowFlushReroutePayload; import org.openkilda.messaging.payload.flow.FlowIdStatusPayload; import org.openkilda.messaging.payload.flow.FlowPayload; import org.openkilda.messaging.payload.flow.FlowReroutePayload; @@ -247,6 +248,10 @@ public FlowRequest toFlowUpdateRequest(FlowUpdatePayload source) { @Mapping(source = "rerouted", target = "rerouted") public abstract FlowReroutePayload toReroutePayload(String flowId, PathInfoData path, boolean rerouted); + @Mapping(source = "flowId", target = "id") + @Mapping(source = "rerouted", target = "rerouted") + public abstract FlowFlushReroutePayload toRerouteFlushPayload(String flowId, boolean rerouted); + @Mapping(source = "path", target = "path") public abstract FlowRerouteResponseV2 toRerouteResponseV2(String flowId, PathInfoData path, boolean rerouted); diff --git a/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/service/FlowService.java b/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/service/FlowService.java index 87852e74574..b08150562b7 100644 --- a/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/service/FlowService.java +++ b/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/service/FlowService.java @@ -16,7 +16,9 @@ package org.openkilda.northbound.service; import org.openkilda.messaging.info.meter.FlowMeterEntries; +import org.openkilda.messaging.info.reroute.FlowType; import org.openkilda.messaging.payload.flow.FlowCreatePayload; +import org.openkilda.messaging.payload.flow.FlowFlushReroutePayload; import org.openkilda.messaging.payload.flow.FlowIdStatusPayload; import org.openkilda.messaging.payload.flow.FlowPathPayload; import org.openkilda.messaging.payload.flow.FlowReroutePayload; @@ -192,6 +194,15 @@ public interface FlowService extends FlowHistoryAware { */ CompletableFuture rerouteFlow(final String flowId); + /** + * Flush rerouting when stuck in the progress queue. + * + * @param flowId id of flow to be flushed. + * @param flowType name of flow to be flushed. + * @return updated flow path information. + */ + CompletableFuture flushRerouteFlow(final String flowId, FlowType flowType); + /** * Performs flow paths swapping for flow with protected path. 
* diff --git a/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/service/impl/FlowServiceImpl.java b/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/service/impl/FlowServiceImpl.java index ed926704952..4bfbc138f5a 100644 --- a/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/service/impl/FlowServiceImpl.java +++ b/src-java/northbound-service/northbound/src/main/java/org/openkilda/northbound/service/impl/FlowServiceImpl.java @@ -31,6 +31,7 @@ import org.openkilda.messaging.command.flow.FlowPingRequest; import org.openkilda.messaging.command.flow.FlowRequest; import org.openkilda.messaging.command.flow.FlowRequest.Type; +import org.openkilda.messaging.command.flow.FlowRerouteFlushRequest; import org.openkilda.messaging.command.flow.FlowRerouteRequest; import org.openkilda.messaging.command.flow.FlowSyncRequest; import org.openkilda.messaging.command.flow.FlowValidationRequest; @@ -44,6 +45,7 @@ import org.openkilda.messaging.info.flow.FlowValidationResponse; import org.openkilda.messaging.info.flow.SwapFlowResponse; import org.openkilda.messaging.info.meter.FlowMeterEntries; +import org.openkilda.messaging.info.reroute.FlowType; import org.openkilda.messaging.model.FlowDto; import org.openkilda.messaging.model.FlowPatch; import org.openkilda.messaging.model.FlowPathDto; @@ -63,11 +65,13 @@ import org.openkilda.messaging.nbtopology.response.GetFlowPathResponse; import org.openkilda.messaging.payload.flow.DiverseGroupPayload; import org.openkilda.messaging.payload.flow.FlowCreatePayload; +import org.openkilda.messaging.payload.flow.FlowFlushReroutePayload; import org.openkilda.messaging.payload.flow.FlowIdStatusPayload; import org.openkilda.messaging.payload.flow.FlowPathPayload; import org.openkilda.messaging.payload.flow.FlowPathPayload.FlowProtectedPath; import org.openkilda.messaging.payload.flow.FlowReroutePayload; import org.openkilda.messaging.payload.flow.FlowResponsePayload; +import org.openkilda.messaging.payload.flow.FlowState; import org.openkilda.messaging.payload.flow.FlowUpdatePayload; import org.openkilda.messaging.payload.flow.GroupFlowPathPayload; import org.openkilda.messaging.payload.history.FlowHistoryEntry; @@ -602,6 +606,21 @@ public CompletableFuture rerouteFlow(String flowId) { flowMapper.toReroutePayload(flowId, response.getPayload(), response.isRerouted())); } + @Override + public CompletableFuture flushRerouteFlow(String flowId, FlowType flowType) { + log.info("API request: Flush flow reroute: {}={}, flow type={}", FLOW_ID, flowId, flowType); + + FlowRerouteFlushRequest payload = new FlowRerouteFlushRequest( + flowId, flowType, "initiated via Northbound"); + CommandMessage command = new CommandMessage( + payload, System.currentTimeMillis(), RequestCorrelationId.getId()); + + return messagingChannel.sendAndGet(rerouteTopic, command) + .thenApply(FlowResponse.class::cast) + .thenApply(response -> flowMapper.toRerouteFlushPayload(flowId, response != null + && response.getPayload() != null && response.getPayload().getState() == FlowState.IN_PROGRESS)); + } + @Override public CompletableFuture syncFlow(String flowId) { log.info("API request: Sync flow {}", flowId); diff --git a/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/controller/mock/TestMessageMock.java b/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/controller/mock/TestMessageMock.java index c65edb3e497..3810235745d 100644 --- 
a/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/controller/mock/TestMessageMock.java +++ b/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/controller/mock/TestMessageMock.java @@ -27,6 +27,7 @@ import org.openkilda.messaging.command.CommandMessage; import org.openkilda.messaging.command.flow.FlowDeleteRequest; import org.openkilda.messaging.command.flow.FlowRequest; +import org.openkilda.messaging.command.flow.FlowRerouteFlushRequest; import org.openkilda.messaging.command.flow.SwapFlowEndpointRequest; import org.openkilda.messaging.command.switches.SwitchRulesDeleteRequest; import org.openkilda.messaging.error.ErrorData; @@ -47,6 +48,7 @@ import org.openkilda.messaging.nbtopology.response.GetFlowPathResponse; import org.openkilda.messaging.payload.flow.DetectConnectedDevicesPayload; import org.openkilda.messaging.payload.flow.FlowEndpointPayload; +import org.openkilda.messaging.payload.flow.FlowFlushReroutePayload; import org.openkilda.messaging.payload.flow.FlowIdStatusPayload; import org.openkilda.messaging.payload.flow.FlowPathPayload; import org.openkilda.messaging.payload.flow.FlowPayload; @@ -116,6 +118,9 @@ public class TestMessageMock implements MessagingChannel { .status(FlowState.UP.getState()) .build(); + public static final FlowFlushReroutePayload FLOW_FLUSH_RESPONSE_PAYLOAD = + new FlowFlushReroutePayload(FLOW_ID, true); + public static final SwapFlowPayload firstSwapFlow = SwapFlowPayload.builder() .flowId(FLOW_ID) .source(FLOW_PAYLOAD_ENDPOINT) @@ -168,6 +173,9 @@ public class TestMessageMock implements MessagingChannel { ErrorType.DATA_INVALID.toString(), "flow_id from body and from path are different", format("Body flow_id: %s, path flow_id: %s", FLOW_ID, FLOW_ID_FROM_PATH)); + private static final FlowResponse flowFlushResponse = new FlowResponse( + FlowDto.builder().flowId(FLOW_ID).state(FlowState.IN_PROGRESS).build()); + /** * Chooses response by request. 
* @@ -186,6 +194,8 @@ private CompletableFuture formatResponse(final String correlationId, f result = completedFuture(switchRulesResponse); } else if (data instanceof SwapFlowEndpointRequest) { result = completedFuture(bulkFlowResponse); + } else if (data instanceof FlowRerouteFlushRequest) { + result = completedFuture(flowFlushResponse); } else { return null; } diff --git a/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/controller/v1/FlowControllerTest.java b/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/controller/v1/FlowControllerTest.java index be9049feddb..6984e7428a8 100644 --- a/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/controller/v1/FlowControllerTest.java +++ b/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/controller/v1/FlowControllerTest.java @@ -27,12 +27,14 @@ import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.asyncDispatch; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.delete; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.get; +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.patch; import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.put; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.content; import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; import org.openkilda.messaging.error.ErrorType; import org.openkilda.messaging.error.MessageError; +import org.openkilda.messaging.payload.flow.FlowFlushReroutePayload; import org.openkilda.messaging.payload.flow.FlowIdStatusPayload; import org.openkilda.messaging.payload.flow.FlowPathPayload; import org.openkilda.messaging.payload.flow.FlowResponsePayload; @@ -114,7 +116,6 @@ public void getFlow() throws Exception { MvcResult result = mockMvc.perform(asyncDispatch(mvcResult)) .andExpect(status().isOk()) - .andExpect(content().contentType(APPLICATION_JSON_VALUE)) .andReturn(); FlowResponsePayload response = MAPPER.readValue(result.getResponse().getContentAsString(), @@ -279,6 +280,23 @@ public void emptyCredentials() throws Exception { assertEquals(AUTH_ERROR, response); } + @Test + @WithMockUser(username = USERNAME, password = PASSWORD, roles = ROLE) + public void rerouteFlushFlow() throws Exception { + MvcResult mvcResult = mockMvc.perform(patch("/v1/flows/{flow-id}/reroute/flush", TestMessageMock.FLOW_ID) + .header(CORRELATION_ID, testCorrelationId()) + .contentType(APPLICATION_JSON_VALUE)) + .andReturn(); + + MvcResult result = mockMvc.perform(asyncDispatch(mvcResult)) + .andExpect(status().isOk()) + .andExpect(content().contentType(APPLICATION_JSON_VALUE)) + .andReturn(); + FlowFlushReroutePayload response = MAPPER.readValue(result.getResponse().getContentAsString(), + FlowFlushReroutePayload.class); + assertEquals(TestMessageMock.FLOW_FLUSH_RESPONSE_PAYLOAD, response); + } + private static String testCorrelationId() { return UUID.randomUUID().toString(); } diff --git a/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/converter/FlowMapperTest.java b/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/converter/FlowMapperTest.java index ce4f101b710..3e179b03ee9 100644 --- a/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/converter/FlowMapperTest.java +++ 
b/src-java/northbound-service/northbound/src/test/java/org/openkilda/northbound/converter/FlowMapperTest.java @@ -38,6 +38,7 @@ import org.openkilda.messaging.payload.flow.FlowCreatePayload; import org.openkilda.messaging.payload.flow.FlowEncapsulationType; import org.openkilda.messaging.payload.flow.FlowEndpointPayload; +import org.openkilda.messaging.payload.flow.FlowFlushReroutePayload; import org.openkilda.messaging.payload.flow.FlowPayload; import org.openkilda.messaging.payload.flow.FlowState; import org.openkilda.messaging.payload.flow.FlowStatusDetails; @@ -519,6 +520,14 @@ FLOW_ID, new UniFlowPingResponse(false, Errors.TIMEOUT, null, null), Assertions.assertNull(output.getReverse().getError()); } + @Test + public void testRerouteFlushPayload() { + FlowFlushReroutePayload output = flowMapper.toRerouteFlushPayload(FLOW_ID, true); + + Assertions.assertEquals(FLOW_ID, output.getId()); + Assertions.assertTrue(output.isRerouted()); + } + @Test public void testVlanStatisticsMapping() { Set vlanStatistics = new HashSet<>(); diff --git a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/RerouteTopology.java b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/RerouteTopology.java index 47f26cc6748..e66119d3acb 100644 --- a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/RerouteTopology.java +++ b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/RerouteTopology.java @@ -18,6 +18,7 @@ import static org.openkilda.wfm.share.hubandspoke.CoordinatorBolt.FIELDS_KEY; import static org.openkilda.wfm.topology.reroute.bolts.FlowRerouteQueueBolt.STREAM_NORTHBOUND_ID; import static org.openkilda.wfm.topology.reroute.bolts.OperationQueueBolt.REROUTE_QUEUE_STREAM; +import static org.openkilda.wfm.topology.reroute.bolts.RerouteBolt.STREAM_MANUAL_REROUTE_FLUSH_REQUEST_ID; import static org.openkilda.wfm.topology.reroute.bolts.RerouteBolt.STREAM_MANUAL_REROUTE_REQUEST_ID; import static org.openkilda.wfm.topology.reroute.bolts.RerouteBolt.STREAM_REROUTE_REQUEST_ID; import static org.openkilda.wfm.topology.reroute.bolts.TimeWindowBolt.STREAM_TIME_WINDOW_EVENT_ID; @@ -126,9 +127,12 @@ private void rerouteQueueBolt(TopologyBuilder topologyBuilder, topologyConfig.getDefaultFlowPriority(), topologyConfig.getMaxRetry(), rerouteTimeout); declareBolt(topologyBuilder, flowRerouteQueueBolt, FlowRerouteQueueBolt.BOLT_ID) - .fieldsGrouping(RerouteBolt.BOLT_ID, STREAM_REROUTE_REQUEST_ID, new Fields(RerouteBolt.FLOW_ID_FIELD)) + .fieldsGrouping(RerouteBolt.BOLT_ID, STREAM_REROUTE_REQUEST_ID, + new Fields(RerouteBolt.FLOW_ID_FIELD)) .fieldsGrouping(RerouteBolt.BOLT_ID, STREAM_MANUAL_REROUTE_REQUEST_ID, new Fields(RerouteBolt.FLOW_ID_FIELD)) + .fieldsGrouping(RerouteBolt.BOLT_ID, STREAM_MANUAL_REROUTE_FLUSH_REQUEST_ID, + new Fields(RerouteBolt.FLOW_ID_FIELD)) .fieldsGrouping(OperationQueueBolt.BOLT_ID, REROUTE_QUEUE_STREAM, new Fields(OperationQueueBolt.FLOW_ID_FIELD)) .allGrouping(TimeWindowBolt.BOLT_ID) diff --git a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/FlowRerouteQueueBolt.java b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/FlowRerouteQueueBolt.java index 10c9ffcfccd..c2c83e27da6 100644 --- 
a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/FlowRerouteQueueBolt.java +++ b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/FlowRerouteQueueBolt.java @@ -15,6 +15,7 @@ package org.openkilda.wfm.topology.reroute.bolts; +import static org.openkilda.wfm.topology.reroute.bolts.RerouteBolt.STREAM_MANUAL_REROUTE_FLUSH_REQUEST_ID; import static org.openkilda.wfm.topology.reroute.bolts.RerouteBolt.STREAM_MANUAL_REROUTE_REQUEST_ID; import static org.openkilda.wfm.topology.reroute.bolts.RerouteBolt.STREAM_REROUTE_REQUEST_ID; import static org.openkilda.wfm.topology.reroute.bolts.TimeWindowBolt.STREAM_TIME_WINDOW_EVENT_ID; @@ -25,6 +26,8 @@ import org.openkilda.messaging.command.yflow.YFlowRerouteRequest; import org.openkilda.messaging.error.ErrorData; import org.openkilda.messaging.error.ErrorMessage; +import org.openkilda.messaging.info.flow.FlowRerouteFlushResponse; +import org.openkilda.messaging.info.flow.FlowResponse; import org.openkilda.messaging.info.reroute.RerouteResultInfoData; import org.openkilda.persistence.PersistenceManager; import org.openkilda.wfm.CommandContext; @@ -86,6 +89,10 @@ private void handleRerouteBoltMessage(Tuple tuple) throws PipelineException { throttlingData = (FlowThrottlingData) tuple.getValueByField(RerouteBolt.THROTTLING_DATA_FIELD); rerouteQueueService.processManualRequest(flowId, throttlingData); break; + case STREAM_MANUAL_REROUTE_FLUSH_REQUEST_ID: + throttlingData = (FlowThrottlingData) tuple.getValueByField(RerouteBolt.THROTTLING_DATA_FIELD); + rerouteQueueService.processManualFlushRequest(flowId, throttlingData); + break; default: unhandledInput(tuple); } @@ -145,6 +152,13 @@ public void emitFlowRerouteError(ErrorData errorData) { new ErrorMessage(errorData, System.currentTimeMillis(), correlationId))); } + @Override + public void emitFlowRerouteInfo(FlowResponse flowData) { + String correlationId = getCommandContext().getCorrelationId(); + getOutput().emit(STREAM_NORTHBOUND_ID, getCurrentTuple(), new Values(correlationId, + new FlowRerouteFlushResponse(System.currentTimeMillis(), correlationId, flowData))); + } + @Override public void sendExtendTimeWindowEvent() { getOutput().emit(STREAM_TIME_WINDOW_EVENT_ID, getCurrentTuple(), new Values(getCommandContext())); diff --git a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/MessageSender.java b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/MessageSender.java index 5e485fd605f..0cbd849f4d2 100644 --- a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/MessageSender.java +++ b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/MessageSender.java @@ -23,6 +23,8 @@ public interface MessageSender { void emitManualRerouteCommand(String flowId, FlowThrottlingData flowThrottlingData); + void emitManualRerouteFlushCommand(String flowId, FlowThrottlingData flowThrottlingData); + void emitPathSwapCommand(String correlationId, String flowId, String reason); void emitYFlowPathSwapCommand(String correlationId, String yFlowId, String reason); diff --git a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/RerouteBolt.java 
b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/RerouteBolt.java index c18b8eea2e1..3082dfdad1d 100644 --- a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/RerouteBolt.java +++ b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/bolts/RerouteBolt.java @@ -21,6 +21,7 @@ import org.openkilda.messaging.command.CommandData; import org.openkilda.messaging.command.CommandMessage; import org.openkilda.messaging.command.flow.FlowPathSwapRequest; +import org.openkilda.messaging.command.flow.FlowRerouteFlushRequest; import org.openkilda.messaging.command.flow.FlowRerouteRequest; import org.openkilda.messaging.command.haflow.HaFlowPathSwapRequest; import org.openkilda.messaging.command.haflow.HaFlowRerouteRequest; @@ -59,6 +60,7 @@ public class RerouteBolt extends AbstractBolt implements MessageSender { public static final String BOLT_ID = "reroute-bolt"; public static final String STREAM_REROUTE_REQUEST_ID = "reroute-request-stream"; public static final String STREAM_MANUAL_REROUTE_REQUEST_ID = "manual-reroute-request-stream"; + public static final String STREAM_MANUAL_REROUTE_FLUSH_REQUEST_ID = "manual-reroute-flush-request-stream"; public static final String STREAM_TO_METRICS_BOLT = "to-metrics-bolt-stream"; public static final String STREAM_OPERATION_QUEUE_ID = "operation-queue"; @@ -110,6 +112,8 @@ private void handleCommandMessage(CommandMessage commandMessage) { rerouteService.processRerouteRequest(this, correlationId, (YFlowRerouteRequest) commandData); } else if (commandData instanceof HaFlowRerouteRequest) { rerouteService.processRerouteRequest(this, correlationId, (HaFlowRerouteRequest) commandData); + } else if (commandData instanceof FlowRerouteFlushRequest) { + rerouteService.processRerouteFlushRequest(this, correlationId, (FlowRerouteFlushRequest) commandData); } else { unhandledInput(getCurrentTuple()); } @@ -168,6 +172,14 @@ public void emitManualRerouteCommand(String flowId, FlowThrottlingData flowThrot log.info("Manual reroute command message sent for flow {}", flowId); } + @Override + public void emitManualRerouteFlushCommand(String flowId, FlowThrottlingData flowThrottlingData) { + emitWithContext(STREAM_MANUAL_REROUTE_FLUSH_REQUEST_ID, + getCurrentTuple(), new Values(flowId, flowThrottlingData)); + + log.info("Manual reroute flush command message sent for flow {}. Reason: {}", flowId, flowThrottlingData); + } + /** * Emit swap command for consumer. 
* @@ -225,6 +237,8 @@ public void declareOutputFields(OutputFieldsDeclarer declarer) { new Fields(FLOW_ID_FIELD, THROTTLING_DATA_FIELD, FIELD_ID_CONTEXT)); declarer.declareStream(STREAM_MANUAL_REROUTE_REQUEST_ID, new Fields(FLOW_ID_FIELD, THROTTLING_DATA_FIELD, FIELD_ID_CONTEXT)); + declarer.declareStream(STREAM_MANUAL_REROUTE_FLUSH_REQUEST_ID, + new Fields(FLOW_ID_FIELD, THROTTLING_DATA_FIELD, FIELD_ID_CONTEXT)); declarer.declareStream(STREAM_OPERATION_QUEUE_ID, FIELDS_OPERATION_QUEUE); declarer.declareStream(ZkStreams.ZK.toString(), new Fields(ZooKeeperBolt.FIELD_ID_STATE, ZooKeeperBolt.FIELD_ID_CONTEXT)); diff --git a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/IRerouteQueueCarrier.java b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/IRerouteQueueCarrier.java index 56e83077898..49dd758a521 100644 --- a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/IRerouteQueueCarrier.java +++ b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/IRerouteQueueCarrier.java @@ -19,6 +19,7 @@ import org.openkilda.messaging.command.haflow.HaFlowRerouteRequest; import org.openkilda.messaging.command.yflow.YFlowRerouteRequest; import org.openkilda.messaging.error.ErrorData; +import org.openkilda.messaging.info.flow.FlowResponse; public interface IRerouteQueueCarrier { @@ -30,6 +31,8 @@ public interface IRerouteQueueCarrier { void emitFlowRerouteError(ErrorData errorData); + void emitFlowRerouteInfo(FlowResponse flowData); + void sendExtendTimeWindowEvent(); void cancelTimeout(String key); diff --git a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/RerouteQueueService.java b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/RerouteQueueService.java index 329bc464740..1ffefe67d05 100644 --- a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/RerouteQueueService.java +++ b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/RerouteQueueService.java @@ -23,12 +23,15 @@ import org.openkilda.messaging.command.yflow.YFlowRerouteRequest; import org.openkilda.messaging.error.ErrorData; import org.openkilda.messaging.error.ErrorType; +import org.openkilda.messaging.info.flow.FlowResponse; import org.openkilda.messaging.info.reroute.FlowType; import org.openkilda.messaging.info.reroute.RerouteResultInfoData; import org.openkilda.messaging.info.reroute.error.FlowInProgressError; import org.openkilda.messaging.info.reroute.error.NoPathFoundError; import org.openkilda.messaging.info.reroute.error.RerouteError; import org.openkilda.messaging.info.reroute.error.SpeakerRequestError; +import org.openkilda.messaging.model.FlowDto; +import org.openkilda.messaging.payload.flow.FlowState; import org.openkilda.model.Flow; import org.openkilda.model.HaFlow; import org.openkilda.model.PathComputationStrategy; @@ -138,6 +141,36 @@ public void processManualRequest(String flowId, FlowThrottlingData throttlingDat } } + /** + * Process manual reroute flush request. 
+ * + * @param flowId flow id + */ + public void processManualFlushRequest(String flowId, FlowThrottlingData throttlingData) { + log.info("Process manual Flush Request for flow: {}", flowId); + FlowInfo flowInfo = getFlowInfo(flowId, throttlingData.getFlowType()); + if (!flowInfo.isPresent()) { + String description = format("Flow with ID %s not found in rerouting queue", flowId); + ErrorData errorData = new ErrorData( + ErrorType.NOT_FOUND, format("Could not flush reroute %s", flowId), description); + carrier.emitFlowRerouteError(errorData); + } + + FlowResponse flowData; + RerouteQueue rerouteQueue = getRerouteQueue(flowId); + if (rerouteQueue.hasInProgress()) { + rerouteQueue.putToInProgress(null); + flowData = new FlowResponse(FlowDto.builder() + .state(FlowState.IN_PROGRESS) + .statusInfo("Reroute has flushed").build()); + } else { + flowData = new FlowResponse(FlowDto.builder() + .state(FlowState.DOWN) + .statusInfo("Flow is not in rerouting process").build()); + } + carrier.emitFlowRerouteInfo(flowData); + } + /** * Process reroute result. Check fail reason, decide if retry is needed and schedule it if yes. * diff --git a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/RerouteService.java b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/RerouteService.java index 4c97d111a85..0f976c6195b 100644 --- a/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/RerouteService.java +++ b/src-java/reroute-topology/reroute-storm-topology/src/main/java/org/openkilda/wfm/topology/reroute/service/RerouteService.java @@ -18,6 +18,7 @@ import static java.lang.String.format; import static java.util.stream.Collectors.toSet; +import org.openkilda.messaging.command.flow.FlowRerouteFlushRequest; import org.openkilda.messaging.command.flow.FlowRerouteRequest; import org.openkilda.messaging.command.haflow.HaFlowRerouteRequest; import org.openkilda.messaging.command.reroute.RerouteAffectedFlows; @@ -724,6 +725,21 @@ public void processRerouteRequest(MessageSender sender, String correlationId, Ha } } + /** + * Process manual reroute request. + */ + public void processRerouteFlushRequest(MessageSender sender, String correlationId, + FlowRerouteFlushRequest request) { + Optional flow = flowRepository.findById(request.getFlowId()); + FlowThrottlingData flowThrottlingData = getFlowThrottlingDataBuilder(flow.orElse(null)) + .correlationId(correlationId) + .reason(request.getReason()) + .flowType(request.getFlowType()) + .affectedIsl(new HashSet<>()) + .build(); + sender.emitManualRerouteFlushCommand(request.getFlowId(), flowThrottlingData); + } + /** * Handles request to update single switch flow status. 
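As a summary of the queue handling in processManualFlushRequest above, the sketch below isolates the flush decision: an in-progress entry is dropped and reported as IN_PROGRESS ("Reroute has flushed"), otherwise DOWN ("Flow is not in rerouting process") is returned and Northbound reports the flow as not flushed. RerouteQueueStub is a hypothetical stand-in for illustration, not an OpenKilda class.

public class FlushDecisionSketch {

    /** Hypothetical stand-in for the per-flow reroute queue; only the parts needed here. */
    static class RerouteQueueStub {
        private Object inProgress;

        boolean hasInProgress() {
            return inProgress != null;
        }

        void putToInProgress(Object request) {
            this.inProgress = request;
        }
    }

    /** Mirrors the branch taken by RerouteQueueService.processManualFlushRequest. */
    static String flush(RerouteQueueStub queue) {
        if (queue.hasInProgress()) {
            queue.putToInProgress(null);                    // drop the stuck in-progress reroute
            return "IN_PROGRESS: Reroute has flushed";      // Northbound maps IN_PROGRESS to rerouted=true
        }
        return "DOWN: Flow is not in rerouting process";    // nothing pending, reported as rerouted=false
    }

    public static void main(String[] args) {
        RerouteQueueStub stuck = new RerouteQueueStub();
        stuck.putToInProgress(new Object());
        System.out.println(flush(stuck));                   // prints the IN_PROGRESS message

        System.out.println(flush(new RerouteQueueStub()));  // prints the DOWN message
    }
}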
*/ diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/MeterExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/MeterExpectedError.groovy new file mode 100644 index 00000000000..8579a74e24d --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/MeterExpectedError.groovy @@ -0,0 +1,13 @@ +package org.openkilda.functionaltests.error + +import org.springframework.http.HttpStatus + +import java.util.regex.Pattern + +class MeterExpectedError extends AbstractExpectedError { + final static HttpStatus statusCode = HttpStatus.BAD_REQUEST + + MeterExpectedError(String message, Pattern descriptionPattern){ + super(statusCode, message, descriptionPattern) + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/MissingServletRequestParameterException.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/MissingServletRequestParameterException.groovy new file mode 100644 index 00000000000..aa8007ea670 --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/MissingServletRequestParameterException.groovy @@ -0,0 +1,11 @@ +package org.openkilda.functionaltests.error + +import org.springframework.http.HttpStatus + +class MissingServletRequestParameterException extends AbstractExpectedError { + final static HttpStatus statusCode = HttpStatus.BAD_REQUEST + + MissingServletRequestParameterException(String message){ + super(statusCode, message, ~/MissingServletRequestParameterException/) + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/SwitchIsInIllegalStateExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/SwitchIsInIllegalStateExpectedError.groovy new file mode 100644 index 00000000000..2b311564c2e --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/SwitchIsInIllegalStateExpectedError.groovy @@ -0,0 +1,12 @@ +package org.openkilda.functionaltests.error + +import org.springframework.http.HttpStatus + +class SwitchIsInIllegalStateExpectedError extends AbstractExpectedError { + final static HttpStatus statusCode = HttpStatus.BAD_REQUEST + + SwitchIsInIllegalStateExpectedError(String message) { + super(statusCode, message, ~/Switch is in illegal state/) + + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/SwitchNotFoundExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/SwitchNotFoundExpectedError.groovy index 99f746bb968..17ddfefb32f 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/SwitchNotFoundExpectedError.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/SwitchNotFoundExpectedError.groovy @@ -9,10 +9,9 @@ import java.util.regex.Pattern class SwitchNotFoundExpectedError extends AbstractExpectedError{ final static HttpStatus statusCode = HttpStatus.NOT_FOUND final static String messagePattern = "Switch %s not found." 
- final static Pattern descriptionPattern = ~/Switch was not found./ SwitchNotFoundExpectedError(SwitchId switchId) { - super(statusCode, String.format(messagePattern, switchId), descriptionPattern) + super(statusCode, String.format(messagePattern, switchId), ~/Switch was not found./) } SwitchNotFoundExpectedError(SwitchId switchId, Pattern descriptionPattern) { diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/UnableToParseRequestArgumentsException.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/UnableToParseRequestArgumentsException.groovy new file mode 100644 index 00000000000..6cba6916bf6 --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/UnableToParseRequestArgumentsException.groovy @@ -0,0 +1,13 @@ +package org.openkilda.functionaltests.error + +import org.springframework.http.HttpStatus + +import java.util.regex.Pattern + +class UnableToParseRequestArgumentsException extends AbstractExpectedError { + final static HttpStatus statusCode = HttpStatus.BAD_REQUEST + + UnableToParseRequestArgumentsException(String message, Pattern descriptionPattern){ + super(statusCode, message, descriptionPattern) + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowEndpointsNotSwappedExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowEndpointsNotSwappedExpectedError.groovy index 813bc6dcff1..53f4a16de9a 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowEndpointsNotSwappedExpectedError.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowEndpointsNotSwappedExpectedError.groovy @@ -6,10 +6,13 @@ import org.springframework.http.HttpStatus import java.util.regex.Pattern class FlowEndpointsNotSwappedExpectedError extends AbstractExpectedError{ - final static HttpStatus statusCode = HttpStatus.INTERNAL_SERVER_ERROR final static String message = "Could not swap endpoints" FlowEndpointsNotSwappedExpectedError(Pattern descriptionPattern) { + super(HttpStatus.INTERNAL_SERVER_ERROR, message, descriptionPattern) + } + + FlowEndpointsNotSwappedExpectedError(HttpStatus statusCode, Pattern descriptionPattern) { super(statusCode, message, descriptionPattern) } } diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotCreatedExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotCreatedExpectedError.groovy index 45ac07c8c0c..6a64b3eead1 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotCreatedExpectedError.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotCreatedExpectedError.groovy @@ -7,10 +7,9 @@ import java.util.regex.Pattern class FlowNotCreatedExpectedError extends AbstractExpectedError{ final static HttpStatus statusCode = HttpStatus.BAD_REQUEST - final static String message = "Could not create flow" FlowNotCreatedExpectedError(Pattern descriptionPattern) { - super(statusCode, message, descriptionPattern) + super(statusCode, "Could not create flow", descriptionPattern) } FlowNotCreatedExpectedError(String message, Pattern descriptionPattern) { diff --git 
a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotFoundExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotFoundExpectedError.groovy index 1724c870eea..fc2f27f750e 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotFoundExpectedError.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotFoundExpectedError.groovy @@ -8,13 +8,12 @@ import java.util.regex.Pattern class FlowNotFoundExpectedError extends AbstractExpectedError{ final static HttpStatus statusCode = HttpStatus.NOT_FOUND - final static String messagePattern = "Flow %s not found" - FlowNotFoundExpectedError(String flowId, Pattern descriptionPattern) { - super(statusCode, String.format(messagePattern, flowId), descriptionPattern) + FlowNotFoundExpectedError(String message, Pattern descriptionPattern) { + super(statusCode, message, descriptionPattern) } FlowNotFoundExpectedError(String flowId) { - super(statusCode, 'Flow not found', ~/${String.format(messagePattern, "\'${flowId}\'")}/) + super(statusCode, 'Flow not found', ~/${String.format("Flow %s not found", "\'${flowId}\'")}/) } } diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotUpdatedExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotUpdatedExpectedError.groovy index 5665fe7a016..635417cafc4 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotUpdatedExpectedError.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowNotUpdatedExpectedError.groovy @@ -7,10 +7,9 @@ import java.util.regex.Pattern class FlowNotUpdatedExpectedError extends AbstractExpectedError{ final static HttpStatus statusCode = HttpStatus.BAD_REQUEST - final static String message = "Could not update flow" FlowNotUpdatedExpectedError(Pattern descriptionPattern) { - super(statusCode, message, descriptionPattern) + super(statusCode, "Could not update flow", descriptionPattern) } FlowNotUpdatedExpectedError(String message, Pattern descriptionPattern) { diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowPathNotSwappedExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowPathNotSwappedExpectedError.groovy new file mode 100644 index 00000000000..1f35faf8a10 --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/flow/FlowPathNotSwappedExpectedError.groovy @@ -0,0 +1,18 @@ +package org.openkilda.functionaltests.error.flow + +import org.openkilda.functionaltests.error.AbstractExpectedError + +import org.springframework.http.HttpStatus + +import java.util.regex.Pattern + +class FlowPathNotSwappedExpectedError extends AbstractExpectedError { + final static String message = "Could not swap paths for flow" + + FlowPathNotSwappedExpectedError(Pattern descriptionPattern) { + super(HttpStatus.BAD_REQUEST, message, descriptionPattern) + } + FlowPathNotSwappedExpectedError(HttpStatus statusCode, Pattern descriptionPattern) { + super(statusCode, message, descriptionPattern) + } +} diff --git 
a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/haflow/HaFlowPathNotSwappedExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/haflow/HaFlowPathNotSwappedExpectedError.groovy new file mode 100644 index 00000000000..718bf72f8e5 --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/haflow/HaFlowPathNotSwappedExpectedError.groovy @@ -0,0 +1,15 @@ +package org.openkilda.functionaltests.error.haflow + +import org.openkilda.functionaltests.error.AbstractExpectedError + +import org.springframework.http.HttpStatus + +import java.util.regex.Pattern + +class HaFlowPathNotSwappedExpectedError extends AbstractExpectedError { + final static String message = "Could not swap paths for flow" + + HaFlowPathNotSwappedExpectedError(HttpStatus statusCode, Pattern descriptionPattern){ + super(statusCode, message, descriptionPattern) + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/link/LinkIsInIllegalStateExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/link/LinkIsInIllegalStateExpectedError.groovy new file mode 100644 index 00000000000..0c7fb7b118f --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/link/LinkIsInIllegalStateExpectedError.groovy @@ -0,0 +1,13 @@ +package org.openkilda.functionaltests.error.link + +import org.openkilda.functionaltests.error.AbstractExpectedError + +import org.springframework.http.HttpStatus + +class LinkIsInIllegalStateExpectedError extends AbstractExpectedError { + final static HttpStatus statusCode = HttpStatus.BAD_REQUEST + + LinkIsInIllegalStateExpectedError(String message) { + super(statusCode, message, ~/ISL is in illegal state./) + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/link/LinkNotFoundExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/link/LinkNotFoundExpectedError.groovy new file mode 100644 index 00000000000..b8d5133ef44 --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/link/LinkNotFoundExpectedError.groovy @@ -0,0 +1,14 @@ +package org.openkilda.functionaltests.error.link + +import org.openkilda.functionaltests.error.AbstractExpectedError + +import org.springframework.http.HttpStatus + + +class LinkNotFoundExpectedError extends AbstractExpectedError { + final static HttpStatus statusCode = HttpStatus.NOT_FOUND + + LinkNotFoundExpectedError(String message){ + super(statusCode, message, ~/ISL was not found./) + } +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/switchproperties/SwitchPropertiesNotUpdatedExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/switchproperties/SwitchPropertiesNotUpdatedExpectedError.groovy index f1042f85ef9..14f1f3e5af6 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/switchproperties/SwitchPropertiesNotUpdatedExpectedError.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/switchproperties/SwitchPropertiesNotUpdatedExpectedError.groovy @@ -8,10 +8,9 @@ import java.util.regex.Pattern class SwitchPropertiesNotUpdatedExpectedError extends 
AbstractExpectedError{ final static HttpStatus statusCode = HttpStatus.BAD_REQUEST - final static Pattern descriptionPattern = ~/Failed to update switch properties./ SwitchPropertiesNotUpdatedExpectedError(String message) { - super(statusCode, message, descriptionPattern) + super(statusCode, message, ~/Failed to update switch properties./) } SwitchPropertiesNotUpdatedExpectedError(String message, Pattern descriptionPattern) { diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/yflow/YFlowNotUpdatedExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/yflow/YFlowNotUpdatedExpectedError.groovy index 2bc89118992..722d9b5f561 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/yflow/YFlowNotUpdatedExpectedError.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/yflow/YFlowNotUpdatedExpectedError.groovy @@ -7,10 +7,9 @@ import java.util.regex.Pattern class YFlowNotUpdatedExpectedError extends AbstractExpectedError{ final static HttpStatus statusCode = HttpStatus.BAD_REQUEST - final static String message = "Could not update y-flow" YFlowNotUpdatedExpectedError(Pattern descriptionPattern) { - super(statusCode, message, descriptionPattern) + super(statusCode, "Could not update y-flow", descriptionPattern) } YFlowNotUpdatedExpectedError(String message, Pattern descriptionPattern) { diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/yflow/YFlowPathNotSwappedExpectedError.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/yflow/YFlowPathNotSwappedExpectedError.groovy new file mode 100644 index 00000000000..060b4a11664 --- /dev/null +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/error/yflow/YFlowPathNotSwappedExpectedError.groovy @@ -0,0 +1,16 @@ +package org.openkilda.functionaltests.error.yflow + +import org.openkilda.functionaltests.error.AbstractExpectedError + +import org.springframework.http.HttpStatus + +import java.util.regex.Pattern + +class YFlowPathNotSwappedExpectedError extends AbstractExpectedError { + final static String message = "Could not swap y-flow paths" + + YFlowPathNotSwappedExpectedError(HttpStatus status, Pattern descriptionPattern) { + super(status, message, descriptionPattern) + } + +} diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/IslHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/IslHelper.groovy index 0cf476f617c..a57154b606d 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/IslHelper.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/IslHelper.groovy @@ -1,6 +1,20 @@ package org.openkilda.functionaltests.helpers -import groovy.util.logging.Slf4j +import static groovyx.gpars.GParsExecutorsPool.withPool +import static org.openkilda.functionaltests.helpers.Wrappers.wait +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.CLEAN_LINK_DELAY +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_ISLS_PROPERTIES +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESET_ISLS_COST +import 
static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESET_ISL_PARAMETERS +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_ISL +import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.TEST +import static org.openkilda.messaging.info.event.IslChangeType.DISCOVERED +import static org.openkilda.messaging.info.event.IslChangeType.FAILED +import static org.openkilda.messaging.info.event.IslChangeType.MOVED +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE + import org.openkilda.functionaltests.helpers.thread.PortBlinker import org.openkilda.functionaltests.model.cleanup.CleanupAfter import org.openkilda.functionaltests.model.cleanup.CleanupManager @@ -13,27 +27,14 @@ import org.openkilda.testing.service.lockkeeper.LockKeeperService import org.openkilda.testing.service.northbound.NorthboundService import org.openkilda.testing.service.northbound.NorthboundServiceV2 import org.openkilda.testing.tools.IslUtils + +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier import org.springframework.beans.factory.annotation.Value import org.springframework.context.annotation.Scope import org.springframework.stereotype.Component -import static groovyx.gpars.GParsExecutorsPool.withPool -import static org.openkilda.functionaltests.helpers.Wrappers.wait -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.CLEAN_LINK_DELAY -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_ISLS_PROPERTIES -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESET_ISLS_COST -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESET_ISL_PARAMETERS -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_ISL -import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.TEST -import static org.openkilda.messaging.info.event.IslChangeType.DISCOVERED -import static org.openkilda.messaging.info.event.IslChangeType.FAILED -import static org.openkilda.messaging.info.event.IslChangeType.MOVED -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE - /** * Holds utility methods for manipulating y-flows. 
*/ @@ -68,7 +69,7 @@ class IslHelper { cleanupManager.addAction(RESTORE_ISL,{restoreIsl(islToBreak)}, cleanupAfter) cleanupManager.addAction(RESET_ISLS_COST,{database.resetCosts(topology.isls)}, cleanupAfter) if (getIslStatus(islToBreak).equals(DISCOVERED)) { - antiflapHelper.portDown(islToBreak.getSrcSwitch().getDpId(), islToBreak.getSrcPort()) + antiflapHelper.portDown(islToBreak.getSrcSwitch().getDpId(), islToBreak.getSrcPort(), cleanupAfter, false) } islUtils.waitForIslStatus([islToBreak], FAILED) } @@ -189,7 +190,13 @@ class IslHelper { //TOOD: replace boolean parameter with enum FORCE/NOT_FORCE def deleteIsl(Isl isl, boolean isForce = false) { cleanupManager.addAction(RESTORE_ISL, { - northbound.portDown(isl.srcSwitch.dpId, isl.srcPort) + def links = northbound.getAllLinks() + def forwardIsl = islUtils.getIslInfo(links, isl) + def reverseIsl = islUtils.getIslInfo(links, isl.reversed) + if(!((forwardIsl.isPresent() && reverseIsl.isPresent()) && (forwardIsl.get().state == DISCOVERED && reverseIsl.get().state == DISCOVERED))) { + northbound.portDown(isl.srcSwitch.dpId, isl.srcPort) + northbound.portDown(isl.dstSwitch.dpId, isl.dstPort) + } restoreIsl(isl) database.resetCosts([isl, isl.reversed]) }) diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/PortAntiflapHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/PortAntiflapHelper.groovy index 5beaad996ca..fc9d399155c 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/PortAntiflapHelper.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/PortAntiflapHelper.groovy @@ -1,6 +1,9 @@ package org.openkilda.functionaltests.helpers +import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE + import org.openkilda.functionaltests.helpers.model.PortHistoryEvent +import org.openkilda.functionaltests.model.cleanup.CleanupAfter import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.model.SwitchId import org.openkilda.testing.model.topology.TopologyDefinition @@ -10,6 +13,7 @@ import org.openkilda.testing.service.northbound.NorthboundServiceV2 import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier import org.springframework.beans.factory.annotation.Value +import org.springframework.context.annotation.Scope import org.springframework.stereotype.Component import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.PORT_UP @@ -22,6 +26,7 @@ import static org.openkilda.testing.Constants.WAIT_OFFSET * port was brought down and forces sleep for required 'cooldown' amount of time when one wants to bring that port 'up'. 
*/ @Component +@Scope(SCOPE_PROTOTYPE) class PortAntiflapHelper { @Autowired @Qualifier("islandNb") NorthboundService northbound @@ -65,9 +70,12 @@ class PortAntiflapHelper { } } - def portDown(SwitchId swId, int portNo, cleanupAfter = TEST) { - cleanupManager.addAction(PORT_UP, {safePortUp(swId, portNo)}, cleanupAfter) - cleanupManager.addAction(RESET_ISLS_COST, {database.resetCosts(topology.isls)}) + def portDown(SwitchId swId, int portNo, CleanupAfter cleanupAfter = TEST, boolean isNotInScopeOfIslBreak = true) { + if(isNotInScopeOfIslBreak) { + //there is port recovery and resetting ISL cost in the scope of ISL breaking + cleanupManager.addAction(PORT_UP, {safePortUp(swId, portNo)}, cleanupAfter) + cleanupManager.addAction(RESET_ISLS_COST, {database.resetCosts(topology.isls)}) + } def response = northbound.portDown(swId, portNo) sleep(antiflapMin * 1000) history.put(new Tuple2(swId, portNo), System.currentTimeMillis()) diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy index 668ace254c3..746ae920009 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/SwitchHelper.groovy @@ -175,6 +175,11 @@ class SwitchHelper { static int getRandomAvailablePort(Switch sw, TopologyDefinition topologyDefinition, boolean useTraffgenPorts = true, List busyPort = []) { List allowedPorts = topologyDefinition.getAllowedPortsForSwitch(sw) def availablePorts = allowedPorts - busyPort + if(!availablePorts) { + //as default flow is generated with vlan, we can reuse the same port if all available ports have been used + //this is a rare case for the situation when we need to create more than 20 flows in a row + availablePorts = allowedPorts + } def port = availablePorts[new Random().nextInt(availablePorts.size())] if (useTraffgenPorts) { List tgPorts = sw.traffGens*.switchPort.findAll { availablePorts.contains(it) } @@ -604,15 +609,15 @@ class SwitchHelper { * @param sw switch which is going to be disconnected * @param waitForRelatedLinks make sure that all switch related ISLs are FAILED */ - List knockoutSwitch(Switch sw, FloodlightConnectMode mode, boolean waitForRelatedLinks) { + List knockoutSwitch(Switch sw, FloodlightConnectMode mode, boolean waitForRelatedLinks, double timeout = WAIT_OFFSET) { def blockData = lockKeeper.knockoutSwitch(sw, mode) - cleanupManager.addAction(REVIVE_SWITCH, {reviveSwitch(sw, blockData, true)}, CleanupAfter.TEST) - Wrappers.wait(WAIT_OFFSET) { + cleanupManager.addAction(REVIVE_SWITCH, { reviveSwitch(sw, blockData, true) }, CleanupAfter.TEST) + Wrappers.wait(timeout) { assert northbound.get().getSwitch(sw.dpId).state == SwitchChangeType.DEACTIVATED } if (waitForRelatedLinks) { def swIsls = topology.get().getRelatedIsls(sw) - Wrappers.wait(discoveryTimeout + WAIT_OFFSET * 2) { + Wrappers.wait(discoveryTimeout + timeout * 2) { def allIsls = northbound.get().getAllLinks() swIsls.each { assert islUtils.getIslInfo(allIsls, it).get().state == IslChangeType.FAILED } } @@ -625,6 +630,18 @@ class SwitchHelper { knockoutSwitch(sw, mode, false) } + List knockoutSwitch(Switch sw, List regions) { + def blockData = lockKeeper.knockoutSwitch(sw, regions) + cleanupManager.addAction(REVIVE_SWITCH, { reviveSwitch(sw, blockData, true) }, CleanupAfter.TEST) + return 
blockData + } + + List knockoutSwitchFromStatsController(Switch sw){ + def blockData = lockKeeper.knockoutSwitch(sw, FloodlightConnectMode.RO) + cleanupManager.addAction(REVIVE_SWITCH, { reviveSwitch(sw, blockData, true) }, CleanupAfter.TEST) + return blockData + } + /** * Connect a switch to controller either adding controller settings inside an OVS switch * or setting proper iptables to allow access to floodlight for a hardware switch. @@ -835,13 +852,18 @@ class SwitchHelper { def originalProps = northbound.get().getSwitchProperties(sw.dpId) if (originalProps.server42FlowRtt != isServer42FlowRttEnabled) { def s42Config = sw.prop - cleanupManager.addAction(RESTORE_SWITCH_PROPERTIES, {northbound.get().updateSwitchProperties(sw.dpId, originalProps)}) - northbound.get().updateSwitchProperties(sw.dpId, originalProps.jacksonCopy().tap { + def requiredProps = originalProps.jacksonCopy().tap { server42FlowRtt = isServer42FlowRttEnabled server42MacAddress = s42Config ? s42Config.server42MacAddress : null server42Port = s42Config ? s42Config.server42Port : null server42Vlan = s42Config ? s42Config.server42Vlan : null + } + + cleanupManager.addAction(RESTORE_SWITCH_PROPERTIES, { + northbound.get().updateSwitchProperties(sw.dpId, requiredProps.jacksonCopy().tap { server42FlowRtt = sw?.prop?.server42FlowRtt }) }) + + northbound.get().updateSwitchProperties(sw.dpId, requiredProps) } int expectedNumberOfS42Rules = (isS42ToggleOn && isServer42FlowRttEnabled) ? getExpectedS42SwitchRulesBasedOnVxlanSupport(sw.dpId) : 0 Wrappers.wait(RULES_INSTALLATION_TIME) { @@ -862,7 +884,7 @@ class SwitchHelper { static void waitForS42SwRulesSetup(boolean isS42ToggleOn = true) { List switchDetails = northboundV2.get().getAllSwitchProperties().switchProperties - + .findAll { it.switchId in getTopology().get().switches.dpId } withPool { Wrappers.wait(RULES_INSTALLATION_TIME + WAIT_OFFSET) { switchDetails.eachParallel { sw -> @@ -898,4 +920,11 @@ class SwitchHelper { ) return northboundV2.get().partialSwitchUpdate(switchId, updateDto) } + + static boolean isServer42Supported(SwitchId switchId) { + def swProps = northbound.get().getSwitchProperties(switchId) + def featureToggles = northbound.get().getFeatureToggles() + def isServer42 = swProps.server42FlowRtt && featureToggles.server42FlowRtt + return isServer42 + } } \ No newline at end of file diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/TopologyHelper.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/TopologyHelper.groovy index dfba5432503..d885a214459 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/TopologyHelper.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/TopologyHelper.groovy @@ -57,6 +57,14 @@ class TopologyHelper { } } + SwitchTriplet getSingleSwitchTriplet(SwitchId singleSwitch) { + return getSwitchTriplets(false, true).find { + it.shared.getDpId() == singleSwitch + && it.ep1.getDpId() == singleSwitch + && it.ep2.getDpId() == singleSwitch + } + } + List getAllNotNeighbouringSwitchTriplets(boolean includeReverse = false) { return getSwitchTriplets(includeReverse).findAll { it.shared != it.ep1 && it.ep1 != it.ep2 && it.ep2 != it.shared && @@ -141,6 +149,41 @@ class TopologyHelper { && (it.pathsEp1[0].size() > 2 && it.pathsEp2[0].size() > 2) } } + SwitchTriplet findSwitchTripletServer42SupportWithSharedEpInTheMiddleOfTheChainExceptWBSw() { + def 
server42switches = topology.getActiveServer42Switches() + return switchTriplets.findAll(SwitchTriplet.ALL_ENDPOINTS_DIFFERENT).findAll(SwitchTriplet.NOT_WB_ENDPOINTS).find { + it.shared.dpId in server42switches.dpId + && it.ep1.dpId in server42switches.dpId + && it.ep2.dpId in server42switches.dpId + && (it.pathsEp1[0].size() == 2 && it.pathsEp2[0].size() == 2) } + } + + SwitchTriplet findSwitchTripletWithOnlySharedSwServer42Support() { + def server42switches = topology.getActiveServer42Switches() + return switchTriplets.findAll(SwitchTriplet.ALL_ENDPOINTS_DIFFERENT).find { + it.shared.dpId in server42switches.dpId + && !(it.ep1.dpId in server42switches.dpId) + && !(it.ep2.dpId in server42switches.dpId) + } + } + + SwitchTriplet findSwitchTripletWithSharedEpEp1Ep2InChainServer42Support() { + def server42switches = topology.getActiveServer42Switches() + return switchTriplets.findAll(SwitchTriplet.ALL_ENDPOINTS_DIFFERENT).find { + def areEp1Ep2AndEp1OrEp2AndShEpNeighbour + if(it.pathsEp1[0].size() == 2 && it.pathsEp2[0].size() > 2) { + //both pair sh_ep+ep1 and ep1+ep2 are neighbours, sh_ep and ep2 is not neighbour + areEp1Ep2AndEp1OrEp2AndShEpNeighbour = it.pathsEp2.find {ep2Path -> ep2Path.containsAll(it.pathsEp1[0]) && ep2Path.size() == 4 } + } else if(it.pathsEp2[0].size() == 2 && it.pathsEp1[0].size() > 2) { + //both pair sh_ep+ep2 and ep2+ep1 are neighbours, sh_ep and ep1 is not neighbour + areEp1Ep2AndEp1OrEp2AndShEpNeighbour = it.pathsEp1.find {ep1Path -> ep1Path.containsAll(it.pathsEp2[0]) && ep1Path.size() == 4} + } + it.shared.dpId in server42switches.dpId + && it.ep1.dpId in server42switches.dpId + && it.ep2.dpId in server42switches.dpId + && areEp1Ep2AndEp1OrEp2AndShEpNeighbour} + } + SwitchTriplet findSwitchTripletForHaFlowWithProtectedPaths() { return switchTriplets.find { if (!SwitchTriplet.ALL_ENDPOINTS_DIFFERENT(it)) { diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/FlowBuilder.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/FlowBuilder.groovy index 3ebc4ea7ab3..f0a5460a3ed 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/FlowBuilder.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/FlowBuilder.groovy @@ -41,17 +41,18 @@ class FlowBuilder { this.flowExtended.source = FlowEndpointV2.builder() .switchId(srcSwitch.dpId) - .portNumber(getRandomAvailablePort(srcSwitch, topologyDefinition, useTraffgenPorts, busyEndpoints*.port)) + .portNumber(getRandomAvailablePort(srcSwitch, topologyDefinition, useTraffgenPorts, busyEndpoints.findAll { it.sw == srcSwitch.dpId }.port)) .vlanId(randomVlan(busyEndpoints*.vlan)) .detectConnectedDevices(new DetectConnectedDevicesV2(false, false)).build() if (srcSwitch.dpId == dstSwitch.dpId) { + // For a SingleSwitch flow, selected switch should have >=2 traffGens busyEndpoints << new SwitchPortVlan(flowExtended.source.switchId, flowExtended.source.portNumber, flowExtended.source.vlanId) } this.flowExtended.destination = FlowEndpointV2.builder() .switchId(dstSwitch.dpId) - .portNumber(getRandomAvailablePort(dstSwitch, topologyDefinition, useTraffgenPorts, busyEndpoints*.port)) + .portNumber(getRandomAvailablePort(dstSwitch, topologyDefinition, useTraffgenPorts, busyEndpoints.findAll { it.sw == dstSwitch.dpId }.port)) .vlanId(randomVlan(busyEndpoints*.vlan)) .detectConnectedDevices(new DetectConnectedDevicesV2(false, 
false)).build() diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/YFlowBuilder.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/YFlowBuilder.groovy index a39de9105c8..ff041ece769 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/YFlowBuilder.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/builder/YFlowBuilder.groovy @@ -182,6 +182,11 @@ class YFlowBuilder { return this } + YFlowBuilder withEp2Vlan(Integer vlan) { + yFlow.subFlows.last().endpoint.vlanId = vlan + return this + } + YFlowBuilder withEp1AndEp2Vlan(Integer subFlow1Vlan, Integer subFlow2Vlan) { yFlow.subFlows.first().endpoint.vlanId = subFlow1Vlan yFlow.subFlows.last().endpoint.vlanId = subFlow2Vlan diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FeatureToggles.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FeatureToggles.groovy index 87145ce839c..a81f1a52ba2 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FeatureToggles.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FeatureToggles.groovy @@ -1,5 +1,7 @@ package org.openkilda.functionaltests.helpers.model +import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE + import org.openkilda.functionaltests.model.cleanup.CleanupAfter import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.messaging.model.system.FeatureTogglesDto @@ -7,31 +9,45 @@ import org.openkilda.testing.service.northbound.NorthboundService import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier +import org.springframework.context.annotation.Scope import org.springframework.stereotype.Component -import javax.annotation.PostConstruct - import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_FEATURE_TOGGLE import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.TEST @Slf4j @Component +@Scope(SCOPE_PROTOTYPE) class FeatureToggles { + private static FeatureTogglesDto initialState = FeatureTogglesDto.builder() + // all these params are specified during env set up + .createFlowEnabled(true) + .updateFlowEnabled(true) + .deleteFlowEnabled(true) + .flowsRerouteOnIslDiscoveryEnabled(true) + .useBfdForIslIntegrityCheck(true) + .floodlightRoutePeriodicSync(true) + .collectGrpcStats(true) + .server42FlowRtt(true) + .server42IslRtt(true) + .modifyYFlowEnabled(true) + .createHaFlowEnabled(true) + .modifyHaFlowEnabled(true) + .deleteHaFlowEnabled(true) + .syncSwitchOnConnect(true) + //the following toggles are disabled by default + .flowsRerouteUsingDefaultEncapType(false) + .flowLatencyMonitoringReactions(false) + .discoverNewIslsInUnderMaintenanceMode(false) + .build() @Autowired @Qualifier("islandNb") NorthboundService northbound @Autowired CleanupManager cleanupManager - FeatureTogglesDto initialState - - @PostConstruct - void init() { - initialState = getFeatureToggles() - } - FeatureTogglesDto setFeatureToggles(FeatureTogglesDto featureTogglesDto, CleanupAfter cleanupAfter= TEST) { - cleanupManager.addAction(RESTORE_FEATURE_TOGGLE, 
{northbound.toggleFeature(initialState)}, cleanupAfter) + cleanupManager.addAction(RESTORE_FEATURE_TOGGLE, { safeTogglesRecover() }, cleanupAfter) northbound.toggleFeature(featureTogglesDto) } @@ -124,4 +140,11 @@ class FeatureToggles { .build() ) } + + void safeTogglesRecover() { + FeatureTogglesDto currentState = getFeatureToggles() + if(currentState != initialState) { + northbound.toggleFeature(initialState) + } + } } diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowActionType.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowActionType.groovy index 554d5df3ff7..713e24777d4 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowActionType.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowActionType.groovy @@ -7,7 +7,10 @@ enum FlowActionType { PARTIAL_UPDATE("Flow partial updating", "Flow was updated successfully"), PARTIAL_UPDATE_ONLY_IN_DB("Flow partial updating", "Flow PATCH operation has been executed without the consecutive update."), REROUTE("Flow rerouting", "Flow was rerouted successfully"), - REROUTE_FAILED("Flow rerouting", "Failed to reroute the flow") + REROUTE_FAILED("Flow rerouting", "Failed to reroute the flow"), + CREATE_MIRROR("Flow mirror point creating", "Flow mirror point was created successfully"), + DELETE_MIRROR("Flow mirror point deleting", "Flow mirror point was deleted successfully"), + PATH_SWAP("Flow paths swap", "Flow was updated successfully") final String value final String payloadLastAction diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowExtended.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowExtended.groovy index 5a166b918ad..0fdc2c15b7d 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowExtended.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowExtended.groovy @@ -1,14 +1,22 @@ package org.openkilda.functionaltests.helpers.model +import static groovyx.gpars.GParsPool.withPool +import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan +import static org.openkilda.functionaltests.helpers.FlowNameGenerator.FLOW import static org.openkilda.functionaltests.helpers.Wrappers.wait import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.DELETE_FLOW import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.TEST +import static org.openkilda.testing.Constants.EGRESS_RULE_MULTI_TABLE_ID import static org.openkilda.testing.Constants.FLOW_CRUD_TIMEOUT +import static org.openkilda.testing.Constants.INGRESS_RULE_MULTI_TABLE_ID +import static org.openkilda.testing.Constants.TRANSIT_RULE_MULTI_TABLE_ID import static org.openkilda.testing.Constants.WAIT_OFFSET import org.openkilda.functionaltests.model.cleanup.CleanupAfter import org.openkilda.functionaltests.model.cleanup.CleanupManager +import org.openkilda.messaging.info.rule.FlowEntry +import org.openkilda.messaging.info.meter.FlowMeterEntries import org.openkilda.messaging.payload.flow.DetectConnectedDevicesPayload import org.openkilda.messaging.payload.flow.FlowCreatePayload import 
org.openkilda.messaging.payload.flow.FlowEndpointPayload @@ -18,8 +26,11 @@ import org.openkilda.messaging.payload.flow.FlowPayload import org.openkilda.messaging.payload.flow.FlowReroutePayload import org.openkilda.messaging.payload.flow.FlowResponsePayload import org.openkilda.messaging.payload.flow.FlowState +import org.openkilda.model.FlowPathDirection +import org.openkilda.model.FlowPathStatus import org.openkilda.model.SwitchId import org.openkilda.northbound.dto.v1.flows.FlowConnectedDevicesResponse +import org.openkilda.northbound.dto.v1.flows.FlowPatchDto import org.openkilda.northbound.dto.v1.flows.FlowValidationDto import org.openkilda.northbound.dto.v1.flows.PathDiscrepancyDto import org.openkilda.northbound.dto.v1.flows.PingInput @@ -28,6 +39,9 @@ import org.openkilda.northbound.dto.v2.flows.DetectConnectedDevicesV2 import org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 import org.openkilda.northbound.dto.v2.flows.FlowLoopPayload import org.openkilda.northbound.dto.v2.flows.FlowLoopResponse +import org.openkilda.northbound.dto.v2.flows.FlowMirrorPointPayload +import org.openkilda.northbound.dto.v2.flows.FlowMirrorPointResponseV2 +import org.openkilda.northbound.dto.v2.flows.FlowMirrorPointsResponseV2 import org.openkilda.northbound.dto.v2.flows.FlowPatchV2 import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.northbound.dto.v2.flows.FlowRerouteResponseV2 @@ -98,7 +112,7 @@ class FlowExtended { Set diverseWithYFlows Set diverseWithHaFlows - List mirrorPointStatus + List mirrorPointStatuses String yFlowId FlowStatistics statistics @@ -163,7 +177,7 @@ class FlowExtended { this.diverseWithYFlows = flow.diverseWithYFlows this.diverseWithHaFlows = flow.diverseWithHaFlows - this.mirrorPointStatus = flow.mirrorPointStatuses + this.mirrorPointStatuses = flow.mirrorPointStatuses this.yFlowId = flow.YFlowId this.statistics = flow.statistics @@ -216,8 +230,7 @@ class FlowExtended { } FlowExtended create(FlowState expectedState = FlowState.UP, CleanupAfter cleanupAfter = TEST) { - cleanupManager.addAction(DELETE_FLOW, { delete() }, cleanupAfter) - sendCreateRequest() + sendCreateRequest(cleanupAfter) waitForBeingInState(expectedState) } @@ -283,6 +296,49 @@ class FlowExtended { .build() } + FlowMirrorPointPayload buildMirrorPointPayload(SwitchId sinkSwitchId, + Integer sinkPortNumber, + Integer sinkVlanId = randomVlan(), + FlowPathDirection mirrorDirection = FlowPathDirection.FORWARD, + SwitchId mirrorPointSwitchId = sinkSwitchId) { + FlowEndpointV2 sinkEndpoint = this.source.switchId == sinkSwitchId ? 
this.source.jacksonCopy() : this.destination.jacksonCopy() + sinkEndpoint.tap { + sinkEndpoint.portNumber = sinkPortNumber + sinkEndpoint.vlanId = sinkVlanId + sinkEndpoint.switchId = sinkSwitchId + } + FlowMirrorPointPayload.builder() + .mirrorPointId(FLOW.generateId()) + .mirrorPointDirection(mirrorDirection.toString().toLowerCase()) + .mirrorPointSwitchId(mirrorPointSwitchId) + .sinkEndpoint(sinkEndpoint) + .build() + } + + FlowMirrorPointResponseV2 createMirrorPointWithPayload(FlowMirrorPointPayload mirrorPointPayload, + boolean withWait=true) { + def response = northboundV2.createMirrorPoint(flowId, mirrorPointPayload) + if (withWait) { + wait(FLOW_CRUD_TIMEOUT) { + assert retrieveDetails().mirrorPointStatuses[0].status == + FlowPathStatus.ACTIVE.toString().toLowerCase() + assert !retrieveFlowHistory().getEntriesByType(FlowActionType.CREATE_MIRROR).isEmpty() + } + } + return response + } + + FlowMirrorPointResponseV2 createMirrorPoint(SwitchId sinkSwitchId, + Integer sinkPortNumber, + Integer sinkVlanId = randomVlan(), + FlowPathDirection mirrorDirection = FlowPathDirection.FORWARD, + SwitchId mirrorPointSwitchId = sinkSwitchId, + boolean withWait=true) { + FlowMirrorPointPayload mirrorPointPayload = buildMirrorPointPayload( + sinkSwitchId, sinkPortNumber, sinkVlanId, mirrorDirection, mirrorPointSwitchId) + return createMirrorPointWithPayload(mirrorPointPayload, withWait) + } + FlowEntityPath retrieveAllEntityPaths() { FlowPathPayload flowPath = northbound.getFlowPath(flowId) new FlowEntityPath(flowPath, topologyDefinition) @@ -314,6 +370,10 @@ class FlowExtended { new FlowHistory(northbound.getFlowHistory(flowId, timeFrom, timeTo)) } + int retrieveHistoryEventsNumber() { + retrieveFlowHistory().getEventsNumber() + } + List retrieveFlowHistoryStatus(Long timeFrom = null, Long timeTo = null, Integer maxCount = null) { northboundV2.getFlowHistoryStatuses(flowId, timeFrom, timeTo, maxCount).historyStatuses.collect { new FlowHistoryStatus(it.timestamp, it.statusBecome) @@ -324,6 +384,16 @@ class FlowExtended { retrieveFlowHistoryStatus(null, null, maxCount) } + FlowMirrorPointsResponseV2 retrieveMirrorPoints() { + log.debug("Get Flow '$flowId' mirror points") + return northboundV2.getMirrorPoints(flowId) + } + + FlowMirrorPointResponseV2 deleteMirrorPoint(String mirrorPointId) { + log.debug("Deleting mirror point '$mirrorPointId' of the flow '$flowId'") + northboundV2.deleteMirrorPoint(flowId, mirrorPointId) + } + List validate() { log.debug("Validate Flow '$flowId'") northbound.validateFlow(flowId) @@ -350,8 +420,13 @@ class FlowExtended { northbound.pingFlow(flowId, pingInput) } + FlowExtended sendUpdateRequest(FlowExtended expectedEntity) { + def response = northboundV2.updateFlow(flowId, expectedEntity.convertToUpdate()) + return new FlowExtended(response, northbound, northboundV2, topologyDefinition, cleanupManager, database) + } + FlowExtended update(FlowExtended expectedEntity, FlowState flowState = FlowState.UP) { - northboundV2.updateFlow(flowId, expectedEntity.convertToUpdate()) + sendUpdateRequest(expectedEntity) return waitForBeingInState(flowState) } @@ -361,10 +436,63 @@ class FlowExtended { } FlowExtended partialUpdate(FlowPatchV2 updateRequest, FlowState flowState = FlowState.UP) { - northboundV2.partialUpdate(flowId, updateRequest) + sendPartialUpdateRequest(updateRequest) return waitForBeingInState(flowState) } + FlowReroutePayload rerouteV1() { + return northbound.rerouteFlow(flowId) + } + + FlowExtended sendPartialUpdateRequest(FlowPatchV2 updateRequest) { + def 
response = northboundV2.partialUpdate(flowId, updateRequest) + return new FlowExtended(response, northbound, northboundV2, topologyDefinition, cleanupManager, database) + + } + + FlowExtended partialUpdateV1(FlowPatchDto updateRequest, FlowState flowState = FlowState.UP) { + sendPartialUpdateRequestV1(updateRequest) + return waitForBeingInState(flowState) + } + + FlowExtended sendPartialUpdateRequestV1(FlowPatchDto updateRequest) { + def response = northbound.partialUpdate(flowId, updateRequest) + return new FlowExtended(response, northbound, northboundV2, topologyDefinition, cleanupManager, database) + + } + + void updateFlowBandwidthInDB(long newBandwidth) { + database.updateFlowBandwidth(flowId, newBandwidth) + } + + void updateFlowMeterIdInDB(long newMeterId) { + database.updateFlowMeterId(flowId, newMeterId) + } + + boolean isFlowAtSingleSwitch() { + return source.switchId == destination.switchId + } + + def getFlowRulesCountBySwitch(FlowDirection direction, int involvedSwitchesCount, boolean isSwitchServer42) { + def flowEndpoint = direction == FlowDirection.FORWARD ? source : destination + def swProps = northbound.getSwitchProperties(flowEndpoint.switchId) + int count = involvedSwitchesCount - 1; + + count += 1 // customer input rule + count += (flowEndpoint.vlanId != 0) ? 1 : 0 // pre ingress rule + count += 1 // multi table ingress rule + + def server42 = isSwitchServer42 && !isFlowAtSingleSwitch() + if (server42) { + count += (flowEndpoint.vlanId != 0) ? 1 : 0 // shared server42 rule + count += 2 // ingress server42 rule and server42 input rule + } + + count += (swProps.switchLldp || flowEndpoint.detectConnectedDevices.lldp) ? 1 : 0 // lldp rule + count += (swProps.switchArp || flowEndpoint.detectConnectedDevices.arp) ? 1 : 0 // arp rule + return count + } + /* This method waits for specific history event about action completion Note that the last existing event by action type is checked @@ -432,6 +560,10 @@ class FlowExtended { return flowCopy } + FlowMeterEntries resetMeters() { + northbound.resetMeters(flowId) + } + /** * Sends delete request for flow and waits for that flow to disappear from flows list */ @@ -444,7 +576,7 @@ class FlowExtended { def response = northboundV2.deleteFlow(flowId) wait(FLOW_CRUD_TIMEOUT) { assert !retrieveFlowStatus() - assert retrieveFlowHistory().getEntriesByType(FlowActionType.DELETE).first() + assert retrieveFlowHistory().getEntriesByType(FlowActionType.DELETE).last() .payload.last().action == FlowActionType.DELETE.payloadLastAction } return response @@ -462,7 +594,7 @@ class FlowExtended { def response = northbound.deleteFlow(flowId) wait(FLOW_CRUD_TIMEOUT) { assert !northbound.getFlowStatus(flowId) - assert retrieveFlowHistory().getEntriesByType(FlowActionType.DELETE).first() + assert retrieveFlowHistory().getEntriesByType(FlowActionType.DELETE).last() .payload.last().action == FlowActionType.DELETE.payloadLastAction } return response @@ -616,7 +748,7 @@ class FlowExtended { assertions.checkSucceeds { assert this.diverseWith == expectedFlowExtended.diverseWith } assertions.checkSucceeds { assert this.diverseWithYFlows == expectedFlowExtended.diverseWithYFlows } assertions.checkSucceeds { assert this.diverseWithHaFlows == expectedFlowExtended.diverseWithHaFlows } - assertions.checkSucceeds { assert this.mirrorPointStatus == expectedFlowExtended.mirrorPointStatus } + assertions.checkSucceeds { assert this.mirrorPointStatuses == expectedFlowExtended.mirrorPointStatuses } assertions.checkSucceeds { assert this.yFlowId == 
expectedFlowExtended.yFlowId} assertions.checkSucceeds { assert this.statistics == expectedFlowExtended.statistics } @@ -647,5 +779,65 @@ class FlowExtended { assertions.verify() } + + void verifyRulesForProtectedFlowOnSwitches(HashMap> flowInvolvedSwitchesWithRules) { + + def flowDBInfo = retrieveDetailsFromDB() + long mainForwardCookie = flowDBInfo.forwardPath.cookie.value + long mainReverseCookie = flowDBInfo.reversePath.cookie.value + long protectedForwardCookie = flowDBInfo.protectedForwardPath.cookie.value + long protectedReverseCookie = flowDBInfo.protectedReversePath.cookie.value + assert protectedForwardCookie && protectedReverseCookie, "Flow doesn't have protected path(no protected path cookies in DB)" + + def rulesOnSrcSwitch = flowInvolvedSwitchesWithRules.get(this.source.switchId) + assert rulesOnSrcSwitch.find { it.cookie == mainForwardCookie }.tableId == INGRESS_RULE_MULTI_TABLE_ID + assert rulesOnSrcSwitch.find { it.cookie == mainReverseCookie }.tableId == EGRESS_RULE_MULTI_TABLE_ID + assert rulesOnSrcSwitch.find { it.cookie == protectedReverseCookie }.tableId == EGRESS_RULE_MULTI_TABLE_ID + assert !rulesOnSrcSwitch*.cookie.contains(protectedForwardCookie) + + def rulesOnDstSwitch = flowInvolvedSwitchesWithRules.get(this.destination.switchId) + assert rulesOnDstSwitch.find { it.cookie == mainForwardCookie }.tableId == EGRESS_RULE_MULTI_TABLE_ID + assert rulesOnDstSwitch.find { it.cookie == mainReverseCookie }.tableId == INGRESS_RULE_MULTI_TABLE_ID + assert rulesOnDstSwitch.find { it.cookie == protectedForwardCookie }.tableId == EGRESS_RULE_MULTI_TABLE_ID + assert !rulesOnDstSwitch*.cookie.contains(protectedReverseCookie) + + def flowPathInfo = retrieveAllEntityPaths() + List mainFlowSwitches = flowPathInfo.flowPath.path.forward.getInvolvedSwitches() + List mainFlowTransitSwitches = flowPathInfo.flowPath.path.forward.getTransitInvolvedSwitches() + + List protectedFlowSwitches = flowPathInfo.flowPath.protectedPath.forward.getInvolvedSwitches() + List protectedFlowTransitSwitches = flowPathInfo.flowPath.protectedPath.forward.getTransitInvolvedSwitches() + + def commonSwitches = mainFlowSwitches.intersect(protectedFlowSwitches) + def commonTransitSwitches = mainFlowTransitSwitches.intersect(protectedFlowTransitSwitches) + + def uniqueTransitSwitchesMainPath = mainFlowTransitSwitches.findAll { !commonSwitches.contains(it) } + def uniqueTransitSwitchesProtectedPath = protectedFlowTransitSwitches.findAll { !commonSwitches.contains(it) } + + def transitTableId = TRANSIT_RULE_MULTI_TABLE_ID + withPool { + flowInvolvedSwitchesWithRules.findAll { it.getKey() in commonTransitSwitches }.each { rulesPerSwitch -> + assert rulesPerSwitch.getValue().find { it.cookie == mainForwardCookie }?.tableId == transitTableId + assert rulesPerSwitch.getValue().find { it.cookie == mainReverseCookie }?.tableId == transitTableId + assert rulesPerSwitch.getValue().find { it.cookie == protectedForwardCookie }?.tableId == transitTableId + assert rulesPerSwitch.getValue().find { it.cookie == protectedReverseCookie }?.tableId == transitTableId + } + } + //this loop checks rules on unique transit nodes + withPool { + flowInvolvedSwitchesWithRules.findAll { it.getKey() in uniqueTransitSwitchesProtectedPath } + .each { rulesPerSwitch -> + assert rulesPerSwitch.getValue().find { it.cookie == protectedForwardCookie }?.tableId == transitTableId + assert rulesPerSwitch.getValue().find { it.cookie == protectedReverseCookie }?.tableId == transitTableId + } + } + //this loop checks rules on unique main nodes + withPool 
{ + flowInvolvedSwitchesWithRules.findAll { it.getKey() in uniqueTransitSwitchesMainPath }.each { rulesPerSwitch -> + assert rulesPerSwitch.getValue().find { it.cookie == mainForwardCookie }?.tableId == transitTableId + assert rulesPerSwitch.getValue().find { it.cookie == mainReverseCookie }?.tableId == transitTableId + } + } + } } diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowHistory.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowHistory.groovy index 12e72e69741..953bf8868ba 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowHistory.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowHistory.groovy @@ -24,4 +24,8 @@ class FlowHistory { List getEntriesByType(FlowActionType type) { entries.findAll { it.action == type.getValue() } } + + int getEventsNumber() { + entries.size() + } } diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowPathModel.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowPathModel.groovy index 538c1283f01..d5d22ccfbd1 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowPathModel.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowPathModel.groovy @@ -25,5 +25,12 @@ class FlowPathModel { (path.reverse.getInvolvedIsls() + protectedPath?.reverse?.getInvolvedIsls()).findAll() } + List getMainPathInvolvedIsls(Direction direction = Direction.FORWARD) { + direction == Direction.FORWARD ? path.forward.getInvolvedIsls() : path.reverse.getInvolvedIsls() + } + + List getProtectedPathInvolvedIsls(Direction direction = Direction.FORWARD) { + direction == Direction.FORWARD ? 
protectedPath.forward.getInvolvedIsls() : protectedPath.reverse.getInvolvedIsls() + } } \ No newline at end of file diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowStatusHistoryEvent.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowStatusHistoryEvent.groovy index 1d22841d6f3..2959c07540c 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowStatusHistoryEvent.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/FlowStatusHistoryEvent.groovy @@ -3,7 +3,9 @@ package org.openkilda.functionaltests.helpers.model enum FlowStatusHistoryEvent { UP("UP"), - DELETED("DELETED") + DELETED("DELETED"), + DEGRADED("DEGRADED"), + DOWN("DOWN") String status diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/KildaConfiguration.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/KildaConfiguration.groovy index 4ddeaee0681..310c670cbf8 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/KildaConfiguration.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/KildaConfiguration.groovy @@ -1,16 +1,17 @@ package org.openkilda.functionaltests.helpers.model +import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE + import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.messaging.model.system.KildaConfigurationDto import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.PathComputationStrategy import org.openkilda.testing.service.northbound.NorthboundService -import org.openkilda.testing.service.northbound.NorthboundServiceV2 -import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier +import org.springframework.context.annotation.Scope import org.springframework.stereotype.Component import javax.annotation.PostConstruct @@ -18,6 +19,7 @@ import javax.annotation.PostConstruct import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_KILDA_CONFIGURATION @Component +@Scope(SCOPE_PROTOTYPE) class KildaConfiguration { @Autowired @Qualifier("islandNb") diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/Path.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/Path.groovy index 1f435b813bd..068778ef81b 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/Path.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/Path.groovy @@ -4,6 +4,7 @@ import org.openkilda.messaging.payload.flow.OverlappingSegmentsStats import org.openkilda.messaging.payload.flow.PathNodePayload import org.openkilda.model.SwitchId import org.openkilda.northbound.dto.v2.flows.FlowPathV2 +import org.openkilda.northbound.dto.v2.flows.FlowPathV2.PathNodeV2 import org.openkilda.testing.model.topology.TopologyDefinition import org.openkilda.testing.model.topology.TopologyDefinition.Isl import org.openkilda.testing.service.northbound.payloads.PathDto @@ -26,12 +27,12 @@ This class has to replace 
*PathHelper in future class Path { PathNodes nodes TopologyDefinition topologyDefinition - Long bandwidth; - Long latency; - Long latencyNs; - Long latencyMs; - Boolean isBackupPath; - Path protectedPath; + Long bandwidth + Long latency + Long latencyNs + Long latencyMs + Boolean isBackupPath + Path protectedPath Path(PathDto pathDto, TopologyDefinition topologyDefinition) { this.nodes = new PathNodes(pathDto.nodes) @@ -61,7 +62,8 @@ class Path { } List getInvolvedIsls() { - def isls = topologyDefinition.getIsls() + topologyDefinition.getIsls().collect { it.reversed } + def isls = (topologyDefinition.getIsls() + topologyDefinition.getIsls().collect { it.reversed }) + .findAll { it.srcSwitch && it.dstSwitch } nodes.getNodes().collate(2, 1, false).collect { List pathNodes -> isls.find { it.srcSwitch.dpId == pathNodes[0].switchId && @@ -84,7 +86,12 @@ class Path { } List getInvolvedSwitches() { - nodes.nodes.switchId + nodes.nodes.switchId.unique() + } + + List getTransitInvolvedSwitches() { + List switches = getInvolvedSwitches() + switches.size() > 2 ? switches[1..-2] : [] } OverlappingSegmentsStats overlappingSegmentStats(List comparedPath) { @@ -99,4 +106,8 @@ class Path { intersectingIslSize ? intersectingIslSize / basePathIsls.size() * 100 as int : 0, intersectingSwitchSize / basePathSwitches.size() * 100 as int,) } + + List retrieveNodes() { + nodes.getNodes() + } } diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy index 461d6fcadfb..095d6716feb 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRules.groovy @@ -1,7 +1,7 @@ package org.openkilda.functionaltests.helpers.model -import org.openkilda.functionaltests.model.cleanup.CleanupActionType import org.openkilda.functionaltests.model.cleanup.CleanupManager +import org.openkilda.messaging.command.switches.DeleteRulesAction import org.openkilda.messaging.info.rule.FlowEntry import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.FlowMeter @@ -9,7 +9,6 @@ import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType import org.openkilda.northbound.dto.v1.flows.PathDiscrepancyDto -import org.openkilda.northbound.dto.v2.haflows.HaFlow import org.openkilda.testing.service.database.Database import org.openkilda.testing.service.northbound.NorthboundService diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRulesFactory.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRulesFactory.groovy index 6b87a7d0b18..5b4296d9e44 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRulesFactory.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchRulesFactory.groovy @@ -1,14 +1,18 @@ package org.openkilda.functionaltests.helpers.model +import static org.springframework.beans.factory.config.ConfigurableBeanFactory.SCOPE_PROTOTYPE + import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.model.SwitchId import 
org.openkilda.testing.service.database.Database import org.openkilda.testing.service.northbound.NorthboundService import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier +import org.springframework.context.annotation.Scope import org.springframework.stereotype.Component @Component +@Scope(SCOPE_PROTOTYPE) class SwitchRulesFactory { @Autowired @Qualifier("northboundServiceImpl") diff --git a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchTriplet.groovy b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchTriplet.groovy index 03d16def7a1..d1a3181decf 100644 --- a/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchTriplet.groovy +++ b/src-java/testing/functional-tests/src/main/groovy/org/openkilda/functionaltests/helpers/model/SwitchTriplet.groovy @@ -55,4 +55,8 @@ class SwitchTriplet { static Closure TRAFFGEN_CAPABLE = { SwitchTriplet swT -> !(swT.ep1.getTraffGens().isEmpty() || swT.ep2.getTraffGens().isEmpty() || swT.shared.getTraffGens().isEmpty()) } + + static Closure NOT_WB_ENDPOINTS = { + SwitchTriplet swT -> !swT.shared.wb5164 && !swT.ep1.wb5164 && !swT.ep2.wb5164 + } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/configuration/ConfigurationSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/configuration/ConfigurationSpec.groovy index d60bc08081a..4e5d1014aee 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/configuration/ConfigurationSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/configuration/ConfigurationSpec.groovy @@ -3,8 +3,10 @@ package org.openkilda.functionaltests.spec.configuration import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.NonExistingEncapsulationTypeExpectedError import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.model.FlowEncapsulationType +import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Isolated import spock.lang.Narrative @@ -18,6 +20,11 @@ This spec assumes that 'transit_vlan' is always default type """) @Isolated //kilda config updates class ConfigurationSpec extends HealthCheckSpecification { + + @Autowired + @Shared + FlowFactory flowFactory + @Shared FlowEncapsulationType defaultEncapsulationType = FlowEncapsulationType.TRANSIT_VLAN @@ -28,12 +35,12 @@ class ConfigurationSpec extends HealthCheckSpecification { .neighbouring() .withBothSwitchesVxLanEnabled() .random() - def flow1 = flowHelperV2.randomFlow(switchPair) - flow1.encapsulationType = null - flowHelperV2.addFlow(flow1) + def flow1 = flowFactory.getBuilder(switchPair) + .withEncapsulationType(null).build() + .create() then: "Flow is created with current default encapsulation type(transit_vlan)" - northboundV2.getFlow(flow1.flowId).encapsulationType == defaultEncapsulationType.toString().toLowerCase() + flow1.retrieveDetails().encapsulationType.toString() == defaultEncapsulationType.toString().toLowerCase() when: "Update default flow encapsulation type" def newFlowEncapsulationType = FlowEncapsulationType.VXLAN @@ -46,12 +53,12 @@ class 
ConfigurationSpec extends HealthCheckSpecification { kildaConfiguration.getKildaConfiguration().flowEncapsulationType == newFlowEncapsulationType.toString().toLowerCase() when: "Create a flow without encapsulation type" - def flow2 = flowHelperV2.randomFlow(switchPair, false, [flow1]) - flow2.encapsulationType = null - flowHelperV2.addFlow(flow2) + def flow2 = flowFactory.getBuilder(switchPair, false, flow1.occupiedEndpoints()) + .withEncapsulationType(null).build() + .create() then: "Flow is created with new default encapsulation type(vxlan)" - northboundV2.getFlow(flow2.flowId).encapsulationType == newFlowEncapsulationType.toString().toLowerCase() + flow2.retrieveDetails().encapsulationType.toString() == newFlowEncapsulationType.toString().toLowerCase() } @Tags(LOW_PRIORITY) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/AutoRerouteSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/AutoRerouteSpec.groovy index 98e0720642a..0f3c99bab11 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/AutoRerouteSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/AutoRerouteSpec.groovy @@ -1,141 +1,138 @@ package org.openkilda.functionaltests.spec.flows -import groovy.util.logging.Slf4j +import static groovyx.gpars.GParsPool.withPool +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.functionaltests.helpers.Wrappers.retry +import static org.openkilda.functionaltests.helpers.Wrappers.timedLoop +import static org.openkilda.functionaltests.helpers.Wrappers.wait +import static org.openkilda.functionaltests.helpers.model.FlowActionType.REROUTE +import static org.openkilda.functionaltests.helpers.model.FlowActionType.REROUTE_FAILED +import static org.openkilda.messaging.info.event.IslChangeType.FAILED +import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.flow.FlowNotReroutedExpectedError import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper -import org.openkilda.functionaltests.helpers.model.SwitchPair -import org.openkilda.functionaltests.helpers.model.SwitchPairs +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.FlowHistoryEventExtension +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.messaging.info.event.IslChangeType import org.openkilda.messaging.info.event.PathNode import org.openkilda.messaging.info.event.SwitchChangeType import org.openkilda.messaging.payload.flow.FlowState -import 
org.openkilda.messaging.payload.history.FlowHistoryEntry
import org.openkilda.model.SwitchFeature
import org.openkilda.model.SwitchStatus
-import org.openkilda.northbound.dto.v1.flows.PingInput
-import org.openkilda.northbound.dto.v2.flows.FlowRequestV2
import org.openkilda.testing.model.topology.TopologyDefinition.Isl
import org.openkilda.testing.service.lockkeeper.model.TrafficControlData
+
+import groovy.util.logging.Slf4j
+import org.springframework.beans.factory.annotation.Autowired
import org.springframework.web.client.HttpClientErrorException
import spock.lang.Isolated
import spock.lang.Narrative
+import spock.lang.Shared
import java.util.concurrent.TimeUnit
-import static groovyx.gpars.GParsPool.withPool
-import static org.junit.jupiter.api.Assumptions.assumeTrue
-import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE
-import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL
-import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE
-import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL
-import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT
-import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_ACTION
-import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_COMPLETE
-import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_FAIL
-import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_SUCCESS
-import static org.openkilda.functionaltests.helpers.Wrappers.retry
-import static org.openkilda.functionaltests.helpers.Wrappers.timedLoop
-import static org.openkilda.functionaltests.helpers.Wrappers.wait
-import static org.openkilda.messaging.info.event.IslChangeType.FAILED
-import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME
-import static org.openkilda.testing.Constants.WAIT_OFFSET
-import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW
-
@Slf4j
@Narrative("Verify different cases when Kilda is supposed to automatically reroute certain flow(s).")
class AutoRerouteSpec extends HealthCheckSpecification {
+ @Autowired
+ @Shared
+ FlowFactory flowFactory
+
@Tags([SMOKE, ISL_RECOVER_ON_FAIL])
@IterationTag(tags = [TOPOLOGY_DEPENDENT], iterationNameRegex = /vxlan/)
def "Flow is rerouted when one of the #description flow ISLs fails"() {
given: "A flow with one alternative path at least"
- def data = flowData(switchPairs.all().neighbouring(), 1)
- FlowRequestV2 flow = data[0]
- def allFlowPaths = data[1]
- flowHelperV2.addFlow(flow)
- def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId))
+ FlowExtended flow = flowFactory.getRandom(switchPair)
+ def allFlowPaths = switchPair.getPaths()
+ def initialPath = flow.retrieveAllEntityPaths()
when: "Fail a flow ISL (bring switch port down)"
Set altFlowIsls = []
- def flowIsls = pathHelper.getInvolvedIsls(flowPath)
- allFlowPaths.findAll { it != flowPath }.each { altFlowIsls.addAll(pathHelper.getInvolvedIsls(it)) }
+ def flowIsls = initialPath.flowPath.getInvolvedIsls()
+ allFlowPaths.findAll { it != initialPath.getPathNodes() }
+ .each { altFlowIsls.addAll(pathHelper.getInvolvedIsls(it)) }
def islToFail = flowIsls.find { !(it in altFlowIsls) && !(it.reversed in altFlowIsls) }
islHelper.breakIsl(islToFail)
then: "The flow was rerouted after reroute delay"
wait(rerouteDelay + WAIT_OFFSET) {
- assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP
- assert PathHelper.convert(northbound.getFlowPath(flow.flowId)) != flowPath
+ assert flow.retrieveFlowStatus().status == FlowState.UP
+ assert flow.retrieveAllEntityPaths() != initialPath
}
where:
- description | flowData
- "vlan" | { SwitchPairs switchPairs, Integer minAltPathsCount ->
- getFlowWithPaths(switchPairs, minAltPathsCount)
- }
- "vxlan" | { SwitchPairs switchPairs, Integer minAltPathsCount ->
- getVxlanFlowWithPaths(switchPairs, minAltPathsCount)
- }
+ description | switchPair
+ "vlan" | switchPairs.all().neighbouring().withAtLeastNPaths(2).random()
+ "vxlan" | switchPairs.all().neighbouring().withBothSwitchesVxLanEnabled().withAtLeastNPaths(2).random()
}
@Tags([SMOKE, ISL_RECOVER_ON_FAIL])
def "Strict bandwidth true: Flow status is set to DOWN after reroute if no alternative path with enough bandwidth"() {
given: "A flow with one alternative path at least"
- List helperFlows = []
- def data = noIntermediateSwitchFlow(1, true)
- FlowRequestV2 flow = data[0]
- List> allFlowPaths = data[1]
- flow.strictBandwidth = true
- flowHelperV2.addFlow(flow)
- def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId))
+ def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random()
+ List> allFlowPaths = switchPair.paths
+
+ FlowExtended flow = flowFactory.getBuilder(switchPair)
+ .withStrictBandwidth(true).build()
+ .create()
+ def initialPath = flow.retrieveAllEntityPaths()
and: "Alt path ISLs have not enough bandwidth to host the flow"
- def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId))
- def altPaths = allFlowPaths.findAll { it != currentPath }
- def involvedIsls = pathHelper.getInvolvedIsls(currentPath)
- def altIsls = altPaths.collectMany { pathHelper.getInvolvedIsls(it).findAll { !(it in involvedIsls || it.reversed in involvedIsls) } }
+ def altPaths = allFlowPaths.findAll { it != initialPath.getPathNodes() }
+ def flowIsls = initialPath.flowPath.getInvolvedIsls()
+ def altIsls = altPaths.collectMany { pathHelper.getInvolvedIsls(it).findAll { !(it in flowIsls || it.reversed in flowIsls) } }
.unique { a, b -> (a == b || a == b.reversed) ? 0 : 1 }
- altIsls.each {isl ->
+ List busyEndpoints = flow.occupiedEndpoints()
+ altIsls.each { isl ->
def linkProp = islUtils.toLinkProps(isl, [cost: "1"])
pathHelper.updateIslsCost([isl], 1)
- def helperFlow = flowHelperV2.randomFlow(isl.srcSwitch, isl.dstSwitch, false, [flow, *helperFlows]).tap {
- maximumBandwidth = northbound.getLink(isl).availableBandwidth - flow.maximumBandwidth + 1
- }
- flowHelperV2.addFlow(helperFlow)
- helperFlows << helperFlow
+ def extraFlow = flowFactory.getBuilder(isl.srcSwitch, isl.dstSwitch, false, busyEndpoints)
+ .withBandwidth(northbound.getLink(isl).availableBandwidth - flow.maximumBandwidth + 1).build()
+ .create()
+ busyEndpoints.addAll(extraFlow.occupiedEndpoints())
northbound.deleteLinkProps([linkProp])
}
when: "Fail a flow ISL (bring switch port down)"
Set altFlowIsls = []
- def flowIsls = pathHelper.getInvolvedIsls(flowPath)
- allFlowPaths.findAll { it != flowPath }.each { altFlowIsls.addAll(pathHelper.getInvolvedIsls(it)) }
+ allFlowPaths.findAll { it != initialPath.getPathNodes() }
+ .each { altFlowIsls.addAll(pathHelper.getInvolvedIsls(it)) }
def islToFail = flowIsls.find { !(it in altFlowIsls) && !(it.reversed in altFlowIsls) }
islHelper.breakIsl(islToFail)
then: "Flow history shows 3 retry attempts, eventually bringing flow to Down"
- List history
+ List rerouteEvents
wait(rerouteDelay + WAIT_OFFSET * 2) {
- history = northbound.getFlowHistory(flow.flowId)
+ rerouteEvents = flow.retrieveFlowHistory().getEntriesByType(REROUTE)
verifyAll {
- history.count { it.action == REROUTE_ACTION } == 4 //original + 3 retries
- history.last().payload.last().details.endsWith(
+ rerouteEvents.size() == 4 //original + 3 retries
+ rerouteEvents.last().payload.last().details.endsWith(
"Failed to find path with requested bandwidth=$flow.maximumBandwidth")
- history.last().payload.last().action == REROUTE_FAIL
+ rerouteEvents.last().payload.last().action == REROUTE_FAILED.payloadLastAction
}
}
- northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN
+ flow.retrieveFlowStatus().status == FlowState.DOWN
and: "Flow path is unchanged"
- PathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath
+ flow.retrieveAllEntityPaths() == initialPath
when: "Try to manually reroute the Down flow, while there is still not enough bandwidth"
def manualRerouteTime = System.currentTimeSeconds()
- northboundV2.rerouteFlow(flow.flowId)
+ flow.reroute()
then: "Error is returned, stating a 'not enough bandwidth' reason"
def error = thrown(HttpClientErrorException)
@@ -143,43 +140,40 @@ class AutoRerouteSpec extends HealthCheckSpecification {
and: "Flow history shows more reroute attempts after manual command"
wait(WAIT_OFFSET * 2) {
- history = northbound.getFlowHistory(flow.flowId, manualRerouteTime, null)
+ rerouteEvents = flow.retrieveFlowHistory(manualRerouteTime).getEntriesByType(REROUTE)
verifyAll {
- history.count { it.action == REROUTE_ACTION } == 4 //manual original + 3 retries
- history.last().payload.last().details.endsWith(
+ rerouteEvents.size() == 4 //manual original + 3 retries
+ rerouteEvents.last().payload.last().details.endsWith(
"Failed to find path with requested bandwidth=$flow.maximumBandwidth")
- history.last().payload.last().action == REROUTE_FAIL
+ rerouteEvents.last().payload.last().action == REROUTE_FAILED.payloadLastAction
}
}
and: "Flow remains Down and on the same path"
- northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN
- PathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath
+ flow.retrieveFlowStatus().status
== FlowState.DOWN + flow.retrieveAllEntityPaths() == initialPath when: "Broken ISL on the original path is back online" - def portUp = antiflap.portUp(islToFail.srcSwitch.dpId, islToFail.srcPort) + antiflap.portUp(islToFail.srcSwitch.dpId, islToFail.srcPort) - then: "Flow is rerouted to the original path to UP state" - wait(rerouteDelay + WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - assert PathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath - } + then: "The flow state has been changed to UP without rerouting(remains initial path)" + flow.waitForBeingInState(FlowState.UP) + assert flow.retrieveAllEntityPaths() == initialPath } @Tags([ISL_RECOVER_ON_FAIL, SWITCH_RECOVER_ON_FAIL]) def "Single switch flow changes status on switch up/down events"() { given: "Single switch flow" def sw = topology.getActiveSwitches()[0] - def flow = flowHelperV2.singleSwitchFlow(sw) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(sw, sw) when: "The switch is disconnected" def blockData = switchHelper.knockoutSwitch(sw, RW) then: "Flow becomes 'Down'" wait(WAIT_OFFSET) { - def flowInfo = northboundV2.getFlow(flow.flowId) - assert flowInfo.status == FlowState.DOWN.toString() + def flowInfo = flow.retrieveDetails() + assert flowInfo.status == FlowState.DOWN assert flowInfo.statusInfo == "Switch $sw.dpId is inactive" } @@ -188,37 +182,38 @@ class AutoRerouteSpec extends HealthCheckSpecification { islHelper.breakIsl(islToFail) then: "Flow remains 'DOWN'" - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN + assert flow.retrieveFlowStatus().status == FlowState.DOWN when: "Other isl is back online" islHelper.restoreIsl(islToFail) then: "Flow remains 'DOWN'" - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN + assert flow.retrieveFlowStatus().status == FlowState.DOWN when: "The switch is connected back" switchHelper.reviveSwitch(sw, blockData, true) then: "Flow becomes 'Up'" wait(WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + assert flow.retrieveFlowStatus().status == FlowState.UP } and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() } @Tags([SMOKE, ISL_RECOVER_ON_FAIL]) def "Flow goes to 'Down' status when one of the flow ISLs fails and there is no alt path to reroute"() { given: "A flow without alternative paths" - def data = noIntermediateSwitchFlow(0, true) - FlowRequestV2 flow = data[0] - def allFlowPaths = data[1] - flow.strictBandwidth = strictBw - flowHelperV2.addFlow(flow) - def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) - def altPaths = allFlowPaths.findAll { it != flowPath } - def involvedIsls = pathHelper.getInvolvedIsls(flowPath) + def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(1).random() + def allFlowPaths = switchPair.paths + def flow = flowFactory.getBuilder(switchPair).withStrictBandwidth(strictBw).build() + .create() + + def initialPath = flow.retrieveAllEntityPaths() + + def altPaths = allFlowPaths.findAll { it != initialPath.getPathNodes() } + def involvedIsls = initialPath.flowPath.getInvolvedIsls() def broughtDownIsls = altPaths.collectMany { pathHelper.getInvolvedIsls(it) .findAll { !(it in involvedIsls || it.reversed in involvedIsls) } @@ -232,10 +227,10 @@ class AutoRerouteSpec extends HealthCheckSpecification { then: "The flow becomes 'Down'" wait(rerouteDelay + WAIT_OFFSET * 2) { - 
assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN - def reroutes = flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION) + assert flow.retrieveFlowStatus().status == FlowState.DOWN + def reroutes = flow.retrieveFlowHistory().getEntriesByType(REROUTE) assert reroutes.size() == reroutesCount - assert reroutes.last().payload.last().action == REROUTE_FAIL + assert reroutes.last().payload.last().action == REROUTE_FAILED.payloadLastAction } when: "ISL goes back up" @@ -243,9 +238,9 @@ class AutoRerouteSpec extends HealthCheckSpecification { then: "The flow becomes 'Up'" wait(rerouteDelay + WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - assert flowHelper.getLatestHistoryEntry(flow.flowId).payload.find { - it.action == "The flow status was reverted to UP" || it.action == REROUTE_SUCCESS + assert flow.retrieveFlowStatus().status == FlowState.UP + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).last().payload.find { + it.action == "The flow status was reverted to UP" || it.action == REROUTE.payloadLastAction } } @@ -258,12 +253,11 @@ class AutoRerouteSpec extends HealthCheckSpecification { @Tags([SMOKE, ISL_RECOVER_ON_FAIL]) def "Flow in 'Down' status is rerouted when discovering a new ISL"() { given: "An intermediate-switch flow with one alternative path at least" - def data = noIntermediateSwitchFlow(1, true) - FlowRequestV2 flow = data[0] - def allFlowPaths = data[1] - - flowHelperV2.addFlow(flow.tap { strictBandwidth = true }) - def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() + def allFlowPaths = switchPair.paths + def flow = flowFactory.getBuilder(switchPair).withStrictBandwidth(true).build() + .create() + def initialPath = flow.retrieveAllEntityPaths() when: "Bring all ports down on the source switch that are involved in the current and alternative paths" def broughtDownIsls = allFlowPaths.unique { it.first() }.collect { path -> @@ -273,47 +267,50 @@ class AutoRerouteSpec extends HealthCheckSpecification { then: "The flow goes to 'Down' status" wait(rerouteDelay + WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN - assert flowHelper.getLatestHistoryEntry(flow.flowId).payload.find { it.action == REROUTE_FAIL } + assert flow.retrieveFlowStatus().status == FlowState.DOWN + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).last().payload + .find { it.action == REROUTE_FAILED.payloadLastAction } } + wait(WAIT_OFFSET) { - def prevHistorySize = northbound.getFlowHistory(flow.flowId) + def prevHistorySize = flow.retrieveFlowHistory().entries .findAll { !(it.details =~ /Reason: ISL .* status become ACTIVE/) }.size() timedLoop(4) { //history size should no longer change for the flow, all retries should give up - def newHistorySize = northbound.getFlowHistory(flow.flowId) + def newHistorySize = flow.retrieveFlowHistory().entries .findAll { !(it.details =~ /Reason: ISL .* status become ACTIVE/) }.size() assert newHistorySize == prevHistorySize - assert northbound.getFlowStatus(flow.flowId).status == FlowState.DOWN + assert flow.retrieveFlowStatus().status == FlowState.DOWN sleep(500) } } when: "Bring all ports up on the source switch that are involved in the alternative paths" - islHelper.restoreIsls(broughtDownIsls.findAll {it.srcPort != flowPath.first().getPortNo()}) + islHelper.restoreIsls(broughtDownIsls.findAll {it.srcPort != 
initialPath.flowPath.getInvolvedIsls().first().srcPort}) then: "The flow goes to 'Up' status" and: "The flow was rerouted" //rtretiak: TODO: why such a long wait required(it is indeed required)? investigate wait(rerouteDelay + discoveryInterval + WAIT_OFFSET * 3) { - timedLoop(3) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } - assert PathHelper.convert(northbound.getFlowPath(flow.flowId)) != flowPath + timedLoop(3) {assert flow.retrieveFlowStatus().status == FlowState.UP } + assert flow.retrieveAllEntityPaths() != initialPath } } @Tags([SMOKE, ISL_RECOVER_ON_FAIL]) def "Flow in 'Up' status is not rerouted when discovering a new ISL and more preferable path is available"() { given: "A flow with one alternative path at least" - def data = noIntermediateSwitchFlow(1, true) - FlowRequestV2 flow = data[0] - def allFlowPaths = data[1] - flowHelperV2.addFlow(flow) - def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() + def allFlowPaths = switchPair.paths + + def flow = flowFactory.getRandom(switchPair) + def initialPath = flow.retrieveAllEntityPaths() and: "Make the current flow path less preferable than others" - allFlowPaths.findAll { it != flowPath }.each { pathHelper.makePathMorePreferable(it, flowPath) } + allFlowPaths.findAll { it != initialPath.getPathNodes() } + .each { pathHelper.makePathMorePreferable(it, initialPath.getPathNodes()) } when: "One of the links not used by flow goes down" - def involvedIsls = pathHelper.getInvolvedIsls(flowPath) + def involvedIsls = initialPath.flowPath.getInvolvedIsls() def islToFail = topology.islsForActiveSwitches.find { !involvedIsls.contains(it) && !involvedIsls.contains(it.reversed) } @@ -324,26 +321,27 @@ class AutoRerouteSpec extends HealthCheckSpecification { then: "The flow is not rerouted and doesn't use more preferable path" TimeUnit.SECONDS.sleep(rerouteDelay + WAIT_OFFSET) - northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - PathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath + flow.retrieveFlowStatus().status == FlowState.UP + flow.retrieveAllEntityPaths() == initialPath } @Tags([SMOKE]) def "Flow in 'Up' status is not rerouted when connecting a new switch and more preferable path is available"() { given: "A flow with one alternative path at least" - def data = noIntermediateSwitchFlow(1, true) - FlowRequestV2 flow = data[0] - def allFlowPaths = data[1] - flowHelperV2.addFlow(flow) - def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() + def allFlowPaths = switchPair.paths + + def flow = flowFactory.getRandom(switchPair) + def initialPath = flow.retrieveAllEntityPaths() and: "Make the current flow path less preferable than others" - allFlowPaths.findAll { it != flowPath }.each { pathHelper.makePathMorePreferable(it, flowPath) } + allFlowPaths.findAll { it != initialPath.getPathNodes() } + .each { pathHelper.makePathMorePreferable(it, initialPath.getPathNodes()) } when: "Disconnect one of the switches not used by flow" - def involvedSwitches = pathHelper.getInvolvedSwitches(flowPath) + def involvedSwitches = initialPath.getInvolvedSwitches() def switchToDisconnect = topology.getActiveSwitches().find { !involvedSwitches.contains(it) } - def blockData = lockKeeper.knockoutSwitch(switchToDisconnect, RW) + def blockData = switchHelper.knockoutSwitch(switchToDisconnect, RW) then: "The switch 
is really disconnected from the controller" wait(WAIT_OFFSET) { assert !(switchToDisconnect.dpId in northbound.getActiveSwitches()*.switchId) } @@ -356,39 +354,40 @@ class AutoRerouteSpec extends HealthCheckSpecification { assert northbound.getSwitch(switchToDisconnect.dpId).state == SwitchChangeType.ACTIVATED assert northbound.getActiveLinks().size() == topology.islsForActiveSwitches.size() * 2 } - def switchIsOnline = true and: "The flow is not rerouted and doesn't use more preferable path" TimeUnit.SECONDS.sleep(rerouteDelay + WAIT_OFFSET) - northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - PathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath + flow.retrieveFlowStatus().status == FlowState.UP + flow.retrieveAllEntityPaths() == initialPath } @Tags([HARDWARE, SMOKE]) def "Flow is not rerouted when one of the flow ports goes down"() { given: "An intermediate-switch flow with one alternative path at least" - def data = intermediateSwitchFlow(1, true) - FlowRequestV2 flow = data[0] - def allFlowPaths = data[1] - flowHelperV2.addFlow(flow) - def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNPaths(2).random() + def allFlowPaths = switchPair.paths + + def flow = flowFactory.getRandom(switchPair) + def initialPath = flow.retrieveAllEntityPaths() + and: "Make the current flow path less preferable than others" - allFlowPaths.findAll { it != flowPath }.each { pathHelper.makePathMorePreferable(it, flowPath) } + allFlowPaths.findAll { it != initialPath.getPathNodes() } + .each { pathHelper.makePathMorePreferable(it, initialPath.getPathNodes()) } when: "Bring the flow port down on the source switch" - northbound.portDown(flow.source.switchId, flow.source.portNumber) + antiflap.portDown(flow.source.switchId, flow.source.portNumber) then: "The flow is not rerouted" TimeUnit.SECONDS.sleep(rerouteDelay) - PathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath + flow.retrieveAllEntityPaths() == initialPath when: "Bring the flow port down on the destination switch" - northbound.portDown(flow.destination.switchId, flow.destination.portNumber) + antiflap.portDown(flow.destination.switchId, flow.destination.portNumber) then: "The flow is not rerouted" TimeUnit.SECONDS.sleep(rerouteDelay) - PathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath + flow.retrieveAllEntityPaths() == initialPath } @Tags(HARDWARE) @@ -400,32 +399,22 @@ class AutoRerouteSpec extends HealthCheckSpecification { .random() and: "A flow on the given switch pair" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) when: "Deactivate the src switch" def swToDeactivate = switchPair.src - def blockData = lockKeeper.knockoutSwitch(swToDeactivate, RW) // it takes more time to DEACTIVATE a switch via the 'knockoutSwitch' method on the stage env - wait(WAIT_OFFSET * 4) { - assert northbound.getSwitch(swToDeactivate.dpId).state == SwitchChangeType.DEACTIVATED - } + def blockData = switchHelper.knockoutSwitch(swToDeactivate, RW, false, WAIT_OFFSET * 4) then: "Flow is UP" - northbound.getFlowStatus(flow.flowId).status == FlowState.UP + flow.retrieveFlowStatus().status == FlowState.UP when: "Activate the src switch" - lockKeeper.reviveSwitch(swToDeactivate, blockData) - wait(WAIT_OFFSET) { - assert northbound.getSwitch(swToDeactivate.dpId).state == SwitchChangeType.ACTIVATED - assert northbound.getAllLinks().findAll { - it.state == 
IslChangeType.DISCOVERED - }.size() == topology.islsForActiveSwitches.size() * 2 - } + switchHelper.reviveSwitch(swToDeactivate, blockData, true) then: "System doesn't try to reroute the flow on the switchUp event because flow is already in UP state" timedLoop(rerouteDelay + WAIT_OFFSET / 2) { - assert flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION).findAll { + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).findAll { !(it.details =~ /Reason: ISL .* status become ACTIVE/) }.isEmpty() } @@ -439,9 +428,10 @@ class AutoRerouteSpec extends HealthCheckSpecification { .withExactlyNIslsBetweenSwitches(1) .random() def flowPath = switchPair.paths.min { it.size() } - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) - assert PathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath + def flow = flowFactory.getRandom(switchPair) + def initialPath = flow.retrieveAllEntityPaths() + + assert initialPath.getPathNodes() == flowPath //All alternative paths for both flows are unavailable def untouchableIsls = pathHelper.getInvolvedIsls(flowPath).collectMany { [it, it.reversed] } @@ -451,21 +441,20 @@ class AutoRerouteSpec extends HealthCheckSpecification { .findAll { !untouchableIsls.contains(it) }.unique { [it, it.reversed].sort() } islHelper.breakIsls(islsToBreak) //move the flow to DOWN status - def islToBreak = pathHelper.getInvolvedIsls(flowPath).first() + def islToBreak = initialPath.flowPath.getInvolvedIsls().first() islHelper.breakIsl(islToBreak) when: "Generate switchUp event on switch which is not related to the flow" def involvedSwitches = pathHelper.getInvolvedSwitches(flowPath)*.dpId def switchToManipulate = topology.activeSwitches.find { !(it.dpId in involvedSwitches) } def blockData = switchHelper.knockoutSwitch(switchToManipulate, RW) - def isSwitchActivated = false wait(WAIT_OFFSET) { timedLoop(4) { //waiting for the last retry in the scope of flow rerouting due to the ISL failure - assert northbound.getFlowHistory(flow.flowId).findAll { - it.action == "Flow rerouting" && it.details =~ /Reason: ISL .* become INACTIVE/ && it.taskId.contains("retry #1 ignore_bw true") + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).findAll { + it.details =~ /Reason: ISL .* become INACTIVE/ && it.taskId.contains("retry #1 ignore_bw true") } - assert northbound.getFlowStatus(flow.flowId).status == FlowState.DOWN + assert flow.retrieveFlowStatus().status == FlowState.DOWN sleep(500) } } @@ -474,9 +463,9 @@ class AutoRerouteSpec extends HealthCheckSpecification { then: "Flow is not triggered for reroute due to switchUp event because switch is not related to the flow" TimeUnit.SECONDS.sleep(rerouteDelay * 2) // it helps to be sure that the auto-reroute operation is completed - northbound.getFlowHistory(flow.flowId, expectedZeroReroutesTimestamp, System.currentTimeSeconds()).findAll { - !(it.details =~ /Reason: ISL .* status become ACTIVE/) && //exclude ISL up reasons from parallel streams - it.action == REROUTE_ACTION + flow.retrieveFlowHistory(expectedZeroReroutesTimestamp, System.currentTimeSeconds()) + .getEntriesByType(REROUTE).findAll { + !(it.details =~ /Reason: ISL .* status become ACTIVE/) //exclude ISL up reasons from parallel streams }.size() == 0 } @@ -521,8 +510,7 @@ class AutoRerouteSpec extends HealthCheckSpecification { and: "A flow over these switches that uses one of the desired paths that have common ISL" swPair.paths.findAll { it != mainPath }.each { pathHelper.makePathMorePreferable(mainPath, it) } - def flow = 
flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swPair) and: "A potential 'backup' path that shares common isl has the preferred cost (will be preferred during reroute)" northbound.deleteLinkProps(northbound.getLinkProps(topology.isls)) @@ -540,31 +528,31 @@ triggering one more reroute of the current path" switchHelper.shapeSwitchesTraffic([swPair.dst], new TrafficControlData(1000)) //break the second ISL when the first reroute has started and is in progress wait(WAIT_OFFSET) { - assert flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION).size() == 1 + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).size() == 1 } antiflap.portDown(commonIsl.srcSwitch.dpId, commonIsl.srcPort) TimeUnit.SECONDS.sleep(rerouteDelay) //first reroute should not be finished at this point, otherwise increase the latency to switches - assert ![REROUTE_SUCCESS, REROUTE_FAIL].contains( - flowHelper.getEarliestHistoryEntryByAction(flow.flowId, REROUTE_ACTION).payload.last().action) + assert ![REROUTE.payloadLastAction, REROUTE_FAILED.payloadLastAction].contains( + flow.retrieveFlowHistory().getEntriesByType(REROUTE).first().payload.last().action) then: "System reroutes the flow twice and flow ends up in UP state" wait(PATH_INSTALLATION_TIME * 2) { - def reroutes = flowHelper.getHistoryEntriesByAction(flow.getFlowId(), REROUTE_ACTION) + def reroutes = flow.retrieveFlowHistory().getEntriesByType(REROUTE) assert reroutes.size() == 2 //reroute queue, second reroute starts right after first is finished - reroutes.each { assert it.payload.last().action == REROUTE_SUCCESS } - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + reroutes.each { assert it.payload.last().action == REROUTE.payloadLastAction } + assert flow.retrieveFlowStatus().status == FlowState.UP } and: "New flow path avoids both main and backup paths as well as broken ISLs" - def actualIsls = pathHelper.getInvolvedIsls(northbound.getFlowPath(flow.flowId)) + def actualIsls = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() [commonIsl, commonIsl.reversed, mainPathUniqueIsl, mainPathUniqueIsl.reversed].each { assert !actualIsls.contains(it) } and: "Flow is pingable" retry(3, 0) { //Was unstable on Jenkins builds. Fresh env problem? - with(northbound.pingFlow(flow.flowId, new PingInput())) { + with(flow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -572,30 +560,6 @@ triggering one more reroute of the current path" } } - def singleSwitchFlow() { - flowHelperV2.singleSwitchFlow(topology.getActiveSwitches().first()) - } - - def noIntermediateSwitchFlow(int minAltPathsCount = 0, boolean getAllPaths = false) { - def flowWithPaths = getFlowWithPaths(switchPairs.all().neighbouring(), - minAltPathsCount) - return getAllPaths ? flowWithPaths : flowWithPaths[0] - } - - def intermediateSwitchFlow(int minAltPathsCount = 0, boolean getAllPaths = false) { - def flowWithPaths = getFlowWithPaths(switchPairs.all().nonNeighbouring(), minAltPathsCount) - return getAllPaths ? 
flowWithPaths : flowWithPaths[0] - } - - def getFlowWithPaths(SwitchPairs switchPairs, int minAltPathsCount) { - def switchPair = switchPairs.withAtLeastNPaths(minAltPathsCount + 1).random() - return [flowHelperV2.randomFlow(switchPair), switchPair.paths] - } - - def getVxlanFlowWithPaths(SwitchPairs switchPairs, int minAltPathsCount) { - def switchPair = switchPairs.withBothSwitchesVxLanEnabled().withAtLeastNPaths(minAltPathsCount + 1).random() - return [flowHelperV2.randomFlow(switchPair), switchPair.paths] - } } @@ -605,8 +569,11 @@ triggering one more reroute of the current path" class AutoRerouteIsolatedSpec extends HealthCheckSpecification { //isolation: global toggle flowsRerouteOnIslDiscoveryEnabled is changed + @Autowired + @Shared + FlowFactory flowFactory - @Tags(ISL_RECOVER_ON_FAIL) + @Tags([ISL_RECOVER_ON_FAIL, SWITCH_RECOVER_ON_FAIL]) def "Flow in 'Down' status is rerouted after switchUp event"() { given: "First switch pair with two parallel links and two available paths" assumeTrue(rerouteDelay * 2 < discoveryTimeout, "Reroute should be completed before link is FAILED") @@ -651,23 +618,19 @@ class AutoRerouteIsolatedSpec extends HealthCheckSpecification { pathHelper.makePathMorePreferable(firstFlowMainPath, firstFlowBackupPath) and: "First flow without transit switches" - def firstFlow = flowHelperV2.randomFlow(switchPair1) - flowHelperV2.addFlow(firstFlow) - assert PathHelper.convert(northbound.getFlowPath(firstFlow.flowId)) == firstFlowMainPath + def firstFlow = flowFactory.getRandom(switchPair1) + def initialFirstFlowPath = firstFlow.retrieveAllEntityPaths() + assert initialFirstFlowPath.getPathNodes() == firstFlowMainPath and: "Second flow with transit switch" - def secondFlow = flowHelperV2.randomFlow(switchPair2) - flowHelperV2.addFlow(secondFlow) + def secondFlow = flowFactory.getRandom(switchPair2) //we are not confident which of 2 parallel isls are picked, so just recheck it - secondFlowPath = pathHelper.convert(northbound.getFlowPath(secondFlow.flowId)) + def initialSecondFlowPath = secondFlow.retrieveAllEntityPaths() + secondFlowPath = initialSecondFlowPath.getPathNodes() when: "Disconnect the src switch of the first flow from the controller" - def islToBreak = pathHelper.getInvolvedIsls(firstFlowMainPath).first() - def blockData = lockKeeper.knockoutSwitch(switchPair1.src, RW) - wait(discoveryTimeout + WAIT_OFFSET) { - assert northbound.getSwitch(switchPair1.src.dpId).state == SwitchChangeType.DEACTIVATED - } - def isSwitchActivated = false + def islToBreak = initialFirstFlowPath.flowPath.getInvolvedIsls().first() + def blockData = switchHelper.knockoutSwitch(switchPair1.src, RW) and: "Mark the switch as ACTIVE in db" // just to reproduce #3131 database.setSwitchStatus(switchPair1.src.dpId, SwitchStatus.ACTIVE) @@ -676,13 +639,12 @@ class AutoRerouteIsolatedSpec extends HealthCheckSpecification { antiflap.portDown(islToBreak.dstSwitch.dpId, islToBreak.dstPort) then: "System tries to reroute a flow with transit switch" - def flowPathMap = [(firstFlow.flowId): firstFlowMainPath, (secondFlow.flowId): secondFlowPath] wait(WAIT_OFFSET * 3) { - def firstFlowHistory = flowHelper.getHistoryEntriesByAction(firstFlow.flowId, REROUTE_ACTION) - assert firstFlowHistory.last().payload.find { it.action == REROUTE_FAIL } + def firstFlowHistory = firstFlow.retrieveFlowHistory().getEntriesByType(REROUTE) + assert firstFlowHistory.last().payload.find { it.action == REROUTE_FAILED.payloadLastAction } //check that system doesn't retry to reroute the firstFlow (its src is down, no 
need to retry) assert !firstFlowHistory.find { it.taskId =~ /.+ : retry #1/ } - def secondFlowHistory = flowHelper.getHistoryEntriesByAction(secondFlow.flowId, REROUTE_ACTION) + def secondFlowHistory = secondFlow.retrieveFlowHistory().getEntriesByType(REROUTE) /*there should be original reroute + 3 retries or original reroute + 2 retries (sometimes the system does not try to retry reroute for linkDown event, because the system gets 'ISL timeout' event for other ISLs) @@ -691,8 +653,8 @@ class AutoRerouteIsolatedSpec extends HealthCheckSpecification { assert secondFlowHistory.size() == 4 || (secondFlowHistory.size() == 3 && secondFlowHistory.last().taskId.contains("ignore_bw true")) withPool { - [firstFlow.flowId, secondFlow.flowId].eachParallel { String flowId -> - assert PathHelper.convert(northbound.getFlowPath(flowId)) == flowPathMap[flowId] + [firstFlow, secondFlow].eachParallel { FlowExtended flow -> + assert flow.retrieveAllEntityPaths() == (flow.flowId == firstFlow.flowId ? initialFirstFlowPath : initialSecondFlowPath) } } } @@ -708,20 +670,20 @@ class AutoRerouteIsolatedSpec extends HealthCheckSpecification { nonRtIsls.forEach { assert islUtils.getIslInfo(allLinks, it).get().state == FAILED } } wait(WAIT_OFFSET) { - def prevHistorySizes = [firstFlow.flowId, secondFlow.flowId].collect { flowHelper.getHistorySize(it) } + def prevHistorySizes = [firstFlow, secondFlow].collect { it.retrieveHistoryEventsNumber() } timedLoop(4) { //history size should no longer change for both flows, all retries should give up - def newHistorySizes = [firstFlow.flowId, secondFlow.flowId].collect { flowHelper.getHistorySize(it) } + def newHistorySizes = [firstFlow, secondFlow].collect { it.retrieveHistoryEventsNumber() } assert newHistorySizes == prevHistorySizes withPool { - [firstFlow.flowId, secondFlow.flowId].eachParallel { String flowId -> - assert northbound.getFlowStatus(flowId).status == FlowState.DOWN + [firstFlow, secondFlow].eachParallel { FlowExtended flow -> + assert flow.retrieveFlowStatus().status == FlowState.DOWN } } sleep(500) } - assert northboundV2.getFlow(firstFlow.flowId).statusInfo =~ /ISL (.*) become INACTIVE(.*)/ - assert northboundV2.getFlow(secondFlow.flowId).statusInfo == "No path found. \ + assert firstFlow.retrieveDetails().statusInfo =~ /ISL (.*) become INACTIVE(.*)/ + assert secondFlow.retrieveDetails().statusInfo == "No path found. 
\ Switch $secondFlow.source.switchId doesn't have links with enough bandwidth, \ Failed to find path with requested bandwidth= ignored" } @@ -729,15 +691,14 @@ Failed to find path with requested bandwidth= ignored" when: "Connect the switch back to the controller" database.setSwitchStatus(switchPair1.src.dpId, SwitchStatus.INACTIVE) // set real status switchHelper.reviveSwitch(switchPair1.src, blockData) - isSwitchActivated = true then: "System tries to reroute the flow on switchUp event" /* there is a risk that flows won't find a path during reroute, because switch is online but ISLs are not discovered yet, that's why we check that system tries to reroute flow on the switchUp event and don't check that flow is UP */ wait(WAIT_OFFSET) { - [firstFlow, secondFlow].each { - assert flowHelper.getHistoryEntriesByAction(it.flowId, REROUTE_ACTION).find { + [firstFlow, secondFlow].each { FlowExtended flow -> + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).find { it.details == "Reason: Switch '$switchPair1.src.dpId' online" } } @@ -751,54 +712,52 @@ Failed to find path with requested bandwidth= ignored" * and test become unstable*/ def "Strict bandwidth false: Flow is rerouted even if there is no available bandwidth on alternative path, sets status to Degraded"() { given: "A flow with one alternative path at least" - List helperFlows = [] - def data = noIntermediateSwitchFlow(1, true) - FlowRequestV2 flow = data[0] - List> allFlowPaths = data[1] - flow.strictBandwidth = false - flowHelperV2.addFlow(flow) - def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(1).random() + def allFlowPaths = switchPair.paths + def flow = flowFactory.getBuilder(switchPair).withStrictBandwidth(false).build() + .create() + def initialPath = flow.retrieveAllEntityPaths() and: "Alt path ISLs have not enough bandwidth to host the flow" - def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) - def involvedIsls = pathHelper.getInvolvedIsls(currentPath) + def involvedIsls = initialPath.flowPath.getInvolvedIsls() def altIsls = topology.getRelatedIsls(topologyHelper.getSwitch(flow.getSource().getSwitchId())) - involvedIsls + List busyEndpoints = flow.occupiedEndpoints() altIsls.each {isl -> def linkProp = islUtils.toLinkProps(isl, [cost: "1"]) pathHelper.updateIslsCost([isl], 1) - def helperFlow = flowHelperV2.randomFlow(isl.srcSwitch, isl.dstSwitch, false, [flow, *helperFlows]).tap { - maximumBandwidth = northbound.getLink(isl).availableBandwidth - flow.maximumBandwidth + 1 - } - flowHelperV2.addFlow(helperFlow) - helperFlows << helperFlow + def extraFlow = flowFactory.getBuilder(isl.srcSwitch, isl.dstSwitch, false, busyEndpoints) + .withBandwidth(northbound.getLink(isl).availableBandwidth - flow.maximumBandwidth + 1).build() + .create() + busyEndpoints.addAll(extraFlow.occupiedEndpoints()) northbound.deleteLinkProps([linkProp]) } when: "Fail a flow ISL (bring switch port down)" Set altFlowIsls = [] - allFlowPaths.findAll { it != flowPath }.each { altFlowIsls.addAll(pathHelper.getInvolvedIsls(it)) } + allFlowPaths.findAll { it != initialPath.getPathNodes() }.each { altFlowIsls.addAll(pathHelper.getInvolvedIsls(it)) } def islToFail = involvedIsls.get(0) islHelper.breakIsl(islToFail) then: "Flow history shows two reroute attempts, second one succeeds with ignore bw" - List history - wait(rerouteDelay + WAIT_OFFSET) { - history = northbound.getFlowHistory(flow.flowId) + List history + wait(rerouteDelay + 
WAIT_OFFSET * 2) { + history = flow.retrieveFlowHistory().getEntriesByType(REROUTE) verifyAll { - history[-2].payload.last().action == REROUTE_FAIL + history[-2].payload.last().action == REROUTE_FAILED.payloadLastAction history[-2].payload.last().details.endsWith( "Failed to find path with requested bandwidth=$flow.maximumBandwidth") - history[-1].payload.last().action == REROUTE_COMPLETE + history[-1].payload.last().action == "Flow reroute completed" } } and: "The flow has changed path and has DEGRADED status" - northboundV2.getFlowStatus(flow.flowId).status == FlowState.DEGRADED - List pathAfterReroute1 = PathHelper.convert(northbound.getFlowPath(flow.flowId)) - pathAfterReroute1 != flowPath + flow.retrieveFlowStatus().status == FlowState.DEGRADED + def pathAfterReroute1 = flow.retrieveAllEntityPaths() + pathAfterReroute1 != initialPath when: "Try to manually reroute the degraded flow, while there is still not enough bandwidth" - northboundV2.rerouteFlow(flow.flowId) + def manualRerouteTime = System.currentTimeSeconds() + flow.reroute() then: "Error is returned, stating a readable reason" def error = thrown(HttpClientErrorException) @@ -806,12 +765,12 @@ Failed to find path with requested bandwidth= ignored" and: "Flow remains DEGRADED and on the same path" wait(rerouteDelay + WAIT_OFFSET) { - assert flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION).findAll { + assert flow.retrieveFlowHistory(manualRerouteTime).getEntriesByType(REROUTE).findAll { it.details == "Reason: initiated via Northbound" }.size() == 2 //reroute + retry - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DEGRADED + assert flow.retrieveFlowStatus().status == FlowState.DEGRADED } - PathHelper.convert(northbound.getFlowPath(flow.flowId)) == pathAfterReroute1 + flow.retrieveAllEntityPaths() == pathAfterReroute1 when: "Trigger auto reroute by blinking not involved(in flow path) isl" def islToBlink = topology.islsForActiveSwitches.find { @@ -823,31 +782,21 @@ Failed to find path with requested bandwidth= ignored" then: "System tries to reroute the DEGRADED flow" and: "Flow remains DEGRADED and on the same path" wait(rerouteDelay + WAIT_OFFSET) { - assert flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION).findAll { + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).findAll { it.details.contains("status become ACTIVE") }.size() == 2 //reroute + retry - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DEGRADED + assert flow.retrieveFlowStatus().status == FlowState.DEGRADED } - PathHelper.convert(northbound.getFlowPath(flow.flowId)) == pathAfterReroute1 + flow.retrieveAllEntityPaths() == pathAfterReroute1 when: "Broken ISL on the original path is back online" islHelper.restoreIsl(islToFail) then: "Flow is rerouted to the original path to UP state" wait(rerouteDelay + WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + assert flow.retrieveFlowStatus().status == FlowState.UP } } - def noIntermediateSwitchFlow(int minAltPathsCount = 0, boolean getAllPaths = false) { - def flowWithPaths = getFlowWithPaths(switchPairs.all().neighbouring().getSwitchPairs(), minAltPathsCount) - return getAllPaths ? 
flowWithPaths : flowWithPaths[0] - } - - def getFlowWithPaths(List switchPairs, int minAltPathsCount) { - def switchPair = switchPairs.find { it.paths.size() > minAltPathsCount } ?: - assumeTrue(false, "No suiting switches found") - return [flowHelperV2.randomFlow(switchPair), switchPair.paths] - } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ConnectedDevicesSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ConnectedDevicesSpec.groovy index 82b84ad7730..fda53732a96 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ConnectedDevicesSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ConnectedDevicesSpec.groovy @@ -287,7 +287,8 @@ srcDevices=#newSrcEnabled, dstDevices=#newDstEnabled"() { then: "Error is returned" def e = thrown(HttpClientErrorException) - new FlowNotFoundExpectedError(flow.getFlowId(), ~/Could not get connected devices for non existent flow/).matches(e) + new FlowNotFoundExpectedError("Flow ${flow.flowId} not found", + ~/Could not get connected devices for non existent flow/).matches(e) } def "Able to swap flow paths with connected devices (srcDevices=#srcEnabled, dstDevices=#dstEnabled)"() { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowLoopSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowLoopSpec.groovy index c6234af8d12..bcf3a4e7f2e 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowLoopSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowLoopSpec.groovy @@ -214,7 +214,7 @@ class FlowLoopSpec extends HealthCheckSpecification { .withBothSwitchesVxLanEnabled() .random(), expectedFlowEntity: { SwitchPair swPair -> - flowFactory.getBuilder(swPair).withEncapsulationType(FlowEncapsulationType.VXLAN) + flowFactory.getBuilder(swPair).withEncapsulationType(FlowEncapsulationType.VXLAN).build() } ], [ diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowMonitoringSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowMonitoringSpec.groovy index 8e593b58a55..4e0df22945c 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowMonitoringSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowMonitoringSpec.groovy @@ -5,7 +5,6 @@ import static org.openkilda.functionaltests.ResourceLockConstants.S42_TOGGLE import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY import static org.openkilda.functionaltests.extension.tags.Tag.VIRTUAL import static org.openkilda.functionaltests.helpers.Wrappers.wait -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.CLEAN_LINK_DELAY import static org.openkilda.functionaltests.model.stats.Direction.FORWARD import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RTT import static org.openkilda.functionaltests.model.stats.Origin.FLOW_MONITORING @@ -18,9 +17,9 @@ import org.openkilda.functionaltests.helpers.model.FlowActionType import org.openkilda.functionaltests.helpers.model.PathComputationStrategy import 
org.openkilda.functionaltests.helpers.model.SwitchPair import org.openkilda.functionaltests.model.cleanup.CleanupAfter -import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.functionaltests.model.stats.FlowStats import org.openkilda.messaging.info.event.PathNode +import org.openkilda.messaging.model.system.FeatureTogglesDto import org.openkilda.testing.model.topology.TopologyDefinition.Isl import org.springframework.beans.factory.annotation.Autowired @@ -67,10 +66,10 @@ class FlowMonitoringSpec extends HealthCheckSpecification { mainIsls = pathHelper.getInvolvedIsls(mainPath) alternativeIsls = pathHelper.getInvolvedIsls(alternativePath) //deactivate other paths for more clear experiment - def isls = mainIsls + alternativeIsls - islsToBreak = switchPair.paths.findAll { !paths.contains(it) } - .collect { pathHelper.getInvolvedIsls(it).find { !isls.contains(it) && !isls.contains(it.reversed) } } - .unique { [it, it.reversed].sort() } + def isls = mainIsls.collectMany { [it, it.reversed]} + alternativeIsls.collectMany { [it, it.reversed]} + islsToBreak = switchPair.paths.findAll{ !(it.containsAll(mainPath) || it.containsAll(alternativePath))} + .collectMany{ pathHelper.getInvolvedIsls(it)}.unique() + .collectMany{ [it, it.reversed] }.findAll { !isls.contains(it)} islHelper.breakIsls(islsToBreak, CleanupAfter.CLASS) } @@ -133,8 +132,9 @@ and flowLatencyMonitoringReactions is disabled in featureToggle"() { and: "flowLatencyMonitoringReactions is disabled in featureToggle" and: "Disable s42 in featureToggle for generating flow-monitoring stats" - featureToggles.flowLatencyMonitoringReactions(false) - featureToggles.server42FlowRtt(false) + featureToggles.toggleMultipleFeatures(FeatureTogglesDto.builder() + .flowLatencyMonitoringReactions(false) + .server42FlowRtt(false).build()) and : "A flow with max_latency 210" def flow = flowFactory.getBuilder(switchPair) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowValidationNegativeSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowValidationNegativeSpec.groovy index 731f7c58f9c..9994341ca00 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowValidationNegativeSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/FlowValidationNegativeSpec.groovy @@ -1,18 +1,25 @@ package org.openkilda.functionaltests.spec.flows -import groovy.util.logging.Slf4j +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.messaging.payload.flow.FlowState.UP +import static org.openkilda.testing.Constants.NON_EXISTENT_FLOW_ID + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.IterationTag +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.command.switches.DeleteRulesAction import org.openkilda.messaging.error.MessageError import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie import org.openkilda.northbound.dto.v1.flows.FlowValidationDto + +import groovy.util.logging.Slf4j +import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import 
spock.lang.Narrative - -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.testing.Constants.NON_EXISTENT_FLOW_ID +import spock.lang.Shared @Slf4j @Narrative("""The specification covers the following scenarios: @@ -26,38 +33,39 @@ import static org.openkilda.testing.Constants.NON_EXISTENT_FLOW_ID class FlowValidationNegativeSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory + + @IterationTag(tags = [SMOKE], iterationNameRegex = /reverse/) def "Flow and switch validation should fail in case of missing rules with #flowConfig configuration [#flowType]"() { given: "Two flows with #flowConfig configuration" - def flowToBreak = flowHelperV2.randomFlow(switchPair, false) - def intactFlow = flowHelperV2.randomFlow(switchPair, false, [flowToBreak]) - - flowHelperV2.addFlow(flowToBreak) - flowHelperV2.addFlow(intactFlow) + def flowToBreak = flowFactory.getRandom(switchPair, false) + def intactFlow = flowFactory.getRandom( + switchPair, false, UP, flowToBreak.occupiedEndpoints()) and: "Both flows have the same switches in path" - def damagedFlowSwitches = pathHelper.getInvolvedSwitches(flowToBreak.flowId)*.dpId - def intactFlowSwitches = pathHelper.getInvolvedSwitches(intactFlow.flowId)*.dpId - assert damagedFlowSwitches.equals(intactFlowSwitches) + def damagedFlowSwitches = flowToBreak.retrieveAllEntityPaths().getInvolvedSwitches() + def intactFlowSwitches = intactFlow.retrieveAllEntityPaths().getInvolvedSwitches() + assert damagedFlowSwitches == intactFlowSwitches when: "#flowType flow rule from first flow on #switchNo switch gets deleted" - def cookieToDelete = flowType == "forward" ? database.getFlow(flowToBreak.flowId).forwardPath.cookie.value : - database.getFlow(flowToBreak.flowId).reversePath.cookie.value + def cookieToDelete = flowType == "forward" ? 
flowToBreak.retrieveDetailsFromDB().forwardPath.cookie.value : + flowToBreak.retrieveDetailsFromDB().reversePath.cookie.value SwitchId damagedSwitch = damagedFlowSwitches[item] - switchHelper.deleteSwitchRules(damagedSwitch, cookieToDelete) + def swRules = switchRulesFactory.get(damagedSwitch) + swRules.delete(cookieToDelete) then: "Intact flow should be validated successfully" - def intactFlowValidation = northbound.validateFlow(intactFlow.flowId) - intactFlowValidation.each { direction -> - assert direction.discrepancies.empty - assert direction.asExpected - } + intactFlow.validateAndCollectDiscrepancies().isEmpty() and: "Damaged #flowType flow validation should fail, while other direction should be validated successfully" - def brokenFlowValidation = northbound.validateFlow(flowToBreak.flowId) - brokenFlowValidation.findAll { it.discrepancies.empty && it.asExpected }.size() == 1 - def damagedDirection = brokenFlowValidation.findAll { !it.discrepancies.empty && !it.asExpected } - damagedDirection.size() == 1 + flowToBreak.validateAndCollectDiscrepancies().size() == 1 + def damagedDirection = flowToBreak.validate().findAll { !it.discrepancies.empty && !it.asExpected } and: "Flow rule discrepancy should contain dpID of the affected switch and cookie of the damaged flow" def rules = findRulesDiscrepancies(damagedDirection[0]) @@ -149,48 +157,41 @@ class FlowValidationNegativeSpec extends HealthCheckSpecification { def "Able to detect discrepancies for a flow with protected path"() { when: "Create a flow with protected path" def switchPair = switchPairs.all().neighbouring().withAtLeastNNonOverlappingPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true) + .build().create() then: "Flow with protected path is created" - northbound.getFlowPath(flow.flowId).protectedPath + flow.retrieveAllEntityPaths().getPathNodes(Direction.FORWARD, true) and: "Validation of flow with protected path must be successful" - northbound.validateFlow(flow.flowId).each { direction -> - assert direction.discrepancies.empty - } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Delete rule of protected path on the srcSwitch" - def flowPathInfo = northbound.getFlowPath(flow.flowId) - def protectedPath = flowPathInfo.protectedPath.forwardPath - def rules = northbound.getSwitchRules(switchPair.src.dpId).flowEntries.findAll { - !new Cookie(it.cookie).serviceFlag - } - + def flowPathInfo = flow.retrieveAllEntityPaths() + def protectedPath = flowPathInfo.getPathNodes(Direction.FORWARD, true) + def swRules = switchRulesFactory.get(switchPair.src.dpId) + def rules = swRules.getRules().findAll { !new Cookie(it.cookie).serviceFlag } def ruleToDelete = rules.find { - it.instructions?.applyActions?.flowOutput == protectedPath[0].inputPort.toString() && - it.match.inPort == protectedPath[0].outputPort.toString() + it.match.inPort == protectedPath[0].portNo.toString() }.cookie - - switchHelper.deleteSwitchRules(switchPair.src.dpId, ruleToDelete) + swRules.delete(ruleToDelete) then: "Flow validate detects discrepancies" //TODO(andriidovhan) try to extend this test when the issues/2302 is fixed - def responseValidateFlow = northbound.validateFlow(flow.flowId).findAll { !it.discrepancies.empty }*.discrepancies - assert responseValidateFlow.size() == 1 - responseValidateFlow[0].expectedValue[0].contains(ruleToDelete.toString()) + def validateFlow = 
flow.validateAndCollectDiscrepancies() + assert validateFlow.size() == 1 + validateFlow.toString().contains(ruleToDelete.toString()) when: "Delete all rules except default on the all involved switches" - def mainPath = flowPathInfo.forwardPath - def involvedSwitchIds = (mainPath*.switchId + protectedPath*.switchId).unique() + def flowPath = flow.retrieveAllEntityPaths() + def involvedSwitchIds = flowPath.getInvolvedSwitches() involvedSwitchIds.each { switchId -> switchHelper.deleteSwitchRules(switchId, DeleteRulesAction.IGNORE_DEFAULTS) } then: "Flow validate detects discrepancies for all deleted rules" - def responseValidateFlow2 = northbound.validateFlow(flow.flowId).findAll { !it.discrepancies.empty }*.discrepancies - assert responseValidateFlow2.size() == 4 + flow.validateAndCollectDiscrepancies().size() == 4 } /** diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/IntentionalRerouteSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/IntentionalRerouteSpec.groovy index ec77c879ce3..485e5be6e56 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/IntentionalRerouteSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/IntentionalRerouteSpec.groovy @@ -1,14 +1,25 @@ package org.openkilda.functionaltests.spec.flows +import static com.shazam.shazamcrest.matcher.Matchers.sameBeanAs +import static groovyx.gpars.GParsPool.withPool +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_PROPS_DB_RESET +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.testing.Constants.DEFAULT_COST +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static spock.util.matcher.HamcrestSupport.expect + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType import org.openkilda.messaging.info.event.PathNode import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.FlowEncapsulationType import org.openkilda.testing.service.traffexam.TraffExamService -import org.openkilda.testing.tools.FlowTrafficExamBuilder + import org.springframework.beans.factory.annotation.Autowired import spock.lang.Narrative import spock.lang.See @@ -16,43 +27,37 @@ import spock.lang.Shared import javax.inject.Provider -import static com.shazam.shazamcrest.matcher.Matchers.sameBeanAs -import static groovyx.gpars.GParsPool.withPool -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_PROPS_DB_RESET -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.testing.Constants.DEFAULT_COST -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static spock.util.matcher.HamcrestSupport.expect - @See("https://github.com/telstra/open-kilda/tree/develop/docs/design/hub-and-spoke/reroute") 
@Narrative("Verify that on-demand reroute operations are performed accurately.") class IntentionalRerouteSpec extends HealthCheckSpecification { - @Autowired @Shared + @Autowired + @Shared Provider traffExamProvider + @Autowired + @Shared + FlowFactory flowFactory @Tags(ISL_PROPS_DB_RESET) def "Not able to reroute to a path with not enough bandwidth available"() { given: "A flow with alternate paths available" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.maximumBandwidth = 10000 - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(10000) + .build().create() - def currentPathDto = northbound.getFlowPath(flow.flowId) - def currentPath = PathHelper.convert(currentPathDto) - def currentPathNodesV2 = PathHelper.convertToNodesV2(currentPathDto) + def currentPathEntities = flow.retrieveAllEntityPaths() + def currentPath = currentPathEntities.getPathNodes() + def currentPathNodesV2 = currentPathEntities.flowPath.path.forward.nodes.toPathNodeV2() when: "Make the current path less preferable than alternatives" def alternativePaths = switchPair.paths.findAll { it != currentPath } alternativePaths.each { pathHelper.makePathMorePreferable(it, currentPath) } and: "Make all alternative paths to have not enough bandwidth to handle the flow" - def currentIsls = pathHelper.getInvolvedIsls(currentPath) - def changedIsls = alternativePaths.collect { altPath -> + def currentIsls = currentPathEntities.flowPath.getInvolvedIsls() + alternativePaths.collect { altPath -> def thinIsl = pathHelper.getInvolvedIsls(altPath).find { !currentIsls.contains(it) && !currentIsls.contains(it.reversed) } @@ -65,24 +70,25 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { } and: "Init a reroute to a more preferable path" - def rerouteResponse = northboundV2.rerouteFlow(flow.flowId) + def rerouteResponse = flow.reroute() then: "The flow is NOT rerouted because of not enough bandwidth on alternative paths" - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } !rerouteResponse.rerouted rerouteResponse.path.nodes == currentPathNodesV2 - PathHelper.convert(northbound.getFlowPath(flow.flowId)) == currentPath + flow.retrieveAllEntityPaths().getPathNodes() == currentPath } @Tags(ISL_PROPS_DB_RESET) def "Able to reroute to a better path if it has enough bandwidth"() { given: "A flow with alternate paths available" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.maximumBandwidth = 10000 - flow.encapsulationType = FlowEncapsulationType.TRANSIT_VLAN - flowHelperV2.addFlow(flow) - def currentPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(10000) + .withEncapsulationType(FlowEncapsulationType.TRANSIT_VLAN) + .build().create() + def currentPathEntities = flow.retrieveAllEntityPaths() + def currentPath = currentPathEntities.getPathNodes() when: "Make one of the alternative paths to be the most preferable among all others" def preferableAltPath = switchPair.paths.find { it != currentPath } @@ -91,7 +97,7 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { } and: "Make the future path to have exact bandwidth to handle the flow" - def currentIsls = pathHelper.getInvolvedIsls(currentPath) 
+ def currentIsls = currentPathEntities.flowPath.getInvolvedIsls() def thinIsl = pathHelper.getInvolvedIsls(preferableAltPath).find { !currentIsls.contains(it) && !currentIsls.contains(it.reversed) } @@ -101,21 +107,21 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { } and: "Init a reroute of the flow" - def rerouteResponse = northboundV2.rerouteFlow(flow.flowId) + def rerouteResponse = flow.reroute() - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } then: "The flow is successfully rerouted and goes through the preferable path" - def pathDto = northbound.getFlowPath(flow.flowId) - def newPath = PathHelper.convert(pathDto) - def newPathNodesV2 = PathHelper.convertToNodesV2(pathDto) + def newPathEntities = flow.retrieveAllEntityPaths() + def newPath = newPathEntities.getPathNodes() + def newPathNodesV2 = newPathEntities.flowPath.path.forward.nodes.toPathNodeV2() rerouteResponse.rerouted rerouteResponse.path.nodes == newPathNodesV2 newPath == preferableAltPath - pathHelper.getInvolvedIsls(newPath).contains(thinIsl) - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + newPathEntities.flowPath.getInvolvedIsls().contains(thinIsl) + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } and: "'Thin' ISL has 0 available bandwidth left" Wrappers.wait(WAIT_OFFSET) { assert islUtils.getIslInfo(thinIsl).get().availableBandwidth == 0 } @@ -130,7 +136,7 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { given: "An unmetered flow going through a long not preferable path(reroute potential)" //will be available on virtual as soon as we get the latest iperf installed in lab-service images assumeTrue(topology.activeTraffGens.size() >= 2, -"There should be at least two active traffgens for test execution") + "There should be at least two active traffgens for test execution") def src = topology.activeSwitches.find { it.traffGens } def dst = (topology.activeSwitches - src).find { it.traffGens } @@ -140,11 +146,11 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { def changedIsls = allPaths.findAll { it != longestPath } .collect { pathHelper.makePathMorePreferable(longestPath, it) }.findAll() //and create the flow that uses the long path - def flow = flowHelperV2.randomFlow(src, dst) - flow.maximumBandwidth = 0 - flow.ignoreBandwidth = true - flowHelperV2.addFlow(flow) - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == longestPath + def flowEntity = flowFactory.getBuilder(src, dst) + .withBandwidth(0) + .withIgnoreBandwidth(true) + def flow = flowEntity.build().create() + assert flow.retrieveAllEntityPaths().getPathNodes() == longestPath //now make another long path more preferable, for reroute to rebuild the rules on other switches in the future pathHelper.updateIslsCost((changedIsls + changedIsls*.reversed) as List, DEFAULT_COST) def potentialNewPath = allPaths.findAll { it != longestPath }.max { it.size() } @@ -153,7 +159,7 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { when: "Start traffic examination" def traffExam = traffExamProvider.get() def bw = 100000 // 100 Mbps - def exam = new FlowTrafficExamBuilder(topology, traffExam).buildBidirectionalExam(flowHelperV2.toV1(flow), bw, 20) + def exam = flow.traffExam(traffExam, bw, 20) [exam.forward, exam.reverse].each { direction -> direction.udp = true 
def resources = traffExam.startExam(direction) @@ -162,13 +168,13 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { and: "While traffic flow is active, request a flow reroute" [exam.forward, exam.reverse].each { assert !traffExam.isFinished(it) } - def reroute = northboundV2.rerouteFlow(flow.flowId) + def reroute = flow.reroute() then: "Flow is rerouted" reroute.rerouted expect reroute.path.nodes, sameBeanAs(PathHelper.convertToNodesV2(potentialNewPath)) .ignoring("segmentLatency") - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } and: "Traffic examination result shows acceptable packet loss percentage" def examReports = [exam.forward, exam.reverse].collect { traffExam.waitExam(it) } @@ -184,21 +190,22 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { def "Able to reroute to a path with not enough bandwidth available in case ignoreBandwidth=true"() { given: "A flow with alternate paths available" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.maximumBandwidth = 10000 - flow.ignoreBandwidth = true - flow.encapsulationType = FlowEncapsulationType.TRANSIT_VLAN - flowHelperV2.addFlow(flow) - def currentPathDto = northbound.getFlowPath(flow.flowId) - def currentPath = PathHelper.convert(currentPathDto) - def currentPathNodesV2 = PathHelper.convertToNodesV2(currentPathDto) + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(10000) + .withIgnoreBandwidth(true) + .withEncapsulationType(FlowEncapsulationType.TRANSIT_VLAN) + .build().create() + + def currentPathEntities = flow.retrieveAllEntityPaths() + def currentPath = currentPathEntities.getPathNodes() + def currentPathNodesV2 = currentPathEntities.flowPath.path.forward.nodes.toPathNodeV2() when: "Make the current path less preferable than alternatives" def alternativePaths = switchPair.paths.findAll { it != currentPath } alternativePaths.each { pathHelper.makePathMorePreferable(it, currentPath) } and: "Make all alternative paths to have not enough bandwidth to handle the flow" - def currentIsls = pathHelper.getInvolvedIsls(currentPath) + def currentIsls = currentPathEntities.flowPath.getInvolvedIsls() def newBw = flow.maximumBandwidth - 1 def changedIsls = alternativePaths.collect { altPath -> def thinIsl = pathHelper.getInvolvedIsls(altPath).find { @@ -212,17 +219,17 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { } and: "Init a reroute to a more preferable path" - def rerouteResponse = northboundV2.rerouteFlow(flow.flowId) + def rerouteResponse = flow.reroute() then: "The flow is rerouted because ignoreBandwidth=true" rerouteResponse.rerouted rerouteResponse.path.nodes != currentPathNodesV2 - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } - def updatedPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def updatedPath = flow.retrieveAllEntityPaths().getPathNodes() updatedPath != currentPath - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } and: "Available bandwidth was not changed while rerouting due to ignoreBandwidth=true" def allLinks = 
northbound.getAllLinks() @@ -239,19 +246,20 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().neighbouring().withBothSwitchesVxLanEnabled().withTraffgensOnBothEnds().random() def availablePaths = switchPair.paths.findAll { pathHelper.getInvolvedSwitches(it).find { it.noviflow }} - def flow = flowHelperV2.randomFlow(switchPair) - flow.maximumBandwidth = 0 - flow.ignoreBandwidth = true - flow.encapsulationType = FlowEncapsulationType.VXLAN - flowHelperV2.addFlow(flow) - def altPaths = availablePaths.findAll { it != pathHelper.convert(northbound.getFlowPath(flow.flowId)) } + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(0) + .withIgnoreBandwidth(true) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .build().create() + def currentPath = flow.retrieveAllEntityPaths().getPathNodes() + def altPaths = availablePaths.findAll { it != currentPath } def potentialNewPath = altPaths[0] availablePaths.findAll { it != potentialNewPath }.each { pathHelper.makePathMorePreferable(potentialNewPath, it) } when: "Start traffic examination" def traffExam = traffExamProvider.get() def bw = 100000 // 100 Mbps - def exam = new FlowTrafficExamBuilder(topology, traffExam).buildBidirectionalExam(flowHelperV2.toV1(flow), bw) + def exam = flow.traffExam(traffExam, bw, null) withPool { [exam.forward, exam.reverse].eachParallel { direction -> direction.udp = true @@ -262,11 +270,11 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { and: "While traffic flow is active, request a flow reroute" [exam.forward, exam.reverse].each { assert !traffExam.isFinished(it) } - def reroute = northboundV2.rerouteFlow(flow.flowId) + def reroute = flow.reroute() then: "Flow is rerouted" reroute.rerouted - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } and: "Traffic examination result shows acceptable packet loss percentage" def examReports = [exam.forward, exam.reverse].collect { traffExam.waitExam(it) } @@ -281,17 +289,18 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { def "Not able to reroute to a path with not enough bandwidth available [v1 api]"() { given: "A flow with alternate paths available" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelper.randomFlow(switchPair) - flow.maximumBandwidth = 10000 - flowHelper.addFlow(flow) - def currentPath = PathHelper.convert(northbound.getFlowPath(flow.id)) + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(10000) + .build().create() + def currentPathEntityPaths = flow.retrieveAllEntityPaths() + def currentPath = currentPathEntityPaths.getPathNodes() when: "Make the current path less preferable than alternatives" def alternativePaths = switchPair.paths.findAll { it != currentPath } alternativePaths.each { pathHelper.makePathMorePreferable(it, currentPath) } and: "Make all alternative paths to have not enough bandwidth to handle the flow" - def currentIsls = pathHelper.getInvolvedIsls(currentPath) + def currentIsls = currentPathEntityPaths.flowPath.getInvolvedIsls() def changedIsls = alternativePaths.collect { altPath -> def thinIsl = pathHelper.getInvolvedIsls(altPath).find { !currentIsls.contains(it) && !currentIsls.contains(it.reversed) @@ -305,25 +314,26 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { } and: "Init a reroute to a more preferable path" - def 
rerouteResponse = northbound.rerouteFlow(flow.id) + def rerouteResponse = flow.rerouteV1() then: "The flow is NOT rerouted because of not enough bandwidth on alternative paths" - Wrappers.wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flow.id).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } !rerouteResponse.rerouted rerouteResponse.path.path == currentPath int seqId = 0 rerouteResponse.path.path.each { assert it.seqId == seqId++ } - PathHelper.convert(northbound.getFlowPath(flow.id)) == currentPath + flow.retrieveAllEntityPaths().getPathNodes() == currentPath } @Tags([LOW_PRIORITY, ISL_PROPS_DB_RESET]) def "Able to reroute to a better path if it has enough bandwidth [v1 api]"() { given: "A flow with alternate paths available" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelper.randomFlow(switchPair) - flow.maximumBandwidth = 10000 - flowHelper.addFlow(flow) - def currentPath = PathHelper.convert(northbound.getFlowPath(flow.id)) + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(10000) + .build().create() + def currentEntityPaths = flow.retrieveAllEntityPaths() + def currentPath = currentEntityPaths.getPathNodes() when: "Make one of the alternative paths to be the most preferable among all others" def preferableAltPath = switchPair.paths.find { it != currentPath } @@ -332,7 +342,7 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { } and: "Make the future path to have exact bandwidth to handle the flow" - def currentIsls = pathHelper.getInvolvedIsls(currentPath) + def currentIsls = currentEntityPaths.flowPath.getInvolvedIsls() def thinIsl = pathHelper.getInvolvedIsls(preferableAltPath).find { !currentIsls.contains(it) && !currentIsls.contains(it.reversed) } @@ -342,11 +352,12 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { } and: "Init a reroute of the flow" - def rerouteResponse = northbound.rerouteFlow(flow.id) - Wrappers.wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flow.id).status == FlowState.UP } + def rerouteResponse = flow.rerouteV1() + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } then: "The flow is successfully rerouted and goes through the preferable path" - def newPath = PathHelper.convert(northbound.getFlowPath(flow.id)) + def newPathEntities = flow.retrieveAllEntityPaths() + def newPath = newPathEntities.getPathNodes() int seqId = 0 rerouteResponse.rerouted @@ -354,10 +365,10 @@ class IntentionalRerouteSpec extends HealthCheckSpecification { rerouteResponse.path.path.each { assert it.seqId == seqId++ } newPath == preferableAltPath - pathHelper.getInvolvedIsls(newPath).contains(thinIsl) + newPathEntities.flowPath.getInvolvedIsls().contains(thinIsl) and: "'Thin' ISL has 0 available bandwidth left" Wrappers.wait(WAIT_OFFSET) { assert islUtils.getIslInfo(thinIsl).get().availableBandwidth == 0 } - Wrappers.wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flow.id).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } } -} +} \ No newline at end of file diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MaxLatencySpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MaxLatencySpec.groovy index 8e3260d3863..f09b16d530c 100644 --- 
a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MaxLatencySpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MaxLatencySpec.groovy @@ -1,25 +1,30 @@ package org.openkilda.functionaltests.spec.flows +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.helpers.Wrappers.wait +import static org.openkilda.messaging.payload.flow.FlowState.DEGRADED +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.flow.FlowNotCreatedWithMissingPathExpectedError import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowActionType +import org.openkilda.functionaltests.helpers.model.PathComputationStrategy import org.openkilda.functionaltests.helpers.model.SwitchPair import org.openkilda.functionaltests.model.cleanup.CleanupAfter +import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.info.event.PathNode import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.PathComputationStrategy import org.openkilda.model.StatusInfo import org.openkilda.testing.model.topology.TopologyDefinition.Isl + +import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative import spock.lang.See import spock.lang.Shared -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_SUCCESS -import static org.openkilda.functionaltests.helpers.Wrappers.wait -import static org.openkilda.messaging.payload.flow.FlowState.DEGRADED -import static org.openkilda.testing.Constants.WAIT_OFFSET @See(["https://github.com/telstra/open-kilda/blob/develop/docs/design/pce/design.md", "https://github.com/telstra/open-kilda/blob/develop/docs/design/pce/max-latency-issue/README.md"]) @@ -50,6 +55,10 @@ class MaxLatencySpec extends HealthCheckSpecification { List mainIsls, alternativeIsls, islsToBreak @Shared SwitchPair switchPair + @Autowired + @Shared + FlowFactory flowFactory + def setupSpec() { //setup: Two active switches with two diverse paths @@ -60,10 +69,10 @@ class MaxLatencySpec extends HealthCheckSpecification { mainIsls = pathHelper.getInvolvedIsls(mainPath) alternativeIsls = pathHelper.getInvolvedIsls(alternativePath) //deactivate other paths for more clear experiment - def isls = mainIsls + alternativeIsls - islsToBreak = switchPair.paths.findAll { !paths.contains(it) } - .collect { pathHelper.getInvolvedIsls(it).find { !isls.contains(it) && !isls.contains(it.reversed) } } - .unique { [it, it.reversed].sort() } + def isls = mainIsls.collectMany { [it, it.reversed]} + alternativeIsls.collectMany { [it, it.reversed]} + islsToBreak = switchPair.paths.findAll{ !(it.containsAll(mainPath) || it.containsAll(alternativePath))} + .collectMany{ pathHelper.getInvolvedIsls(it)}.unique() + .collectMany{ [it, it.reversed] }.findAll { !isls.contains(it)} islHelper.breakIsls(islsToBreak, CleanupAfter.CLASS) } @@ -72,18 +81,17 @@ class MaxLatencySpec extends HealthCheckSpecification { setLatencyForPaths(10, 15) when: "Create a flow with protected path, max_latency 16 and max_latency_tier_2 18" - def flow = 
flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = true - maxLatency = 16 - maxLatencyTier2 = 18 - pathComputationStrategy = PathComputationStrategy.MAX_LATENCY.toString() - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(true) + .withMaxLatency(16) + .withMaxLatencyTier2(18) + .withPathComputationStrategy(PathComputationStrategy.MAX_LATENCY) + .build().create() then: "Flow is created, main path is the 15 latency path, protected is 10 latency" - def path = northbound.getFlowPath(flow.flowId) - pathHelper.convert(path) == alternativePath - pathHelper.convert(path.protectedPath) == mainPath + def flowPath = flow.retrieveAllEntityPaths() + flowPath.getPathNodes(Direction.FORWARD, false) == alternativePath + flowPath.getPathNodes(Direction.FORWARD, true) == mainPath } @Tags([LOW_PRIORITY]) @@ -92,12 +100,11 @@ class MaxLatencySpec extends HealthCheckSpecification { setLatencyForPaths(10, 9) when: "Create a flow with protected path and max_latency #testMaxLatency" - def flow = flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = true - maxLatency = testMaxLatency - pathComputationStrategy = PathComputationStrategy.MAX_LATENCY.toString() - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(true) + .withMaxLatency(testMaxLatency) + .withPathComputationStrategy(PathComputationStrategy.MAX_LATENCY) + .build().create() then: "Flow is not created, error returned describing that no paths found" def e = thrown(HttpClientErrorException) @@ -114,19 +121,18 @@ class MaxLatencySpec extends HealthCheckSpecification { setLatencyForPaths(10, 15) when: "Create a flow with protected path, maxLatency 11 and maxLatencyTier2 16" - def flow = flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = true - maxLatency = 11 - maxLatencyTier2 = 16 // maxLatency < pathLatency < maxLatencyTier2 - pathComputationStrategy = PathComputationStrategy.MAX_LATENCY.toString() - } - flowHelperV2.addFlow(flow, DEGRADED) + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(true) + .withMaxLatency(11) + .withMaxLatencyTier2(16) // maxLatency < pathLatency < maxLatencyTier2 + .withPathComputationStrategy(PathComputationStrategy.MAX_LATENCY) + .build().create(DEGRADED) then: "Flow is created, main path is the 10 latency path, protected is 15 latency" and: "Flow goes to DEGRADED state" - def path = northbound.getFlowPath(flow.flowId) - pathHelper.convert(path) == mainPath - pathHelper.convert(path.protectedPath) == alternativePath + def flowPath = flow.retrieveAllEntityPaths() + flowPath.getPathNodes(Direction.FORWARD, false) == mainPath + flowPath.getPathNodes(Direction.FORWARD, true) == alternativePath } @Tags([LOW_PRIORITY]) @@ -135,16 +141,15 @@ class MaxLatencySpec extends HealthCheckSpecification { setLatencyForPaths(11, 15) when: "Create a flow with max_latency 11 and max_latency_tier2 16" - def flow = flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = false - maxLatency = 11 - maxLatencyTier2 = 16 - pathComputationStrategy = PathComputationStrategy.MAX_LATENCY.toString() - } - flowHelperV2.addFlow(flow, DEGRADED) + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(false) + .withMaxLatency(11) + .withMaxLatencyTier2(16) + .withPathComputationStrategy(PathComputationStrategy.MAX_LATENCY) + .build().create(DEGRADED) then: "Flow is created, flow path is the 15 
latency path" - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == alternativePath + flow.retrieveAllEntityPaths().getPathNodes() == alternativePath } @Tags([LOW_PRIORITY]) @@ -153,31 +158,31 @@ class MaxLatencySpec extends HealthCheckSpecification { setLatencyForPaths(10, 15) when: "Create a flow with max_latency 11 and max_latency_tier2 16" - def flow = flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = false - maxLatency = 11 - maxLatencyTier2 = 16 - pathComputationStrategy = PathComputationStrategy.MAX_LATENCY.toString() - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(false) + .withMaxLatency(11) + .withMaxLatencyTier2(16) + .withPathComputationStrategy(PathComputationStrategy.MAX_LATENCY) + .build().create() //flow path is the 10 latency path - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == mainPath + assert flow.retrieveAllEntityPaths().getPathNodes() == mainPath and: "Update the flow(maxLatency: 10)" def newMaxLatency = 10 - northboundV2.updateFlow(flow.flowId, flow.tap { maxLatency = newMaxLatency }) + def flowWithNewMaxLatency = flow.deepCopy().tap { it.maxLatency = 10} + flow.update(flowWithNewMaxLatency, DEGRADED) then: "Flow is updated and goes to the DEGRADED state" wait(WAIT_OFFSET) { - def flowInfo = northboundV2.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetails() assert flowInfo.maxLatency == newMaxLatency - assert flowInfo.status == DEGRADED.toString() + assert flowInfo.status == DEGRADED assert flowInfo.statusInfo == StatusInfo.BACK_UP_STRATEGY_USED /*[0..1] - can be more than two statuses due to running this test in a parallel mode. for example: reroute can be triggered by blinking/activating any isl (not involved in flow path)*/ assert northboundV2.getFlowHistoryStatuses(flow.flowId).historyStatuses*.statusBecome[0..1] == ["UP", "DEGRADED"] } - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == alternativePath + assert flow.retrieveAllEntityPaths().getPathNodes() == alternativePath } def "Able to reroute a MAX_LATENCY flow if maxLatencyTier2 > pathLatency > maxLatency"() { @@ -185,14 +190,13 @@ class MaxLatencySpec extends HealthCheckSpecification { setLatencyForPaths(10, 15) when: "Create a flow with max_latency 11 and max_latency_tier2 16" - def flow = flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = false - maxLatency = 11 - maxLatencyTier2 = 16 - pathComputationStrategy = PathComputationStrategy.MAX_LATENCY.toString() - } - flowHelperV2.addFlow(flow) - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == mainPath + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(false) + .withMaxLatency(11) + .withMaxLatencyTier2(16) + .withPathComputationStrategy(PathComputationStrategy.MAX_LATENCY) + .build().create() + assert flow.retrieveAllEntityPaths().getPathNodes() == mainPath and: "Init auto reroute (bring port down on the src switch)" setLatencyForPaths(10, 15) @@ -201,15 +205,16 @@ class MaxLatencySpec extends HealthCheckSpecification { then: "Flow is rerouted and goes to the DEGRADED state" wait(rerouteDelay + WAIT_OFFSET) { - def flowHistory = flowHelper.getLatestHistoryEntry(flow.flowId) - flowHistory.payload.last().action == REROUTE_SUCCESS + def flowLastHistoryEntry = flow.retrieveFlowHistory() + .getEntriesByType(FlowActionType.REROUTE).last() + assert flowLastHistoryEntry.payload.last().action == "Flow reroute completed" // 
https://github.com/telstra/open-kilda/issues/4049 - flowHistory.payload.last().details == "Flow reroute completed with status DEGRADED and error: The primary path status is DEGRADED" - def flowInfo = northboundV2.getFlow(flow.flowId) - assert flowInfo.status == DEGRADED.toString() + flowLastHistoryEntry.payload.last().details == "Flow reroute completed with status DEGRADED and error: The primary path status is DEGRADED" + def flowInfo = flow.retrieveDetails() + assert flowInfo.status == DEGRADED assert flowInfo.statusInfo == StatusInfo.BACK_UP_STRATEGY_USED } - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == alternativePath + flow.retrieveAllEntityPaths().getPathNodes() == alternativePath } def "Able to create DEGRADED flow with LATENCY strategy if max_latency_tier_2 > flowPath > max_latency"() { @@ -217,18 +222,16 @@ class MaxLatencySpec extends HealthCheckSpecification { setLatencyForPaths(11, 15) when: "Create a flow, maxLatency 10 and maxLatencyTier2 12" - def flow = flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = false - maxLatency = 10 - maxLatencyTier2 = 12 - pathComputationStrategy = PathComputationStrategy.LATENCY.toString() - } - flowHelperV2.addFlow(flow, DEGRADED) + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(false) + .withMaxLatency(10) + .withMaxLatencyTier2(12) + .withPathComputationStrategy(PathComputationStrategy.LATENCY) + .build().create(DEGRADED) then: "Flow is created in DEGRADED state because flowPath doesn't satisfy max_latency value \ but satisfies max_latency_tier2" - def path = northbound.getFlowPath(flow.flowId) - pathHelper.convert(path) == mainPath + flow.retrieveAllEntityPaths().getPathNodes() == mainPath } @Tags([LOW_PRIORITY]) @@ -237,17 +240,15 @@ but satisfies max_latency_tier2" setLatencyForPaths(9, 15) when: "Create a flow, maxLatency 9 and maxLatencyTier2 12" - def flow = flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = false - maxLatency = 9 - maxLatencyTier2 = 12 - pathComputationStrategy = PathComputationStrategy.LATENCY.toString() - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(false) + .withMaxLatency(9) + .withMaxLatencyTier2(12) + .withPathComputationStrategy(PathComputationStrategy.LATENCY) + .build().create() then: "Flow is created in UP" - def path = northbound.getFlowPath(flow.flowId) - pathHelper.convert(path) == mainPath + flow.retrieveAllEntityPaths().getPathNodes() == mainPath } @Tags([LOW_PRIORITY]) @@ -256,13 +257,12 @@ but satisfies max_latency_tier2" setLatencyForPaths(12, 13) when: "Create a flow, maxLatency 10 and maxLatencyTier2 11" - def flow = flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = false - maxLatency = 10 - maxLatencyTier2 = 11 - pathComputationStrategy = PathComputationStrategy.LATENCY.toString() - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(false) + .withMaxLatency(10) + .withMaxLatencyTier2(11) + .withPathComputationStrategy(PathComputationStrategy.LATENCY) + .build().create() then: "Flow is not created, human readable error is returned" def e = thrown(HttpClientErrorException) @@ -275,14 +275,14 @@ but satisfies max_latency_tier2" setLatencyForPaths(11, 15) and: "A flow with maxLatency 11 and maxLatencyTier2 14 on the path with 11 latency" - def flow = flowHelperV2.randomFlow(switchPair).tap { - allocateProtectedPath = false - maxLatency = 11 - 
maxLatencyTier2 = 14 - pathComputationStrategy = PathComputationStrategy.LATENCY.toString() - } - flowHelperV2.addFlow(flow) - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == mainPath + def flow = flowFactory.getBuilder(switchPair.src, switchPair.dst) + .withProtectedPath(false) + .withMaxLatency(11) + .withMaxLatencyTier2(14) + .withPathComputationStrategy(PathComputationStrategy.LATENCY) + .build().create() + + assert flow.retrieveAllEntityPaths().getPathNodes() == mainPath when: "Break the flow path to init autoReroute" def islToBreak = pathHelper.getInvolvedIsls(mainPath).first() @@ -290,12 +290,12 @@ but satisfies max_latency_tier2" then: "Flow is not rerouted and moved to the DOWN state" wait(WAIT_OFFSET) { - with(northboundV2.getFlow(flow.flowId)) { - it.status == FlowState.DOWN.toString() + with(flow.retrieveDetails()) { + it.status == FlowState.DOWN it.statusInfo.contains("No path found.") } } - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == mainPath + assert flow.retrieveAllEntityPaths().getPathNodes() == mainPath } def setLatencyForPaths(int mainPathLatency, int alternativePathLatency) { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MirrorEndpointsSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MirrorEndpointsSpec.groovy index 539d74fc8c2..6a6880b9189 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MirrorEndpointsSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MirrorEndpointsSpec.groovy @@ -1,8 +1,20 @@ package org.openkilda.functionaltests.spec.flows -import groovy.transform.AutoClone -import groovy.transform.Memoized -import groovy.util.logging.Slf4j +import static com.shazam.shazamcrest.matcher.Matchers.sameBeanAs +import static org.junit.jupiter.api.Assumptions.assumeFalse +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan +import static org.openkilda.functionaltests.helpers.FlowNameGenerator.FLOW +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER +import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RAW_BYTES +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static spock.util.matcher.HamcrestSupport.expect + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.AbstractExpectedError import org.openkilda.functionaltests.error.flow.FlowNotCreatedWithConflictExpectedError @@ -13,13 +25,17 @@ import org.openkilda.functionaltests.error.switchproperties.SwitchPropertiesNotU import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.FlowHistoryConstants import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowActionType +import 
org.openkilda.functionaltests.helpers.model.FlowEncapsulationType +import org.openkilda.functionaltests.helpers.model.FlowExtended import org.openkilda.functionaltests.helpers.model.SwitchPair +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.functionaltests.model.stats.FlowStats import org.openkilda.messaging.info.event.PathNode import org.openkilda.messaging.info.rule.FlowEntry import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.FlowPathDirection import org.openkilda.model.FlowPathStatus import org.openkilda.model.SwitchId @@ -29,33 +45,22 @@ import org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 import org.openkilda.northbound.dto.v2.flows.FlowMirrorPointPayload import org.openkilda.northbound.dto.v2.flows.FlowPatchEndpoint import org.openkilda.northbound.dto.v2.flows.FlowPatchV2 -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.traffexam.TraffExamService import org.openkilda.testing.service.traffexam.model.Exam import org.openkilda.testing.service.traffexam.model.FlowBidirectionalExam -import org.openkilda.testing.tools.FlowTrafficExamBuilder import org.openkilda.testing.tools.TraffgenStats + +import groovy.transform.AutoClone +import groovy.transform.Memoized +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.See import spock.lang.Shared -import javax.inject.Provider import java.util.regex.Pattern - -import static com.shazam.shazamcrest.matcher.Matchers.sameBeanAs -import static org.junit.jupiter.api.Assumptions.assumeFalse -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER -import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RAW_BYTES -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static spock.util.matcher.HamcrestSupport.expect +import javax.inject.Provider @Slf4j @See("https://github.com/telstra/open-kilda/tree/develop/docs/design/flow-traffic-mirroring") @@ -65,11 +70,18 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { @Autowired @Shared Provider traffExamProvider - - @Autowired @Shared + @Autowired + @Shared FlowStats flowStats - @Autowired @Shared + @Autowired + @Shared CleanupManager cleanupManager + @Autowired + @Shared + FlowFactory flowFactory + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory def setupSpec() { deleteAnyFlowsLeftoversIssue5480() @@ -79,57 +91,47 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { def "Able to CRUD a mirror endpoint on the src switch, mirror to the same switch diff port [#swPair.src.hwSwString, #mirrorDirection]#trafficDisclaimer"() { given: "A flow" assumeTrue(swPair as boolean, "Unable to find a switch pair") - def flow = flowHelperV2.randomFlow(swPair).tap { 
maximumBandwidth = 100000 } + def flowEntity = flowFactory.getBuilder(swPair).withBandwidth(100000) if (profile == 'virtual') { // ovs switch doesn't support mirroring for the vxlan flows - flow.encapsulationType = FlowEncapsulationType.TRANSIT_VLAN + flowEntity.withEncapsulationType(FlowEncapsulationType.TRANSIT_VLAN) } - flowHelperV2.addFlow(flow) + def flow = flowEntity.build().create() when: "Create a mirror point on src switch, pointing to a different port, random vlan" def mirrorTg = swPair.src.traffGens ?[1] def mirrorPort = mirrorTg?.switchPort ?: (topology.getAllowedPortsForSwitch(swPair.src) - flow.source.portNumber)[0] - def mirrorEndpoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString().toLowerCase()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.src.dpId).portNumber(mirrorPort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - northboundV2.createMirrorPoint(flow.flowId, mirrorEndpoint) + def mirrorPointPayload = flow.buildMirrorPointPayload( + flow.source.switchId, mirrorPort, randomVlan(), mirrorDirection as FlowPathDirection + ) + flow.createMirrorPointWithPayload(mirrorPointPayload) then: "Mirror status changes to Active" Wrappers.wait(WAIT_OFFSET) { - assert northboundV2.getFlow(flow.flowId).mirrorPointStatuses[0].status == + assert flow.retrieveDetails().mirrorPointStatuses[0].status == FlowPathStatus.ACTIVE.toString().toLowerCase() } and: "Flow history reports a successful mirror creation" - Wrappers.wait(WAIT_OFFSET) { - with(flowHelper.getLatestHistoryEntry(flow.getFlowId())) { - action == FlowHistoryConstants.CREATE_MIRROR_ACTION - it.payload.last().action == FlowHistoryConstants.CREATE_MIRROR_SUCCESS - } - } + flow.waitForHistoryEvent(FlowActionType.CREATE_MIRROR) and: "Mirror endpoint is visible in 'get flows', 'get single flow' and 'get mirror endpoint' APIs" def allFlows = northboundV2.getAllFlows() - def gotFlow = northboundV2.getFlow(flow.flowId) + def gotFlow = flow.retrieveDetails() allFlows.size() == 1 - [allFlows[0], gotFlow].each { - assert it.mirrorPointStatuses.size() == 1 - assert it.mirrorPointStatuses[0].status == FlowPathStatus.ACTIVE.toString().toLowerCase() - assert it.mirrorPointStatuses[0].mirrorPointId == mirrorEndpoint.mirrorPointId + [allFlows[0].mirrorPointStatuses, gotFlow.mirrorPointStatuses].each { mirrorPointsDetails -> + assert mirrorPointsDetails.size() == 1 + assert mirrorPointsDetails[0].status == FlowPathStatus.ACTIVE.toString().toLowerCase() + assert mirrorPointsDetails[0].mirrorPointId == mirrorPointPayload.mirrorPointId } - with(northboundV2.getMirrorPoints(flow.flowId)) { - flowId == flow.flowId + with(flow.retrieveMirrorPoints()) { points.size() == 1 - expect points[0], sameBeanAs(mirrorEndpoint) + expect points[0], sameBeanAs(mirrorPointPayload) } and: "Mirror flow rule has an OF group action and higher prio than flow rule" - def rules = getFlowRules(swPair.src.dpId) + def swRules = switchRulesFactory.get(swPair.src.dpId) + def rules = swRules.getRules() def mirrorRule = findMirrorRule(rules, mirrorDirection) def flowRule = findFlowRule(rules, mirrorDirection) def groupId = mirrorRule.instructions.applyActions.group @@ -143,18 +145,19 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { findMirrorRule(rules, oppositeDirection) == null and: "Related switches and flow pass validation" - 
switchHelper.synchronizeAndCollectFixedDiscrepancies(pathHelper.getInvolvedSwitches(flow.flowId)*.getDpId()).isEmpty() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + def flowInvolvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(flowInvolvedSwitches).isEmpty() + flow.validateAndCollectDiscrepancies().isEmpty() when: "Traffic briefly runs through the flow" def traffExam = traffExamProvider.get() - def mirrorPortStats = mirrorTg ? new TraffgenStats(traffExam, mirrorTg, [mirrorEndpoint.sinkEndpoint.vlanId]) : null + def mirrorPortStats = mirrorTg ? new TraffgenStats(traffExam, mirrorTg, [mirrorPointPayload.sinkEndpoint.vlanId]) : null if (mirrorPortStats) { cleanupManager.addAction(OTHER, {mirrorPortStats.close()}) } def rxPacketsBefore = mirrorPortStats?.get()?.rxPackets if (!trafficDisclaimer) { - verifyTraffic(traffExam, flow, mirrorDirection) + sendTrafficAndVerifyOnMainFlow(traffExam, flow, mirrorDirection) statsHelper."force kilda to collect stats"() } @@ -175,7 +178,7 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { and: "System collects stat for mirror cookie in tsdb" if (!trafficDisclaimer) { Wrappers.wait(statsRouterRequestInterval) { - flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, mirrorRule.cookie).hasNonZeroValues() + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, mirrorRule.cookie).hasNonZeroValues() } } @@ -186,20 +189,15 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { } when: "Delete the mirror point" - northboundV2.deleteMirrorPoint(flow.flowId, mirrorEndpoint.mirrorPointId) + flow.deleteMirrorPoint(mirrorPointPayload.mirrorPointId) then: "'Mirror point delete' operation is present in flow history" - Wrappers.wait(WAIT_OFFSET) { - with(flowHelper.getLatestHistoryEntry(flow.flowId)) { - action == FlowHistoryConstants.DELETE_MIRROR_ACTION - payload.last().action == FlowHistoryConstants.DELETE_MIRROR_SUCCESS - } - } + flow.waitForHistoryEvent(FlowActionType.DELETE_MIRROR) and: "Mirror point is no longer present in flow and mirror APIs" - assert northboundV2.getFlow(flow.flowId).mirrorPointStatuses.empty + assert flow.retrieveDetails().mirrorPointStatuses.empty northboundV2.getAllFlows()[0].mirrorPointStatuses.empty - northboundV2.getMirrorPoints(flow.flowId).points.empty + flow.retrieveMirrorPoints().points.empty and: "Mirror flow rule is removed and flow rule is intact" def rulesAfterRemove = getFlowRules(swPair.src.dpId) @@ -211,10 +209,10 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { and: "Src switch and flow pass validation" !switchHelper.synchronizeAndCollectFixedDiscrepancies(swPair.src.dpId).isPresent() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Src switch pass validation" !switchHelper.synchronizeAndCollectFixedDiscrepancies(swPair.src.dpId).isPresent() @@ -234,44 +232,40 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { .withAtLeastNTraffgensOnSource(2) .withAtLeastNNonOverlappingPaths(2) .random() - def flow = flowHelperV2.randomFlow(swPair).tap { it.allocateProtectedPath = true } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withProtectedPath(true) + .build().create() when: "Create a mirror point" - def mirrorTg = swPair.src.traffGens[1] - def mirrorEndpoint = 
FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString().toLowerCase()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.src.dpId).portNumber(mirrorTg.switchPort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorEndpoint) + def mirrorTg = swPair.src.traffGens.find { it.switchPort != flow.source.portNumber } // try to take TG at the same switch but not used in the flow + assumeTrue(mirrorTg != null) ?: "Could not find a free traffgen port which is not equal to the flow source port" + def mirrorPointPayload = flow.buildMirrorPointPayload( + flow.source.switchId, mirrorTg.switchPort, randomVlan(), mirrorDirection) + flow.createMirrorPointWithPayload(mirrorPointPayload) then: "Mirror point is created and Active" and: "Flow and switch pass validation" !switchHelper.synchronizeAndCollectFixedDiscrepancies(swPair.src.dpId).isPresent() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Swap flow paths" northbound.swapFlowPath(flow.flowId) Wrappers.wait(WAIT_OFFSET) { - northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + flow.retrieveFlowStatus().status == FlowState.UP } then: "Flow and switch both pass validation" !switchHelper.synchronizeAndCollectFixedDiscrepancies(swPair.src.dpId).isPresent() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() and: "Flow passes main traffic" def traffExam = traffExamProvider.get() - def mirrorPortStats = new TraffgenStats(traffExam, mirrorTg, [mirrorEndpoint.sinkEndpoint.vlanId]) + def mirrorPortStats = new TraffgenStats(traffExam, mirrorTg, [mirrorPointPayload.sinkEndpoint.vlanId]) if (mirrorPortStats) { cleanupManager.addAction(OTHER, {mirrorPortStats.close()}) } def rxPacketsBefore = mirrorPortStats.get().rxPackets - verifyTraffic(traffExam, flow, mirrorDirection) + sendTrafficAndVerifyOnMainFlow(traffExam, flow, mirrorDirection) and: "Flow passes mirrored traffic" mirrorPortStats.get().rxPackets - rxPacketsBefore > 0 @@ -285,35 +279,32 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { def "Can create mirror point on a VXLAN flow [#swPair.src.hwSwString, #mirrorDirection]#trafficDisclaimer"() { given: "A VXLAN flow" assumeTrue(swPair as boolean, "Unable to find required vxlan-enabled switches with traffgens") - def flow = flowHelperV2.randomFlow(swPair).tap { encapsulationType = FlowEncapsulationType.VXLAN } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .build().create() when: "Create a mirror point" - def mirrorTg = swPair.src.traffGens[1] + def mirrorTg = swPair.src.traffGens.find { it.switchPort != flow.source.portNumber } // try to take TG at the same switch but not used in the flow + assumeTrue(mirrorTg != null) ?: "Could not find a free traffgen port which is not equal to the flow source port" def mirrorPort = mirrorTg?.switchPort ?: (topology.getAllowedPortsForSwitch(swPair.src) - flow.source.portNumber)[0] - def mirrorEndpoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString().toLowerCase()) - .mirrorPointSwitchId(swPair.src.dpId) - 
.sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.src.dpId).portNumber(mirrorPort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorEndpoint) + def mirrorEpVlan = randomVlan() + flow.createMirrorPoint(flow.source.switchId, mirrorPort, + mirrorEpVlan, mirrorDirection as FlowPathDirection) then: "Mirror point is created and Active" and: "Related switches and flow pass validation" - switchHelper.synchronizeAndCollectFixedDiscrepancies(pathHelper.getInvolvedSwitches(flow.flowId)*.getDpId()).isEmpty() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + def flowInvolvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(flowInvolvedSwitches).isEmpty() + flow.validateAndCollectDiscrepancies().isEmpty() and: "Flow passes traffic on main path as well as to the mirror (if possible to check)" def traffExam = traffExamProvider.get() - def mirrorPortStats = mirrorTg ? new TraffgenStats(traffExam, mirrorTg, [mirrorEndpoint.sinkEndpoint.vlanId]) : null + def mirrorPortStats = mirrorTg ? new TraffgenStats(traffExam, mirrorTg, [mirrorEpVlan]) : null if (mirrorPortStats) { cleanupManager.addAction(OTHER, {mirrorPortStats.close()}) } def rxPacketsBefore = mirrorPortStats?.get()?.rxPackets - verifyTraffic(traffExam, flow, mirrorDirection) + sendTrafficAndVerifyOnMainFlow(traffExam, flow, mirrorDirection) //https://github.com/telstra/open-kilda/issues/5420 if (mirrorTg && !swPair.src.isWb5164()) { assert mirrorPortStats.get().rxPackets - rxPacketsBefore > 0 @@ -331,28 +322,22 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { // ovs switch doesn't support mirroring for the vxlan flows assumeFalse("virtual" == profile && data.encap == FlowEncapsulationType.VXLAN) assumeTrue(swPair as boolean, "Unable to find enough switches for a $data.encap flow") - def flow = flowHelperV2.randomFlow(swPair).tap { encapsulationType = data.encap } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withEncapsulationType(data.encap) + .build().create() def freePort = (topology.getAllowedPortsForSwitch(swPair.dst) - flow.destination.portNumber)[0] - def mirrorEndpoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString().toLowerCase()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.dst.dpId).portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorEndpoint) + flow.createMirrorPoint(flow.destination.switchId, freePort, randomVlan(), mirrorDirection as FlowPathDirection) when: "Call flow sync for the flow" - northbound.synchronizeFlow(flow.flowId) + flow.sync() Wrappers.wait(WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + assert flow.retrieveFlowStatus().status == FlowState.UP } then: "Related switches and flow pass validation" - switchHelper.synchronizeAndCollectFixedDiscrepancies(pathHelper.getInvolvedSwitches(flow.flowId)*.getDpId()).isEmpty() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + def flowInvolvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(flowInvolvedSwitches).isEmpty() + flow.validateAndCollectDiscrepancies().isEmpty() where: [data, mirrorDirection] << [ 
@@ -374,29 +359,20 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { def "Can create mirror point on unmetered pinned flow, #mirrorDirection"() { given: "An unmetered pinned flow" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair).tap { - pinned = true - maximumBandwidth = 0 - ignoreBandwidth = true - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withPinned(true) + .withBandwidth(0) + .withIgnoreBandwidth(true) + .build().create() when: "Create a mirror point on src" def freePort = (topology.getAllowedPortsForSwitch(swPair.src) - flow.source.portNumber)[0] - def mirrorEndpoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString().toLowerCase()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.src.dpId).portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorEndpoint) + flow.createMirrorPoint(flow.source.switchId, freePort, randomVlan(), mirrorDirection) then: "Mirror point is created and Active" and: "Flow and src switch both pass validation" !switchHelper.synchronizeAndCollectFixedDiscrepancies(swPair.src.dpId).isPresent() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() where: mirrorDirection << [FlowPathDirection.FORWARD, FlowPathDirection.REVERSE] @@ -405,26 +381,21 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { @Tags([TOPOLOGY_DEPENDENT]) def "Can create a mirror point on the same port as flow, different vlan [#swPair.dst.hwSwString, #encapType, #mirrorDirection]"() { given: "A flow" - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow).tap { encapsulationType = encapType } + def flow = flowFactory.getBuilder(swPair) + .withEncapsulationType(encapType) + .build().create() when: "Add a mirror point on the same port with flow, different vlan" - def mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.destination.switchId) - .portNumber(flow.destination.portNumber) - .vlanId(flow.destination.vlanId - 1) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorPoint) + flow.createMirrorPoint( + flow.destination.switchId, flow.destination.portNumber, + flow.destination.vlanId - 1, mirrorDirection as FlowPathDirection + ) then: "Mirror point is successfully created" - northboundV2.getMirrorPoints(flow.flowId).points.size() == 1 + flow.retrieveMirrorPoints().points.size() == 1 when: "Delete the flow without deleting its mirror point" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Mirror point is also deleted from db" database.getMirrorPoints().empty @@ -456,38 +427,19 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { def "Can create multiple mirror points for the same flow and switch"() { given: "A flow" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swPair) when: "Add a Forward mirror point on the same port with flow, different vlan" - def mirrorPointFw = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - 
.mirrorPointDirection(FlowPathDirection.FORWARD.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.destination.switchId) - .portNumber(flow.destination.portNumber) - .vlanId(flow.destination.vlanId - 1) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorPointFw) + flow.createMirrorPoint(flow.destination.switchId, flow.destination.portNumber, flow.destination.vlanId - 1) then: "Mirror point is created" - northboundV2.getMirrorPoints(flow.flowId).points.size() == 1 + flow.retrieveMirrorPoints().points.size() == 1 when: "Add one more Forward mirror point on the same port with flow, different vlan" - def mirrorPointFw2 = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.FORWARD.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.destination.switchId) - .portNumber(flow.destination.portNumber) - .vlanId(flow.destination.vlanId - 2) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorPointFw2) + flow.createMirrorPoint(flow.destination.switchId, flow.destination.portNumber, flow.destination.vlanId - 2) then: "Mirror point is created" - northboundV2.getMirrorPoints(flow.flowId).points.size() == 2 + flow.retrieveMirrorPoints().points.size() == 2 and: "Mirrorring group for forward path has 3 buckets (main flow + 2 mirrors)" def fwGroupId = findMirrorRule(getFlowRules(swPair.dst.dpId), FlowPathDirection.FORWARD).instructions.applyActions.group @@ -496,21 +448,15 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { fwMirrorGroup.bucketCounters.size() == 3 when: "Add a Reverse mirror point on the different port with flow" - def mirrorPointRv = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.REVERSE.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.destination.switchId) - .portNumber(flow.destination.portNumber) - .vlanId(0) - .build()) - .build() - northboundV2.createMirrorPoint(flow.flowId, mirrorPointRv) + def mirrorPointRvPayload = flow.buildMirrorPointPayload( + flow.destination.switchId, flow.destination.portNumber, 0, FlowPathDirection.REVERSE + ) + flow.createMirrorPointWithPayload(mirrorPointRvPayload) then: "Mirror point is created" Wrappers.wait(WAIT_OFFSET) { - assert northboundV2.getFlow(flow.flowId).mirrorPointStatuses.find { - it.mirrorPointId == mirrorPointRv.mirrorPointId + assert flow.retrieveDetails().mirrorPointStatuses.find { + it.mirrorPointId == mirrorPointRvPayload.mirrorPointId }.status == FlowPathStatus.ACTIVE.toString().toLowerCase() } @@ -523,24 +469,14 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { def "System also updates mirror rule after flow partial update"() { given: "A flow with mirror point" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swPair) def freePort = (topology.getAllowedPortsForSwitch(swPair.dst) - flow.destination.portNumber)[0] - def mirrorPointFw = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.FORWARD.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.destination.switchId) - .portNumber(freePort) - .vlanId(flow.destination.vlanId - 1) - 
.build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorPointFw) + flow.createMirrorPoint(flow.destination.switchId, freePort, flow.destination.vlanId - 1) when: "Update flow port and vlan on the same endpoint where mirror is" def newFlowPort = (topology.getAllowedPortsForSwitch(swPair.dst) - flow.destination.portNumber - freePort)[0] def newFlowVlan = flow.destination.vlanId - 2 - flowHelperV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { + flow.partialUpdate(new FlowPatchV2().tap { destination = new FlowPatchEndpoint().tap { portNumber = newFlowPort vlanId = newFlowVlan @@ -549,7 +485,7 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { then: "Flow and affected switch are valid" !switchHelper.synchronizeAndCollectFixedDiscrepancies(swPair.dst.dpId).isPresent() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() and: "Mirror rule has updated port/vlan values" def mirrorRule = findMirrorRule(getFlowRules(swPair.dst.dpId), FlowPathDirection.FORWARD) @@ -569,34 +505,31 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { def swPair = switchPairs.all() .withTraffgensOnBothEnds() .withAtLeastNTraffgensOnSource(2).random() - def flow = flowHelperV2.randomFlow(swPair).tap { source.vlanId = 0 } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withSourceVlan(0) + .build().create() when: "Create a mirror point" - def mirrorTg = swPair.src.traffGens[1] - def mirrorEndpoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString().toLowerCase()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.src.dpId).portNumber(mirrorTg.switchPort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorEndpoint) + def mirrorTg = swPair.src.traffGens.find { it.switchPort != flow.source.portNumber } // try to take TG at the same switch but not used in the flow + assumeTrue(mirrorTg != null) ?: "Could not find a free traffgen port which is not equal to the flow source port" + def mirrorVlanId = randomVlan() + flow.createMirrorPoint( + flow.source.switchId, mirrorTg.switchPort, mirrorVlanId, mirrorDirection) then: "Mirror point is created and Active" and: "Related switches and flow pass validation" - switchHelper.synchronizeAndCollectFixedDiscrepancies(pathHelper.getInvolvedSwitches(flow.flowId)*.getDpId()).isEmpty() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + def flowInvolvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(flowInvolvedSwitches).isEmpty() + flow.validateAndCollectDiscrepancies().isEmpty() and: "Flow passes traffic on main path as well as to the mirror" def traffExam = traffExamProvider.get() - def mirrorPortStats = new TraffgenStats(traffExam, mirrorTg, [mirrorEndpoint.sinkEndpoint.vlanId]) + def mirrorPortStats = new TraffgenStats(traffExam, mirrorTg, [mirrorVlanId]) if (mirrorPortStats) { cleanupManager.addAction(OTHER, {mirrorPortStats.close()}) } def rxPacketsBefore = mirrorPortStats.get().rxPackets - verifyTraffic(traffExam, flow, mirrorDirection) + sendTrafficAndVerifyOnMainFlow(traffExam, flow, mirrorDirection) mirrorPortStats.get().rxPackets - rxPacketsBefore > 0 where: @@ -610,36 +543,30 @@ class MirrorEndpointsSpec extends 
HealthCheckSpecification { .withTraffgensOnBothEnds() .withAtLeastNTraffgensOnSource(2) .random() - def flow = flowHelperV2.randomFlow(swPair).tap { - source.innerVlanId = 100 - destination.innerVlanId = 200 - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withSourceInnerVlan(100) + .withDestinationInnerVlan(200) + .build().create() when: "Create a mirror point" - def mirrorTg = swPair.src.traffGens[1] - def mirrorEndpoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString().toLowerCase()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.src.dpId).portNumber(mirrorTg.switchPort) - .vlanId(flowHelperV2.randomVlan([flow.source.vlanId, flow.source.innerVlanId])) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorEndpoint) + def mirrorTg = swPair.src.traffGens.find { it.switchPort != flow.source.portNumber } // try to take TG at the same switch but not used in the flow + assumeTrue(mirrorTg != null) ?: "Could not find a free traffgen port which is not equal to the flow source port" + def mirrorVlanId = randomVlan([flow.source.vlanId, flow.source.innerVlanId]) + flow.createMirrorPoint(flow.source.switchId, mirrorTg.switchPort, mirrorVlanId, mirrorDirection) then: "Mirror point is created, flow and switches are valid" - switchHelper.synchronizeAndCollectFixedDiscrepancies(pathHelper.getInvolvedSwitches(flow.flowId)*.getDpId()).isEmpty() - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + def flowInvolvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(flowInvolvedSwitches).isEmpty() + flow.validateAndCollectDiscrepancies().isEmpty() and: "Traffic examination reports packets on mirror point" def traffExam = traffExamProvider.get() - def mirrorPortStats = new TraffgenStats(traffExam, mirrorTg, [mirrorEndpoint.sinkEndpoint.vlanId]) + def mirrorPortStats = new TraffgenStats(traffExam, mirrorTg, [mirrorVlanId]) if (mirrorPortStats) { cleanupManager.addAction(OTHER, {mirrorPortStats.close()}) } def rxPacketsBefore = mirrorPortStats.get().rxPackets - verifyTraffic(traffExam, flow, mirrorDirection) + sendTrafficAndVerifyOnMainFlow(traffExam, flow, mirrorDirection) mirrorPortStats.get().rxPackets - rxPacketsBefore > 0 where: @@ -655,20 +582,17 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { involvedSwitches = pathHelper.getInvolvedSwitches(it) involvedSwitches.size() == 3 } - def flow = flowHelperV2.randomFlow(swPair) + // Sometimes a pair has >3 involvedSwitches and the required path cannot be found + assumeTrue(path != null, "Could not find a path with 1 transit switch.") swPair.paths.findAll { it != path }.each { pathHelper.makePathMorePreferable(path, it) } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair).build().create() when: "Try to add a mirror endpoint on the transit switch" def freePort = (topology.getAllowedPortsForSwitch(swPair.dst) - flow.destination.portNumber)[0] - def mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.FORWARD.toString()) - .mirrorPointSwitchId(data.mirrorPointSwitch(involvedSwitches).dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(data.sinkEndpointSwitch(involvedSwitches).dpId) - .portNumber(freePort).vlanId(flowHelperV2.randomVlan()).build()) - .build() 
- northboundV2.createMirrorPoint(flow.flowId, mirrorPoint) + SwitchId mirrorEpSinkSwitch = data.sinkEndpointSwitch(involvedSwitches).dpId + SwitchId mirrorPointSwitch = data.mirrorPointSwitch(involvedSwitches).dpId + flow.createMirrorPoint(mirrorEpSinkSwitch, freePort, randomVlan(), + FlowPathDirection.FORWARD, mirrorPointSwitch, false) then: "Error is returned, cannot create mirror point on given sw" def error = thrown(HttpClientErrorException) @@ -704,13 +628,15 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { } @Tags([LOW_PRIORITY]) - def "Test possible error scenarios during mirror point creation: [#testData.testName, #testData.mirrorPoint.mirrorPointDirection]"(MirrorErrorTestData testData) { + def "Test possible error scenarios during mirror point creation: [#testData.testName, #testData.mirrorPointDirection]"(MirrorErrorTestData testData) { given: "A flow" - def flow = testData.flow - flowHelperV2.addFlow(flow) + def flow = testData.flow.create() when: "Try adding a mirror point with conflict" - northboundV2.createMirrorPoint(flow.flowId, testData.mirrorPoint) + flow.createMirrorPoint( + testData.mirrorSinkEndpointSwitchId, testData.port, randomVlan(), + testData.mirrorPointDirection, testData.mirrorPointSwitchId, false + ) then: "Error is returned, cannot create mirror point with given params" def error = thrown(HttpClientErrorException) @@ -720,56 +646,40 @@ class MirrorEndpointsSpec extends HealthCheckSpecification { testData << [ new MirrorErrorTestData("Unable to create a mirror endpoint on the src sw and sink back to dst sw", { def swPair = switchPairs.all().random() - it.flow = flowHelperV2.randomFlow(swPair) - def freePort = (topology.getAllowedPortsForSwitch(swPair.src) - flow.source.portNumber)[0] - it.mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.FORWARD.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.src.dpId).portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() + it.flow = flowFactory.getBuilder(swPair).build() + it.port = (topology.getAllowedPortsForSwitch(swPair.src) - flow.source.portNumber)[0] + it.mirrorSinkEndpointSwitchId = flow.source.switchId + it.mirrorPointDirection = FlowPathDirection.FORWARD + it.mirrorPointSwitchId = swPair.dst.dpId it.expectedError = new FlowMirrorPointNotCreatedExpectedError( ~/Invalid sink endpoint switch id: $swPair.src.dpId. 
In the current implementation, \ the sink switch id cannot differ from the mirror point switch id./) }), new MirrorErrorTestData("Unable to create a mirror point with isl conflict", { def swPair = switchPairs.all().random() - it.flow = flowHelperV2.randomFlow(swPair) - def islPort = topology.getBusyPortsForSwitch(swPair.dst)[0] - it.mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.FORWARD.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.dst.dpId) - .portNumber(islPort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - it.expectedError = new FlowMirrorPointNotCreatedExpectedError(~/The port $islPort on the switch \ + it.flow = flowFactory.getBuilder(swPair).build() + it.port = topology.getBusyPortsForSwitch(swPair.dst)[0] + it.mirrorSinkEndpointSwitchId = flow.destination.switchId + it.mirrorPointDirection = FlowPathDirection.FORWARD + it.mirrorPointSwitchId = swPair.dst.dpId + it.expectedError = new FlowMirrorPointNotCreatedExpectedError(~/The port $it.port on the switch \ \'$swPair.dst.dpId\' is occupied by an ISL \(destination endpoint collision\)./) }), new MirrorErrorTestData("Unable to create a mirror point with s42Port conflict", { def swPair = switchPairs.all().withDestinationSwitchConnectedToServer42().random() - it.flow = flowHelperV2.randomFlow(swPair) + it.flow = flowFactory.getBuilder(swPair).build() def s42Port = swPair.dst.prop.server42Port - it.mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.FORWARD.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swPair.dst.dpId) - .portNumber(s42Port) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() + it.port = s42Port + it.mirrorSinkEndpointSwitchId = flow.destination.switchId + it.mirrorPointDirection = FlowPathDirection.FORWARD + it.mirrorPointSwitchId = swPair.dst.dpId it.expectedError = new FlowMirrorPointNotCreatedExpectedError(~/Server 42 port in the switch \ properties for switch \'$swPair.dst.dpId\' is set to \'$s42Port\'. 
It is not possible to create or update an endpoint \ with these parameters./) }) ].collectMany { - [it.tap { mirrorPoint.mirrorPointDirection = FlowPathDirection.FORWARD.toString() }, - it.clone().tap { mirrorPoint.mirrorPointDirection = FlowPathDirection.REVERSE.toString() }] + [it.tap { mirrorPointDirection = FlowPathDirection.FORWARD }, + it.clone().tap { mirrorPointDirection = FlowPathDirection.REVERSE }] } } @@ -777,27 +687,18 @@ with these parameters./) def "Unable to create a mirror point with existing flow conflict, #mirrorDirection"() { given: "A flow" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - def otherFlow = flowHelperV2.randomFlow(swPair, false, [flow]) - flowHelperV2.addFlow(flow) - flowHelperV2.addFlow(otherFlow) + def flow = flowFactory.getRandom(swPair) + def otherFlow = flowFactory.getBuilder(swPair, false, flow.occupiedEndpoints()).build().create() when: "Try adding a mirror point that conflicts with other existing flow" - def mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(otherFlow.source.switchId) - .portNumber(otherFlow.source.portNumber) - .vlanId(otherFlow.source.vlanId) - .build()) - .build() - northboundV2.createMirrorPoint(flow.flowId, mirrorPoint) + def mirrorPointPayload = flow.buildMirrorPointPayload( + otherFlow.source.switchId, otherFlow.source.portNumber, otherFlow.source.vlanId, mirrorDirection) + flow.createMirrorPointWithPayload(mirrorPointPayload, false) then: "Error is returned, cannot create mirror point with given params" def error = thrown(HttpClientErrorException) new FlowMirrorPointNotCreatedWithConflictExpectedError( - getEndpointConflictError(mirrorPoint, otherFlow, "source")).matches(error) + getEndpointConflictError(mirrorPointPayload, otherFlow, "source")).matches(error) where: mirrorDirection << [FlowPathDirection.FORWARD, FlowPathDirection.REVERSE] @@ -807,35 +708,30 @@ with these parameters./) def "Unable to create a flow that conflicts with mirror point, #mirrorDirection"() { given: "A flow with mirror point" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swPair) def freePort = (topology.getAllowedPortsForSwitch(swPair.dst) - flow.destination.portNumber)[0] - def mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.destination.switchId) - .portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - northboundV2.createMirrorPoint(flow.flowId, mirrorPoint) + def mirrorPointPayload = flow.buildMirrorPointPayload( + flow.destination.switchId, freePort, randomVlan(), mirrorDirection, swPair.dst.dpId) + flow.createMirrorPointWithPayload(mirrorPointPayload) + Wrappers.wait(WAIT_OFFSET) { - assert northboundV2.getFlow(flow.flowId).mirrorPointStatuses[0].status == + assert flow.retrieveDetails().mirrorPointStatuses[0].status == FlowPathStatus.ACTIVE.toString().toLowerCase() } when: "Try adding a flow that conflicts with existing mirror point" - def otherFlow = flowHelperV2.randomFlow(swPair, false, [flow]).tap { - it.destination.portNumber = mirrorPoint.sinkEndpoint.portNumber - it.destination.vlanId = 
mirrorPoint.sinkEndpoint.vlanId - } - flowHelperV2.addFlow(otherFlow) + def busyEndpoints = [] + busyEndpoints.addAll(flow.occupiedEndpoints()) + def otherFlow = flowFactory.getBuilder(swPair, false, busyEndpoints) + .withDestinationPort(mirrorPointPayload.sinkEndpoint.portNumber) + .withDestinationVlan(mirrorPointPayload.sinkEndpoint.vlanId) + .build() + otherFlow.create() then: "Error is returned, cannot create flow that conflicts with mirror point" def error = thrown(HttpClientErrorException) new FlowNotCreatedWithConflictExpectedError( - getEndpointConflictError(otherFlow.destination, mirrorPoint)).matches(error) + getEndpointConflictError(otherFlow.destination, mirrorPointPayload)).matches(error) where: mirrorDirection << [FlowPathDirection.FORWARD, FlowPathDirection.REVERSE] @@ -845,23 +741,15 @@ with these parameters./) def "Unable to create mirror point with connected devices enabled, #mirrorDirection"() { given: "A flow with connected devices enabled" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair).tap { - source.detectConnectedDevices = new DetectConnectedDevicesV2(true, true) - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withDetectedDevicesOnSrc(true, true) + .build().create() when: "Try to create a mirror for the flow (on the same endpoint)" def freePort = (topology.getAllowedPortsForSwitch(swPair.src) - flow.source.portNumber)[0] - def mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.source.switchId) - .portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - northboundV2.createMirrorPoint(flow.flowId, mirrorPoint) + flow.createMirrorPoint( + flow.source.switchId, freePort, randomVlan(), + mirrorDirection, flow.source.switchId, false) then: "Error is returned, cannot create create mirror for a flow with devices" def error = thrown(HttpClientErrorException) @@ -877,26 +765,19 @@ flow mirror point cannot be created this flow/).matches(error) def "Unable to update flow and enable connected devices if mirror is present, #mirrorDirection"() { given: "A flow with a mirror point" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swPair) def freePort = (topology.getAllowedPortsForSwitch(swPair.src) - flow.source.portNumber)[0] - def mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.source.switchId) - .portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorPoint) + flow.createMirrorPoint( + flow.source.switchId, freePort, randomVlan(), + mirrorDirection, flow.source.switchId, false) when: "Try to partial update the flow and enable connected devices" - northboundV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { + def flowPatch = new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { detectConnectedDevices = new DetectConnectedDevicesV2(true, true) } - }) + } + flow.partialUpdate(flowPatch) then: "Error is returned, cannot enable devices on a flow with mirror" def error = thrown(HttpClientErrorException) @@ -911,19 +792,9 @@ flow 
mirror point cannot be created this flow/).matches(error) def "Cannot enable connected devices on switch if mirror is present"() { given: "A flow with a mirror endpoint" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swPair) def freePort = (topology.getAllowedPortsForSwitch(swPair.src) - flow.source.portNumber)[0] - def mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.FORWARD.toString()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.source.switchId) - .portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorPoint) + flow.createMirrorPoint(flow.source.switchId, freePort) when: "Try to enable connected devices for switch where mirror is created" def originalProps = switchHelper.getCachedSwProps(swPair.src.dpId) @@ -949,19 +820,10 @@ flow mirror point cannot be created this flow/).matches(error) }) when: "Try to create a mirror on the switch with devices" - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swPair) def freePort = (topology.getAllowedPortsForSwitch(swPair.src) - flow.source.portNumber)[0] - def mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.REVERSE.toString()) - .mirrorPointSwitchId(swPair.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.source.switchId) - .portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - northboundV2.createMirrorPoint(flow.flowId, mirrorPoint) + flow.createMirrorPoint(flow.source.switchId, freePort, randomVlan(), + FlowPathDirection.REVERSE, flow.source.switchId, false) then: "Error is returned, cannot create flow that conflicts with mirror point" def error = thrown(HttpClientErrorException) @@ -973,40 +835,31 @@ flow mirror point cannot be created this flow/).matches(error) def "Unable to create a mirror point with existing mirror point conflict, #mirrorDirection"() { given: "A flow with mirror point" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swPair) def freePort = (topology.getAllowedPortsForSwitch(swPair.dst) - flow.destination.portNumber)[0] - def mirrorPoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString()) - .mirrorPointSwitchId(swPair.dst.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(flow.destination.switchId) - .portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - northboundV2.createMirrorPoint(flow.flowId, mirrorPoint) + def mirrorPoint = flow.deepCopy().destination.tap { + it.vlanId = randomVlan() + it.portNumber = freePort + } + def mirrorPointPayload = flow.buildMirrorPointPayload( + mirrorPoint.switchId, mirrorPoint.portNumber, mirrorPoint.vlanId, mirrorDirection) + flow.createMirrorPointWithPayload(mirrorPointPayload) Wrappers.wait(WAIT_OFFSET) { - assert northboundV2.getFlow(flow.flowId).mirrorPointStatuses[0].status == + assert flow.retrieveDetails().mirrorPointStatuses[0].status == FlowPathStatus.ACTIVE.toString().toLowerCase() } when: "Try adding one more mirror point that conflicts with existing mirror point" - 
def mirrorPoint2 = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(mirrorDirection.toString()) - .mirrorPointSwitchId(mirrorPoint.mirrorPointSwitchId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(mirrorPoint.mirrorPointSwitchId) - .portNumber(mirrorPoint.sinkEndpoint.portNumber) - .vlanId(mirrorPoint.sinkEndpoint.vlanId) - .build()) - .build() - northboundV2.createMirrorPoint(flow.flowId, mirrorPoint2) + def mirrorPoint2 = mirrorPoint.jacksonCopy() + def mirrorPoint2Payload = mirrorPointPayload.jacksonCopy().tap { + it.mirrorPointId = FLOW.generateId() + } + flow.createMirrorPointWithPayload(mirrorPoint2Payload, false) then: "Error is returned, cannot create flow that conflicts with mirror point" def error = thrown(HttpClientErrorException) new FlowMirrorPointNotCreatedWithConflictExpectedError( - getEndpointConflictError(mirrorPoint2.sinkEndpoint, mirrorPoint)).matches(error) + getEndpointConflictError(mirrorPoint2, mirrorPointPayload)).matches(error) where: mirrorDirection << [FlowPathDirection.FORWARD, FlowPathDirection.REVERSE] @@ -1014,8 +867,8 @@ flow mirror point cannot be created this flow/).matches(error) List getFlowRules(SwitchId swId) { - northbound.getSwitchRules(swId).flowEntries - .findAll { new FlowSegmentCookie(it.cookie).direction != FlowPathDirection.UNDEFINED } + def swRules = switchRulesFactory.get(swId).getRules() + swRules.findAll { new FlowSegmentCookie(it.cookie).direction != FlowPathDirection.UNDEFINED } } static FlowEntry findRule(List rules, FlowPathDirection pathDirection, boolean isMirror) { @@ -1033,7 +886,7 @@ flow mirror point cannot be created this flow/).matches(error) findRule(rules, pathDirection, true) } - static Pattern getEndpointConflictError(FlowMirrorPointPayload mirrorEp, FlowRequestV2 existingFlow, String srcOrDst) { + static Pattern getEndpointConflictError(FlowMirrorPointPayload mirrorEp, FlowExtended existingFlow, String srcOrDst) { FlowEndpointV2 flowEndpoint = existingFlow."$srcOrDst" ~/Requested flow \'$mirrorEp.mirrorPointId\' conflicts with existing flow \'$existingFlow.flowId\'. Details: \ requested flow \'$mirrorEp.mirrorPointId\' destination: switchId=\"${mirrorEp.sinkEndpoint.switchId}\"\ @@ -1041,15 +894,6 @@ requested flow \'$mirrorEp.mirrorPointId\' destination: switchId=\"${mirrorEp.si $srcOrDst: switchId=\"${flowEndpoint.switchId}\" port=${flowEndpoint.portNumber} vlanId=${flowEndpoint.vlanId}/ } - static Pattern getEndpointConflictError(FlowRequestV2 reqFlow, FlowMirrorPointPayload existingMirror, String srcOrDst) { - FlowEndpointV2 flowEndpoint = reqFlow."$srcOrDst" - ~/Requested flow \'$reqFlow.flowId\' conflicts with existing flow \'$existingMirror.mirrorPointId\'. Details: \ -requested flow \'$reqFlow.flowId\' $srcOrDst: switchId=\"${flowEndpoint.switchId}\" port=${flowEndpoint.portNumber} \ -vlanId=${flowEndpoint.vlanId}, existing flow \'$existingMirror.mirrorPointId\' \ -destination: switchId=\"${existingMirror.sinkEndpoint.switchId}\" port=${existingMirror.sinkEndpoint.portNumber} \ -vlanId=${existingMirror.sinkEndpoint.vlanId}/ - } - static Pattern getEndpointConflictError(FlowEndpointV2 flowEp, FlowMirrorPointPayload existingMirror) { ~/Requested endpoint \'switchId=\"$flowEp.switchId\" port=$flowEp.portNumber vlanId=$flowEp.vlanId\' conflicts \ with existing flow mirror point \'$existingMirror.mirrorPointId\'./ @@ -1059,9 +903,8 @@ with existing flow mirror point \'$existingMirror.mirrorPointId\'./ direction == FlowPathDirection.FORWARD ? 
biExam.forward : biExam.reverse } - private void verifyTraffic(TraffExamService traffExam, FlowRequestV2 flow, FlowPathDirection mirrorDirection) { - FlowBidirectionalExam biExam = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(flow), 300, 1) + private static void sendTrafficAndVerifyOnMainFlow(TraffExamService traffExam, FlowExtended flow, FlowPathDirection mirrorDirection) { + def biExam = flow.traffExam(traffExam, 300, 1) getExam(biExam, mirrorDirection).with { udp = true bufferLength = 500 //due to MTU size issue on TG after mirror encapsulation @@ -1077,8 +920,10 @@ with existing flow mirror point \'$existingMirror.mirrorPointId\'./ */ @Memoized List getUniqueSwitchPairs(Closure additionalConditions = { true }) { - def unpickedUniqueTgSwitches = topology.activeSwitches.findAll { it.traffGens } - .unique(false) { it.hwSwString } + def allTgSwitches = topology.activeSwitches.findAll { it.traffGens } + //switches that have 2+ traffgens go to the beginning of the list + .sort { a, b -> b.traffGens.size() <=> a.traffGens.size() } + def unpickedUniqueTgSwitches = allTgSwitches.unique(false) { it.hwSwString } def tgPairs = switchPairs.all().getSwitchPairs().findAll { additionalConditions(it) } @@ -1087,8 +932,6 @@ with existing flow mirror point \'$existingMirror.mirrorPointId\'./ while (!unpickedUniqueTgSwitches.empty) { def pairs = tgPairs.findAll { it.src in unpickedUniqueTgSwitches - }.sort(false) { swPair -> //switches that have 2+ traffgens go to the end of the list - swPair.src.traffGens.size() > 1 } if (pairs) { //pick a highest score pair, update list of unpicked switches, re-run @@ -1117,8 +960,11 @@ with existing flow mirror point \'$existingMirror.mirrorPointId\'./ @AutoClone private static class MirrorErrorTestData { String testName - FlowRequestV2 flow - FlowMirrorPointPayload mirrorPoint + FlowExtended flow + Integer port + SwitchId mirrorPointSwitchId + SwitchId mirrorSinkEndpointSwitchId + FlowPathDirection mirrorPointDirection AbstractExpectedError expectedError MirrorErrorTestData(String testName, Closure init) { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MultiRerouteSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MultiRerouteSpec.groovy index 69c0e808ea2..4ec117de4f1 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MultiRerouteSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/MultiRerouteSpec.groovy @@ -1,36 +1,46 @@ package org.openkilda.functionaltests.spec.flows +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_PROPS_DB_RESET +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.helpers.Wrappers.wait +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.testing.tools.SoftAssertions +import org.springframework.beans.factory.annotation.Autowired +import spock.lang.Shared + import java.util.concurrent.TimeUnit -import static 
org.openkilda.functionaltests.extension.tags.Tag.ISL_PROPS_DB_RESET -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.helpers.Wrappers.wait -import static org.openkilda.testing.Constants.WAIT_OFFSET class MultiRerouteSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + @Tags([ISL_RECOVER_ON_FAIL, ISL_PROPS_DB_RESET]) def "Simultaneous reroute of multiple flows should not oversubscribe any ISLs"() { given: "Many flows on the same path, with alt paths available" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(3).first() - List flows = [] + List flows = [] def currentPath = switchPair.paths.first() switchPair.paths.findAll { it != currentPath }.each { pathHelper.makePathMorePreferable(currentPath, it) } 30.times { - def flow = flowHelperV2.randomFlow(switchPair, false, flows) - flow.maximumBandwidth = 10000 - flowHelperV2.addFlow(flow) + // do not use busyEndpoints argument here since the flows are not full-port flows, all flows are tagged + def flow = flowFactory.getBuilder(switchPair, false) + .withBandwidth(10000) + .build().create() flows << flow } //ensure all flows are on the same path flows[1..-1].each { - assert pathHelper.convert(northbound.getFlowPath(it.flowId)) == currentPath + assert it.retrieveAllEntityPaths().getPathNodes() == currentPath } when: "Make another path more preferable" @@ -67,10 +77,10 @@ class MultiRerouteSpec extends HealthCheckSpecification { wait(WAIT_OFFSET * 3) { def assertions = new SoftAssertions() flowsOnPrefPath = flows.findAll { - pathHelper.convert(northbound.getFlowPath(it.flowId)) == prefPath + it.retrieveAllEntityPaths().getPathNodes() == prefPath } flowsOnPrefPath.each { flow -> - assertions.checkSucceeds { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + assertions.checkSucceeds { assert flow.retrieveFlowStatus().status == FlowState.UP } } assertions.checkSucceeds { assert flowsOnPrefPath.size() == halfOfFlows.size() } assertions.verify() @@ -81,8 +91,8 @@ class MultiRerouteSpec extends HealthCheckSpecification { wait(WAIT_OFFSET * 2) { def assertions = new SoftAssertions() restFlows.each { flow -> - assertions.checkSucceeds { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } - assertions.checkSucceeds { assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) != prefPath } + assertions.checkSucceeds { assert flow.retrieveFlowStatus().status == FlowState.UP } + assertions.checkSucceeds { assert flow.retrieveAllEntityPaths().getPathNodes() != prefPath } } assertions.verify() } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/PartialUpdateSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/PartialUpdateSpec.groovy index 0b3c02b25ac..1f9e34cfc9e 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/PartialUpdateSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/PartialUpdateSpec.groovy @@ -1,41 +1,41 @@ package org.openkilda.functionaltests.spec.flows -import com.fasterxml.jackson.annotation.JsonInclude.Include -import com.fasterxml.jackson.databind.DeserializationFeature -import com.fasterxml.jackson.databind.ObjectMapper -import com.fasterxml.jackson.databind.SerializationFeature +import static 
org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.model.cookie.CookieBase.CookieType.SERVICE_OR_FLOW_SEGMENT +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.assertj.core.api.Assertions.assertThat +import static org.junit.jupiter.api.Assumptions.assumeTrue + import org.openkilda.functionaltests.HealthCheckSpecification -import org.openkilda.functionaltests.error.flow.FlowNotUpdatedExpectedError -import org.openkilda.functionaltests.error.flow.FlowNotUpdatedWithConflictExpectedError import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.helpers.model.PathComputationStrategy +import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.FlowEncapsulationType -import org.openkilda.model.PathComputationStrategy import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType import org.openkilda.northbound.dto.v1.flows.FlowPatchDto -import org.openkilda.northbound.dto.v1.flows.PingInput import org.openkilda.northbound.dto.v2.flows.FlowPatchEndpoint import org.openkilda.northbound.dto.v2.flows.FlowPatchV2 -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.testing.model.topology.TopologyDefinition.Isl +import org.openkilda.functionaltests.error.flow.FlowNotUpdatedExpectedError +import org.openkilda.functionaltests.error.flow.FlowNotUpdatedWithConflictExpectedError + +import com.fasterxml.jackson.annotation.JsonInclude.Include +import com.fasterxml.jackson.databind.DeserializationFeature +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.databind.SerializationFeature +import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative import spock.lang.Shared -import static com.shazam.shazamcrest.matcher.Matchers.sameBeanAs -import static org.assertj.core.api.Assertions.assertThat -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.model.cookie.CookieBase.CookieType.SERVICE_OR_FLOW_SEGMENT -import static org.openkilda.testing.Constants.RULES_DELETION_TIME -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static spock.util.matcher.HamcrestSupport.expect - @Narrative(""" Covers PATCH /api/v2/flows/:flowId and PATCH /api/v1/flows/:flowId This API allows to partially update a flow, i.e. update a flow without specifying a full flow payload. 
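For readers skimming this refactoring, here is a minimal sketch of how a spec drives the v2 partial-update endpoint through the new helpers. It is illustrative only: the `flowFactory` builder, `FlowExtended.sendPartialUpdateRequest`, `waitForBeingInState`, and `retrieveDetails` are assumed to behave exactly as the surrounding hunks show, and the concrete values are arbitrary.

```groovy
// Sketch only: helper names are assumed from the hunks in this diff, not a definitive API.
def swPair = switchPairs.all().random()
def flow = flowFactory.getBuilder(swPair)
        .withMaxLatency(1000)          // any builder option shown elsewhere in this refactoring
        .build().create()

// PATCH /api/v2/flows/:flowId carries only the fields that change
def patch = new FlowPatchV2().tap {
    source = new FlowPatchEndpoint().tap { vlanId = 555 }   // 555 is an arbitrary example value
}
def response = flow.sendPartialUpdateRequest(patch)   // immediate response reflects the request
flow.waitForBeingInState(FlowState.UP)                // the update/reroute completes asynchronously
assert response.source.vlanId == 555
assert flow.retrieveDetails().source.vlanId == 555
```

The same pattern recurs throughout the hunks below: build the patch object, send it via the flow helper, wait for the flow to settle, then compare against `retrieveDetails()` and the cookies returned by `switchRulesFactory`.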
@@ -43,38 +43,44 @@ Depending on changed fields flow will be either updated+rerouted or just have it """) class PartialUpdateSpec extends HealthCheckSpecification { + + @Autowired + @Shared + FlowFactory flowFactory + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory + def amountOfFlowRules = 2 def "Able to partially update flow '#data.field' without reinstalling its rules"() { given: "A flow" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flow.tap{ - pathComputationStrategy = "cost" - maxLatency = 1000 - } - flowHelperV2.addFlow(flow) - def originalCookies = northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { + def flow = flowFactory.getBuilder(swPair) + .withPathComputationStrategy(PathComputationStrategy.COST) + .withMaxLatency(1000) + .build().create() + + def originalCookies = switchRulesFactory.get(swPair.src.dpId).getRules().findAll { def cookie = new Cookie(it.cookie) !cookie.serviceFlag || cookie.type == CookieType.MULTI_TABLE_INGRESS_RULES }*.cookie when: "Request a flow partial update for a #data.field field" def updateRequest = new FlowPatchV2().tap { it."$data.field" = data.newValue } - def response = northboundV2.partialUpdate(flow.flowId, updateRequest) + def response = flow.sendPartialUpdateRequest(updateRequest) then: "Update response reflects the changes" + flow.waitForBeingInState(FlowState.UP) response."$data.field" == data.newValue and: "Changes actually took place" - Wrappers.wait(WAIT_OFFSET) { - def flowInfo = northboundV2.getFlow(flow.flowId) - assert flowInfo.status == FlowState.UP.toString() - assert flowInfo."$data.field" == data.newValue - } + def flowInfo = flow.retrieveDetails() + assert flowInfo.status == FlowState.UP + assert flowInfo."$data.field" == data.newValue and: "Flow rules have not been reinstalled" - assertThat(northbound.getSwitchRules(swPair.src.dpId).flowEntries*.cookie.toArray()).containsAll(originalCookies) + assertThat(switchRulesFactory.get(swPair.src.dpId).getRules()*.cookie.toArray()).containsAll(originalCookies) where: data << [ @@ -117,25 +123,25 @@ class PartialUpdateSpec extends HealthCheckSpecification { def "Able to partially update flow #data.field without reinstalling its rules(v1)"() { given: "A flow" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) - def originalCookies = northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { + def flow = flowFactory.getRandom(swPair) + def originalCookies = switchRulesFactory.get(swPair.src.dpId).getRules().findAll { def cookie = new Cookie(it.cookie) !cookie.serviceFlag || cookie.type == CookieType.MULTI_TABLE_INGRESS_RULES }*.cookie when: "Request a flow partial update for a #data.field field" def updateRequest = new FlowPatchDto().tap { it."$data.field" = data.newValue } - def response = northbound.partialUpdate(flow.flowId, updateRequest) + def response = flow.sendPartialUpdateRequestV1(updateRequest) then: "Update response reflects the changes" + flow.waitForBeingInState(FlowState.UP) response."$data.field" == data.newValue and: "Changes actually took place" - northboundV2.getFlow(flow.flowId)."$data.field" == data.newValue + flow.retrieveDetails()."$data.field" == data.newValue and: "Flow rules have not been reinstalled" - northbound.getSwitchRules(swPair.src.dpId).flowEntries*.cookie.containsAll(originalCookies) + switchRulesFactory.get(swPair.src.dpId).getRules()*.cookie.containsAll(originalCookies) where: data << [ @@ -161,27 +167,27 @@ class 
PartialUpdateSpec extends HealthCheckSpecification { def "Able to partially update flow #data.field which causes a reroute"() { given: "A flow" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) - def originalCookies = northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { !new Cookie(it.cookie).serviceFlag } + def flow = flowFactory.getRandom(swPair) + def originalCookies = switchRulesFactory.get(swPair.src.dpId).getRules().findAll { !new Cookie(it.cookie).serviceFlag } when: "Request a flow partial update for a #data.field field" def newValue = data.getNewValue(flow."$data.field") def updateRequest = new FlowPatchV2().tap { it."$data.field" = newValue } - def response = flowHelperV2.partialUpdate(flow.flowId, updateRequest) + def response = flow.sendPartialUpdateRequest(updateRequest) then: "Update response reflects the changes" + flow.waitForBeingInState(FlowState.UP) response."$data.field" == newValue and: "Changes actually took place" - northboundV2.getFlow(flow.flowId)."$data.field" == newValue + flow.retrieveDetails()."$data.field" == newValue and: "Flow rules have been reinstalled" //system doesn't reinstall shared rule on reroute action - def newCookies = northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { !new Cookie(it.cookie).serviceFlag } + def newCookies = switchRulesFactory.get(swPair.src.dpId).getRules().findAll { !new Cookie(it.cookie).serviceFlag } newCookies.find { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW } == originalCookies.find { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW } - !newCookies.findAll { new Cookie(it.cookie).getType() != CookieType.SHARED_OF_FLOW } + !newCookies.findAll { new Cookie(it.cookie).getType() != CookieType.SHARED_OF_FLOW } .any { it in originalCookies.findAll { new Cookie(it.cookie).getType() != CookieType.SHARED_OF_FLOW } } where: @@ -202,29 +208,29 @@ class PartialUpdateSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().withAtLeastNNonOverlappingPaths(2).random() when: "Create 2 not diverse flows going through these switches" - def flow1 = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow1) - def flow1Path = PathHelper.convert(northbound.getFlowPath(flow1.flowId)) - def flow2 = flowHelperV2.randomFlow(switchPair, false, [flow1]) - flowHelperV2.addFlow(flow2) - def flow2Path = PathHelper.convert(northbound.getFlowPath(flow1.flowId)) + def flow1 = flowFactory.getRandom(switchPair) + def flow1Path = flow1.retrieveAllEntityPaths().getPathNodes() + def flow2 = flowFactory.getRandom(switchPair, false, FlowState.UP, flow1.occupiedEndpoints()) + def flow2Path = flow2.retrieveAllEntityPaths().getPathNodes() then: "Both flows use the same path" flow1Path == flow2Path when: "Update second flow to become diverse with the first flow (partial update)" - flowHelperV2.partialUpdate(flow2.flowId, new FlowPatchV2().tap { diverseFlowId = flow1.flowId }) + def updateRequest = new FlowPatchV2().tap { diverseFlowId = flow1.flowId } + flow2.partialUpdate(updateRequest) then: "Flows use diverse paths" - pathHelper.getInvolvedIsls(flow1.flowId).intersect(pathHelper.getInvolvedIsls(flow2.flowId)).empty + def flow1InvolvedIsls = flow1.retrieveAllEntityPaths().flowPath.getInvolvedIsls() + def flow2InvolvedIsls = flow2.retrieveAllEntityPaths().flowPath.getInvolvedIsls() + flow1InvolvedIsls.intersect(flow2InvolvedIsls).empty } def "Able to do partial update on a single-switch flow"() { given: "A single-switch flow" 
def swPair = switchPairs.singleSwitch().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) - def originalCookies = northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { + def flow = flowFactory.getRandom(swPair) + def originalCookies = switchRulesFactory.get(swPair.src.dpId).getRules().findAll { def cookie = new Cookie(it.cookie) !cookie.serviceFlag || cookie.type == CookieType.MULTI_TABLE_INGRESS_RULES }*.cookie @@ -232,16 +238,17 @@ class PartialUpdateSpec extends HealthCheckSpecification { when: "Request a flow partial update for a 'priority' field" def newPriority = 777 def updateRequest = new FlowPatchV2().tap { it.priority = newPriority } - def response = northboundV2.partialUpdate(flow.flowId, updateRequest) + def response = flow.sendPartialUpdateRequest(updateRequest) then: "Update response reflects the changes" + flow.waitForBeingInState(FlowState.UP) response.priority == newPriority and: "Changes actually took place" - northboundV2.getFlow(flow.flowId).priority == newPriority + flow.retrieveDetails().priority == newPriority and: "Flow rules have not been reinstalled" - northbound.getSwitchRules(swPair.src.dpId).flowEntries*.cookie.containsAll(originalCookies) + switchRulesFactory.get(swPair.src.dpId).getRules()*.cookie.containsAll(originalCookies) } def "Able to update a flow port and vlan using partial update"() { @@ -252,30 +259,30 @@ class PartialUpdateSpec extends HealthCheckSpecification { def dstSwitch = allSwitches[1] and: "A vlan flow" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch, false) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch, false) when: "Update the flow: port number and vlan id on the src endpoint" def newPortNumber = topology.getAllowedPortsForSwitch(topology.activeSwitches.find { it.dpId == flow.source.switchId }).last() def newVlanId = flow.destination.vlanId + 1 - flowHelperV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { + def updateRequest = new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { portNumber = newPortNumber vlanId = newVlanId } - }) + } + flow.partialUpdate(updateRequest) then: "Flow is really updated" - with(northboundV2.getFlow(flow.flowId)) { + with(flow.retrieveDetails()) { it.source.portNumber == newPortNumber it.source.vlanId == newVlanId } and: "Flow is valid and pingable" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } - with(northbound.pingFlow(flow.flowId, new PingInput())) { + flow.validateAndCollectDiscrepancies().isEmpty() + with(flow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -296,33 +303,33 @@ class PartialUpdateSpec extends HealthCheckSpecification { //pick a port that is free both on current dst switch and on future updated dst switch def port = topology.getAllowedPortsForSwitch(dstSwitch) .intersect(topology.getAllowedPortsForSwitch(newDstSwitch)).first() - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch, false).tap { - it.destination.portNumber = port - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch, false) + .withDestinationPort(port) + .build().create() when: "Update the flow: switch id on the dst endpoint" - flowHelperV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { + def updateRequest = new FlowPatchV2().tap { destination = new FlowPatchEndpoint().tap { switchId = newDstSwitch.dpId } - }) + } + flow.partialUpdate(updateRequest) then: "Flow is really updated" - with(northboundV2.getFlow(flow.flowId)) { 
+ with(flow.retrieveDetails()) { it.destination.switchId == newDstSwitch.dpId } and: "Flow rules are installed on the new dst switch" Wrappers.wait(RULES_INSTALLATION_TIME) { - assert northbound.getSwitchRules(newDstSwitch.dpId).flowEntries.findAll { def cookie = new Cookie(it.cookie) + assert switchRulesFactory.get(newDstSwitch.dpId).getRules().findAll { def cookie = new Cookie(it.cookie) !cookie.serviceFlag && cookie.type == SERVICE_OR_FLOW_SEGMENT }.size() == amountOfFlowRules } and: "Flow is valid and pingable" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } - with(northbound.pingFlow(flow.flowId, new PingInput())) { + flow.validateAndCollectDiscrepancies().isEmpty() + with(flow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -336,28 +343,30 @@ class PartialUpdateSpec extends HealthCheckSpecification { def "Able to update flow encapsulationType using partial update"() { given: "A flow with a 'transit_vlan' encapsulation" def switchPair = switchPairs.all().neighbouring().withBothSwitchesVxLanEnabled().random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.encapsulationType = FlowEncapsulationType.TRANSIT_VLAN - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withEncapsulationType(FlowEncapsulationType.TRANSIT_VLAN) + .build().create() - def originalCookies = northbound.getSwitchRules(switchPair.src.dpId).flowEntries.findAll { + def originalCookies = switchRulesFactory.get(switchPair.src.dpId).getRules().findAll { def cookie = new Cookie(it.cookie) !cookie.serviceFlag && cookie.type == SERVICE_OR_FLOW_SEGMENT } when: "Request a flow partial update for an encapsulationType field(vxlan)" - def newEncapsulationTypeValue = FlowEncapsulationType.VXLAN.toString().toLowerCase() + def newEncapsulationTypeValue = FlowEncapsulationType.VXLAN def updateRequest = new FlowPatchV2().tap { it.encapsulationType = newEncapsulationTypeValue } - def response = flowHelperV2.partialUpdate(flow.flowId, updateRequest) + def response = flow.sendPartialUpdateRequest(updateRequest) then: "Update response reflects the changes" + flow.waitForBeingInState(FlowState.UP) response.encapsulationType == newEncapsulationTypeValue and: "Changes actually took place" - northboundV2.getFlow(flow.flowId).encapsulationType == newEncapsulationTypeValue + flow.retrieveDetails().encapsulationType == newEncapsulationTypeValue and: "Flow rules have been reinstalled" - !northbound.getSwitchRules(switchPair.src.dpId).flowEntries.findAll { def cookie = new Cookie(it.cookie) + !switchRulesFactory.get(switchPair.src.dpId).getRules().findAll { + def cookie = new Cookie(it.cookie) !cookie.serviceFlag && cookie.type == SERVICE_OR_FLOW_SEGMENT }.any { it in originalCookies } } @@ -366,36 +375,36 @@ class PartialUpdateSpec extends HealthCheckSpecification { def "Able to update a flow port and vlan for a single-switch flow using partial update"() { given: "An active single-switch flow (different ports)" def sw = topology.activeSwitches.first() - def flow = flowHelperV2.singleSwitchFlow(sw) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(sw, sw) when: "Update the flow: port number and vlanId on the src endpoint" - def flowInfoFromDb = database.getFlow(flow.flowId) + def flowInfoFromDb = flow.retrieveDetailsFromDB() def ingressCookie = flowInfoFromDb.forwardPath.cookie.value def egressCookie = flowInfoFromDb.reversePath.cookie.value def newPortNumber = (topology.getAllowedPortsForSwitch(topology.activeSwitches.find { it.dpId == 
flow.source.switchId }) - flow.source.portNumber - flow.destination.portNumber).last() def newVlanId = flow.source.vlanId - 1 - flowHelperV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { + def updateRequest = new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { portNumber = newPortNumber vlanId = newVlanId } - }) + } + flow.partialUpdate(updateRequest) then: "Flow is really updated" - with(northboundV2.getFlow(flow.flowId)) { + with(flow.retrieveDetails()) { it.source.portNumber == newPortNumber it.source.vlanId == newVlanId } and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() and: "The ingress/egress rules are really updated" Wrappers.wait(RULES_INSTALLATION_TIME + WAIT_OFFSET) { - def swRules = northbound.getSwitchRules(flow.source.switchId).flowEntries + def swRules = switchRulesFactory.get(flow.source.switchId).getRules() with(swRules.find { it.cookie == ingressCookie }) { it.match.inPort == newPortNumber.toString() it.instructions.applyActions.flowOutput == flow.destination.portNumber.toString() @@ -416,18 +425,17 @@ class PartialUpdateSpec extends HealthCheckSpecification { def "Able to update a flow port and vlan for a single-switch single-port flow using partial update"() { given: "An active single-switch single-port flow" def sw = topology.activeSwitches.first() - def flow = flowHelperV2.singleSwitchSinglePortFlow(sw) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(sw, sw) when: "Update the flow: new port number on src+dst and new vlanId on the src endpoint" - def flowInfoFromDb = database.getFlow(flow.flowId) + def flowInfoFromDb = flow.retrieveDetailsFromDB() def ingressCookie = flowInfoFromDb.forwardPath.cookie.value def egressCookie = flowInfoFromDb.reversePath.cookie.value def newPortNumber = (topology.getAllowedPortsForSwitch(topology.activeSwitches.find { it.dpId == flow.source.switchId }) - flow.source.portNumber).last() def newVlanId = flow.source.vlanId - 1 - flowHelperV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { + def updateRequest = new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { portNumber = newPortNumber vlanId = newVlanId @@ -435,21 +443,22 @@ class PartialUpdateSpec extends HealthCheckSpecification { destination = new FlowPatchEndpoint().tap { portNumber = newPortNumber } - }) + } + flow.partialUpdate(updateRequest) then: "Flow is really updated" - with(northboundV2.getFlow(flow.flowId)) { + with(flow.retrieveDetails()) { it.source.portNumber == newPortNumber it.destination.portNumber == newPortNumber it.source.vlanId == newVlanId } and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() and: "The ingress/egress rules are really updated" Wrappers.wait(RULES_INSTALLATION_TIME + WAIT_OFFSET) { - def swRules = northbound.getSwitchRules(flow.source.switchId).flowEntries + def swRules = switchRulesFactory.get(flow.source.switchId).getRules() with(swRules.find { it.cookie == ingressCookie }) { it.match.inPort == newPortNumber.toString() it.instructions.applyActions.flowOutput == "in_port" @@ -470,58 +479,47 @@ class PartialUpdateSpec extends HealthCheckSpecification { def "Partial update with empty body does not actually update flow in any way(v1)"() { given: "A flow" def swPair = switchPairs.all().random() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) - def originalCookies = 
northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { + def flow = flowFactory.getRandom(swPair) + def originalCookies = switchRulesFactory.get(swPair.src.dpId).getRules().findAll { def cookie = new Cookie(it.cookie) !cookie.serviceFlag || cookie.type == CookieType.MULTI_TABLE_INGRESS_RULES }*.cookie when: "Request a flow partial update without specifying any fields" - def flowBeforeUpdate = northboundV2.getFlow(flow.flowId) - northbound.partialUpdate(flow.flowId, new FlowPatchDto()) + def flowBeforeUpdate = flow.retrieveDetails() + flow.partialUpdateV1(new FlowPatchDto()) then: "Flow is left intact" - expect northboundV2.getFlow(flow.flowId), sameBeanAs(flowBeforeUpdate) - .ignoring("lastUpdated") - .ignoring("diverseWith") - .ignoring("latencyLastModifiedTime") - .ignoring("forwardPathLatencyNs") - .ignoring("reversePathLatencyNs") + flow.retrieveDetails().hasTheSamePropertiesAs(flowBeforeUpdate) and: "Flow rules have not been reinstalled" - northbound.getSwitchRules(swPair.src.dpId).flowEntries*.cookie.containsAll(originalCookies) + switchRulesFactory.get(swPair.src.dpId).getRules()*.cookie.containsAll(originalCookies) } def "Partial update with empty body does not actually update flow in any way"() { given: "A flow" def swPair = switchPairs.all().neighbouring().withAtLeastNNonOverlappingPaths(2).random() - def helperFlow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(helperFlow) - def flow = flowHelperV2.randomFlow(swPair).tap { - pinned = true - periodicPings = true - diverseFlowId = helperFlow.flowId - } - flowHelperV2.addFlow(flow) - def originalCookies = northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { + def helperFlow = flowFactory.getRandom(swPair) + def flow = flowFactory.getBuilder(swPair) + .withPinned(true) + .withPeriodicPing(true) + .withDiverseFlow(helperFlow.flowId) + .build().create() + + def originalCookies = switchRulesFactory.get(swPair.src.dpId).getRules().findAll { def cookie = new Cookie(it.cookie) !cookie.serviceFlag || cookie.type == CookieType.MULTI_TABLE_INGRESS_RULES }*.cookie when: "Request a flow partial update without specifying any fields" - def flowBeforeUpdate = northboundV2.getFlow(flow.flowId) - northboundV2.partialUpdate(flow.flowId, new FlowPatchV2()) + def flowBeforeUpdate = flow.retrieveDetails() + flow.partialUpdate(new FlowPatchV2()) then: "Flow is left intact" - expect northboundV2.getFlow(flow.flowId), sameBeanAs(flowBeforeUpdate) - .ignoring("lastUpdated") - .ignoring("latencyLastModifiedTime") - .ignoring("forwardPathLatencyNs") - .ignoring("reversePathLatencyNs") + flow.retrieveDetails().hasTheSamePropertiesAs(flowBeforeUpdate) and: "Flow rules have not been reinstalled" - northbound.getSwitchRules(swPair.src.dpId).flowEntries*.cookie.containsAll(originalCookies) + switchRulesFactory.get(swPair.src.dpId).getRules()*.cookie.containsAll(originalCookies) } def "Unable to partial update a flow in case new port is an isl port on a #data.switchType switch"() { @@ -530,13 +528,13 @@ class PartialUpdateSpec extends HealthCheckSpecification { assumeTrue(isl as boolean, "Unable to find required isl") and: "A flow" - def flow = flowHelperV2.randomFlow(isl.srcSwitch, isl.dstSwitch) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(isl.srcSwitch, isl.dstSwitch) when: "Try to edit port to isl port" - northboundV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { + def updateRequest = new FlowPatchV2().tap { it."$data.switchType" = new FlowPatchEndpoint().tap { it.portNumber = isl."$data.port" } - }) + } 
+ flow.partialUpdate(updateRequest) then: "Error is returned" def exc = thrown(HttpClientErrorException) @@ -564,16 +562,16 @@ class PartialUpdateSpec extends HealthCheckSpecification { def "Unable to partial update flow when there are conflicting vlans (#data.conflict)"() { given: "Two potential flows" def swPair = switchPairs.all().random() - def flow1 = flowHelperV2.randomFlow(swPair, false) - def flow2 = flowHelperV2.randomFlow(swPair, false, [flow1]) + def flow1 = flowFactory.getBuilder(swPair, false).build() + def flow2 = flowFactory.getBuilder(swPair, false, flow1.occupiedEndpoints()).build() FlowPatchV2 patch = data.getPatch(flow1) when: "Create two flows" - flowHelperV2.addFlow(flow1) - flowHelperV2.addFlow(flow2) + flow1.create() + flow2.create() and: "Try updating the second flow which should conflict with the first one (partial update)" - northboundV2.partialUpdate(flow2.flowId, patch) + flow2.partialUpdate(patch) then: "Error is returned, stating a readable reason of conflict" def error = thrown(HttpClientErrorException) @@ -583,55 +581,55 @@ class PartialUpdateSpec extends HealthCheckSpecification { data <<[ [ conflict: "the same vlans on the same port on src switch", - getPatch: { FlowRequestV2 dominantFlow -> + getPatch: { FlowExtended dominantFlow -> new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { - portNumber = dominantFlow.source.portNumber - vlanId = dominantFlow.source.vlanId - } + portNumber = dominantFlow.source.portNumber + vlanId = dominantFlow.source.vlanId + } } }, - errorDescription: { FlowRequestV2 dominantFlow, FlowRequestV2 flowToConflict, FlowPatchV2 patchDto -> + errorDescription: { FlowExtended dominantFlow, FlowExtended flowToConflict, FlowPatchV2 patchDto -> errorDescription(dominantFlow, "source", flowToConflict, "source", patchDto) } ], [ conflict: "the same vlans on the same port on dst switch", - getPatch: { FlowRequestV2 dominantFlow -> + getPatch: { FlowExtended dominantFlow -> new FlowPatchV2().tap { destination = new FlowPatchEndpoint().tap { - portNumber = dominantFlow.destination.portNumber - vlanId = dominantFlow.destination.vlanId - } + portNumber = dominantFlow.destination.portNumber + vlanId = dominantFlow.destination.vlanId + } } }, - errorDescription: { FlowRequestV2 dominantFlow, FlowRequestV2 flowToConflict, FlowPatchV2 patchDto -> + errorDescription: { FlowExtended dominantFlow, FlowExtended flowToConflict, FlowPatchV2 patchDto -> errorDescription(dominantFlow, "destination", flowToConflict, "destination", patchDto) } ], [ conflict: "no vlan, both flows are on the same port on src switch", - getPatch: { FlowRequestV2 dominantFlow -> + getPatch: { FlowExtended dominantFlow -> dominantFlow.source.vlanId = 0 new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { - portNumber = dominantFlow.source.portNumber - vlanId = dominantFlow.source.vlanId - } + portNumber = dominantFlow.source.portNumber + vlanId = dominantFlow.source.vlanId + } } }, - errorDescription: { FlowRequestV2 dominantFlow, FlowRequestV2 flowToConflict, FlowPatchV2 patchDto -> + errorDescription: { FlowExtended dominantFlow, FlowExtended flowToConflict, FlowPatchV2 patchDto -> errorDescription(dominantFlow, "source", flowToConflict, "source", patchDto) } ], [ conflict: "no vlan, both flows are on the same port on dst switch", - getPatch: { FlowRequestV2 dominantFlow -> + getPatch: { FlowExtended dominantFlow -> dominantFlow.destination.vlanId = 0 new FlowPatchV2().tap { destination = new FlowPatchEndpoint().tap { - portNumber = 
dominantFlow.destination.portNumber - vlanId = dominantFlow.destination.vlanId - } + portNumber = dominantFlow.destination.portNumber + vlanId = dominantFlow.destination.vlanId + } } }, - errorDescription: { FlowRequestV2 dominantFlow, FlowRequestV2 flowToConflict, FlowPatchV2 patchDto -> + errorDescription: { FlowExtended dominantFlow, FlowExtended flowToConflict, FlowPatchV2 patchDto -> errorDescription(dominantFlow, "destination", flowToConflict, "destination", patchDto) } ] @@ -640,17 +638,17 @@ class PartialUpdateSpec extends HealthCheckSpecification { def "Unable to update a flow to have both strict_bandwidth and ignore_bandwidth flags at the same time"() { given: "An existing flow without flag conflicts" - def flow = flowHelperV2.randomFlow(switchPairs.all().random()).tap { - ignoreBandwidth = initialIgnore - strictBandwidth = initialStrict - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPairs.all().random()) + .withIgnoreBandwidth(initialIgnore) + .withStrictBandwidth(initialStrict) + .build().create() when: "Partial update the flow to have strict_bw-ignore_bw conflict" - northboundV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { + def updateRequest = new FlowPatchV2().tap { ignoreBandwidth = updateIgnore strictBandwidth = updateStrict - }) + } + flow.partialUpdate(updateRequest) then: "Bad Request response is returned" def error = thrown(HttpClientErrorException) @@ -668,27 +666,27 @@ class PartialUpdateSpec extends HealthCheckSpecification { def "Able to update vlanId via partialUpdate in case vlanId==0 and innerVlanId!=0"() { given: "A default flow" def swPair = switchPairs.all().random() - def defaultFlow = flowHelperV2.randomFlow(swPair).tap { - source.vlanId = 0 - source.innerVlanId = 0 - } - flowHelperV2.addFlow(defaultFlow) + def defaultFlow = flowFactory.getBuilder(swPair) + .withSourceVlan(0) + .withSourceInnerVlan(0) + .build().create() when: "Update innerVlanId only via partialUpdate" Integer newSrcInnerVlanId = 234 // a flow will be updated as vlan!=0 and innerVlan==0 - def response = flowHelperV2.partialUpdate(defaultFlow.flowId, new FlowPatchV2().tap { + def updateRequest = new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { innerVlanId = newSrcInnerVlanId } - }) + } + def response = defaultFlow.sendPartialUpdateRequest(updateRequest) then: "Partial update response reflects the changes" response.source.vlanId == newSrcInnerVlanId response.source.innerVlanId == 0 and: "Changes actually took place" - with(northboundV2.getFlow(defaultFlow.flowId)) { + with(defaultFlow.retrieveDetails()) { it.source.vlanId == newSrcInnerVlanId it.source.innerVlanId == defaultFlow.source.vlanId } @@ -696,18 +694,17 @@ class PartialUpdateSpec extends HealthCheckSpecification { def "Unable to partial update flow with maxLatency incorrect value(#description)"() { given: "Two potential flows" - def flow = flowHelperV2.randomFlow(switchPairs.all().random()).tap { - maxLatency = maxLatencyBefore - maxLatencyTier2 = maxLatencyT2Before - - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPairs.all().random()) + .withMaxLatency(maxLatencyBefore) + .withMaxLatencyTier2(maxLatencyT2Before) + .build().create() when: "Partial update the flow " - northboundV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { + def updateRequest = new FlowPatchV2().tap { maxLatency = maxLatencyAfter maxLatencyTier2 = maxLatencyT2After - }) + } + flow.partialUpdate(updateRequest) then: "Bad Request response is returned" def error = 
thrown(HttpClientErrorException) @@ -725,9 +722,9 @@ class PartialUpdateSpec extends HealthCheckSpecification { } @Shared - def errorDescription = { FlowRequestV2 flow, String endpoint, FlowRequestV2 conflictingFlow, + def errorDescription = { FlowExtended flow, String endpoint, FlowExtended conflictingFlow, String conflictingEndpoint, FlowPatchV2 patch -> - def requestedFlow = jacksonMerge(conflictingFlow, patch) + def requestedFlow = jacksonMerge(conflictingFlow.convertToUpdate(), patch) ~/Requested flow \'$conflictingFlow.flowId\' conflicts with existing flow \'$flow.flowId\'. \ Details: requested flow \'$requestedFlow.flowId\' $conflictingEndpoint: switchId=\"\ ${requestedFlow."$conflictingEndpoint".switchId}\" port=${requestedFlow."$conflictingEndpoint".portNumber}\ diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/PinnedFlowSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/PinnedFlowSpec.groovy index 6cfdec78b63..4daf7252341 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/PinnedFlowSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/PinnedFlowSpec.groovy @@ -1,25 +1,32 @@ package org.openkilda.functionaltests.spec.flows +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.flow.FlowNotCreatedExpectedError +import org.openkilda.functionaltests.error.flow.FlowNotUpdatedExpectedError import org.openkilda.functionaltests.error.PinnedFlowNotReroutedExpectedError import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.messaging.error.MessageError +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.messaging.info.event.IslChangeType import org.openkilda.messaging.info.event.PathNode import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.model.cookie.Cookie import org.openkilda.testing.model.topology.TopologyDefinition.Switch + +import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative +import spock.lang.Shared import java.time.Instant import java.util.concurrent.TimeUnit -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID -import static org.openkilda.testing.Constants.WAIT_OFFSET @Narrative("""A new flag of flow that indicates that flow shouldn't be rerouted in case of auto-reroute. - In case of isl down such flow should be marked as DOWN. 
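Before the refactored PinnedFlowSpec below, a short hedged sketch of the pinned-flow contract that the narrative describes: when an ISL on its path fails, a pinned flow is expected to stay on its original path and be marked DOWN instead of being rerouted. Helper names (`flowFactory`, `withPinned`, `islHelper.breakIsls`, `retrieveFlowStatus`, `retrieveAllEntityPaths`) are taken from the surrounding hunks and are assumptions here, not an authoritative API.

```groovy
// Illustrative sketch under the assumptions stated above.
def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random()
def flow = flowFactory.getBuilder(switchPair)
        .withPinned(true)
        .build().create()
def originalPath = flow.retrieveAllEntityPaths().getPathNodes()

// Break one ISL on the flow's current path
def islToBreak = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first()
islHelper.breakIsls([islToBreak])

// A pinned flow is expected to go DOWN and keep its path instead of rerouting
Wrappers.wait(WAIT_OFFSET) {
    assert flow.retrieveFlowStatus().status == FlowState.DOWN
    assert flow.retrieveAllEntityPaths().getPathNodes() == originalPath
}
```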
@@ -28,22 +35,29 @@ import static org.openkilda.testing.Constants.WAIT_OFFSET class PinnedFlowSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory + + def "Able to CRUD pinned flow"() { when: "Create a flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flow.pinned = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withPinned(true).build().create() then: "Pinned flow is created" - def flowInfo = northboundV2.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetails() flowInfo.pinned when: "Update the flow (pinned=false)" - northboundV2.updateFlow(flowInfo.flowId, flowHelperV2.toRequest(flowInfo.tap { it.pinned = false })) + flow.update(flow.deepCopy().tap { it.pinned = false}) then: "The pinned option is disabled" - def newFlowInfo = northboundV2.getFlow(flow.flowId) + def newFlowInfo = flow.retrieveDetails() !newFlowInfo.pinned Instant.parse(flowInfo.lastUpdated) < Instant.parse(newFlowInfo.lastUpdated) } @@ -51,21 +65,21 @@ class PinnedFlowSpec extends HealthCheckSpecification { def "Able to CRUD unmetered one-switch pinned flow"() { when: "Create a flow" def sw = topology.getActiveSwitches().first() - def flow = flowHelperV2.singleSwitchFlow(sw) - flow.maximumBandwidth = 0 - flow.ignoreBandwidth = true - flow.pinned = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(sw, sw) + .withBandwidth(0) + .withIgnoreBandwidth(true) + .withPinned(true).build().create() then: "Pinned flow is created" - def flowInfo = northboundV2.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetails() flowInfo.pinned when: "Update the flow (pinned=false)" - northboundV2.updateFlow(flowInfo.flowId, flowHelperV2.toRequest(flowInfo.tap { it.pinned = false })) + def flowNotPinned = flow.deepCopy().tap { it.pinned = false} + flow.update(flowNotPinned) then: "The pinned option is disabled" - def newFlowInfo = northboundV2.getFlow(flow.flowId) + def newFlowInfo = flow.retrieveDetails() !newFlowInfo.pinned Instant.parse(flowInfo.lastUpdated) < Instant.parse(newFlowInfo.lastUpdated) } @@ -77,30 +91,32 @@ class PinnedFlowSpec extends HealthCheckSpecification { List> allPaths = database.getPaths(switchPair.src.dpId, switchPair.dst.dpId)*.path def longestPath = allPaths.max { it.size() } allPaths.findAll { it != longestPath }.collect { pathHelper.makePathMorePreferable(longestPath, it) } - def flow = flowHelperV2.randomFlow(switchPair) - flow.pinned = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withPinned(true) + .build().create() - def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) + def allEntityPath = flow.retrieveAllEntityPaths() + def currentPath = allEntityPath.getPathNodes() def altPath = switchPair.paths.findAll { it != currentPath }.min { it.size() } - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId) + def involvedSwitches = allEntityPath.getInvolvedSwitches() when: "Make alt path more preferable than current path" northbound.deleteLinkProps(northbound.getLinkProps(topology.isls)) switchPair.paths.findAll { it != altPath }.each { pathHelper.makePathMorePreferable(altPath, it) } and: "Init reroute by bringing current path's ISL down one by one" - def currentIsls = pathHelper.getInvolvedIsls(currentPath) + def currentIsls = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() def 
newIsls = pathHelper.getInvolvedIsls(altPath) def islsToBreak = currentIsls.findAll { !newIsls.contains(it) } def cookiesMap = involvedSwitches.collectEntries { sw -> - [sw.dpId, northbound.getSwitchRules(sw.dpId).flowEntries.findAll { + [sw.id, switchRulesFactory.get(sw).getRules().findAll { !new Cookie(it.cookie).serviceFlag }*.cookie] } - def metersMap = involvedSwitches.findAll { it.ofVersion != "OF_12" }.collectEntries { sw -> - [sw.dpId, northbound.getAllMeters(sw.dpId).meterEntries.findAll { + def metersMap = involvedSwitches + .findAll { northbound.getSwitch(it).ofVersion != "OF_12" }.collectEntries { sw -> + [sw.id, northbound.getAllMeters(sw).meterEntries.findAll { it.meterId > MAX_SYSTEM_RULE_METER_ID }*.meterId] } @@ -110,21 +126,23 @@ class PinnedFlowSpec extends HealthCheckSpecification { then: "Flow is not rerouted and marked as DOWN when the first ISL is broken" Wrappers.wait(WAIT_OFFSET) { Wrappers.timedLoop(2) { - assert northboundV2.getFlow(flow.flowId).status == FlowState.DOWN.toString() + assert flow.retrieveFlowStatus().status == FlowState.DOWN //do not check history here. In parallel environment it may be overriden by 'up' event on another island - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == currentPath + assert flow.retrieveAllEntityPaths().getPathNodes() == currentPath } } islHelper.breakIsls(islsToBreak[1..-1]) and: "Rules and meters are not changed" def cookiesMapAfterReroute = involvedSwitches.collectEntries { sw -> - [sw.dpId, northbound.getSwitchRules(sw.dpId).flowEntries.findAll { + [sw.id, northbound.getSwitchRules(sw).flowEntries.findAll { !new Cookie(it.cookie).serviceFlag }*.cookie] } - def metersMapAfterReroute = involvedSwitches.findAll { it.ofVersion != "OF_12" }.collectEntries { sw -> - [sw.dpId, northbound.getAllMeters(sw.dpId).meterEntries.findAll { + + def metersMapAfterReroute = involvedSwitches.findAll { + northbound.getSwitch(it).ofVersion != "OF_12" }.collectEntries { sw -> + [sw.id, northbound.getAllMeters(sw).meterEntries.findAll { it.meterId > MAX_SYSTEM_RULE_METER_ID }*.meterId] } @@ -137,8 +155,8 @@ class PinnedFlowSpec extends HealthCheckSpecification { TimeUnit.SECONDS.sleep(rerouteDelay) Wrappers.wait(WAIT_OFFSET + discoveryInterval) { islsToBreak[0..-2].each { assert islUtils.getIslInfo(it).get().state == IslChangeType.DISCOVERED } - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == currentPath + assert flow.retrieveFlowStatus().status == FlowState.DOWN + assert flow.retrieveAllEntityPaths().getPathNodes() == currentPath } and: "Restore the last ISL" @@ -146,47 +164,49 @@ class PinnedFlowSpec extends HealthCheckSpecification { then: "Flow is marked as UP when the last ISL is restored" Wrappers.wait(WAIT_OFFSET * 2) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == currentPath + assert flow.retrieveFlowStatus().status == FlowState.UP + assert flow.retrieveAllEntityPaths().getPathNodes() == currentPath } } def "System is not rerouting pinned flow when 'reroute link flows' is called"() { given: "A pinned flow with alt path available" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair).tap { it.pinned = true } - flowHelperV2.addFlow(flow) - def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) + def flow = flowFactory.getBuilder(switchPair) + 
.withPinned(true) + .build().create() + def initialPath = flow.retrieveAllEntityPaths() when: "Make another path more preferable" - def newPath = switchPair.paths.find { it != currentPath } + def newPath = switchPair.paths.find { it != initialPath.getPathNodes() } switchPair.paths.findAll { it != newPath }.each { pathHelper.makePathMorePreferable(newPath, it) } and: "Init reroute of all flows that go through pinned flow's isl" - def isl = pathHelper.getInvolvedIsls(currentPath).first() + def isl = initialPath.flowPath.getInvolvedIsls().first() def affectedFlows = northbound.rerouteLinkFlows(isl.srcSwitch.dpId, isl.srcPort, isl.dstSwitch.dpId, isl.dstPort) then: "Flow is not rerouted (but still present in reroute response)" affectedFlows == [flow.flowId] Wrappers.timedLoop(4) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == currentPath + assert flow.retrieveFlowStatus().status == FlowState.UP + assert flow.retrieveAllEntityPaths().getPathNodes() == initialPath.getPathNodes() } } def "System returns error if trying to intentionally reroute a pinned flow"() { given: "A pinned flow with alt path available" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair).tap { it.pinned = true } - flowHelperV2.addFlow(flow) - def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) + def flow = flowFactory.getBuilder(switchPair) + .withPinned(true) + .build().create() + def currentPath = flow.retrieveAllEntityPaths().getPathNodes() when: "Make another path more preferable" def newPath = switchPair.paths.find { it != currentPath } switchPair.paths.findAll { it != newPath }.each { pathHelper.makePathMorePreferable(newPath, it) } and: "Init manual reroute" - northboundV2.rerouteFlow(flow.flowId) + flow.reroute() then: "Error is returned" def e = thrown(HttpClientErrorException) @@ -196,70 +216,58 @@ class PinnedFlowSpec extends HealthCheckSpecification { def "System doesn't allow to create pinned and protected flow at the same time"() { when: "Try to create pinned and protected flow" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.pinned = true - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true) + .withPinned(true) + .build().create() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not create flow" - errorDetails.errorDescription == "Flow flags are not valid, unable to process pinned protected flow" + new FlowNotCreatedExpectedError(~/Flow flags are not valid, unable to process pinned protected flow/).matches(exc) } def "System doesn't allow to enable the protected path flag on a pinned flow"() { given: "A pinned flow" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.pinned = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withPinned(true) + .build().create() when: "Update flow: enable the allocateProtectedPath flag(allocateProtectedPath=true)" - northboundV2.updateFlow(flow.flowId, flow.tap { it.allocateProtectedPath = true }) + flow.update(flow.tap{ 
it.allocateProtectedPath = true }) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not update flow" - errorDetails.errorDescription == "Flow flags are not valid, unable to process pinned protected flow" + new FlowNotUpdatedExpectedError(~/Flow flags are not valid, unable to process pinned protected flow/).matches(exc) } @Tags([LOW_PRIORITY]) def "System doesn't allow to create pinned and protected flow at the same time [v1 api]"() { when: "Try to create pinned and protected flow" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelper.randomFlow(switchPair) - flow.pinned = true - flow.allocateProtectedPath = true - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withPinned(true) + .withProtectedPath(true) + .build().create() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not create flow" - errorDetails.errorDescription == "Flow flags are not valid, unable to process pinned protected flow" + new FlowNotCreatedExpectedError(~/Flow flags are not valid, unable to process pinned protected flow/).matches(exc) } @Tags([LOW_PRIORITY]) def "System doesn't allow to enable the protected path flag on a pinned flow [v1 api]"() { given: "A pinned flow" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelper.randomFlow(switchPair) - flow.pinned = true - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withPinned(true) + .build().create() when: "Update flow: enable the allocateProtectedPath flag(allocateProtectedPath=true)" - northbound.updateFlow(flow.id, flow.tap { it.allocateProtectedPath = true }) + flow.update(flow.tap { it.allocateProtectedPath = true}) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not update flow" - errorDetails.errorDescription == "Flow flags are not valid, unable to process pinned protected flow" + new FlowNotUpdatedExpectedError(~/Flow flags are not valid, unable to process pinned protected flow/).matches(exc) } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathSpec.groovy index eae8eadd2c4..4eec25f35e8 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathSpec.groovy @@ -1,46 +1,58 @@ package org.openkilda.functionaltests.spec.flows -import groovy.util.logging.Slf4j +import static groovyx.gpars.GParsPool.withPool +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_PROPS_DB_RESET +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE 
+import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter +import static org.openkilda.functionaltests.helpers.model.FlowActionType.REROUTE +import static org.openkilda.functionaltests.helpers.model.FlowActionType.REROUTE_FAILED +import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID +import static org.openkilda.model.cookie.CookieBase.CookieType.SERVICE_OR_FLOW_SEGMENT +import static org.openkilda.testing.Constants.NON_EXISTENT_FLOW_ID +import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME +import static org.openkilda.testing.Constants.PROTECTED_PATH_INSTALLATION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.flow.FlowNotCreatedExpectedError +import org.openkilda.functionaltests.error.flow.FlowNotCreatedWithMissingPathExpectedError +import org.openkilda.functionaltests.error.flow.FlowNotUpdatedExpectedError +import org.openkilda.functionaltests.error.flow.FlowPathNotSwappedExpectedError import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.messaging.error.MessageError +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowEntityPath +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.FlowStatusHistoryEvent +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.model.stats.Direction +import org.openkilda.messaging.info.rule.FlowEntry import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.model.StatusInfo import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.northbound.dto.v2.switches.SwitchPatchDto import org.openkilda.testing.model.topology.TopologyDefinition.Isl import org.openkilda.testing.service.traffexam.TraffExamService -import org.openkilda.testing.tools.FlowTrafficExamBuilder + +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired +import org.springframework.http.HttpStatus import org.springframework.web.client.HttpClientErrorException +import spock.lang.Issue import spock.lang.Narrative import spock.lang.See import spock.lang.Shared import javax.inject.Provider -import static groovyx.gpars.GParsPool.withPool -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_PROPS_DB_RESET -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_ACTION -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_FAIL -import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter 
-import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID -import static org.openkilda.model.cookie.CookieBase.CookieType.SERVICE_OR_FLOW_SEGMENT -import static org.openkilda.testing.Constants.NON_EXISTENT_FLOW_ID -import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME -import static org.openkilda.testing.Constants.PROTECTED_PATH_INSTALLATION_TIME -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET - @Slf4j @See("https://github.com/telstra/open-kilda/tree/develop/docs/design/solutions/protected-paths") @Narrative("""Protected path - it is pre-calculated, reserved, and deployed (except ingress rule), @@ -59,9 +71,18 @@ Main and protected paths can't use the same link.""") class ProtectedPathSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory + @Autowired @Shared Provider traffExamProvider + public static final Closure REQUIRED_COOKIE = { Long cookie -> !new Cookie(cookie).serviceFlag && new Cookie(cookie).type == SERVICE_OR_FLOW_SEGMENT } @Tags(LOW_PRIORITY) def "Able to create a flow with protected path when maximumBandwidth=#bandwidth, vlan=#vlanId"() { @@ -69,24 +90,27 @@ class ProtectedPathSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(2).random() when: "Create flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flow.maximumBandwidth = bandwidth - flow.ignoreBandwidth = bandwidth == 0 - flow.source.vlanId = vlanId - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(bandwidth == 0) + .withSourceVlan(vlanId).build() + .create() + then: "Flow is created with protected path" - def flowPathInfo = northbound.getFlowPath(flow.flowId) - flowPathInfo.protectedPath + def flowPathInfo = flow.retrieveAllEntityPaths() + !flowPathInfo.flowPath.protectedPath.isPathAbsent() and: "Rules for main and protected paths are created" - Wrappers.wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.flowId) } + Wrappers.wait(WAIT_OFFSET) { + HashMap> flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + } and: "Validation of flow must be successful" - northbound.validateFlow(flow.flowId).each { direction -> - assert direction.discrepancies.empty - } + flow.validateAndCollectDiscrepancies().isEmpty() where: bandwidth | vlanId @@ -102,118 +126,134 @@ class ProtectedPathSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(2).random() when: "Create flow without protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = false - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(false).build().create() then: "Flow is created without protected path" - !northbound.getFlowPath(flow.flowId).protectedPath - def flowInfo = northboundV2.getFlow(flow.flowId) - !flowInfo.statusDetails + def flowPathInfo = flow.retrieveAllEntityPaths() + def initialFlowInfo = flow.retrieveDetails() + verifyAll { + !flowPathInfo.flowPath.protectedPath + !initialFlowInfo.statusDetails 
+ !initialFlowInfo.allocateProtectedPath + } + + and: "Source switch passes validation" + def initialSrcValidation = switchHelper.validate(switchPair.src.dpId) + initialSrcValidation.isAsExpected() + + and: "Cookies are created by flow" + HashMap initialAmountOfFlowRules = [switchPair.src.dpId, switchPair.dst.dpId] + .collectEntries {swId -> + def createdCookies = switchRulesFactory.get(swId).getRules() + .findAll { !new Cookie(it.cookie).serviceFlag }*.cookie + + def swProps = switchHelper.getCachedSwProps(swId) + def amountOfServer42Rules = 0 + + if(swProps.server42FlowRtt){ + amountOfServer42Rules +=1 + swId == switchPair.src.dpId && flow.source.vlanId && ++amountOfServer42Rules + swId == switchPair.dst.dpId && flow.destination.vlanId && ++amountOfServer42Rules + } + def amountOfFlowRules = 3 + amountOfServer42Rules + assert createdCookies.size() == amountOfFlowRules + [(swId): amountOfFlowRules] + } when: "Update flow: enable protected path(allocateProtectedPath=true)" - def currentLastUpdate = flowInfo.lastUpdated - flowHelperV2.updateFlow(flow.flowId, flow.tap { it.allocateProtectedPath = true }) + def updatedFlow = flow.update(flow.tap { it.allocateProtectedPath = true }) - then: "Protected path is enabled" - def flowPathInfoAfterUpdating = northbound.getFlowPath(flow.flowId) - flowPathInfoAfterUpdating.protectedPath - northboundV2.getFlow(flow.flowId).statusDetails - def flowInfoFromDb = database.getFlow(flow.flowId) - def protectedForwardCookie = flowInfoFromDb.protectedForwardPath.cookie.value - def protectedReverseCookie = flowInfoFromDb.protectedReversePath.cookie.value + then: "Flow has been updated successfully and protected path is enabled" + updatedFlow.statusDetails + initialFlowInfo.lastUpdated < updatedFlow.lastUpdated + + def flowPathInfoAfterUpdating = updatedFlow.retrieveAllEntityPaths() + !flowPathInfoAfterUpdating.flowPath.protectedPath.isPathAbsent() + def protectedPathSwitches = flowPathInfoAfterUpdating.flowPath.protectedPath.forward.getInvolvedSwitches() - currentLastUpdate < northboundV2.getFlow(flow.flowId).lastUpdated + def flowInfoFromDb = flow.retrieveDetailsFromDB() + def protectedFlowCookies = [flowInfoFromDb.protectedForwardPath.cookie.value, flowInfoFromDb.protectedReversePath.cookie.value] and: "Rules for main and protected paths are created" - Wrappers.wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.flowId) } + Wrappers.wait(WAIT_OFFSET) { + HashMap> flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + + def cookiesAfterEnablingProtectedPath = flowInvolvedSwitchesWithRules.get(switchPair.src.dpId) + .findAll { !new Cookie(it.cookie).serviceFlag }*.cookie + // initialAmountOfFlowRules was collected for flow without protected path + one for protected path + assert cookiesAfterEnablingProtectedPath.size() == initialAmountOfFlowRules.get(switchPair.src.dpId) + 1 + } + + def srcValidation = switchHelper.validate(switchPair.src.dpId) + srcValidation.isAsExpected() + srcValidation.rules.proper.cookie.findAll(REQUIRED_COOKIE).size() == initialSrcValidation.rules.proper.cookie.findAll(REQUIRED_COOKIE).size() + 1 when: "Update flow: disable protected path(allocateProtectedPath=false)" - def protectedFlowPath = northbound.getFlowPath(flow.flowId).protectedPath.forwardPath - flowHelperV2.updateFlow(flow.flowId, flow.tap { it.allocateProtectedPath = false }) + updatedFlow = 
updatedFlow.update(updatedFlow.tap { it.allocateProtectedPath = false}) - then: "Protected path is disabled" - !northbound.getFlowPath(flow.flowId).protectedPath - !northboundV2.getFlow(flow.flowId).statusDetails + then: "Flow has been updated successfully and protected path is disabled" + !updatedFlow.statusDetails + !updatedFlow.retrieveAllEntityPaths().flowPath.protectedPath and: "Rules for protected path are deleted" Wrappers.wait(WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - protectedFlowPath.each { sw -> - def rules = northbound.getSwitchRules(sw.switchId).flowEntries.findAll { + assert flow.retrieveFlowStatus().status == FlowState.UP + protectedPathSwitches.each { sw -> + def rules = switchRulesFactory.get(sw).getRules().findAll { !new Cookie(it.cookie).serviceFlag } - assert rules.every { it != protectedForwardCookie && it != protectedReverseCookie } + assert rules.findAll { it.cookie in protectedFlowCookies }.isEmpty() } } } @Tags(SMOKE) def "Able to swap main and protected paths manually"() { - given: "A simple flow" + when: "Flow with protected path has been created successfully" def switchPair = switchPairs.all() .nonNeighbouring() .withTraffgensOnBothEnds() .withPathHavingAtLeastNSwitches(4) .random() - def flow = flowHelperV2.randomFlow(switchPair, true) - flow.allocateProtectedPath = false - flowHelperV2.addFlow(flow) - assert !northbound.getFlowPath(flow.flowId).protectedPath - and: "Cookies are created by flow" - def createdCookiesSrcSw = northbound.getSwitchRules(switchPair.src.dpId).flowEntries.findAll { - !new Cookie(it.cookie).serviceFlag - }*.cookie - def createdCookiesDstSw = northbound.getSwitchRules(switchPair.dst.dpId).flowEntries.findAll { - !new Cookie(it.cookie).serviceFlag - }*.cookie - def srcSwProps = switchHelper.getCachedSwProps(switchPair.src.dpId) - def amountOfserver42Rules = srcSwProps.server42FlowRtt ? 1 : 0 - def amountOfFlowRulesSrcSw = 3 + amountOfserver42Rules - if (srcSwProps.server42FlowRtt && flow.source.vlanId) { - amountOfFlowRulesSrcSw += 1 - } - assert createdCookiesSrcSw.size() == amountOfFlowRulesSrcSw - def dstSwProps = switchHelper.getCachedSwProps(switchPair.dst.dpId) - def amountOfserver42RulesDstSw = dstSwProps.server42FlowRtt ? 
1 : 0 - def amountOfFlowRulesDstSw = 3 + amountOfserver42RulesDstSw - if (dstSwProps.server42FlowRtt && flow.destination.vlanId) { - amountOfFlowRulesDstSw += 1 - } - assert createdCookiesDstSw.size() == amountOfFlowRulesDstSw - - when: "Update flow: enable protected path(allocateProtectedPath=true)" - flowHelperV2.updateFlow(flow.flowId, flow.tap { it.allocateProtectedPath = true }) + def flow = flowFactory.getBuilder(switchPair, true) + .withProtectedPath(true).build() + .create() then: "Protected path is enabled" - def flowPathInfo = northbound.getFlowPath(flow.flowId) - flowPathInfo.protectedPath - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - currentPath != currentProtectedPath + def flowPathInfo = flow.retrieveAllEntityPaths() + !flowPathInfo.flowPath.protectedPath.isPathAbsent() + + def initialMainPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def initialProtectedPath =flowPathInfo.getPathNodes(Direction.FORWARD, true) + initialMainPath != initialProtectedPath and: "Rules for main and protected paths are created" Wrappers.wait(WAIT_OFFSET) { - flowHelper.verifyRulesOnProtectedFlow(flow.flowId) - def cookiesAfterEnablingProtectedPath = northbound.getSwitchRules(switchPair.src.dpId).flowEntries.findAll { - !new Cookie(it.cookie).serviceFlag - }*.cookie - // amountOfFlowRules for main path + one for protected path - assert cookiesAfterEnablingProtectedPath.size() == amountOfFlowRulesSrcSw + 1 + HashMap> flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) } + and: "Number of flow-related cookies has been collected for both source and destination switch" + HashMap initialAmountOfFlowRules = [switchPair.src.dpId, switchPair.dst.dpId] + .collectEntries { + [(it): switchHelper.validate(it).rules.proper.cookie.findAll(REQUIRED_COOKIE).size()] + } as HashMap + and: "No rule discrepancies on every switch of the flow on the main path" - def mainSwitches = pathHelper.getInvolvedSwitches(currentPath) - switchHelper.synchronizeAndCollectFixedDiscrepancies(mainSwitches*.getDpId()).isEmpty() + def mainPathSwitches = flowPathInfo.flowPath.path.forward.getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(mainPathSwitches).isEmpty() and: "No rule discrepancies on every switch of the flow on the protected path)" - def protectedSwitches = pathHelper.getInvolvedSwitches(currentProtectedPath) - switchHelper.synchronizeAndCollectFixedDiscrepancies(protectedSwitches*.getDpId()).isEmpty() + def protectedPathSwitches = flowPathInfo.flowPath.protectedPath.forward.getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(protectedPathSwitches).isEmpty() and: "The flow allows traffic(on the main path)" def traffExam = traffExamProvider.get() - def exam = new FlowTrafficExamBuilder(topology, traffExam).buildBidirectionalExam(flowHelperV2.toV1(flow), 1000, 5) + def exam = flow.traffExam(traffExam, 1000, 5) withPool { [exam.forward, exam.reverse].eachParallel { direction -> def resources = traffExam.startExam(direction) @@ -225,20 +265,30 @@ class ProtectedPathSpec extends HealthCheckSpecification { when: "Swap flow paths" def srcSwitchCreatedMeterIds = getCreatedMeterIds(switchPair.src.dpId) def dstSwitchCreatedMeterIds = getCreatedMeterIds(switchPair.dst.dpId) - def currentLastUpdate = 
northboundV2.getFlow(flow.flowId).lastUpdated - northbound.swapFlowPath(flow.flowId) + def flowLastUpdate = flow.retrieveDetails().lastUpdated + + flow.swapFlowPath() then: "Flow paths are swapped" - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } - def flowPathInfoAfterSwapping = northbound.getFlowPath(flow.flowId) - def newCurrentPath = pathHelper.convert(flowPathInfoAfterSwapping) - def newCurrentProtectedPath = pathHelper.convert(flowPathInfoAfterSwapping.protectedPath) - newCurrentPath != currentPath - newCurrentPath == currentProtectedPath - newCurrentProtectedPath != currentProtectedPath - newCurrentProtectedPath == currentPath + flow.waitForBeingInState(FlowState.UP) + def flowPathInfoAfterSwapping = flow.retrieveAllEntityPaths() + def newMainPath = flowPathInfoAfterSwapping.getPathNodes(Direction.FORWARD, false) + def newProtectedPath = flowPathInfoAfterSwapping.getPathNodes(Direction.FORWARD, true) + verifyAll { + assert newMainPath == initialProtectedPath + assert newProtectedPath == initialMainPath + } + flowLastUpdate < flow.retrieveDetails().lastUpdated - currentLastUpdate < northboundV2.getFlow(flow.flowId).lastUpdated + and: "No rule discrepancies when doing flow validation" + flow.validateAndCollectDiscrepancies().isEmpty() + + and: "Rules are updated" + Wrappers.wait(WAIT_OFFSET) { + HashMap> flowInvolvedSwitchesWithRules = flowPathInfoAfterSwapping.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + } and: "New meter is created on the src and dst switches" def newSrcSwitchCreatedMeterIds = getCreatedMeterIds(switchPair.src.dpId) @@ -247,42 +297,30 @@ class ProtectedPathSpec extends HealthCheckSpecification { newSrcSwitchCreatedMeterIds.sort() != srcSwitchCreatedMeterIds.sort() || srcSwitchCreatedMeterIds.empty newDstSwitchCreatedMeterIds.sort() != dstSwitchCreatedMeterIds.sort() || dstSwitchCreatedMeterIds.empty - and: "Rules are updated" - Wrappers.wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.flowId) } and: "Old meter is deleted on the src and dst switches" - Wrappers.wait(WAIT_OFFSET) { - [switchPair.src.dpId, switchPair.dst.dpId].each { switchId -> - def switchValidateInfo = switchHelper.validate(switchId) - if(switchValidateInfo.meters) { - assert switchValidateInfo.meters.proper.findAll({dto -> !isDefaultMeter(dto)}).size() == 1 - } - assert switchValidateInfo.rules.proper.findAll { def cookie = new Cookie(it.getCookie()) - !cookie.serviceFlag && cookie.type == SERVICE_OR_FLOW_SEGMENT }.size() == - (switchId == switchPair.src.dpId) ? 
amountOfFlowRulesSrcSw + 1 : amountOfFlowRulesDstSw + 1 - switchValidateInfo.isAsExpected() + [switchPair.src.dpId, switchPair.dst.dpId].each { switchId -> + def switchValidateInfo = switchHelper.validate(switchId) + if (switchValidateInfo.meters) { + assert switchValidateInfo.meters.proper.findAll({ dto -> !isDefaultMeter(dto) }).size() == 1 } + assert switchValidateInfo.rules.proper.cookie.findAll(REQUIRED_COOKIE).size() == initialAmountOfFlowRules.get(switchId) + assert switchValidateInfo.isAsExpected() } and: "Transit switches store the correct info about rules and meters" - def involvedTransitSwitches = (currentPath[1..-2].switchId + currentProtectedPath[1..-2].switchId).unique() + List involvedTransitSwitches = (initialMainPath[1..-2].switchId + initialProtectedPath[1..-2].switchId).unique() Wrappers.wait(WAIT_OFFSET) { assert switchHelper.validateAndCollectFoundDiscrepancies(involvedTransitSwitches).isEmpty() } - and: "No rule discrepancies when doing flow validation" - northbound.validateFlow(flow.flowId).each { assert it.discrepancies.empty } - - and: "All rules for main and protected paths are updated" - Wrappers.wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.flowId) } - and: "No rule discrepancies on every switch of the flow on the main path" - def newMainSwitches = pathHelper.getInvolvedSwitches(newCurrentPath) - switchHelper.synchronizeAndCollectFixedDiscrepancies(newMainSwitches*.getDpId()).isEmpty() + def newMainSwitches = flowPathInfoAfterSwapping.flowPath.path.forward.getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(newMainSwitches).isEmpty() and: "No rule discrepancies on every switch of the flow on the protected path)" - def newProtectedSwitches = pathHelper.getInvolvedSwitches(newCurrentProtectedPath) - switchHelper.synchronizeAndCollectFixedDiscrepancies(newProtectedSwitches*.getDpId()).isEmpty() + def newProtectedSwitches = flowPathInfoAfterSwapping.flowPath.protectedPath.forward.getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(newProtectedSwitches).isEmpty() and: "The flow allows traffic(on the protected path)" withPool { @@ -302,32 +340,34 @@ class ProtectedPathSpec extends HealthCheckSpecification { def uniquePathCount = switchPair.paths.unique(false) { a, b -> a.intersect(b) == [] ? 
1 : 0 }.size() when: "Create 5 flows with protected paths" - List flows = [] + List flows = [] + List busyEndpoints = [] 5.times { - flows << flowHelperV2.randomFlow(switchPair, false, flows).tap { - maximumBandwidth = bandwidth - ignoreBandwidth = bandwidth == 0 - allocateProtectedPath = true - } + def flow = flowFactory.getBuilder(switchPair, false, busyEndpoints) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(bandwidth == 0) + .withProtectedPath(true).build() + .create() + busyEndpoints.addAll(flow.occupiedEndpoints()) + flows << flow } - flows.each { flowHelperV2.addFlow(it) } then: "Flows are created with protected path" - def flowPathsInfo = flows.collect { northbound.getFlowPath(it.flowId) } - flowPathsInfo.each { assert it.protectedPath } + def flowPathsInfo = flows.collect {it.retrieveAllEntityPaths() } + flowPathsInfo.each { assert !it.flowPath.protectedPath.isPathAbsent()} and: "Current paths are not equal to protected paths" - def currentPath = pathHelper.convert(flowPathsInfo[0]) - def currentProtectedPath = pathHelper.convert(flowPathsInfo[0].protectedPath) - currentPath != currentProtectedPath + def firstFlowMainPath = flowPathsInfo.first().getPathNodes(Direction.FORWARD, false) + def firstFlowProtectedPath = flowPathsInfo.first().getPathNodes(Direction.FORWARD, true) + firstFlowMainPath != firstFlowProtectedPath //check that all other flows use the same paths, so above verification applies to all of them flowPathsInfo.each { flowPathInfo -> - assert pathHelper.convert(flowPathInfo) == currentPath - assert pathHelper.convert(flowPathInfo.protectedPath) == currentProtectedPath + assert flowPathInfo.getPathNodes(Direction.FORWARD, false) == firstFlowMainPath + assert flowPathInfo.getPathNodes(Direction.FORWARD, true) == firstFlowProtectedPath } and: "Bandwidth is reserved for protected paths on involved ISLs" - def protectedIsls = pathHelper.getInvolvedIsls(currentPath) + def protectedIsls = flowPathsInfo.first().flowPath.getProtectedPathInvolvedIsls() def protectedIslsInfo = protectedIsls.collect { islUtils.getIslInfo(it).get() } initialIsls.each { initialIsl -> protectedIslsInfo.each { currentIsl -> @@ -338,19 +378,20 @@ class ProtectedPathSpec extends HealthCheckSpecification { } when: "Break ISL on the main path (bring port down) to init auto swap" - def islToBreak = pathHelper.getInvolvedIsls(currentPath)[0] + def islToBreak = flowPathsInfo.first().flowPath.getMainPathInvolvedIsls().first() islHelper.breakIsl(islToBreak) then: "Flows are switched to protected paths" Wrappers.wait(PROTECTED_PATH_INSTALLATION_TIME) { flows.each { flow -> - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - def flowPathInfoAfterRerouting = northbound.getFlowPath(flow.flowId) + assert flow.retrieveFlowStatus().status == FlowState.UP + def flowPathInfoAfterRerouting = flow.retrieveAllEntityPaths() - assert pathHelper.convert(flowPathInfoAfterRerouting) == currentProtectedPath + assert flowPathInfoAfterRerouting.getPathNodes(Direction.FORWARD, false) == firstFlowProtectedPath if (4 <= uniquePathCount) { - assert pathHelper.convert(flowPathInfoAfterRerouting.protectedPath) != currentPath - assert pathHelper.convert(flowPathInfoAfterRerouting.protectedPath) != currentProtectedPath + // protected path is recalculated due to the main path broken ISl + assert flowPathInfoAfterRerouting.getPathNodes(Direction.FORWARD, true) != firstFlowMainPath + assert flowPathInfoAfterRerouting.getPathNodes(Direction.FORWARD, true) != firstFlowProtectedPath } } } @@ -359,7 +400,10 @@ class 
ProtectedPathSpec extends HealthCheckSpecification { islHelper.restoreIsl(islToBreak) then: "Path of the flow is not changed" - flows.each { assert pathHelper.convert(northbound.getFlowPath(it.flowId)) == currentProtectedPath } + flows.each { flow -> + flow.waitForBeingInState(FlowState.UP) + assert flow.retrieveAllEntityPaths().getPathNodes(Direction.FORWARD, false) == firstFlowProtectedPath + } where: flowDescription | bandwidth @@ -373,36 +417,34 @@ class ProtectedPathSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().withAtLeastNNonOverlappingPaths(2).random() when: "Create flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair).tap { allocateProtectedPath = true } - flowHelperV2.addFlow(flow) - def path = northbound.getFlowPath(flow.flowId) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build().create() + def initialFlowPath = flow.retrieveAllEntityPaths() and: "Other paths have not enough bandwidth to host the flow in case of reroute" - def originalMainPath = pathHelper.convert(path) - def originalProtectedPath = pathHelper.convert(path.protectedPath) - def usedIsls = pathHelper.getInvolvedIsls(originalMainPath) + - pathHelper.getInvolvedIsls(originalProtectedPath) + def originalMainPath = initialFlowPath.getPathNodes(Direction.FORWARD, false) + def originalProtectedPath = initialFlowPath.getPathNodes(Direction.FORWARD, true) + def usedIsls = initialFlowPath.flowPath.getInvolvedIsls() def otherIsls = switchPair.paths.findAll { it != originalMainPath && it != originalProtectedPath }.collectMany { pathHelper.getInvolvedIsls(it) } .findAll { !usedIsls.contains(it) && !usedIsls.contains(it.reversed) } .unique { a, b -> a == b || a == b.reversed ? 0 : 1 } - otherIsls.collectMany{[it, it.reversed]}.each { - database.updateIslMaxBandwidth(it, flow.maximumBandwidth - 1) - islHelper.setAvailableBandwidth(it, flow.maximumBandwidth - 1) + otherIsls.collectMany{[it, it.reversed]}.each { Isl isl -> + database.updateIslMaxBandwidth(isl, flow.maximumBandwidth - 1) + islHelper.setAvailableBandwidth(isl, flow.maximumBandwidth - 1) } and: "Main flow path breaks" - def mainIsl = pathHelper.getInvolvedIsls(path).first() + def mainIsl = initialFlowPath.flowPath.getMainPathInvolvedIsls().first() islHelper.breakIsl(mainIsl) then: "Main path swaps to protected, flow becomes degraded, main path UP, protected DOWN" Wrappers.wait(WAIT_OFFSET) { - def newPath = northbound.getFlowPath(flow.flowId) - assert pathHelper.convert(newPath) == pathHelper.convert(path.protectedPath) - verifyAll(northbound.getFlow(flow.flowId)) { - status == FlowState.DEGRADED.toString() - flowStatusDetails.mainFlowPathStatus == "Up" - flowStatusDetails.protectedFlowPathStatus == "Down" + def newPath = flow.retrieveAllEntityPaths() + assert newPath.getPathNodes(Direction.FORWARD, false) == originalProtectedPath + verifyAll(flow.retrieveDetails()) { + status == FlowState.DEGRADED + statusDetails.mainPath == "Up" + statusDetails.protectedPath == "Down" statusInfo == StatusInfo.OVERLAPPING_PROTECTED_PATH } } @@ -411,13 +453,13 @@ class ProtectedPathSpec extends HealthCheckSpecification { islHelper.restoreIsl(mainIsl) then: "Main path remains the same, flow becomes UP, main path UP, protected UP" - Wrappers.wait(WAIT_OFFSET) { - def newPath = northbound.getFlowPath(flow.flowId) - assert pathHelper.convert(newPath) == pathHelper.convert(path.protectedPath) - verifyAll(northbound.getFlow(flow.flowId)) { - status == FlowState.UP.toString() - flowStatusDetails.mainFlowPathStatus 
== "Up" - flowStatusDetails.protectedFlowPathStatus == "Up" + Wrappers.wait(WAIT_OFFSET * 2) { + def newPath = flow.retrieveAllEntityPaths() + assert newPath.getPathNodes(Direction.FORWARD, false) == originalProtectedPath + verifyAll(flow.retrieveDetails()) { + status == FlowState.UP + statusDetails.mainPath == "Up" + statusDetails.protectedPath == "Up" } } } @@ -428,13 +470,13 @@ class ProtectedPathSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().withAtLeastNNonOverlappingPaths(2).random() when: "Create flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair).tap { allocateProtectedPath = true } - flowHelperV2.addFlow(flow) - def path = northbound.getFlowPath(flow.flowId) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build().create() + + def initialFlowPath = flow.retrieveAllEntityPaths() and: "Other paths are not available (ISLs are down)" - def originalMainPath = pathHelper.convert(path) - def originalProtectedPath = pathHelper.convert(path.protectedPath) + def originalMainPath = initialFlowPath.getPathNodes(Direction.FORWARD, false) + def originalProtectedPath = initialFlowPath.getPathNodes(Direction.FORWARD, true) def usedIsls = pathHelper.getInvolvedIsls(originalMainPath) + pathHelper.getInvolvedIsls(originalProtectedPath) def otherIsls = switchPair.paths.findAll { it != originalMainPath && it != originalProtectedPath }.collectMany { pathHelper.getInvolvedIsls(it) } @@ -443,17 +485,17 @@ class ProtectedPathSpec extends HealthCheckSpecification { islHelper.breakIsls(otherIsls) and: "Main flow path breaks" - def mainIsl = pathHelper.getInvolvedIsls(path).first() + def mainIsl = initialFlowPath.flowPath.getMainPathInvolvedIsls().first() islHelper.breakIsl(mainIsl) then: "Main path swaps to protected, flow becomes degraded, main path UP, protected DOWN" Wrappers.wait(WAIT_OFFSET) { - def newPath = northbound.getFlowPath(flow.flowId) - assert pathHelper.convert(newPath) == originalProtectedPath - verifyAll(northbound.getFlow(flow.flowId)) { - status == FlowState.DEGRADED.toString() - flowStatusDetails.mainFlowPathStatus == "Up" - flowStatusDetails.protectedFlowPathStatus == "Down" + def newPath = flow.retrieveAllEntityPaths() + assert newPath.getPathNodes(Direction.FORWARD, false) == originalProtectedPath + verifyAll(flow.retrieveDetails()) { + status == FlowState.DEGRADED + statusDetails.mainPath == "Up" + statusDetails.protectedPath == "Down" statusInfo == StatusInfo.OVERLAPPING_PROTECTED_PATH } } @@ -463,13 +505,13 @@ class ProtectedPathSpec extends HealthCheckSpecification { then: "Main path remains the same (no swap), flow becomes UP, main path remains UP, protected path becomes UP" Wrappers.wait(WAIT_OFFSET) { - def newPath = northbound.getFlowPath(flow.flowId) - assert pathHelper.convert(newPath) == originalProtectedPath - assert pathHelper.convert(newPath.protectedPath) == originalMainPath - verifyAll(northbound.getFlow(flow.flowId)) { - status == FlowState.UP.toString() - flowStatusDetails.mainFlowPathStatus == "Up" - flowStatusDetails.protectedFlowPathStatus == "Up" + def newPath = flow.retrieveAllEntityPaths() + assert newPath.getPathNodes(Direction.FORWARD, false) == originalProtectedPath + assert newPath.getPathNodes(Direction.FORWARD, true) == originalMainPath + verifyAll(flow.retrieveDetails()) { + status == FlowState.UP + statusDetails.mainPath == "Up" + statusDetails.protectedPath == "Up" } } } @@ -481,43 +523,43 @@ class ProtectedPathSpec extends HealthCheckSpecification { def switchPair = 
switchPairs.all().neighbouring().withAtLeastNNonOverlappingPaths(4).random() and: "A flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.maximumBandwidth = bandwidth - flow.ignoreBandwidth = bandwidth == 0 - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(bandwidth == 0) + .withProtectedPath(true).build() + .create() - def flowPathInfo = northbound.getFlowPath(flow.flowId) - assert flowPathInfo.protectedPath + def flowPathInfo = flow.retrieveAllEntityPaths() + assert !flowPathInfo.flowPath.protectedPath.isPathAbsent() - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - assert currentPath != currentProtectedPath + def initialMainPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def initialProtectedPath = flowPathInfo.getPathNodes(Direction.FORWARD, true) + assert initialMainPath != initialProtectedPath when: "Make the current and protected path less preferable than alternatives" - def alternativePaths = switchPair.paths.findAll { it != currentPath && it != currentProtectedPath } - alternativePaths.each { pathHelper.makePathMorePreferable(it, currentPath) } - alternativePaths.each { pathHelper.makePathMorePreferable(it, currentProtectedPath) } + def alternativePaths = switchPair.paths.findAll { it != initialMainPath && it != initialProtectedPath } + alternativePaths.each { pathHelper.makePathMorePreferable(it, initialMainPath) } + alternativePaths.each { pathHelper.makePathMorePreferable(it, initialProtectedPath) } and: "Init intentional reroute" - def rerouteResponse = northboundV2.rerouteFlow(flow.flowId) + def rerouteResponse = flow.reroute() then: "Flow is rerouted" rerouteResponse.rerouted Wrappers.wait(WAIT_OFFSET) { - northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + flow.retrieveFlowStatus().status == FlowState.UP } and: "Path is not changed to protected path" - def flowPathInfoAfterRerouting = northbound.getFlowPath(flow.flowId) - def newCurrentPath = pathHelper.convert(flowPathInfoAfterRerouting) - newCurrentPath != currentPath - newCurrentPath != currentProtectedPath + def flowPathInfoAfterRerouting = flow.retrieveAllEntityPaths() + def mainPath = flowPathInfoAfterRerouting.getPathNodes(Direction.FORWARD, false) + mainPath != initialMainPath + mainPath != initialProtectedPath //protected path is rerouted too, because more preferable path is exist - def newCurrentProtectedPath = pathHelper.convert(flowPathInfoAfterRerouting.protectedPath) - newCurrentProtectedPath != currentPath - newCurrentProtectedPath != currentProtectedPath - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + def newCurrentProtectedPath = flowPathInfoAfterRerouting.getPathNodes(Direction.FORWARD, true) + newCurrentProtectedPath != initialMainPath + newCurrentProtectedPath != initialProtectedPath + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } where: flowDescription | bandwidth @@ -534,40 +576,40 @@ class ProtectedPathSpec extends HealthCheckSpecification { def uniquePathCount = switchPair.paths.unique(false) { a, b -> a.intersect(b) == [] ? 
1 : 0 }.size() and: "A flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.maximumBandwidth = bandwidth - flow.ignoreBandwidth = bandwidth == 0 - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(bandwidth == 0) + .withProtectedPath(true).build() + .create() - def flowPathInfo = northbound.getFlowPath(flow.flowId) - assert flowPathInfo.protectedPath + def flowPathInfo = flow.retrieveAllEntityPaths() + assert !flowPathInfo.flowPath.protectedPath.isPathAbsent() - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - assert currentPath != currentProtectedPath + def initialMainPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def initialProtectedPath = flowPathInfo.getPathNodes(Direction.FORWARD, true) + assert initialMainPath != initialProtectedPath when: "Make the current and protected path less preferable than alternatives" - def alternativePaths = switchPair.paths.findAll { it != currentPath && it != currentProtectedPath } - alternativePaths.each { pathHelper.makePathMorePreferable(it, currentPath) } - alternativePaths.each { pathHelper.makePathMorePreferable(it, currentProtectedPath) } + def alternativePaths = switchPair.paths.findAll { it != initialMainPath && it != initialProtectedPath } + alternativePaths.each { pathHelper.makePathMorePreferable(it, initialMainPath) } + alternativePaths.each { pathHelper.makePathMorePreferable(it, initialProtectedPath) } and: "Break ISL on the main path (bring port down) to init auto swap" - def islToBreak = pathHelper.getInvolvedIsls(currentPath)[0] + def islToBreak = flowPathInfo.flowPath.getMainPathInvolvedIsls().first() islHelper.breakIsl(islToBreak) then: "Flow is switched to protected path" Wrappers.wait(PROTECTED_PATH_INSTALLATION_TIME) { - def newPathInfo = northbound.getFlowPath(flow.flowId) - def newCurrentPath = pathHelper.convert(newPathInfo) - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - assert newCurrentPath != currentPath + def newPathInfo = flow.retrieveAllEntityPaths() + def newMainPath = newPathInfo.getPathNodes(Direction.FORWARD, false) + assert flow.retrieveFlowStatus().status == FlowState.UP + assert newMainPath != initialMainPath + assert newMainPath == initialProtectedPath - def newCurrentProtectedPath = pathHelper.convert(newPathInfo.protectedPath) - assert newCurrentPath == currentProtectedPath + def newCurrentProtectedPath = newPathInfo.getPathNodes(Direction.FORWARD, true) if (4 <= uniquePathCount) { - assert newCurrentProtectedPath != currentPath - assert newCurrentProtectedPath != currentProtectedPath + assert newCurrentProtectedPath != initialMainPath + assert newCurrentProtectedPath != initialProtectedPath } } @@ -575,7 +617,7 @@ class ProtectedPathSpec extends HealthCheckSpecification { islHelper.restoreIsl(islToBreak) then: "Path of the flow is not changed" - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == currentProtectedPath + flow.retrieveAllEntityPaths().getPathNodes(Direction.FORWARD, false) == initialProtectedPath where: flowDescription | bandwidth @@ -588,24 +630,25 @@ class ProtectedPathSpec extends HealthCheckSpecification { def isls = topology.getIslsForActiveSwitches() def (srcSwitch, dstSwitch) = [isls.first().srcSwitch, isls.first().dstSwitch] - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flow.allocateProtectedPath = true - 
flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withProtectedPath(true).build() + .create() - def flowPathInfo = northbound.getFlowPath(flow.flowId) - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - assert flowPathInfo.protectedPath + def flowPathInfo = flow.retrieveAllEntityPaths() + assert !flowPathInfo.flowPath.protectedPath.isPathAbsent() + + def initialMainPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def initialProtectedPath = flowPathInfo.getPathNodes(Direction.FORWARD, true) when: "Init intentional reroute" - def rerouteResponse = northboundV2.rerouteFlow(flow.flowId) + def rerouteResponse = flow.reroute() then: "Flow is not rerouted" !rerouteResponse.rerouted - def newFlowPathInfo = northbound.getFlowPath(flow.flowId) - pathHelper.convert(newFlowPathInfo) == currentPath - pathHelper.convert(newFlowPathInfo.protectedPath) == currentProtectedPath + def newFlowPathInfo = flow.retrieveAllEntityPaths() + newFlowPathInfo.getPathNodes(Direction.FORWARD, false) == initialMainPath + newFlowPathInfo.getPathNodes(Direction.FORWARD, true) == initialProtectedPath } @Tags([LOW_PRIORITY, ISL_PROPS_DB_RESET]) @@ -619,22 +662,21 @@ class ProtectedPathSpec extends HealthCheckSpecification { isls[1..-1].each { islHelper.setAvailableBandwidth(it, 90) } when: "Create flow without protected path" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flow.maximumBandwidth = bandwidth - flow.allocateProtectedPath = false - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withBandwidth(bandwidth) + .withProtectedPath(false).build() + .create() then: "Flow is created without protected path" - !northbound.getFlowPath(flow.flowId).protectedPath + !flow.retrieveAllEntityPaths().flowPath.protectedPath when: "Update flow: enable protected path" - northboundV2.updateFlow(flow.flowId, flow.tap { it.allocateProtectedPath = true }) + flow.update(flow.tap { it.allocateProtectedPath = true }, FlowState.DEGRADED) then: "Flow state is changed to DEGRADED" - Wrappers.wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flow.flowId).status == FlowState.DEGRADED } - verifyAll(northbound.getFlow(flow.flowId).flowStatusDetails) { - mainFlowPathStatus == "Up" - protectedFlowPathStatus == "Down" + verifyAll(flow.retrieveDetails().statusDetails) { + mainPath == "Up" + protectedPath == "Down" } } @@ -649,17 +691,18 @@ class ProtectedPathSpec extends HealthCheckSpecification { isls[1..-1].each { islHelper.setAvailableBandwidth(it, 90) } when: "Create flow with protected path" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flow.maximumBandwidth = bandwidth - flow.allocateProtectedPath = true - flow.ignoreBandwidth = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(true) + .withProtectedPath(true).build() + .create() + then: "Flow is created with protected path" - northbound.getFlowPath(flow.flowId).protectedPath + !flow.retrieveAllEntityPaths().flowPath.protectedPath.isPathAbsent() and: "One transit vlan is created for main and protected paths" - def flowInfo = database.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetailsFromDB() database.getTransitVlans(flowInfo.forwardPathId, flowInfo.reversePathId).size() == 1 database.getTransitVlans(flowInfo.protectedForwardPathId, flowInfo.protectedReversePathId).size() == 1 } @@ -671,19 +714,17 @@ 
class ProtectedPathSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(3).random() when: "Create a flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build().create() then: "Flow is created with protected path" - def flowPathInfo = northbound.getFlowPath(flow.flowId) - flowPathInfo.protectedPath + def initialFlowPathInfo = flow.retrieveAllEntityPaths() + !initialFlowPathInfo.flowPath.protectedPath.isPathAbsent() - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - currentPath != currentProtectedPath + def initialMainPath = initialFlowPathInfo.getPathNodes(Direction.FORWARD, false) + def initialProtectedPath = initialFlowPathInfo.getPathNodes(Direction.FORWARD, true) + initialMainPath != initialProtectedPath - def protectedIsls = pathHelper.getInvolvedIsls(currentProtectedPath) + def protectedIsls = initialFlowPathInfo.flowPath.getProtectedPathInvolvedIsls() def protectedIslsInfo = protectedIsls.collect { islUtils.getIslInfo(it).get() } allIsls.each { isl -> @@ -699,20 +740,20 @@ class ProtectedPathSpec extends HealthCheckSpecification { islHelper.breakIsl(islToBreakProtectedPath) then: "Protected path is recalculated" - def newProtectedPath + FlowEntityPath newFlowPathInfo Wrappers.wait(PROTECTED_PATH_INSTALLATION_TIME) { - newProtectedPath = pathHelper.convert(northbound.getFlowPath(flow.flowId).protectedPath) - assert newProtectedPath != currentProtectedPath - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + newFlowPathInfo = flow.retrieveAllEntityPaths() + assert newFlowPathInfo.getPathNodes(Direction.FORWARD, true) != initialProtectedPath + assert flow.retrieveFlowStatus().status == FlowState.UP } and: "Current path is not changed" - currentPath == pathHelper.convert(northbound.getFlowPath(flow.flowId)) + initialMainPath == newFlowPathInfo.getPathNodes(Direction.FORWARD, false) and: "Bandwidth is reserved for new protected path on involved ISLs" def allLinks Wrappers.wait(PROTECTED_PATH_INSTALLATION_TIME) { - def newProtectedIsls = pathHelper.getInvolvedIsls(newProtectedPath) + def newProtectedIsls = newFlowPathInfo.flowPath.getProtectedPathInvolvedIsls() allLinks = northbound.getAllLinks() def newProtectedIslsInfo = newProtectedIsls.collect { islUtils.getIslInfo(allLinks, it).get() } @@ -738,7 +779,7 @@ class ProtectedPathSpec extends HealthCheckSpecification { islHelper.restoreIsl(islToBreakProtectedPath) then: "Path is not recalculated again" - pathHelper.convert(northbound.getFlowPath(flow.flowId).protectedPath) == newProtectedPath + flow.retrieveAllEntityPaths().getPathNodes(Direction.FORWARD, true) == newFlowPathInfo.getPathNodes(Direction.FORWARD, true) } @Tags(ISL_RECOVER_ON_FAIL) @@ -749,26 +790,26 @@ class ProtectedPathSpec extends HealthCheckSpecification { and: "A flow without protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = false - flow.maximumBandwidth = bandwidth - flow.ignoreBandwidth = bandwidth == 0 - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(false) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(bandwidth == 0).build() + .create() + def initialFlowPathInfo = flow.retrieveAllEntityPaths() and: "All alternative paths are unavailable 
(bring ports down on the source switch)" - def flowPathPortOnSourceSwitch = pathHelper.convert(northbound.getFlowPath(flow.flowId)).first().portNo + def flowPathPortOnSourceSwitch = initialFlowPathInfo.flowPath.getMainPathInvolvedIsls().first().srcPort def broughtDownIsls = topology.getRelatedIsls(switchPair.getSrc()) .findAll{it.srcSwitch == switchPair.getSrc() && it.srcPort != flowPathPortOnSourceSwitch } islHelper.breakIsls(broughtDownIsls) when: "Update flow: enable protected path(allocateProtectedPath=true)" - northboundV2.updateFlow(flow.flowId, flow.tap { it.allocateProtectedPath = true }) + flow.update(flow.tap { it.allocateProtectedPath = true }, FlowState.DEGRADED) - then: "Flow state is changed to DEGRADED" - Wrappers.wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flow.flowId).status == FlowState.DEGRADED } - verifyAll(northbound.getFlow(flow.flowId)) { - flowStatusDetails.mainFlowPathStatus == "Up" - flowStatusDetails.protectedFlowPathStatus == "Down" + then: "Flow state is changed to DEGRADED as protected path is DOWN" + verifyAll(flow.retrieveDetails().statusDetails) { + mainPath == "Up" + protectedPath == "Down" } where: @@ -783,18 +824,18 @@ class ProtectedPathSpec extends HealthCheckSpecification { given: "Two active neighboring switches with two not overlapping paths at least" def switchPair = switchPairs.all().neighbouring().withAtLeastNNonOverlappingPaths(2).random() - and: "A flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flow.maximumBandwidth = bandwidth - flow.ignoreBandwidth = bandwidth == 0 - flowHelperV2.addFlow(flow) - def flowInfoPath = northbound.getFlowPath(flow.flowId) - assert flowInfoPath.protectedPath + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(bandwidth == 0).build() + .create() + + def flowPathInfo = flow.retrieveAllEntityPaths() + assert !flowPathInfo.flowPath.protectedPath.isPathAbsent() when: "All alternative paths are unavailable" - def mainPath = pathHelper.convert(flowInfoPath) + def mainPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) def untouchableIsls = pathHelper.getInvolvedIsls(mainPath).collectMany { [it, it.reversed] } def altPaths = switchPair.paths.findAll { [it, it.reverse()].every { it != mainPath }} def islsToBreak = altPaths.collectMany { pathHelper.getInvolvedIsls(it) } @@ -804,21 +845,22 @@ class ProtectedPathSpec extends HealthCheckSpecification { then: "Flow status is DEGRADED" Wrappers.wait(WAIT_OFFSET) { - verifyAll(northbound.getFlow(flow.flowId)) { - status == FlowState.DEGRADED.toString() + verifyAll(flow.retrieveDetails()) { + status == FlowState.DEGRADED statusInfo == StatusInfo.OVERLAPPING_PROTECTED_PATH } - assert flowHelper.getLatestHistoryEntry(flow.flowId).payload.find { it.action == REROUTE_FAIL } - assert northboundV2.getFlowHistoryStatuses(flow.flowId, 1).historyStatuses*.statusBecome == ["DEGRADED"] + def rerouteEvent = flow.retrieveFlowHistory().getEntriesByType(REROUTE) + assert rerouteEvent && rerouteEvent.last().payload.last().action == REROUTE_FAILED.payloadLastAction + assert flow.retrieveFlowHistoryStatus(1).statusBecome == [FlowStatusHistoryEvent.DEGRADED] } when: "Update flow: disable protected path(allocateProtectedPath=false)" - northboundV2.updateFlow(flow.flowId, flow.tap { it.allocateProtectedPath = false }) + flow.update(flow.tap { it.allocateProtectedPath = false }) then: "Flow status is UP" Wrappers.wait(WAIT_OFFSET) { - 
verifyAll(northbound.getFlow(flow.flowId)) { - status == FlowState.UP.toString() + verifyAll(flow.retrieveDetails()) { + status == FlowState.UP !statusInfo //statusInfo is cleared after changing flowStatus to UP } } @@ -835,15 +877,16 @@ class ProtectedPathSpec extends HealthCheckSpecification { def switchPair = switchPairs.all(false).withAtLeastNNonOverlappingPaths(4).random() and: "A protected flow" - def flow = flowHelperV2.randomFlow(switchPair).tap { it.allocateProtectedPath = true } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true).build() + .create() when: "Main paths breaks" - def paths = northbound.getFlowPath(flow.flowId) - def mainPath = pathHelper.convert(paths) - def mainPathIsl = pathHelper.getInvolvedIsls(mainPath).first() - def protectedPath = pathHelper.convert(paths.protectedPath) - def protectedPathIsl = pathHelper.getInvolvedIsls(protectedPath).first() + def flowPathInfo = flow.retrieveAllEntityPaths() + def mainPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def mainPathIsl = flowPathInfo.flowPath.getMainPathInvolvedIsls().first() + def protectedPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def protectedPathIsl = flowPathInfo.flowPath.getProtectedPathInvolvedIsls().first() islHelper.breakIsl(mainPathIsl) and: "Protected path breaks when swap is in progress" @@ -856,13 +899,11 @@ class ProtectedPathSpec extends HealthCheckSpecification { log.debug("original main: $mainPath\n original protected: $protectedPath") Wrappers.wait(rerouteDelay + PATH_INSTALLATION_TIME) { Wrappers.timedLoop(3) { //this should be a stable result, all reroutes must finish - assert northbound.getFlowStatus(flow.flowId).status == FlowState.UP - def currentPath = northbound.getFlowPath(flow.flowId) - [currentPath, currentPath.protectedPath].each { - assert pathHelper.getInvolvedIsls(pathHelper.convert(it)).findAll { - it in [mainPathIsl, protectedPathIsl] - }.empty, "Found broken ISL being used in path: $it" - } + assert flow.retrieveFlowStatus().status == FlowState.UP + def currentPath = flow.retrieveAllEntityPaths() + assert currentPath.flowPath.getMainPathInvolvedIsls().intersect([mainPathIsl, protectedPathIsl]).isEmpty() + assert currentPath.flowPath.getProtectedPathInvolvedIsls().intersect([mainPathIsl, protectedPathIsl]).isEmpty() + } } } @@ -873,47 +914,45 @@ class ProtectedPathSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().neighbouring().withExactlyNNonOverlappingPaths(3).random() and: "A flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build().create() - def flowPathInfo = northbound.getFlowPath(flow.flowId) - assert flowPathInfo.protectedPath + def flowPathInfo = flow.retrieveAllEntityPaths() + assert !flowPathInfo.flowPath.protectedPath.isPathAbsent() - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - assert currentPath != currentProtectedPath + def initialMainPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def initialProtectedPath = flowPathInfo.getPathNodes(Direction.FORWARD, true) + assert initialMainPath != initialProtectedPath when: "Make the current and protected path less preferable than alternatives" - def alternativePaths = switchPair.paths.findAll { it != currentPath && it != currentProtectedPath } + def 
alternativePaths = switchPair.paths.findAll { it != initialMainPath && it != initialProtectedPath } withPool { - alternativePaths.eachParallel { pathHelper.makePathMorePreferable(it, currentPath) } - alternativePaths.eachParallel { pathHelper.makePathMorePreferable(it, currentProtectedPath) } + alternativePaths.eachParallel { pathHelper.makePathMorePreferable(it, initialMainPath) } + alternativePaths.eachParallel { pathHelper.makePathMorePreferable(it, initialProtectedPath) } } and: "Init intentional reroute" - def rerouteResponse = northboundV2.rerouteFlow(flow.flowId) + def rerouteResponse = flow.reroute() then: "Flow should be rerouted" rerouteResponse.rerouted and: "Flow main path should be rerouted to a new path and ignore protected path" - def flowPathInfoAfterRerouting - def newCurrentPath + FlowEntityPath flowPathInfoAfterRerouting + def newMainPath Wrappers.wait(RULES_INSTALLATION_TIME) { - flowPathInfoAfterRerouting = northbound.getFlowPath(flow.flowId) - newCurrentPath = pathHelper.convert(flowPathInfoAfterRerouting) - newCurrentPath != currentPath - newCurrentPath != currentProtectedPath + flowPathInfoAfterRerouting = flow.retrieveAllEntityPaths() + newMainPath = flowPathInfoAfterRerouting.getPathNodes(Direction.FORWARD, false) + newMainPath != initialMainPath + newMainPath != initialProtectedPath } and: "Flow protected path shouldn't be rerouted due to lack of non overlapping path" - pathHelper.convert(flowPathInfoAfterRerouting.protectedPath) == currentProtectedPath + flowPathInfoAfterRerouting.getPathNodes(Direction.FORWARD, true) == initialProtectedPath and: "Flow and both its paths are UP" Wrappers.wait(WAIT_OFFSET) { - verifyAll(northboundV2.getFlow(flow.flowId)) { - status == "Up" + verifyAll(flow.retrieveDetails()) { + status == FlowState.UP statusDetails.mainPath == "Up" statusDetails.protectedPath == "Up" } @@ -1009,17 +1048,15 @@ class ProtectedPathSpec extends HealthCheckSpecification { * System takes into account PoP and try not to place protected path into the same transit PoPs. * So, the protected path will be built through the trSw3 because trSw1 and trSw2 are in the same PoP zone. 
* */ - def flow = flowHelperV2.randomFlow(swPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair).withProtectedPath(true).build().create() then: "Main path is built through the preferable path(tr1 or tr2)" - def flowPaths = northbound.getFlowPath(flow.flowId) - def realFlowPathInvolvedSwitches = pathHelper.getInvolvedSwitches(pathHelper.convert(flowPaths))*.dpId + def flowPaths = flow.retrieveAllEntityPaths() + def realFlowPathInvolvedSwitches = flowPaths.flowPath.path.forward.getInvolvedSwitches() realFlowPathInvolvedSwitches == involvedSwP1 || realFlowPathInvolvedSwitches == involvedSwP2 and: "Protected path is built through the non preferable path(tr3)" - pathHelper.getInvolvedSwitches(pathHelper.convert(flowPaths.protectedPath))*.dpId == involvedSwProtected + flowPaths.flowPath.protectedPath.forward.getInvolvedSwitches() == involvedSwProtected } @Tags(LOW_PRIORITY) @@ -1029,19 +1066,18 @@ class ProtectedPathSpec extends HealthCheckSpecification { def (srcSwitch, dstSwitch) = [isls.first().srcSwitch, isls.first().dstSwitch] and: "A flow without protected path" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flow.allocateProtectedPath = false - flowHelperV2.addFlow(flow) - !northbound.getFlowPath(flow.flowId).protectedPath + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withProtectedPath(false).build() + .create() + assert !flow.retrieveAllEntityPaths().flowPath.protectedPath when: "Try to swap paths for flow that doesn't have protected path" - northbound.swapFlowPath(flow.flowId) + flow.swapFlowPath() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorDescription == - "Could not swap paths: Flow $flow.flowId doesn't have protected path" + new FlowPathNotSwappedExpectedError( + ~/Could not swap paths: Flow $flow.flowId doesn't have protected path/).matches(exc) } @Tags(LOW_PRIORITY) @@ -1051,9 +1087,8 @@ class ProtectedPathSpec extends HealthCheckSpecification { then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - exc.responseBodyAsString.to(MessageError).errorDescription == - "Could not swap paths: Flow $NON_EXISTENT_FLOW_ID not found" + new FlowPathNotSwappedExpectedError(HttpStatus.NOT_FOUND, + ~/Could not swap paths: Flow $NON_EXISTENT_FLOW_ID not found/).matches(exc) } @Tags(ISL_PROPS_DB_RESET) @@ -1067,18 +1102,15 @@ class ProtectedPathSpec extends HealthCheckSpecification { isls[1..-1].each { islHelper.setAvailableBandwidth(it, 90) } when: "Create flow with protected path" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flow.maximumBandwidth = bandwidth - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + flowFactory.getBuilder(srcSwitch, dstSwitch) + .withBandwidth(bandwidth) + .withProtectedPath(true).build() + .sendCreateRequest() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not create flow" - errorDetails.errorDescription == "Not enough bandwidth or no path found. " + - "Couldn't find non overlapping protected path" + new FlowNotCreatedWithMissingPathExpectedError( + ~/Not enough bandwidth or no path found. 
Couldn't find non overlapping protected path/).matches(exc) } @Tags([LOW_PRIORITY, ISL_RECOVER_ON_FAIL]) @@ -1090,28 +1122,24 @@ class ProtectedPathSpec extends HealthCheckSpecification { .withAtLeastNNonOverlappingPaths(2).random() and: "A flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build().create() and: "All alternative paths are unavailable (bring ports down on the source switch)" - def flowPathIsl = pathHelper.getInvolvedIsls(pathHelper.convert(northbound.getFlowPath(flow.flowId))) + def flowPathIsl = flow.retrieveAllEntityPaths().flowPath.getMainPathInvolvedIsls() def broughtDownIsls = topology.getRelatedIsls(switchPair.src) - flowPathIsl islHelper.breakIsls(broughtDownIsls) when: "Break ISL on a protected path (bring port down) for changing the flow state to DEGRADED" - def flowPathInfo = northbound.getFlowPath(flow.flowId) - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - def protectedIsls = pathHelper.getInvolvedIsls(currentProtectedPath) - def currentIsls = pathHelper.getInvolvedIsls(currentPath) + def flowPathInfo = flow.retrieveAllEntityPaths() + def protectedIsls = flowPathInfo.flowPath.getProtectedPathInvolvedIsls() + def currentIsls = flowPathInfo.flowPath.getMainPathInvolvedIsls() islHelper.breakIsl(protectedIsls[0]) then: "Flow state is changed to DEGRADED" - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DEGRADED } - verifyAll(northboundV2.getFlow(flow.flowId).statusDetails) { - mainPath == "Up" - protectedPath == "Down" + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.DEGRADED } + verifyAll(flow.retrieveDetails()) { + statusDetails.mainPath == "Up" + statusDetails.protectedPath == "Down" } when: "Break ISL on the main path (bring port down) for changing the flow state to DOWN" @@ -1119,32 +1147,31 @@ class ProtectedPathSpec extends HealthCheckSpecification { then: "Flow state is changed to DOWN" Wrappers.wait(WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN - assert flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION).find{ + assert flow.retrieveFlowStatus().status == FlowState.DOWN + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).find{ it.taskId =~ (/.+ : retry #1 ignore_bw true/) - }?.payload?.last()?.action == REROUTE_FAIL + }?.payload?.last()?.action == REROUTE_FAILED.payloadLastAction } - verifyAll(northboundV2.getFlow(flow.flowId).statusDetails) { - mainPath == "Down" - protectedPath == "Down" + verifyAll(flow.retrieveDetails()) { + statusDetails.mainPath == "Down" + statusDetails.protectedPath == "Down" } when: "Try to swap paths when main/protected paths are not available" - northbound.swapFlowPath(flow.flowId) + flow.swapFlowPath() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorDescription == - "Could not swap paths: Protected flow path $flow.flowId is not in ACTIVE state" + new FlowPathNotSwappedExpectedError( + ~/Could not swap paths: Protected flow path ${flow.flowId} is not in ACTIVE state/).matches(exc) when: "Restore ISL for the main path only" islHelper.restoreIsl(currentIsls[0]) then: "Flow state is still DEGRADED" 
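Throughout these specs the patch also swaps the manual `rawStatusCode`/`MessageError` assertions for dedicated expected-error matcher classes (`FlowPathNotSwappedExpectedError`, `FlowNotCreatedExpectedError`, and similar). A condensed before/after sketch of the swap-path assertion shown a few hunks above, using only lines that appear in this diff (illustrative aside, not an additional patch hunk):

```groovy
// Old style (removed): inspect the raw HTTP error by hand
// def exc = thrown(HttpClientErrorException)
// exc.rawStatusCode == 400
// exc.responseBodyAsString.to(MessageError).errorDescription ==
//         "Could not swap paths: Protected flow path $flow.flowId is not in ACTIVE state"

// New style (added): match the exception against an expected-error class
def exc = thrown(HttpClientErrorException)
new FlowPathNotSwappedExpectedError(
        ~/Could not swap paths: Protected flow path ${flow.flowId} is not in ACTIVE state/).matches(exc)
```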
Wrappers.wait(PROTECTED_PATH_INSTALLATION_TIME) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DEGRADED - verifyAll(northboundV2.getFlow(flow.flowId)) { + assert flow.retrieveFlowStatus().status == FlowState.DEGRADED + verifyAll(flow.retrieveDetails()) { statusDetails.mainPath == "Up" statusDetails.protectedPath == "Down" statusInfo == StatusInfo.OVERLAPPING_PROTECTED_PATH @@ -1152,15 +1179,12 @@ class ProtectedPathSpec extends HealthCheckSpecification { } when: "Try to swap paths when the main path is available and the protected path is not available" - northbound.swapFlowPath(flow.flowId) + flow.swapFlowPath() then: "Human readable error is returned" def exc1 = thrown(HttpClientErrorException) - exc1.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not swap paths for flow" - errorDetails.errorDescription == - "Could not swap paths: Protected flow path $flow.flowId is not in ACTIVE state" + new FlowPathNotSwappedExpectedError( + ~/Could not swap paths: Protected flow path ${flow.flowId} is not in ACTIVE state/).matches(exc1) when: "Restore ISL for the protected path" islHelper.restoreIsl(protectedIsls[0]) @@ -1168,7 +1192,7 @@ class ProtectedPathSpec extends HealthCheckSpecification { then: "Flow state is changed to UP" //it often fails in scope of the whole spec on the hardware env, that's why '* 1.5' is added Wrappers.wait(discoveryInterval * 1.5 + WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + assert flow.retrieveFlowStatus().status == FlowState.UP } } @@ -1178,16 +1202,11 @@ class ProtectedPathSpec extends HealthCheckSpecification { def sw = topology.activeSwitches.first() when: "Create single switch flow" - def flow = flowHelperV2.singleSwitchFlow(sw) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + flowFactory.getBuilder(sw, sw).withProtectedPath(true).build().sendCreateRequest() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not create flow" - errorDetails.errorDescription == "Couldn't setup protected path for one-switch flow" + new FlowNotCreatedExpectedError(~/Couldn't setup protected path for one-switch flow/).matches(exc) } @Tags(LOW_PRIORITY) @@ -1196,18 +1215,14 @@ class ProtectedPathSpec extends HealthCheckSpecification { def sw = topology.activeSwitches.first() and: "A flow without protected path" - def flow = flowHelperV2.singleSwitchFlow(sw) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(sw, sw).build().create() when: "Update flow: enable protected path" - northboundV2.updateFlow(flow.flowId, flow.tap { it.allocateProtectedPath = true }) + flow.update(flow.tap { it.allocateProtectedPath = true }) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not update flow" - errorDetails.errorDescription == "Couldn't setup protected path for one-switch flow" + new FlowNotUpdatedExpectedError(~/Couldn't setup protected path for one-switch flow/).matches(exc) } @Tags(ISL_RECOVER_ON_FAIL) @@ -1219,19 +1234,16 @@ class ProtectedPathSpec extends HealthCheckSpecification { islHelper.breakIsls(broughtDownIsls) when: "Try to create a new flow with protected path" - def flow = 
flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flow.maximumBandwidth = bandwidth - flow.ignoreBandwidth = bandwidth == 0 - flowHelperV2.addFlow(flow) + flowFactory.getBuilder(switchPair) + .withProtectedPath(true) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(bandwidth == 0).build() + .sendCreateRequest() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not create flow" - errorDetails.errorDescription == "Not enough bandwidth or no path found." + - " Couldn't find non overlapping protected path" + new FlowNotCreatedWithMissingPathExpectedError( + ~/Not enough bandwidth or no path found. Couldn't find non overlapping protected path/).matches(exc) where: flowDescription | bandwidth @@ -1239,70 +1251,62 @@ class ProtectedPathSpec extends HealthCheckSpecification { "an unmetered" | 0 } + @Issue("https://github.com/telstra/open-kilda/issues/5699") @Tags(ISL_RECOVER_ON_FAIL) def "System doesn't reroute main flow path when protected path is broken and new alt path is available\ (altPath is more preferable than mainPath)"() { given: "Two active neighboring switches with three diverse paths at least" def switchPair = switchPairs.all().neighbouring().withExactlyNNonOverlappingPaths(3).random() - and: "A flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build().create() and: "All alternative paths are unavailable (bring ports down on the source switch)" - def flowPathInfo = northbound.getFlowPath(flow.flowId) - def currentPath = pathHelper.convert(flowPathInfo) - def currentPathIsls = pathHelper.getInvolvedIsls(currentPath) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - def protectedIslToBreak = pathHelper.getInvolvedIsls(currentProtectedPath)[0] - def broughtDownIsls = topology.getRelatedIsls(switchPair.src) - currentPathIsls.first() - protectedIslToBreak + def initialFlowPathInfo = flow.retrieveAllEntityPaths() + def initialMainPath = initialFlowPathInfo.getPathNodes(Direction.FORWARD, false) + def mainPathIsl = initialFlowPathInfo.flowPath.getMainPathInvolvedIsls().first() + def initialProtectedPath = initialFlowPathInfo.getPathNodes(Direction.FORWARD, true) + def protectedIslToBreak = initialFlowPathInfo.flowPath.getProtectedPathInvolvedIsls().first() + def broughtDownIsls = topology.getRelatedIsls(switchPair.src) - mainPathIsl - protectedIslToBreak islHelper.breakIsls(broughtDownIsls) and: "ISL on a protected path is broken(bring port down) for changing the flow state to DEGRADED" islHelper.breakIsl(protectedIslToBreak) - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DEGRADED } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.DEGRADED } when: "Make the current path less preferable than alternative path" - def alternativePath = switchPair.paths.find { it != currentPath && it != currentProtectedPath } - def currentIsl = pathHelper.getInvolvedIsls(currentPath)[0] + def alternativePath = switchPair.paths.find { it != initialMainPath && it != initialProtectedPath } def alternativeIsl = pathHelper.getInvolvedIsls(alternativePath)[0] switchPair.paths.findAll { it != alternativePath }.each { 
pathHelper.makePathMorePreferable(alternativePath, it) } - assert northbound.getLink(currentIsl).cost > northbound.getLink(alternativeIsl).cost + assert northbound.getLink(mainPathIsl).cost > northbound.getLink(alternativeIsl).cost and: "Make alternative path available(bring port up on the source switch)" islHelper.restoreIsl(alternativeIsl) then: "Flow state is changed to UP" - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } and: "Protected path is recalculated only" - def newFlowPathInfo = northbound.getFlowPath(flow.flowId) - pathHelper.convert(newFlowPathInfo) == currentPath - pathHelper.convert(newFlowPathInfo.protectedPath) == alternativePath + def newFlowPathInfo = flow.retrieveAllEntityPaths() + newFlowPathInfo.getPathNodes(Direction.FORWARD, false) == initialMainPath + newFlowPathInfo.getPathNodes(Direction.FORWARD, true) == alternativePath } @Tags(LOW_PRIORITY) def "System doesn't allow to enable the pinned flag on a protected flow"() { given: "A protected flow" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build().create() when: "Update flow: enable the pinned flag(pinned=true)" - northboundV2.updateFlow(flow.flowId, flow.tap { it.pinned = true }) + flow.update(flow.tap { it.pinned = true }) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not update flow" - errorDetails.errorDescription == "Flow flags are not valid, unable to process pinned protected flow" + new FlowNotUpdatedExpectedError(~/Flow flags are not valid, unable to process pinned protected flow/).matches(exc) } List getCreatedMeterIds(SwitchId switchId) { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathV1Spec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathV1Spec.groovy index 2ddd8e1d7de..e8c32f50f85 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathV1Spec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ProtectedPathV1Spec.groovy @@ -1,35 +1,43 @@ package org.openkilda.functionaltests.spec.flows +import static groovyx.gpars.GParsPool.withPool +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_PROPS_DB_RESET +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter +import static org.openkilda.functionaltests.helpers.model.FlowActionType.REROUTE +import static org.openkilda.functionaltests.helpers.model.FlowActionType.REROUTE_FAILED +import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID +import static org.openkilda.model.cookie.CookieBase.CookieType.SERVICE_OR_FLOW_SEGMENT +import static org.openkilda.testing.Constants.NON_EXISTENT_FLOW_ID +import static org.openkilda.testing.Constants.PROTECTED_PATH_INSTALLATION_TIME +import static 
org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.flow.FlowNotCreatedExpectedError +import org.openkilda.functionaltests.error.flow.FlowNotCreatedWithMissingPathExpectedError +import org.openkilda.functionaltests.error.flow.FlowNotUpdatedExpectedError +import org.openkilda.functionaltests.error.flow.FlowPathNotSwappedExpectedError import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.messaging.error.MessageError +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.model.stats.Direction +import org.openkilda.messaging.info.rule.FlowEntry import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie import org.openkilda.testing.service.traffexam.TraffExamService -import org.openkilda.testing.tools.FlowTrafficExamBuilder + import org.springframework.beans.factory.annotation.Autowired +import org.springframework.http.HttpStatus import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative import spock.lang.See import spock.lang.Shared -import javax.inject.Provider import java.time.Instant - -import static groovyx.gpars.GParsPool.withPool -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_PROPS_DB_RESET -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_ACTION -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_FAIL -import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter -import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID -import static org.openkilda.model.cookie.CookieBase.CookieType.SERVICE_OR_FLOW_SEGMENT -import static org.openkilda.testing.Constants.NON_EXISTENT_FLOW_ID -import static org.openkilda.testing.Constants.PROTECTED_PATH_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET +import javax.inject.Provider @See("https://github.com/telstra/open-kilda/tree/develop/docs/design/solutions/protected-paths") @Narrative("""Protected path - it is pre-calculated, reserved, and deployed (except ingress rule), @@ -48,32 +56,45 @@ Main and protected paths can't use the same link.""") @Tags([LOW_PRIORITY]) class ProtectedPathV1Spec extends HealthCheckSpecification { - @Autowired @Shared + @Autowired + @Shared + FlowFactory flowFactory + + @Autowired + @Shared Provider traffExamProvider + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory + + public static final Closure REQUIRED_COOKIE = { Long cookie -> !new Cookie(cookie).serviceFlag && new Cookie(cookie).type == SERVICE_OR_FLOW_SEGMENT } + def "Able to create a flow with protected path when maximumBandwidth=#bandwidth, vlan=#vlanId"() { given: "Two active not neighboring switches with two diverse paths at least" def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(2).random() when: "Create flow with protected path" - def flow = flowHelper.randomFlow(switchPair) - flow.allocateProtectedPath = true - flow.maximumBandwidth = bandwidth - flow.ignoreBandwidth = bandwidth == 0 - flow.source.vlanId = vlanId - 
flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(bandwidth == 0) + .withSourceVlan(vlanId).build() + .createV1() then: "Flow is created with protected path" - def flowPathInfo = northbound.getFlowPath(flow.id) - flowPathInfo.protectedPath + def flowPathInfo = flow.retrieveAllEntityPaths() + !flowPathInfo.flowPath.protectedPath.isPathAbsent() and: "Rules for main and protected paths are created" - Wrappers.wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.id) } + Wrappers.wait(WAIT_OFFSET) { + HashMap> flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + } and: "Validation of flow must be successful" - northbound.validateFlow(flow.id).each { direction -> - assert direction.discrepancies.empty - } + flow.validateAndCollectDiscrepancies().isEmpty() where: bandwidth | vlanId @@ -86,49 +107,90 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(2).random() when: "Create flow without protected path" - def flow = flowHelper.randomFlow(switchPair) - flow.allocateProtectedPath = false - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(false).build() + .createV1() then: "Flow is created without protected path" - !northbound.getFlowPath(flow.id).protectedPath - def flowInfo = northbound.getFlow(flow.id) - !flowInfo.flowStatusDetails + !flow.retrieveAllEntityPaths().flowPath.protectedPath + def flowInfo = flow.retrieveDetailsV1() + !flowInfo.statusDetails + + and: "Source switch passes validation" + def initialSrcValidation = switchHelper.validate(switchPair.src.dpId) + initialSrcValidation.isAsExpected() + + and: "Cookies are created by flow" + HashMap initialAmountOfFlowRules = [switchPair.src.dpId, switchPair.dst.dpId] + .collectEntries {swId -> + def createdCookies = switchRulesFactory.get(swId).getRules() + .findAll { !new Cookie(it.cookie).serviceFlag }*.cookie + + def swProps = switchHelper.getCachedSwProps(swId) + def amountOfServer42Rules = 0 + + if(swProps.server42FlowRtt){ + amountOfServer42Rules +=1 + swId == switchPair.src.dpId && flow.source.vlanId && ++amountOfServer42Rules + swId == switchPair.dst.dpId && flow.destination.vlanId && ++amountOfServer42Rules + } + def amountOfFlowRules = 3 + amountOfServer42Rules + assert createdCookies.size() == amountOfFlowRules + [(swId): amountOfFlowRules] + } when: "Update flow: enable protected path(allocateProtectedPath=true)" def currentLastUpdate = flowInfo.lastUpdated - flowHelper.updateFlow(flow.id, flow.tap { it.allocateProtectedPath = true }) + flow.updateV1(flow.tap { it.allocateProtectedPath = true }) then: "Protected path is enabled" - def flowPathInfoAfterUpdating = northbound.getFlowPath(flow.id) - flowPathInfoAfterUpdating.protectedPath - northbound.getFlow(flow.id).flowStatusDetails - def flowInfoFromDb = database.getFlow(flow.id) - def protectedForwardCookie = flowInfoFromDb.protectedForwardPath.cookie.value - def protectedReverseCookie = flowInfoFromDb.protectedReversePath.cookie.value + def flowPathInfoAfterUpdating = flow.retrieveAllEntityPaths() + !flowPathInfoAfterUpdating.flowPath.protectedPath.isPathAbsent() + flow.retrieveDetailsV1().statusDetails + def flowInfoFromDb = flow.retrieveDetailsFromDB() + def 
protectedFlowCookies = [flowInfoFromDb.protectedForwardPath.cookie.value, flowInfoFromDb.protectedReversePath.cookie.value] - Instant.parse(currentLastUpdate) < Instant.parse(northbound.getFlow(flow.id).lastUpdated) + Instant.parse(currentLastUpdate) < Instant.parse(flow.retrieveDetailsV1().lastUpdated) and: "Rules for main and protected paths are created" - Wrappers.wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.id) } + Wrappers.wait(WAIT_OFFSET) { + HashMap> flowInvolvedSwitchesWithRules = flowPathInfoAfterUpdating.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + + def cookiesAfterEnablingProtectedPath = flowInvolvedSwitchesWithRules.get(switchPair.src.dpId) + .findAll { !new Cookie(it.cookie).serviceFlag }*.cookie + // initialAmountOfFlowRules was collected for flow without protected path + one for protected path + assert cookiesAfterEnablingProtectedPath.size() == initialAmountOfFlowRules.get(switchPair.src.dpId) + 1 + } + + def srcValidation = switchHelper.validate(switchPair.src.dpId) + srcValidation.isAsExpected() + srcValidation.rules.proper.cookie.findAll(REQUIRED_COOKIE).size() == initialSrcValidation.rules.proper.cookie.findAll(REQUIRED_COOKIE).size() + 1 + when: "Update flow: disable protected path(allocateProtectedPath=false)" - def protectedFlowPath = northbound.getFlowPath(flow.id).protectedPath.forwardPath - northbound.updateFlow(flow.id, flow.tap { it.allocateProtectedPath = false }) + def protectedPathSwitches = flowPathInfoAfterUpdating.flowPath.protectedPath.forward.getInvolvedSwitches() + flow.updateV1(flow.tap { it.allocateProtectedPath = false }) then: "Protected path is disabled" - !northbound.getFlowPath(flow.id).protectedPath - !northbound.getFlow(flow.id).flowStatusDetails + !flow.retrieveAllEntityPaths().flowPath.protectedPath + !flow.retrieveDetailsV1().statusDetails + + and: "Source switch passes validation" + verifyAll(switchHelper.validate(switchPair.src.dpId)) { + it.isAsExpected() + it.rules.proper.cookie.findAll(REQUIRED_COOKIE).size() == initialSrcValidation.rules.proper.cookie.findAll(REQUIRED_COOKIE).size() + } and: "Rules for protected path are deleted" Wrappers.wait(WAIT_OFFSET) { - assert northbound.getFlowStatus(flow.id).status == FlowState.UP - protectedFlowPath.each { sw -> - def rules = northbound.getSwitchRules(sw.switchId).flowEntries.findAll { + assert flow.retrieveFlowStatus().status == FlowState.UP + protectedPathSwitches.each { sw -> + def rules = switchRulesFactory.get(sw).getRules().findAll { !new Cookie(it.cookie).serviceFlag } - assert rules.every { it != protectedForwardCookie && it != protectedReverseCookie } - } + assert rules.findAll { it.cookie in protectedFlowCookies }.isEmpty() } } } @@ -137,16 +199,11 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { def sw = topology.activeSwitches.first() when: "Create single switch flow" - def flow = flowHelper.singleSwitchFlow(sw) - flow.allocateProtectedPath = true - flowHelper.addFlow(flow) + flowFactory.getBuilder(sw, sw).withProtectedPath(true).build().sendCreateRequestV1() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not create flow" - errorDetails.errorDescription == "Couldn't setup protected path for one-switch flow" + new FlowNotCreatedExpectedError(~/Couldn't 
setup protected path for one-switch flow/).matches(exc) } def "Unable to update a single switch flow to enable protected path"() { @@ -154,18 +211,14 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { def sw = topology.activeSwitches.first() and: "A flow without protected path" - def flow = flowHelper.singleSwitchFlow(sw) - flowHelper.addFlow(flow) + def flow = flowFactory.getRandom(sw, sw) when: "Update flow: enable protected path" - northbound.updateFlow(flow.id, flow.tap { it.allocateProtectedPath = true }) + flow.updateV1(flow.tap { it.allocateProtectedPath = true }) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not update flow" - errorDetails.errorDescription == "Couldn't setup protected path for one-switch flow" + new FlowNotUpdatedExpectedError(~/Couldn't setup protected path for one-switch flow/).matches(exc) } @Tags(ISL_PROPS_DB_RESET) @@ -179,18 +232,15 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { isls[1..-1].each { islHelper.setAvailableBandwidth(it, 90) } when: "Create flow with protected path" - def flow = flowHelper.randomFlow(srcSwitch, dstSwitch) - flow.maximumBandwidth = bandwidth - flow.allocateProtectedPath = true - flowHelper.addFlow(flow) + flowFactory.getBuilder(srcSwitch, dstSwitch) + .withBandwidth(bandwidth) + .withProtectedPath(true).build() + .sendCreateRequestV1() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not create flow" - errorDetails.errorDescription == "Not enough bandwidth or no path found. " + - "Couldn't find non overlapping protected path" + new FlowNotCreatedWithMissingPathExpectedError( + ~/Not enough bandwidth or no path found. Couldn't find non overlapping protected path/).matches(exc) } @Tags(ISL_RECOVER_ON_FAIL) @@ -201,19 +251,16 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { islHelper.breakIsls(broughtDownIsls) when: "Try to create a new flow with protected path" - def flow = flowHelper.randomFlow(switchPair) - flow.allocateProtectedPath = true - flow.maximumBandwidth = bandwidth - flow.ignoreBandwidth = bandwidth == 0 - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true) + .withBandwidth(bandwidth) + .withIgnoreBandwidth(bandwidth == 0).build() + .sendCreateRequestV1() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not create flow" - errorDetails.errorDescription == "Not enough bandwidth or no path found." + - " Couldn't find non overlapping protected path" + new FlowNotCreatedWithMissingPathExpectedError( + ~/Not enough bandwidth or no path found. 
Couldn't find non overlapping protected path/).matches(exc) where: flowDescription | bandwidth @@ -228,64 +275,55 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { .withTraffgensOnBothEnds() .withPathHavingAtLeastNSwitches(4) .random() - def flow = flowHelper.randomFlow(switchPair, true) - flow.allocateProtectedPath = false - flowHelper.addFlow(flow) - assert !northbound.getFlowPath(flow.id).protectedPath + def flow = flowFactory.getBuilder(switchPair, true) + .withProtectedPath(false).build() + .createV1() - and: "Cookies are created by flow" - def createdCookiesSrcSw = northbound.getSwitchRules(switchPair.src.dpId).flowEntries.findAll { - !new Cookie(it.cookie).serviceFlag - }*.cookie - def createdCookiesDstSw = northbound.getSwitchRules(switchPair.dst.dpId).flowEntries.findAll { - !new Cookie(it.cookie).serviceFlag - }*.cookie - def srcSwProps = switchHelper.getCachedSwProps(switchPair.src.dpId) - def amountOfserver42Rules = srcSwProps.server42FlowRtt ? 1 : 0 - def amountOfFlowRulesSrcSw = 3 + amountOfserver42Rules - if (srcSwProps.server42FlowRtt && flow.source.vlanId) { - amountOfFlowRulesSrcSw += 1 - } - assert createdCookiesSrcSw.size() == amountOfFlowRulesSrcSw - def dstSwProps = switchHelper.getCachedSwProps(switchPair.dst.dpId) - def amountOfserver42RulesDstSw = dstSwProps.server42FlowRtt ? 1 : 0 - def amountOfFlowRulesDstSw = 3 + amountOfserver42RulesDstSw - if (dstSwProps.server42FlowRtt && flow.destination.vlanId) { - amountOfFlowRulesDstSw += 1 - } - assert createdCookiesDstSw.size() == amountOfFlowRulesDstSw + assert !flow.retrieveAllEntityPaths().flowPath.protectedPath + + and: "Number of flow-related cookies has been collected for both source and destination switch" + HashMap initialAmountOfFlowRules = [switchPair.src.dpId, switchPair.dst.dpId] + .collectEntries { + [(it): switchHelper.validate(it).rules.proper.cookie.findAll(REQUIRED_COOKIE).size()] + } as HashMap when: "Update flow: enable protected path(allocateProtectedPath=true)" - flowHelper.updateFlow(flow.id, flow.tap { it.allocateProtectedPath = true }) + flow.updateV1(flow.tap { it.allocateProtectedPath = true }) then: "Protected path is enabled" - def flowPathInfo = northbound.getFlowPath(flow.id) - flowPathInfo.protectedPath - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - currentPath != currentProtectedPath + def flowPathInfo = flow.retrieveAllEntityPaths() + !flowPathInfo.flowPath.protectedPath.isPathAbsent() + + def initialMainPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def initialProtectedPath = flowPathInfo.getPathNodes(Direction.FORWARD, true) + initialMainPath != initialProtectedPath and: "Rules for main and protected paths are created" Wrappers.wait(WAIT_OFFSET) { - flowHelper.verifyRulesOnProtectedFlow(flow.id) - def cookiesAfterEnablingProtectedPath = northbound.getSwitchRules(switchPair.src.dpId).flowEntries.findAll { - !new Cookie(it.cookie).serviceFlag - }*.cookie - // amountOfFlowRules for main path + one for protected path - cookiesAfterEnablingProtectedPath.size() == amountOfFlowRulesSrcSw + 1 + HashMap> flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + } + + and: "Source and destination switches pass validation" + [switchPair.src.dpId, switchPair.dst.dpId].each { switchId -> + def switchValidateInfo = 
switchHelper.validate(switchId) + // + 1 for protected path + assert switchValidateInfo.rules.proper.cookie.findAll(REQUIRED_COOKIE).size() == initialAmountOfFlowRules.get(switchId) + 1 + assert switchValidateInfo.isAsExpected() } and: "No rule discrepancies on every switch of the flow on the main path" - def mainSwitches = pathHelper.getInvolvedSwitches(currentPath) - switchHelper.synchronizeAndCollectFixedDiscrepancies(mainSwitches*.getDpId()).isEmpty() + def mainSwitches = flowPathInfo.flowPath.path.forward.getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(mainSwitches).isEmpty() and: "No rule discrepancies on every switch of the flow on the protected path)" - def protectedSwitches = pathHelper.getInvolvedSwitches(currentProtectedPath) - switchHelper.synchronizeAndCollectFixedDiscrepancies(protectedSwitches*.getDpId()).isEmpty() + def protectedSwitches = flowPathInfo.flowPath.protectedPath.forward.getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(protectedSwitches).isEmpty() and: "The flow allows traffic(on the main path)" def traffExam = traffExamProvider.get() - def exam = new FlowTrafficExamBuilder(topology, traffExam).buildBidirectionalExam(flow, 1000, 5) + def exam = flow.traffExam(traffExam, 1000, 5) withPool { [exam.forward, exam.reverse].eachParallel { direction -> def resources = traffExam.startExam(direction) @@ -297,20 +335,20 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { when: "Swap flow paths" def srcSwitchCreatedMeterIds = getCreatedMeterIds(switchPair.src.dpId) def dstSwitchCreatedMeterIds = getCreatedMeterIds(switchPair.dst.dpId) - def currentLastUpdate = northbound.getFlow(flow.id).lastUpdated - northbound.swapFlowPath(flow.id) + def currentLastUpdate = flow.retrieveDetailsV1().lastUpdated + flow.swapFlowPath() then: "Flow paths are swapped" - Wrappers.wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flow.id).status == FlowState.UP } - def flowPathInfoAfterSwapping = northbound.getFlowPath(flow.id) - def newCurrentPath = pathHelper.convert(flowPathInfoAfterSwapping) - def newCurrentProtectedPath = pathHelper.convert(flowPathInfoAfterSwapping.protectedPath) - newCurrentPath != currentPath - newCurrentPath == currentProtectedPath - newCurrentProtectedPath != currentProtectedPath - newCurrentProtectedPath == currentPath + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.UP } + def flowPathInfoAfterSwapping = flow.retrieveAllEntityPaths() + def newMainPath = flowPathInfoAfterSwapping.getPathNodes(Direction.FORWARD, false) + def newProtectedPath = flowPathInfoAfterSwapping.getPathNodes(Direction.FORWARD, true) + verifyAll { + newMainPath == initialProtectedPath + newProtectedPath == initialMainPath + } - Instant.parse(currentLastUpdate) < Instant.parse(northbound.getFlow(flow.id).lastUpdated) + Instant.parse(currentLastUpdate) < Instant.parse(flow.retrieveDetailsV1().lastUpdated) and: "New meter is created on the src and dst switches" def newSrcSwitchCreatedMeterIds = getCreatedMeterIds(switchPair.src.dpId) @@ -319,42 +357,40 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { newSrcSwitchCreatedMeterIds.sort() != srcSwitchCreatedMeterIds.sort() || srcSwitchCreatedMeterIds.empty newDstSwitchCreatedMeterIds.sort() != dstSwitchCreatedMeterIds.sort() || dstSwitchCreatedMeterIds.empty + and: "No rule discrepancies when doing flow validation" + flow.validateAndCollectDiscrepancies().isEmpty() + and: "Rules are updated" - Wrappers.wait(WAIT_OFFSET) { 
flowHelper.verifyRulesOnProtectedFlow(flow.id) } + HashMap> flowInvolvedSwitchesWithRules + Wrappers.wait(WAIT_OFFSET) { + flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + } and: "Old meter is deleted on the src and dst switches" - Wrappers.wait(WAIT_OFFSET) { - [switchPair.src.dpId, switchPair.dst.dpId].each { switchId -> - def switchValidateInfo = switchHelper.validate(switchId) - if(switchValidateInfo.meters) { - assert switchValidateInfo.meters.proper.findAll({dto -> !isDefaultMeter(dto)}).size() == 1 - } - assert switchValidateInfo.rules.proper.findAll { def cookie = new Cookie(it.getCookie()) - !cookie.serviceFlag && cookie.type == SERVICE_OR_FLOW_SEGMENT }.size() == - (switchId == switchPair.src.dpId) ? amountOfFlowRulesSrcSw + 1 : amountOfFlowRulesDstSw + 1 - switchValidateInfo.isAsExpected() + [switchPair.src.dpId, switchPair.dst.dpId].each { switchId -> + def switchValidateInfo = switchHelper.validate(switchId) + if (switchValidateInfo.meters) { + assert switchValidateInfo.meters.proper.findAll({ dto -> !isDefaultMeter(dto) }).size() == 1 } + assert switchValidateInfo.rules.proper.cookie.findAll(REQUIRED_COOKIE).size() == initialAmountOfFlowRules.get(switchId) + 1 + assert switchValidateInfo.isAsExpected() } and: "Transit switches store the correct info about rules and meters" - def involvedTransitSwitches = (currentPath[1..-2].switchId + currentProtectedPath[1..-2].switchId).unique() + def involvedTransitSwitches = (mainSwitches[1..-2] + protectedSwitches[1..-2]).unique() Wrappers.wait(WAIT_OFFSET) { assert switchHelper.validateAndCollectFoundDiscrepancies(involvedTransitSwitches).isEmpty() } - and: "No rule discrepancies when doing flow validation" - northbound.validateFlow(flow.id).each { assert it.discrepancies.empty } - - and: "All rules for main and protected paths are updated" - Wrappers.wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.id) } - and: "No rule discrepancies on every switch of the flow on the main path" - def newMainSwitches = pathHelper.getInvolvedSwitches(newCurrentPath) - switchHelper.synchronizeAndCollectFixedDiscrepancies(newMainSwitches*.getDpId()).isEmpty() + def newMainSwitches = flowPathInfoAfterSwapping.flowPath.path.forward.getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(newMainSwitches).isEmpty() and: "No rule discrepancies on every switch of the flow on the protected path)" - def newProtectedSwitches = pathHelper.getInvolvedSwitches(newCurrentProtectedPath) - switchHelper.synchronizeAndCollectFixedDiscrepancies(newProtectedSwitches*.getDpId()).isEmpty() + def newProtectedSwitches = flowPathInfoAfterSwapping.flowPath.protectedPath.forward.getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(newProtectedSwitches).isEmpty() and: "The flow allows traffic(on the protected path)" withPool { @@ -372,19 +408,19 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { def (srcSwitch, dstSwitch) = [isls.first().srcSwitch, isls.first().dstSwitch] and: "A flow without protected path" - def flow = flowHelper.randomFlow(srcSwitch, dstSwitch) - flow.allocateProtectedPath = false - flowHelper.addFlow(flow) - !northbound.getFlowPath(flow.id).protectedPath + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withProtectedPath(false).build() + .createV1() + + assert 
!flow.retrieveAllEntityPaths().flowPath.protectedPath when: "Try to swap paths for flow that doesn't have protected path" - northbound.swapFlowPath(flow.id) + flow.swapFlowPath() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorDescription == - "Could not swap paths: Flow $flow.id doesn't have protected path" + new FlowPathNotSwappedExpectedError( + ~/Could not swap paths: Flow $flow.flowId doesn't have protected path/).matches(exc) } def "Unable to swap paths for a non-existent flow"() { @@ -393,9 +429,8 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - exc.responseBodyAsString.to(MessageError).errorDescription == - "Could not swap paths: Flow $NON_EXISTENT_FLOW_ID not found" + new FlowPathNotSwappedExpectedError(HttpStatus.NOT_FOUND, + ~/Could not swap paths: Flow $NON_EXISTENT_FLOW_ID not found/).matches(exc) } @Tags(ISL_RECOVER_ON_FAIL) @@ -404,28 +439,26 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { def switchPair = switchPairs.all().neighbouring().withAtLeastNNonOverlappingPaths(2).random() and: "A flow with protected path" - def flow = flowHelper.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true).build() + .createV1() and: "All alternative paths are unavailable (bring ports down on the source switch)" - def flowPathIsl = pathHelper.getInvolvedIsls(pathHelper.convert(northbound.getFlowPath(flow.id).forwardPath)) + def flowPathIsl = flow.retrieveAllEntityPaths().flowPath.getMainPathInvolvedIsls() def broughtDownIsls = topology.getRelatedIsls(switchPair.src) - flowPathIsl islHelper.breakIsls(broughtDownIsls) when: "Break ISL on a protected path (bring port down) for changing the flow state to DEGRADED" - def flowPathInfo = northbound.getFlowPath(flow.id) - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - def protectedIsls = pathHelper.getInvolvedIsls(currentProtectedPath) - def currentIsls = pathHelper.getInvolvedIsls(currentPath) + def flowPathInfo = flow.retrieveAllEntityPaths() + def protectedIsls = flowPathInfo.flowPath.getProtectedPathInvolvedIsls() + def currentIsls = flowPathInfo.flowPath.getMainPathInvolvedIsls() islHelper.breakIsl(protectedIsls[0]) then: "Flow state is changed to DEGRADED" - Wrappers.wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flow.id).status == FlowState.DEGRADED } - verifyAll(northbound.getFlow(flow.id).flowStatusDetails) { - mainFlowPathStatus == "Up" - protectedFlowPathStatus == "Down" + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == FlowState.DEGRADED } + verifyAll(flow.retrieveDetailsV1().statusDetails) { + mainPath == "Up" + protectedPath == "Down" } when: "Break ISL on the main path (bring port down) for changing the flow state to DOWN" @@ -433,74 +466,67 @@ class ProtectedPathV1Spec extends HealthCheckSpecification { then: "Flow state is changed to DOWN" Wrappers.wait(WAIT_OFFSET) { - assert northbound.getFlowStatus(flow.id).status == FlowState.DOWN - assert flowHelper.getHistoryEntriesByAction(flow.id, REROUTE_ACTION).find { + assert flow.retrieveFlowStatus().status == FlowState.DOWN + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).find { it.taskId =~ (/.+ : retry #1 
ignore_bw true/) - }?.payload?.last()?.action == REROUTE_FAIL + }?.payload?.last()?.action == REROUTE_FAILED.payloadLastAction } - verifyAll(northbound.getFlow(flow.id).flowStatusDetails) { - mainFlowPathStatus == "Down" - protectedFlowPathStatus == "Down" + verifyAll(flow.retrieveDetailsV1().statusDetails) { + mainPath == "Down" + protectedPath == "Down" } when: "Try to swap paths when main/protected paths are not available" - northbound.swapFlowPath(flow.id) + flow.swapFlowPath() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorDescription == - "Could not swap paths: Protected flow path $flow.id is not in ACTIVE state" + new FlowPathNotSwappedExpectedError( + ~/Could not swap paths: Protected flow path ${flow.flowId} is not in ACTIVE state/).matches(exc) when: "Restore ISL for the main path only" islHelper.restoreIsl(currentIsls[0]) then: "Flow state is still DEGRADED" Wrappers.wait(PROTECTED_PATH_INSTALLATION_TIME) { - assert northbound.getFlowStatus(flow.id).status == FlowState.DEGRADED - verifyAll(northbound.getFlow(flow.id).flowStatusDetails) { - mainFlowPathStatus == "Up" - protectedFlowPathStatus == "Down" + assert flow.retrieveFlowStatus().status == FlowState.DEGRADED + verifyAll(flow.retrieveDetailsV1().statusDetails) { + mainPath == "Up" + protectedPath == "Down" } } when: "Try to swap paths when the main path is available and the protected path is not available" - northbound.swapFlowPath(flow.id) + flow.swapFlowPath() then: "Human readable error is returned" def exc1 = thrown(HttpClientErrorException) - exc1.rawStatusCode == 400 - def errorDetails = exc1.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not swap paths for flow" - errorDetails.errorDescription == - "Could not swap paths: Protected flow path $flow.id is not in ACTIVE state" + new FlowPathNotSwappedExpectedError( + ~/Could not swap paths: Protected flow path ${flow.flowId} is not in ACTIVE state/).matches(exc1) when: "Restore ISL for the protected path" islHelper.restoreIsl(protectedIsls[0]) then: "Flow state is changed to UP" - //it often fails in scope of the whole spec on the hardware env, that's why '* 1.5' is added - Wrappers.wait(discoveryInterval * 1.5 + WAIT_OFFSET) { - assert northbound.getFlowStatus(flow.id).status == FlowState.UP + //it often fails in scope of the whole spec on the hardware env, that's why '* 2' is added + Wrappers.wait(discoveryInterval * 2 + WAIT_OFFSET) { + assert flow.retrieveFlowStatus().status == FlowState.UP } } def "System doesn't allow to enable the pinned flag on a protected flow"() { given: "A protected flow" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelper.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true).build() + .createV1() when: "Update flow: enable the pinned flag(pinned=true)" - northbound.updateFlow(flow.id, flow.tap { it.pinned = true }) + flow.updateV1(flow.tap { it.pinned = true }) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not update flow" - errorDetails.errorDescription == "Flow flags are not valid, unable to process pinned protected flow" + new FlowNotUpdatedExpectedError(~/Flow flags are not valid, 
unable to process pinned protected flow/).matches(exc) } List getCreatedMeterIds(SwitchId switchId) { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/QinQFlowSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/QinQFlowSpec.groovy index 6fba8f1748b..9cdf1a0dfea 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/QinQFlowSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/QinQFlowSpec.groovy @@ -1,7 +1,14 @@ package org.openkilda.functionaltests.spec.flows -import groovy.transform.Memoized -import groovy.util.logging.Slf4j +import static groovyx.gpars.GParsPool.withPool +import static org.assertj.core.api.Assertions.assertThat +import static org.junit.jupiter.api.Assumptions.assumeFalse +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.flow.FlowNotCreatedExpectedError import org.openkilda.functionaltests.error.flow.FlowNotCreatedWithConflictExpectedError @@ -9,53 +16,56 @@ import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.IterationTags import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowActionType +import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType import org.openkilda.functionaltests.helpers.model.SwitchPair +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.messaging.command.switches.DeleteRulesAction -import org.openkilda.model.FlowEncapsulationType +import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType -import org.openkilda.northbound.dto.v1.flows.PingInput import org.openkilda.northbound.dto.v2.flows.FlowPatchEndpoint import org.openkilda.northbound.dto.v2.flows.FlowPatchV2 import org.openkilda.testing.service.traffexam.TraffExamService -import org.openkilda.testing.tools.FlowTrafficExamBuilder +import org.openkilda.testing.service.traffexam.model.FlowBidirectionalExam + +import groovy.transform.Memoized +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Shared import javax.inject.Provider -import static groovyx.gpars.GParsPool.withPool -import static org.assertj.core.api.Assertions.assertThat -import static org.junit.jupiter.api.Assumptions.assumeFalse -import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.DELETE_SUCCESS -import static 
org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET - @Slf4j class QinQFlowSpec extends HealthCheckSpecification { - @Autowired @Shared + @Autowired + @Shared Provider traffExamProvider + @Autowired + @Shared + FlowFactory flowFactory + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory @Tags([SMOKE_SWITCHES, TOPOLOGY_DEPENDENT]) def "System allows to manipulate with QinQ flow\ [srcVlan:#srcVlanId, srcInnerVlan:#srcInnerVlanId, dstVlan:#dstVlanId, dstInnerVlan:#dstInnerVlanId, sw:#swPair.hwSwString()]#trafficDisclaimer"() { when: "Create a QinQ flow" - def qinqFlow = flowHelperV2.randomFlow(swPair).tap { - source.vlanId = srcVlanId - source.innerVlanId = srcInnerVlanId - destination.vlanId = dstVlanId - destination.innerVlanId = dstInnerVlanId - } - def response = flowHelperV2.addFlow(qinqFlow) + def qinqFlow = flowFactory.getBuilder(swPair) + .withSourceVlan(srcVlanId) + .withSourceInnerVlan(srcInnerVlanId) + .withDestinationVlan(dstVlanId) + .withDestinationInnerVlan(dstInnerVlanId) + .build().sendCreateRequest() then: "Response contains correct info about vlanIds" - with(response) { + qinqFlow.waitForBeingInState(FlowState.UP) + with(qinqFlow) { it.source.vlanId == srcVlanId it.source.innerVlanId == srcInnerVlanId it.destination.vlanId == dstVlanId @@ -63,7 +73,7 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is really created with requested vlanIds" - with(northbound.getFlow(qinqFlow.flowId)) { + with(qinqFlow.retrieveDetails()) { it.source.vlanId == srcVlanId it.source.innerVlanId == srcInnerVlanId it.destination.vlanId == dstVlanId @@ -71,8 +81,8 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is valid and pingable" - northbound.validateFlow(qinqFlow.flowId).each { assert it.asExpected } - verifyAll(northbound.pingFlow(qinqFlow.flowId, new PingInput())) { + qinqFlow.validateAndCollectDiscrepancies().isEmpty() + verifyAll(qinqFlow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -81,45 +91,35 @@ class QinQFlowSpec extends HealthCheckSpecification { def traffExam = traffExamProvider.get() def examQinQFlow if(!trafficDisclaimer) { - examQinQFlow = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(qinqFlow), 1000, 5) - withPool { - [examQinQFlow.forward, examQinQFlow.reverse].eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + examQinQFlow = qinqFlow.traffExam(traffExam, 1000, 5) + verifyFlowHasBidirectionalTraffic(examQinQFlow, traffExam) } and: "Involved switches pass switch validation" - def involvedSwitchesFlow1 = pathHelper.getInvolvedSwitches( - pathHelper.convert(northbound.getFlowPath(qinqFlow.flowId)) - ) - switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitchesFlow1*.getDpId()).isEmpty() + def involvedSwitchesFlow1 = qinqFlow.retrieveAllEntityPaths().getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitchesFlow1).isEmpty() when: "Create a vlan flow on the same port as QinQ flow" - def vlanFlow = flowHelper.randomFlow(swPair).tap { - it.source.portNumber = qinqFlow.source.portNumber - it.source.vlanId = qinqFlow.source.vlanId + 1 - it.destination.portNumber = qinqFlow.destination.portNumber - it.destination.vlanId = qinqFlow.destination.vlanId + 1 - } - flowHelper.addFlow(vlanFlow) + def vlanFlow = flowFactory.getBuilder(swPair) + 
.withSourcePort(qinqFlow.source.portNumber) + .withSourceVlan(qinqFlow.source.vlanId + 1) + .withDestinationPort(qinqFlow.destination.portNumber) + .withDestinationVlan(qinqFlow.destination.vlanId + 1) + .build().create() then: "Both existing flows are valid" - [qinqFlow.flowId, vlanFlow.id].each { - northbound.validateFlow(it).each { assert it.asExpected } + [qinqFlow, vlanFlow].each { + it.validateAndCollectDiscrepancies().isEmpty() } and: "Involved switches pass switch validation" - def involvedSwitchesFlow2 = pathHelper.getInvolvedSwitches(pathHelper.convert(northbound.getFlowPath(vlanFlow.id))) - def involvedSwitchesforBothFlows = (involvedSwitchesFlow1 + involvedSwitchesFlow2).unique { it.dpId } - switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitchesforBothFlows*.getDpId()).isEmpty() + def involvedSwitchesFlow2 = vlanFlow.retrieveAllEntityPaths().getInvolvedSwitches() + def involvedSwitchesforBothFlows = (involvedSwitchesFlow1 + involvedSwitchesFlow2).unique() + switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitchesforBothFlows).isEmpty() and: "Both flows are pingable" - [qinqFlow.flowId, vlanFlow.id].each { - verifyAll(northbound.pingFlow(it, new PingInput())) { + [qinqFlow, vlanFlow].each { + verifyAll(it.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -127,28 +127,22 @@ class QinQFlowSpec extends HealthCheckSpecification { then: "Both flows allow traffic" if(!trafficDisclaimer) { - def examSimpleFlow = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(vlanFlow, 1000, 5) - withPool { - [examQinQFlow.forward, examQinQFlow.reverse, examSimpleFlow.forward, examSimpleFlow.reverse] - .eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + def examSimpleFlow = vlanFlow.traffExam(traffExam, 1000, 5) + verifyFlowHasBidirectionalTraffic(examQinQFlow, traffExam) + verifyFlowHasBidirectionalTraffic(examSimpleFlow, traffExam) } when: "Update the QinQ flow(outer/inner vlans)" - def updateResponse = flowHelperV2.updateFlow(qinqFlow.flowId, qinqFlow.tap { - qinqFlow.source.vlanId = vlanFlow.source.vlanId - qinqFlow.source.innerVlanId = vlanFlow.destination.vlanId - qinqFlow.destination.vlanId = vlanFlow.destination.vlanId - qinqFlow.destination.innerVlanId = vlanFlow.source.vlanId - }) + def updateQinqFlowEntity = qinqFlow.tap { + it.source.vlanId = vlanFlow.source.vlanId + it.source.innerVlanId = vlanFlow.destination.vlanId + it.destination.vlanId = vlanFlow.destination.vlanId + it.destination.innerVlanId = vlanFlow.source.vlanId + } + def updatedQinqFlow = qinqFlow.sendUpdateRequest(updateQinqFlowEntity) then: "Update response contains correct info about innerVlanIds" - with(updateResponse) { + with(updatedQinqFlow) { it.source.vlanId == vlanFlow.source.vlanId it.source.innerVlanId == vlanFlow.destination.vlanId it.destination.vlanId == vlanFlow.destination.vlanId @@ -156,7 +150,7 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is really updated" - with(northbound.getFlow(qinqFlow.flowId)) { + with(qinqFlow.retrieveDetails()) { it.source.vlanId == vlanFlow.source.vlanId it.source.innerVlanId == vlanFlow.destination.vlanId it.destination.vlanId == vlanFlow.destination.vlanId @@ -164,14 +158,14 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow history shows actual info into stateBefore and stateAfter sections" - def flowHistoryEntry = 
flowHelper.getLatestHistoryEntry(qinqFlow.flowId) - with(flowHistoryEntry.dumps.find { it.type == "stateBefore" }){ + def qinqFlowHistoryEntry = qinqFlow.waitForHistoryEvent(FlowActionType.UPDATE) + with(qinqFlowHistoryEntry.dumps.find { it.type == "stateBefore" }){ it.sourceVlan == srcVlanId it.sourceInnerVlan == srcInnerVlanId it.destinationVlan == dstVlanId it.destinationInnerVlan == dstInnerVlanId } - with(flowHistoryEntry.dumps.find { it.type == "stateAfter" }){ + with(qinqFlowHistoryEntry.dumps.find { it.type == "stateAfter" }){ it.sourceVlan == vlanFlow.source.vlanId it.sourceInnerVlan == vlanFlow.destination.vlanId it.destinationVlan == vlanFlow.destination.vlanId @@ -179,31 +173,33 @@ class QinQFlowSpec extends HealthCheckSpecification { } then: "Both existing flows are still valid and pingable" - [qinqFlow.flowId, vlanFlow.id].each { - northbound.validateFlow(it).each { assert it.asExpected } + [qinqFlow, vlanFlow].each { + it.validateAndCollectDiscrepancies().isEmpty() } - [qinqFlow.flowId, vlanFlow.id].each { - verifyAll(northbound.pingFlow(it, new PingInput())) { + [qinqFlow, vlanFlow].each { + verifyAll(it.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } } when: "Delete the flows" - [qinqFlow.flowId, vlanFlow.id].each { it && flowHelperV2.deleteFlow(it) } + [qinqFlow, vlanFlow].each { it && it.delete() } then: "Flows rules are deleted" - involvedSwitchesforBothFlows.each { sw -> + def allSwitches = topology.activeSwitches + involvedSwitchesforBothFlows.each { swId -> + def sw = allSwitches.find { item -> item.dpId == swId } Wrappers.wait(RULES_INSTALLATION_TIME, 1) { - assertThat(northbound.getSwitchRules(sw.dpId).flowEntries*.cookie.toArray()).as(sw.dpId.toString()) + assertThat(switchRulesFactory.get(swId).getRules()*.cookie.toArray()).as(swId.toString()) .containsExactlyInAnyOrder(*sw.defaultCookies) } } and: "Shared rule of flow is deleted" [swPair.src.dpId, swPair.dst.dpId].each { swId -> - assert northbound.getSwitchRules(swId).flowEntries.findAll { + assert switchRulesFactory.get(swId).getRules().findAll { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW }.empty } @@ -220,15 +216,15 @@ class QinQFlowSpec extends HealthCheckSpecification { def "System allows to create a single switch QinQ flow\ [srcVlan:#srcVlanId, srcInnerVlan:#srcInnerVlanId, dstVlan:#dstVlanId, dstInnerVlan:#dstInnerVlanId, sw:#swPair.src.hwSwString]#trafficDisclaimer"() { when: "Create a single switch QinQ flow" - def qinqFlow = flowHelperV2.singleSwitchFlow(swPair) - qinqFlow.source.vlanId = srcVlanId - qinqFlow.source.innerVlanId = srcInnerVlanId - qinqFlow.destination.vlanId = dstVlanId - qinqFlow.destination.innerVlanId = dstInnerVlanId - def response = flowHelperV2.addFlow(qinqFlow) + def qinqFlow = flowFactory.getBuilder(swPair) + .withSourceVlan(srcVlanId) + .withSourceInnerVlan(srcInnerVlanId) + .withDestinationVlan(dstVlanId) + .withDestinationInnerVlan(dstInnerVlanId) + .build().create() then: "Response contains correct info about vlanIds" - with(response) { + with(qinqFlow) { it.source.vlanId == srcVlanId it.source.innerVlanId == srcInnerVlanId it.destination.vlanId == dstVlanId @@ -236,7 +232,7 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is really created with requested vlanIds" - with(northbound.getFlow(qinqFlow.flowId)) { + with(qinqFlow.retrieveDetails()) { it.source.vlanId == srcVlanId it.source.innerVlanId == srcInnerVlanId it.destination.vlanId == dstVlanId @@ -244,10 +240,10 @@ class QinQFlowSpec extends HealthCheckSpecification { } 
and: "Flow is valid" - northbound.validateFlow(qinqFlow.flowId).each { assert it.asExpected } + qinqFlow.validateAndCollectDiscrepancies().isEmpty() and: "Unable to ping a one-switch qinq flow" - verifyAll(northbound.pingFlow(qinqFlow.flowId, new PingInput())) { + verifyAll(qinqFlow.ping()) { !it.forward !it.reverse it.error == "Flow ${qinqFlow.flowId} should not be one-switch flow" @@ -259,26 +255,19 @@ class QinQFlowSpec extends HealthCheckSpecification { and: "Traffic examination is successful (if possible)" if(!trafficDisclaimer) { def traffExam = traffExamProvider.get() - def examQinQFlow = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(qinqFlow), 1000, 5) - withPool { - [examQinQFlow.forward, examQinQFlow.reverse].eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + def examQinQFlow = qinqFlow.traffExam(traffExam, 1000, 5) + verifyFlowHasBidirectionalTraffic(examQinQFlow, traffExam) } when: "Delete the flow" - flowHelperV2.deleteFlow(qinqFlow.flowId) + qinqFlow.delete() then: "Flow rules are deleted" Wrappers.wait(RULES_INSTALLATION_TIME, 1) { - assertThat(northbound.getSwitchRules(swPair.src.dpId).flowEntries*.cookie.toArray()) + assertThat(switchRulesFactory.get(swPair.src.dpId).getRules()*.cookie.toArray()) .containsExactlyInAnyOrder(*swPair.src.defaultCookies) } - northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { + switchRulesFactory.get(swPair.src.dpId).getRules().findAll { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW }.empty @@ -297,10 +286,10 @@ class QinQFlowSpec extends HealthCheckSpecification { def swP = switchPairs.all().neighbouring().random() when: "Try to create a QinQ flow with incorrect innerVlanId" - def flow = flowHelperV2.randomFlow(swP) - flow.source.innerVlanId = srcInnerVlanId - flow.destination.innerVlanId = dstInnerVlanId - flowHelperV2.addFlow(flow) + flowFactory.getBuilder(swP) + .withSourceInnerVlan(srcInnerVlanId) + .withDestinationInnerVlan(dstInnerVlanId) + .build().create() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) @@ -320,19 +309,20 @@ class QinQFlowSpec extends HealthCheckSpecification { def "Flow with innerVlan and vlanId=0 is transformed into a regular vlan flow without innerVlan"() { when: "Create a flow with vlanId=0 and innerVlanId!=0" def swP = switchPairs.all().random() - def flow = flowHelper.randomFlow(swP) - flow.source.vlanId = 0 - flow.source.innerVlanId = 123 - flowHelper.addFlow(flow) + def flowEntity = flowFactory.getBuilder(swP) + .withSourceVlan(0) + .withSourceInnerVlan(123) + .build() + def flow = flowEntity.create() then: "Flow is created but with vlanId!=0 and innerVlanId==0" - with(northbound.getFlow(flow.id)) { - it.source.vlanId == flow.source.innerVlanId + with(flow.retrieveDetails()) { + it.source.vlanId == flowEntity.source.innerVlanId it.source.innerVlanId == 0 } and: "Flow is valid" - northbound.validateFlow(flow.id).each { assert it.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() } def "System allow to create/update/delete a protected QinQ flow via APIv1"() { @@ -340,60 +330,61 @@ class QinQFlowSpec extends HealthCheckSpecification { def swP = switchPairs.all().withAtLeastNNonOverlappingPaths(2).random() when: "Create a QinQ flow" - def flow = flowHelper.randomFlow(swP) - flow.source.innerVlanId = 234 - flow.destination.innerVlanId = 432 - flowHelper.addFlow(flow) + def 
flowEntity = flowFactory.getBuilder(swP) + .withSourceInnerVlan(234) + .withDestinationInnerVlan(432) + .build() + def flow = flowEntity.createV1() then: "Flow is really created with requested innerVlanId" - with(northbound.getFlow(flow.id)) { - it.source.innerVlanId == flow.source.innerVlanId - it.destination.innerVlanId == flow.destination.innerVlanId + with(flow.retrieveDetailsV1()) { + it.source.innerVlanId == flowEntity.source.innerVlanId + it.destination.innerVlanId == flowEntity.destination.innerVlanId } when: "Update the flow(innerVlan/vlanId) via partialUpdate" def newDstVlanId = flow.destination.vlanId + 1 def newDstInnerVlanId = flow.destination.innerVlanId + 1 def updateRequest = new FlowPatchV2( - destination: new FlowPatchEndpoint(innerVlanId: newDstInnerVlanId, vlanId: newDstVlanId) + destination: new FlowPatchEndpoint( + innerVlanId: newDstInnerVlanId, + vlanId: newDstVlanId + ) ) - def response = flowHelperV2.partialUpdate(flow.id, updateRequest) + def response = flow.sendPartialUpdateRequest(updateRequest) then: "Partial update response reflects the changes" + flow.waitForBeingInState(FlowState.UP) response.destination.vlanId == newDstVlanId response.destination.innerVlanId == newDstInnerVlanId and: "Flow is really updated with requested innerVlanId/vlanId" - with(northbound.getFlow(flow.id)) { + with(flow.retrieveDetailsV1()) { it.destination.vlanId == newDstVlanId it.destination.innerVlanId == newDstInnerVlanId } and: "Flow is valid and pingable" - northbound.validateFlow(flow.id).each { assert it.asExpected } - verifyAll(northbound.pingFlow(flow.id, new PingInput())) { + flow.validateAndCollectDiscrepancies().isEmpty() + verifyAll(flow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } when: "Delete the flow via APIv1" - northbound.deleteFlow(flow.id) - Wrappers.wait(WAIT_OFFSET) { - assert !northbound.getFlowStatus(flow.id) - assert northbound.getFlowHistory(flow.id).find { it.payload.last().action == DELETE_SUCCESS } - } + flow.deleteV1() then: "Flows rules are deleted" [swP.src, swP.dst].each { sw -> Wrappers.wait(RULES_INSTALLATION_TIME, 1) { - assertThat(northbound.getSwitchRules(sw.dpId).flowEntries*.cookie.toArray()) + assertThat(switchRulesFactory.get(sw.dpId).getRules()*.cookie.toArray()) .containsExactlyInAnyOrder(*sw.defaultCookies) } } and: "Shared rule of flow is deleted" [swP.src.dpId, swP.dst.dpId].each { swId -> - assert northbound.getSwitchRules(swId).flowEntries.findAll { + assert switchRulesFactory.get(swId).getRules().findAll { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW }.empty } @@ -404,31 +395,23 @@ class QinQFlowSpec extends HealthCheckSpecification { def swP = switchPairs.all().neighbouring().withTraffgensOnBothEnds().random() when: "Create a QinQ flow" - def flowWithQinQ = flowHelperV2.randomFlow(swP) - flowWithQinQ.source.innerVlanId = 234 - flowWithQinQ.destination.innerVlanId = 432 - flowHelperV2.addFlow(flowWithQinQ) + def flowWithQinQ = flowFactory.getBuilder(swP) + .withSourceInnerVlan(234) + .withDestinationInnerVlan(432) + .build().create() and: "Create a flow without QinQ" - def flowWithoutQinQ = flowHelperV2.randomFlow(swP) - flowWithoutQinQ.source.vlanId = 0 - flowWithoutQinQ.source.innerVlanId = flowWithQinQ.source.vlanId - flowHelperV2.addFlow(flowWithoutQinQ) + def flowWithoutQinQ = flowFactory.getBuilder(swP) + .withSourceVlan(0) + .withSourceInnerVlan(flowWithQinQ.source.vlanId) + .build().create() then: "Both flows allow traffic" def traffExam = traffExamProvider.get() - def examFlowWithtQinQ = new 
FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(flowWithQinQ), 1000, 5) - def examFlowWithoutQinQ = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(flowWithoutQinQ), 1000, 5) - withPool { - [examFlowWithtQinQ.forward, examFlowWithtQinQ.reverse, - examFlowWithoutQinQ.forward, examFlowWithoutQinQ.reverse].eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + def examFlowWithtQinQ = flowWithQinQ.traffExam(traffExam, 1000, 5) + def examFlowWithoutQinQ = flowWithoutQinQ.traffExam(traffExam, 1000, 5) + verifyFlowHasBidirectionalTraffic(examFlowWithtQinQ, traffExam) + verifyFlowHasBidirectionalTraffic(examFlowWithoutQinQ, traffExam) } def "System detects conflict QinQ flows(oVlan: #conflictVlan, iVlan: #conflictInnerVlanId)"() { @@ -436,17 +419,19 @@ class QinQFlowSpec extends HealthCheckSpecification { def swP = switchPairs.all().neighbouring().random() when: "Create a first flow" - def flow = flowHelperV2.randomFlow(swP) - flow.source.vlanId = vlan - flow.source.innerVlanId = innerVlan - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swP) + .withSourceVlan(vlan) + .withSourceInnerVlan(innerVlan) + .build() + flow.create() and: "Try to create a flow which conflicts(vlan) with first flow" - def conflictFlow = flowHelperV2.randomFlow(swP) - conflictFlow.source.vlanId = conflictVlan - conflictFlow.source.innerVlanId = conflictInnerVlanId - conflictFlow.source.portNumber = flow.source.portNumber - flowHelperV2.addFlow(conflictFlow) + def conflictFlow = flowFactory.getBuilder(swP) + .withSourceVlan(conflictVlan) + .withSourceInnerVlan(conflictInnerVlanId) + .withSourcePort(flow.source.portNumber) + .build() + conflictFlow.create() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) @@ -464,24 +449,23 @@ class QinQFlowSpec extends HealthCheckSpecification { def swP = switchPairs.all().neighbouring().withTraffgensOnBothEnds().random() when: "Create a first QinQ flow" - def flow1 = flowHelperV2.randomFlow(swP) - flow1.source.innerVlanId = 300 - flow1.destination.innerVlanId = 400 - flowHelperV2.addFlow(flow1) + def flow1 = flowFactory.getBuilder(swP) + .withSourceInnerVlan(300) + .withDestinationInnerVlan(400) + .build().create() and: "Create a second QinQ flow" - def flow2 = flowHelperV2.randomFlow(swP) - flow2.source.vlanId = flow1.source.vlanId - flow2.source.innerVlanId = flow1.destination.innerVlanId - flow2.destination.vlanId = flow1.destination.vlanId - flow2.destination.innerVlanId = flow1.source.innerVlanId - flowHelperV2.addFlow(flow2) - + def flow2 = flowFactory.getBuilder(swP) + .withSourceVlan(flow1.source.vlanId) + .withSourceInnerVlan(flow1.destination.innerVlanId) + .withDestinationVlan(flow1.destination.vlanId) + .withDestinationInnerVlan(flow1.source.innerVlanId) + .build().create() then: "Both flow are valid and pingable" - [flow1.flowId, flow2.flowId].each { flowId -> - northbound.validateFlow(flowId).each { assert it.asExpected } - verifyAll(northbound.pingFlow(flowId, new PingInput())) { + [flow1, flow2].each { flow -> + flow.validateAndCollectDiscrepancies().isEmpty() + verifyAll(flow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -489,36 +473,23 @@ class QinQFlowSpec extends HealthCheckSpecification { and: "Flows allow traffic" def traffExam = traffExamProvider.get() - def exam1 = new FlowTrafficExamBuilder(topology, 
traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(flow1), 1000, 5) - def exam2 = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(flow2), 1000, 5) - withPool { - [exam1.forward, exam1.reverse, exam2.forward, exam2.reverse].eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + def exam1 = flow1.traffExam(traffExam, 1000, 5) + def exam2 = flow2.traffExam(traffExam, 1000, 5) + verifyFlowHasBidirectionalTraffic(exam1, traffExam) + verifyFlowHasBidirectionalTraffic(exam2, traffExam) when: "Delete the second flow" - flowHelperV2.deleteFlow(flow2.flowId) + flow2.delete() then: "The first flow is still valid and pingable" - northbound.validateFlow(flow1.flowId).each { assert it.asExpected } - verifyAll(northbound.pingFlow(flow1.flowId, new PingInput())) { + flow1.validateAndCollectDiscrepancies().isEmpty() + verifyAll(flow1.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } and: "The first flow still allows traffic" - withPool { - [exam1.forward, exam1.reverse].eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + verifyFlowHasBidirectionalTraffic(exam1, traffExam) } def "System allows to create a single-switch-port QinQ flow\ @@ -527,15 +498,15 @@ class QinQFlowSpec extends HealthCheckSpecification { def sw = topology.activeSwitches[0] when: "Create a single switch QinQ flow" - def qinqFlow = flowHelperV2.singleSwitchSinglePortFlow(sw) - qinqFlow.source.vlanId = srcVlanId - qinqFlow.source.innerVlanId = srcInnerVlanId - qinqFlow.destination.vlanId = dstVlanId - qinqFlow.destination.innerVlanId = dstInnerVlanId - def response = flowHelperV2.addFlow(qinqFlow) + def qinqFlow = flowFactory.getBuilder(sw, sw) + .withSourceVlan(srcVlanId) + .withSourceInnerVlan(srcInnerVlanId) + .withDestinationVlan(dstVlanId) + .withDestinationInnerVlan(dstInnerVlanId) + .build().create() then: "Response contains correct info about vlanIds" - with(response) { + with(qinqFlow) { it.source.vlanId == srcVlanId it.source.innerVlanId == srcInnerVlanId it.destination.vlanId == dstVlanId @@ -543,7 +514,7 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is really created with requested vlanIds" - with(northbound.getFlow(qinqFlow.flowId)) { + with(qinqFlow.retrieveDetails()) { it.source.vlanId == srcVlanId it.source.innerVlanId == srcInnerVlanId it.destination.vlanId == dstVlanId @@ -551,20 +522,20 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is valid" - northbound.validateFlow(qinqFlow.flowId).each { assert it.asExpected } + qinqFlow.validateAndCollectDiscrepancies().isEmpty() and: "Involved switches pass switch validation" - switchHelper.synchronizeAndCollectFixedDiscrepancies( - pathHelper.getInvolvedSwitches( - pathHelper.convert(northbound.getFlowPath(qinqFlow.flowId)))*.getDpId()).isEmpty() + def involvedSwitches = qinqFlow.retrieveAllEntityPaths().getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitches).isEmpty() when: "Delete the flow" - flowHelperV2.deleteFlow(qinqFlow.flowId) + qinqFlow.delete() then: "Flow rules are deleted" + def singleSw = topology.getActiveSwitches().find { it.dpId == sw.dpId } Wrappers.wait(RULES_INSTALLATION_TIME, 1) { - assertThat(northbound.getSwitchRules(sw.dpId).flowEntries*.cookie.toArray()).as(sw.dpId.toString()) - 
.containsExactlyInAnyOrder(*sw.defaultCookies) + assertThat(switchRulesFactory.get(singleSw.dpId).getRules()*.cookie.toArray()).as(singleSw.dpId.toString()) + .containsExactlyInAnyOrder(*singleSw.defaultCookies) } where: @@ -581,13 +552,14 @@ class QinQFlowSpec extends HealthCheckSpecification { def "System allows to manipulate with QinQ vxlan flow\ [srcVlan:#srcVlanId, srcInnerVlan:#srcInnerVlanId, dstVlan:#dstVlanId, dstInnerVlan:#dstInnerVlanId, sw:#swPair.hwSwString()]#trafficDisclaimer"() { when: "Create QinQ vxlan flow" - def qinqFlow = flowHelperV2.randomFlow(swPair) - qinqFlow.encapsulationType = FlowEncapsulationType.VXLAN - qinqFlow.source.vlanId = srcVlanId - qinqFlow.source.innerVlanId = srcInnerVlanId - qinqFlow.destination.vlanId = dstVlanId - qinqFlow.destination.innerVlanId = dstInnerVlanId - def response = flowHelperV2.addFlow(qinqFlow) + def qinqFlow = flowFactory.getBuilder(swPair) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .withSourceVlan(srcVlanId) + .withSourceInnerVlan(srcInnerVlanId) + .withDestinationVlan(dstVlanId) + .withDestinationInnerVlan(dstInnerVlanId) + .build() + def response = qinqFlow.create() then: "Response contains correct info about vlanIds" /** System doesn't allow to create a flow with innerVlan and without vlan at the same time. @@ -603,7 +575,7 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is really created with requested vlanIds" - with(northbound.getFlow(qinqFlow.flowId)) { + with(qinqFlow.retrieveDetails()) { it.source.vlanId == srcVlanId it.source.innerVlanId == srcInnerVlanId it.destination.vlanId == dstVlanId @@ -611,8 +583,8 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is valid and pingable" - northbound.validateFlow(qinqFlow.flowId).each { assert it.asExpected } - verifyAll(northbound.pingFlow(qinqFlow.flowId, new PingInput())) { + qinqFlow.validateAndCollectDiscrepancies().isEmpty() + verifyAll(qinqFlow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -621,45 +593,36 @@ class QinQFlowSpec extends HealthCheckSpecification { def traffExam = traffExamProvider.get() def examQinQFlow if(!trafficDisclaimer) { - examQinQFlow = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(qinqFlow), 1000, 5) - withPool { - [examQinQFlow.forward, examQinQFlow.reverse].eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + examQinQFlow = qinqFlow.traffExam(traffExam, 1000, 5) + verifyFlowHasBidirectionalTraffic(examQinQFlow, traffExam) } and: "Involved switches pass switch validation" - def involvedSwitchesFlow1 = pathHelper.getInvolvedSwitches( - pathHelper.convert(northbound.getFlowPath(qinqFlow.flowId)) - ) - switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitchesFlow1*.getDpId()).isEmpty() + def involvedSwitchesFlow1 = qinqFlow.retrieveAllEntityPaths().getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitchesFlow1).isEmpty() when: "Create a vlan flow on the same port as QinQ flow" - def vlanFlow = flowHelper.randomFlow(swPair).tap { - it.source.portNumber = qinqFlow.source.portNumber - it.source.vlanId = qinqFlow.source.vlanId + 1 - it.destination.portNumber = qinqFlow.destination.portNumber - it.destination.vlanId = qinqFlow.destination.vlanId + 1 - } - flowHelperV2.addFlow(vlanFlow) + def vlanFlow = flowFactory.getBuilder(swPair) + 
.withSourcePort(qinqFlow.source.portNumber) + .withSourceVlan(qinqFlow.source.vlanId + 1) + .withDestinationPort(qinqFlow.destination.portNumber) + .withDestinationVlan(qinqFlow.destination.vlanId + 1) + .build() + vlanFlow.create() then: "Both existing flows are valid" - [qinqFlow.flowId, vlanFlow.id].each { - northbound.validateFlow(it).each { assert it.asExpected } + [qinqFlow, vlanFlow].each { + it.validateAndCollectDiscrepancies().isEmpty() } and: "Involved switches pass switch validation" - def involvedSwitchesFlow2 = pathHelper.getInvolvedSwitches(pathHelper.convert(northbound.getFlowPath(vlanFlow.id))) - def involvedSwitchesforBothFlows = (involvedSwitchesFlow1 + involvedSwitchesFlow2).unique { it.dpId } - switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitchesforBothFlows*.getDpId()).isEmpty() + def involvedSwitchesFlow2 = vlanFlow.retrieveAllEntityPaths().getInvolvedSwitches() + def involvedSwitchesforBothFlows = (involvedSwitchesFlow1 + involvedSwitchesFlow2).unique() + switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitchesforBothFlows).isEmpty() and: "Both flows are pingable" - [qinqFlow.flowId, vlanFlow.id].each { - verifyAll(northbound.pingFlow(it, new PingInput())) { + [qinqFlow, vlanFlow].each { + verifyAll(it.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -667,25 +630,19 @@ class QinQFlowSpec extends HealthCheckSpecification { then: "Both flows allow traffic" if(!trafficDisclaimer) { - def examSimpleFlow = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(vlanFlow, 1000, 5) - withPool { - [examQinQFlow.forward, examQinQFlow.reverse, examSimpleFlow.forward, examSimpleFlow.reverse] - .eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + def examSimpleFlow = vlanFlow.traffExam(traffExam, 1000, 5) + verifyFlowHasBidirectionalTraffic(examQinQFlow, traffExam) + verifyFlowHasBidirectionalTraffic(examSimpleFlow, traffExam) } when: "Update the QinQ flow(outer/inner vlans)" - def updateResponse = flowHelperV2.updateFlow(qinqFlow.flowId, qinqFlow.tap { - qinqFlow.source.vlanId = vlanFlow.source.vlanId - qinqFlow.source.innerVlanId = vlanFlow.destination.vlanId - qinqFlow.destination.vlanId = vlanFlow.destination.vlanId - qinqFlow.destination.innerVlanId = vlanFlow.source.vlanId - }) + def updateRequest = qinqFlow.tap { + it.source.vlanId = vlanFlow.source.vlanId + it.source.innerVlanId = vlanFlow.destination.vlanId + it.destination.vlanId = vlanFlow.destination.vlanId + it.destination.innerVlanId = vlanFlow.source.vlanId + } + def updateResponse = qinqFlow.update(updateRequest) then: "Update response contains correct info about innerVlanIds" with(updateResponse) { @@ -696,7 +653,7 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is really updated" - with(northbound.getFlow(qinqFlow.flowId)) { + with(qinqFlow.retrieveDetails()) { it.source.vlanId == vlanFlow.source.vlanId it.source.innerVlanId == vlanFlow.destination.vlanId it.destination.vlanId == vlanFlow.destination.vlanId @@ -704,31 +661,32 @@ class QinQFlowSpec extends HealthCheckSpecification { } then: "Both existing flows are still valid and pingable" - [qinqFlow.flowId, vlanFlow.id].each { - northbound.validateFlow(it).each { assert it.asExpected } + [qinqFlow, vlanFlow].each { + it.validateAndCollectDiscrepancies().isEmpty() } - [qinqFlow.flowId, vlanFlow.id].each { - verifyAll(northbound.pingFlow(it, new PingInput())) 
{ + [qinqFlow, vlanFlow].each { + verifyAll(it.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } } when: "Delete the flows" - [qinqFlow.flowId, vlanFlow.id].each { flowHelperV2.deleteFlow(it) } + [qinqFlow, vlanFlow].each { it.delete() } then: "Flows rules are deleted" - involvedSwitchesforBothFlows.each { sw -> + involvedSwitchesforBothFlows.each { swId -> + def sw = topology.getActiveSwitches().find { it.dpId == swId } Wrappers.wait(RULES_INSTALLATION_TIME, 1) { - assertThat(northbound.getSwitchRules(sw.dpId).flowEntries*.cookie.toArray()).as(sw.dpId.toString()) + assertThat(switchRulesFactory.get(swId).getRules()*.cookie.toArray()).as(swId.toString()) .containsExactlyInAnyOrder(*sw.defaultCookies) } } and: "Shared rule of flow is deleted" [swPair.src.dpId, swPair.dst.dpId].each { swId -> - assert northbound.getSwitchRules(swId).flowEntries.findAll { + assert switchRulesFactory.get(swId).getRules().findAll { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW }.empty } @@ -750,11 +708,11 @@ class QinQFlowSpec extends HealthCheckSpecification { def swP = switchPairs.all().neighbouring().withTraffgensOnBothEnds().random() and: "A QinQ flow on the given switches" - def flow = flowHelperV2.randomFlow(swP) - flow.maximumBandwidth = 100 - flow.source.innerVlanId = 600 - flow.destination.innerVlanId = 700 - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swP) + .withBandwidth(100) + .withSourceInnerVlan(600) + .withDestinationInnerVlan(700) + .build().create() when: "Delete all flow rules(ingress/egress/shared) on the src switch" switchHelper.deleteSwitchRules(swP.src.dpId, DeleteRulesAction.DROP_ALL_ADD_DEFAULTS) @@ -767,19 +725,12 @@ class QinQFlowSpec extends HealthCheckSpecification { } and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { assert it.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() and: "The flow allows traffic" def traffExam = traffExamProvider.get() - def examFlow = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(flow), 100, 5) - withPool { - [examFlow.forward, examFlow.reverse].eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + def examFlow = flow.traffExam(traffExam, 100, 5) + verifyFlowHasBidirectionalTraffic(examFlow, traffExam) } def "System doesn't rebuild flow path to more preferable path while updating innerVlanId"() { @@ -790,52 +741,46 @@ class QinQFlowSpec extends HealthCheckSpecification { .random() and: "A flow" - def flow = flowHelperV2.randomFlow(switchPair) - flow.source.innerVlanId = flow.source.vlanId - flow.destination.innerVlanId = flow.destination.vlanId - flowHelperV2.addFlow(flow) + def flowEntity = flowFactory.getBuilder(switchPair).build().tap { + it.source.innerVlanId = it.source.vlanId + it.destination.innerVlanId = it.destination.vlanId + } + def flow = flowEntity.create() when: "Make the current path less preferable than alternatives" - def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) + def currentPath = flow.retrieveAllEntityPaths().getPathNodes() def alternativePaths = switchPair.paths.findAll { it != currentPath } alternativePaths.each { pathHelper.makePathMorePreferable(it, currentPath) } and: "Update the flow: port number and vlanId on the src/dst endpoints" - def updatedFlow = flow.jacksonCopy().tap { + def updatedFlow = flow.deepCopy().tap { it.source.innerVlanId = flow.destination.vlanId 
it.destination.innerVlanId = flow.source.vlanId } - flowHelperV2.updateFlow(flow.flowId, updatedFlow) + flow.update(updatedFlow) then: "Flow is really updated" - with(northboundV2.getFlow(flow.flowId)) { + with(flow.retrieveDetails()) { it.source.innerVlanId == updatedFlow.source.innerVlanId it.destination.innerVlanId == updatedFlow.destination.innerVlanId } and: "Flow is not rerouted" Wrappers.timedLoop(rerouteDelay + WAIT_OFFSET / 2) { - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == currentPath + assert flow.retrieveAllEntityPaths().getPathNodes() == currentPath } and: "System allows traffic on the flow" def traffExam = traffExamProvider.get() - def examFlow = new FlowTrafficExamBuilder(topology, traffExam).buildBidirectionalExam( - flowHelperV2.toV1(updatedFlow), 100, 5 - ) - withPool { - [examFlow.forward, examFlow.reverse].eachParallel { direction -> - def resources = traffExam.startExam(direction) - direction.setResources(resources) - assert traffExam.waitExam(direction).hasTraffic() - } - } + def examFlow = updatedFlow.traffExam(traffExam, 100, 5) + verifyFlowHasBidirectionalTraffic(examFlow, traffExam) and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() and: "All involved switches pass switch validation" - switchHelper.synchronizeAndCollectFixedDiscrepancies(currentPath*.switchId).isEmpty() + def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() + switchHelper.synchronizeAndCollectFixedDiscrepancies(involvedSwitches).isEmpty() } @Memoized @@ -868,4 +813,14 @@ class QinQFlowSpec extends HealthCheckSpecification { } return result } + + def verifyFlowHasBidirectionalTraffic(FlowBidirectionalExam examFlow, TraffExamService traffExam) { + withPool { + [examFlow.forward, examFlow.reverse].eachParallel { direction -> + def resources = traffExam.startExam(direction) + direction.setResources(resources) + assert traffExam.waitExam(direction).hasTraffic() + } + } + } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/SwapEndpointSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/SwapEndpointSpec.groovy index b565ffbb35b..01fcf00027e 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/SwapEndpointSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/SwapEndpointSpec.groovy @@ -1,191 +1,194 @@ package org.openkilda.functionaltests.spec.flows -import com.fasterxml.jackson.databind.ObjectMapper +import static groovyx.gpars.GParsPool.withPool +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.helpers.model.FlowActionType.REROUTE +import static org.openkilda.functionaltests.helpers.model.FlowActionType.REROUTE_FAILED +import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static 
org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.flow.FlowEndpointsNotSwappedExpectedError import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType +import org.openkilda.functionaltests.helpers.model.FlowExtended import org.openkilda.functionaltests.helpers.model.SwitchPair -import org.openkilda.messaging.error.MessageError -import org.openkilda.messaging.payload.flow.FlowCreatePayload -import org.openkilda.messaging.payload.flow.FlowEndpointPayload +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.SwitchId import org.openkilda.model.SwitchStatus import org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 -import org.openkilda.northbound.dto.v2.flows.FlowLoopPayload +import org.openkilda.northbound.dto.v2.flows.SwapFlowEndpointPayload import org.openkilda.northbound.dto.v2.flows.SwapFlowPayload import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.traffexam.TraffExamService -import org.openkilda.testing.tools.FlowTrafficExamBuilder +import org.openkilda.testing.tools.SoftAssertions import org.springframework.beans.factory.annotation.Autowired +import org.springframework.http.HttpStatus import org.springframework.web.client.HttpClientErrorException import org.springframework.web.client.HttpServerErrorException import spock.lang.Ignore -import javax.inject.Provider -import static groovyx.gpars.GParsPool.withPool -import static org.junit.jupiter.api.Assumptions.assumeTrue +import spock.lang.Shared -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_ACTION -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_FAIL -import static org.openkilda.testing.Constants.FLOW_CRUD_TIMEOUT -import static org.openkilda.testing.Constants.NON_EXISTENT_FLOW_ID -import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME -import static org.openkilda.testing.Constants.RULES_DELETION_TIME -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW +import javax.inject.Provider class SwapEndpointSpec extends HealthCheckSpecification { + //Kilda allows user to pass reserved VLAN IDs 1 and 4095 if they want. 
+ static final IntRange KILDA_ALLOWED_VLANS = 1..4095 + + @Autowired + @Shared + FlowFactory flowFactory + + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory @Autowired Provider traffExamProvider def "Able to swap endpoints(#data.description)"() { given: "Some flows in the system according to preconditions" - flows.each { flowHelper.addFlow(it) } + flows.each { it.create() } when: "Try to swap endpoints with #data.description" def response = northbound.swapFlowEndpoint(firstSwap, secondSwap) then: "Endpoints are successfully swapped" verifyEndpoints(response, firstSwap.source, firstSwap.destination, secondSwap.source, secondSwap.destination) - verifyEndpoints(firstSwap.flowId, secondSwap.flowId, firstSwap.source, firstSwap.destination, - secondSwap.source, secondSwap.destination) + + verifyEndpoints(flows.find { it.flowId == firstSwap.flowId }, + flows.find { it.flowId == secondSwap.flowId }, + firstSwap.source, firstSwap.destination, secondSwap.source, secondSwap.destination) and: "Flows validation doesn't show any rule discrepancies" Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { - flows.each { - assert northbound.validateFlow(it.id).each { direction -> assert direction.asExpected } + flows.each { flowExtended -> + assert flowExtended.validateAndCollectDiscrepancies().isEmpty() } } and: "Switch validation doesn't show any missing/excess rules and meters" - List involvedSwitches = flows.collectMany { - [it.source.datapath, it.destination.datapath].collect { findSw(it) } + List involvedSwitches = flows.collectMany { flowExtended -> + flowExtended.retrieveAllEntityPaths().getInvolvedSwitches() }.unique() - validateSwitches(involvedSwitches) + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(involvedSwitches).isEmpty() + } where: data << [ [description: "no vlan vs vlan on the same port on src switch"].tap { def switchPair = getSwitchPairs().all().nonNeighbouring().random() - def flow1 = getFlowHelper().randomFlow(switchPair) - flow1.source.portNumber = getFreePort(switchPair.src, [switchPair.dst]) - flow1.source.vlanId = 0 - def flow2 = getFlowHelper().randomFlow(switchPair, false, [flow1]) - flow2.source.portNumber = flow1.source.portNumber + def flow1 = getFlowFactory().getBuilder(switchPair) + .withSourcePort(getFreePort(switchPair.src, [switchPair.dst])) + .withSourceVlan(0).build() + def flow2 = getFlowFactory().getBuilder(switchPair, false, flow1.occupiedEndpoints()) + .withSourcePort(flow1.source.portNumber).build() it.flows = [flow1, flow2] - it.firstSwap = new SwapFlowPayload(flow1.id, getFlowHelper().toFlowEndpointV2(flow2.source), - getFlowHelper().toFlowEndpointV2(flow1.destination)) - it.secondSwap = new SwapFlowPayload(flow2.id, getFlowHelper().toFlowEndpointV2(flow1.source), - getFlowHelper().toFlowEndpointV2(flow2.destination)) + it.firstSwap = new SwapFlowPayload(flow1.flowId, flow2.source, flow1.destination) + it.secondSwap = new SwapFlowPayload(flow2.flowId, flow1.source, flow2.destination) }, [description: "same port, swap vlans on dst switch + third idle novlan flow on that port"].tap { def switchPair = getSwitchPairs().all().nonNeighbouring().random() - def flow1 = getFlowHelper().randomFlow(switchPair) - def flow2 = getFlowHelper().randomFlow(switchPair, false, [flow1]) - flow1.destination.portNumber = getFreePort(switchPair.dst, [switchPair.src]) - flow2.destination.portNumber = flow1.destination.portNumber - flow2.destination.vlanId = 
getFreeVlan(flow2.destination.datapath, [flow1]) - def flow3 = getFlowHelper().randomFlow(switchPair, false, [flow1, flow2]) - flow3.destination.portNumber = flow1.destination.portNumber - flow3.destination.vlanId = 0 + def flow1 = getFlowFactory().getBuilder(switchPair) + .withDestinationPort(getFreePort(switchPair.dst, [switchPair.src])).build() + List busyEndpoints = flow1.occupiedEndpoints() + def flow2 = getFlowFactory().getBuilder(switchPair, false, busyEndpoints) + .withDestinationPort(flow1.destination.portNumber) + .build() + flow2.destination.vlanId = getFreeVlan(flow2.destination.switchId, busyEndpoints) + busyEndpoints.addAll(flow2.occupiedEndpoints()) + def flow3 = getFlowFactory().getBuilder(switchPair, false, busyEndpoints) + .withDestinationPort(flow1.destination.portNumber) + .withDestinationVlan(0).build() it.flows = [flow1, flow2, flow3] - it.firstSwap = new SwapFlowPayload(flow1.id, getFlowHelper().toFlowEndpointV2(flow1.source), - getFlowHelper().toFlowEndpointV2(flow2.destination)) - it.secondSwap = new SwapFlowPayload(flow2.id, getFlowHelper().toFlowEndpointV2(flow2.source), - getFlowHelper().toFlowEndpointV2(flow1.destination)) + + it.firstSwap = new SwapFlowPayload(flow1.flowId, flow1.source, flow2.destination) + it.secondSwap = new SwapFlowPayload(flow2.flowId, flow2.source, flow1.destination) }, [description: "vlan on src1 <-> vlan on dst2, same port numbers"].tap { def switchPair = getSwitchPairs().all().nonNeighbouring().random() - def flow1 = getFlowHelper().randomFlow(switchPair) - def flow2 = getFlowHelper().randomFlow(switchPair, false, [flow1]) - flow1.source.portNumber = getFreePort(switchPair.src, [switchPair.dst]) - flow2.destination.portNumber = flow1.source.portNumber + def flow1 = getFlowFactory().getBuilder(switchPair) + .withSourcePort(getFreePort(switchPair.src, [switchPair.dst])).build() + def flow2 = getFlowFactory().getBuilder(switchPair, false, flow1.occupiedEndpoints()) + .withDestinationPort(flow1.source.portNumber).build() it.flows = [flow1, flow2] - it.firstSwap = new SwapFlowPayload(flow1.id, - getFlowHelper().toFlowEndpointV2(flow1.source).tap { it.vlanId = flow2.destination.vlanId }, - getFlowHelper().toFlowEndpointV2(flow1.destination)) - it.secondSwap = new SwapFlowPayload(flow2.id, - getFlowHelper().toFlowEndpointV2(flow2.source), - getFlowHelper().toFlowEndpointV2(flow2.destination).tap { it.vlanId = flow1.source.vlanId }) + it.firstSwap = new SwapFlowPayload(flow1.flowId, + flow1.source.tap { it.vlanId = flow2.destination.vlanId }, + flow1.destination) + it.secondSwap = new SwapFlowPayload(flow2.flowId, + flow2.source, + flow2.destination.tap { it.vlanId = flow1.source.vlanId }) }, [description: "port on dst1 <-> port on src2, vlans are equal"].tap { def switchPair = getSwitchPairs().all().nonNeighbouring().random() - def flow1 = getFlowHelper().randomFlow(switchPair, false) - def flow2 = getFlowHelper().randomFlow(switchPair, false, [flow1]) + def flow1 = getFlowFactory().getBuilder(switchPair, false).build() + def flow2 = getFlowFactory().getBuilder(switchPair, false, flow1.occupiedEndpoints()) + .withSourceVlan(flow1.source.vlanId) + .build() flow1.destination.portNumber = getFreePort(switchPair.dst, [switchPair.src], [flow1.source.portNumber, flow2.source.portNumber]) flow2.source.portNumber = getFreePort(switchPair.src, [switchPair.dst], [flow2.destination.portNumber, flow1.source.portNumber]) - flow2.source.vlanId = flow1.source.vlanId it.flows = [flow1, flow2] - it.firstSwap = new SwapFlowPayload(flow1.id, - 
getFlowHelper().toFlowEndpointV2(flow1.source), - getFlowHelper().toFlowEndpointV2(flow1.destination) - .tap { it.portNumber = flow2.source.portNumber }) - it.secondSwap = new SwapFlowPayload(flow2.id, - getFlowHelper().toFlowEndpointV2(flow2.source) - .tap { it.portNumber = flow1.destination.portNumber }, - getFlowHelper().toFlowEndpointV2(flow2.destination)) + it.firstSwap = new SwapFlowPayload(flow1.flowId, + flow1.source, + flow1.destination.tap { it.portNumber = flow2.source.portNumber }) + it.secondSwap = new SwapFlowPayload(flow2.flowId, + flow2.source.tap { it.portNumber = flow1.destination.portNumber }, + flow2.destination) }, [description: "switch on src1 <-> switch on dst2, other params random"].tap { def switchPair = getSwitchPairs().all().nonNeighbouring().random() - def flow1 = getFlowHelper().randomFlow(switchPair) - def flow2 = getFlowHelper().randomFlow(switchPair, false, [flow1]) - flow1.source.portNumber = getFreePort(switchPair.src, [switchPair.dst]) - flow2.destination.portNumber = getFreePort(switchPair.dst, [switchPair.src]) + def flow1 = getFlowFactory().getBuilder(switchPair) + .withSourcePort(getFreePort(switchPair.src, [switchPair.dst])).build() + def flow2 = getFlowFactory().getBuilder(switchPair, false, flow1.occupiedEndpoints()) + .withDestinationPort(getFreePort(switchPair.dst, [switchPair.src])).build() it.flows = [flow1, flow2] - it.firstSwap = new SwapFlowPayload(flow1.id, - getFlowHelper().toFlowEndpointV2(flow1.source) - .tap { it.switchId = flow2.destination.datapath }, - getFlowHelper().toFlowEndpointV2(flow1.destination)) - it.secondSwap = new SwapFlowPayload(flow2.id, - getFlowHelper().toFlowEndpointV2(flow2.source), - getFlowHelper().toFlowEndpointV2(flow2.destination) - .tap { it.switchId = flow1.source.datapath }) + it.firstSwap = new SwapFlowPayload(flow1.flowId, + flow1.source.tap { it.switchId = flow2.destination.switchId }, + flow1.destination) + it.secondSwap = new SwapFlowPayload(flow2.flowId, + flow2.source, + flow2.destination.tap { it.switchId = flow1.source.switchId }) }, [description: "both endpoints swap, same switches"].tap { def switchPair = getSwitchPairs().all().nonNeighbouring().random() - def flow1 = getFlowHelper().randomFlow(switchPair) - def flow2 = getFlowHelper().randomFlow(switchPair, false, [flow1]) - flow1.source.portNumber = getFreePort(switchPair.src, [switchPair.dst]) - flow1.destination.portNumber = getFreePort(switchPair.dst, [switchPair.src]) - flow2.source.portNumber = getFreePort(switchPair.src, [switchPair.dst]) - flow2.destination.portNumber = getFreePort(switchPair.dst, [switchPair.src]) + def flow1 = getFlowFactory().getBuilder(switchPair) + .withSourcePort(getFreePort(switchPair.src, [switchPair.dst])) + .withDestinationPort(getFreePort(switchPair.dst, [switchPair.src])).build() + def flow2 = getFlowFactory().getBuilder(switchPair, false, flow1.occupiedEndpoints()) + .withSourcePort(getFreePort(switchPair.src, [switchPair.dst])) + .withDestinationPort(getFreePort(switchPair.dst, [switchPair.src])).build() it.flows = [flow1, flow2] - it.firstSwap = new SwapFlowPayload(flow1.id, - getFlowHelper().toFlowEndpointV2(flow2.source), - getFlowHelper().toFlowEndpointV2(flow2.destination)) - it.secondSwap = new SwapFlowPayload(flow2.id, - getFlowHelper().toFlowEndpointV2(flow1.source), - getFlowHelper().toFlowEndpointV2(flow1.destination)) + it.firstSwap = new SwapFlowPayload(flow1.flowId, flow2.source, flow2.destination) + it.secondSwap = new SwapFlowPayload(flow2.flowId, flow1.source, flow1.destination) }, 
[description: "endpoints src1 <-> dst2, same switches"].tap { def switchPair = getSwitchPairs().all().nonNeighbouring().random() - def flow1 = getFlowHelper().randomFlow(switchPair) - def flow2 = getFlowHelper().randomFlow(switchPair, false, [flow1]) - flow1.source.portNumber = getFreePort(switchPair.src, [switchPair.dst]) - flow1.destination.portNumber = getFreePort(switchPair.dst, [switchPair.src]) - flow2.source.portNumber = getFreePort(switchPair.src, [switchPair.dst], [flow1.source.portNumber]) - flow2.destination.portNumber = getFreePort(switchPair.dst, [switchPair.src], [flow1.destination.portNumber]) - flow1.source.vlanId = getFreeVlan(flow2.destination.datapath, [flow2]) - flow2.destination.vlanId = getFreeVlan(flow1.destination.datapath, [flow1]) + def flow1 = getFlowFactory().getBuilder(switchPair) + .withSourcePort(getFreePort(switchPair.src, [switchPair.dst])) + .withDestinationPort(getFreePort(switchPair.dst, [switchPair.src])).build() + def flow2 = getFlowFactory().getBuilder(switchPair, false, flow1.occupiedEndpoints()) + .withSourcePort(getFreePort(switchPair.src, [switchPair.dst], [flow1.source.portNumber])) + .withDestinationPort(getFreePort(switchPair.dst, [switchPair.src], [flow1.destination.portNumber])).build() + flow1.source.vlanId = getFreeVlan(flow2.destination.switchId, flow2.occupiedEndpoints()) + flow2.destination.vlanId = getFreeVlan(flow1.destination.switchId, flow1.occupiedEndpoints()) it.flows = [flow1, flow2] - it.firstSwap = new SwapFlowPayload(flow1.id, - getFlowHelper().toFlowEndpointV2(flow2.destination), - getFlowHelper().toFlowEndpointV2(flow1.destination)) - it.secondSwap = new SwapFlowPayload(flow2.id, - getFlowHelper().toFlowEndpointV2(flow2.source), - getFlowHelper().toFlowEndpointV2(flow1.source)) + it.firstSwap = new SwapFlowPayload(flow1.flowId, flow2.destination, flow1.destination) + it.secondSwap = new SwapFlowPayload(flow2.flowId, flow2.source, flow1.source) }, [description: "endpoints src1 <-> src2, different src switches, same dst"].tap { List swPairs = getSwitchPairs().all().nonNeighbouring().getSwitchPairs() @@ -195,20 +198,16 @@ class SwapEndpointSpec extends HealthCheckSpecification { if (halfDifferent) result = [switchPair, halfDifferent] return result } - def flow1 = getFlowHelper().randomFlow(swPairs[0]) - def flow2 = getFlowHelper().randomFlow(swPairs[1], false, [flow1]) - flow1.source.portNumber = getFreePort(swPairs[0].src, [swPairs[1].src]) - flow2.source.portNumber = getFreePort(swPairs[1].src, [swPairs[0].src]) + def flow1 = getFlowFactory().getBuilder(swPairs[0]) + .withSourcePort(getFreePort(swPairs[0].src, [swPairs[1].src])).build() + def flow2 = getFlowFactory().getBuilder(swPairs[1], false, flow1.occupiedEndpoints()) + .withSourcePort(getFreePort(swPairs[1].src, [swPairs[0].src])).build() it.flows = [flow1, flow2] - it.firstSwap = new SwapFlowPayload(flow1.id, - getFlowHelper().toFlowEndpointV2(flow2.source), - getFlowHelper().toFlowEndpointV2(flow1.destination)) - it.secondSwap = new SwapFlowPayload(flow2.id, - getFlowHelper().toFlowEndpointV2(flow1.source), - getFlowHelper().toFlowEndpointV2(flow2.destination)) + it.firstSwap = new SwapFlowPayload(flow1.flowId, flow2.source, flow1.destination) + it.secondSwap = new SwapFlowPayload(flow2.flowId, flow1.source, flow2.destination) } ] - flows = data.flows as List + flows = data.flows as List firstSwap = data.firstSwap as SwapFlowPayload secondSwap = data.secondSwap as SwapFlowPayload } @@ -217,57 +216,60 @@ class SwapEndpointSpec extends HealthCheckSpecification { def 
"Able to swap #data.endpointsPart (src1 <-> dst2, dst1 <-> src2) for two flows with the same source and different destination \ switches"() { given: "Two flows with the same source and different destination switches" - flowHelper.addFlow(data.flow1) - flowHelper.addFlow(data.flow2) + FlowExtended flow = data.flow1.create() + FlowExtended additionalFlow = data.flow2.create() when: "Try to swap #endpointsPart for flows" def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(data.flow1.id, flowHelper.toFlowEndpointV2(data.flow1Src), - flowHelper.toFlowEndpointV2(data.flow1Dst)), - new SwapFlowPayload(data.flow2.id, flowHelper.toFlowEndpointV2(data.flow2Src), - flowHelper.toFlowEndpointV2(data.flow2Dst))) + new SwapFlowPayload(flow.flowId, data.flow1Src, data.flow1Dst), + new SwapFlowPayload(additionalFlow.flowId, data.flow2Src, data.flow2Dst)) then: "#endpointsPart.capitalize() are successfully swapped" verifyEndpoints(response, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) - verifyEndpoints(data.flow1.id, data.flow2.id, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) + + verifyEndpoints(flow, additionalFlow, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(data.flow1, data.flow2) + flow.validateAndCollectDiscrepancies().isEmpty() + additionalFlow.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(data.switchPairs[0]) - validateSwitches(data.switchPairs[1]) + List switches = [flow, additionalFlow].collectMany{it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } where: data << [{ it.endpointsPart = "vlans" - it.flow1Src = changePropertyValue(it.flow1.source, "vlanId", it.flow2.destination.vlanId) - it.flow1Dst = changePropertyValue(it.flow1.destination, "vlanId", it.flow2.source.vlanId) - it.flow2Src = changePropertyValue(it.flow2.source, "vlanId", it.flow1.destination.vlanId) - it.flow2Dst = changePropertyValue(it.flow2.destination, "vlanId", it.flow1.source.vlanId) + it.flow1Src = it.flow1.source.jacksonCopy().tap { endpoint -> endpoint.vlanId = it.flow2.destination.vlanId } + it.flow1Dst = it.flow1.destination.jacksonCopy().tap { endpoint -> endpoint.vlanId = it.flow2.source.vlanId } + it.flow2Src = it.flow2.source.jacksonCopy().tap { endpoint -> endpoint.vlanId = it.flow1.destination.vlanId } + it.flow2Dst = it.flow2.destination.jacksonCopy().tap { endpoint -> endpoint.vlanId = it.flow1.source.vlanId } }, { it.endpointsPart = "ports" - it.flow1Src = changePropertyValue(it.flow1.source, "portNumber", it.flow2.destination.portNumber) - it.flow1Dst = changePropertyValue(it.flow1.destination, "portNumber", it.flow2.source.portNumber) - it.flow2Src = changePropertyValue(it.flow2.source, "portNumber", it.flow1.destination.portNumber) - it.flow2Dst = changePropertyValue(it.flow2.destination, "portNumber", it.flow1.source.portNumber) + it.flow1Src = it.flow1.source.jacksonCopy().tap { endpoint -> endpoint.portNumber = it.flow2.destination.portNumber } + it.flow1Dst = it.flow1.destination.jacksonCopy().tap { endpoint -> endpoint.portNumber = it.flow2.source.portNumber } + it.flow2Src = it.flow2.source.jacksonCopy().tap { endpoint -> endpoint.portNumber = it.flow1.destination.portNumber } + it.flow2Dst = it.flow2.destination.jacksonCopy().tap { 
endpoint -> endpoint.portNumber = it.flow1.source.portNumber } }, { it.endpointsPart = "switches" - it.flow1Src = changePropertyValue(it.flow1.source, "datapath", it.flow2.destination.datapath) - it.flow1Dst = changePropertyValue(it.flow1.destination, "datapath", it.flow2.source.datapath) - it.flow2Src = changePropertyValue(it.flow2.source, "datapath", it.flow1.destination.datapath) - it.flow2Dst = changePropertyValue(it.flow2.destination, "datapath", it.flow1.source.datapath) - }].collect { iterationData -> + it.flow1Src = it.flow1.source.jacksonCopy().tap { endpoint -> endpoint.switchId = it.flow2.destination.switchId } + it.flow1Dst = it.flow1.destination.jacksonCopy().tap { endpoint -> endpoint.switchId = it.flow2.source.switchId } + it.flow2Src = it.flow2.source.jacksonCopy().tap { endpoint -> endpoint.switchId = it.flow1.destination.switchId } + it.flow2Dst = it.flow2.destination.jacksonCopy().tap { endpoint -> endpoint.switchId = it.flow1.source.switchId } + } + ].collect { iterationData -> def switchPairs = getSwitchPairs().all().nonNeighbouring().getSwitchPairs().inject(null) { result, switchPair -> if (result) return result def halfDifferent = getHalfDifferentNotNeighboringSwitchPair(switchPair, "src") if (halfDifferent) result = [switchPair, halfDifferent] return result } - def flow1 = getFirstFlow(switchPairs?.get(0), switchPairs?.get(1)) - def flow2 = getSecondFlow(switchPairs?.get(0), switchPairs?.get(1), flow1) + FlowExtended flow1 = getFirstFlow(switchPairs?.get(0), switchPairs?.get(1)) + FlowExtended flow2 = getSecondFlow(switchPairs?.get(0), switchPairs?.get(1), flow1) [switchPairs: switchPairs, flow1: flow1, flow2: flow2].tap(iterationData) } } @@ -276,26 +278,29 @@ switches"() { def "Able to swap endpoints (#data.description) for two flows with the same source and different destination \ switches"() { given: "Two flows with the same source and different destination switches" - flowHelper.addFlow(data.flow1) - flowHelper.addFlow(data.flow2) + FlowExtended flow = data.flow1.create() + FlowExtended additionalFlow = data.flow2.create() when: "Try to swap endpoints for flows" def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(data.flow1.id, flowHelper.toFlowEndpointV2(data.flow1Src), - flowHelper.toFlowEndpointV2(data.flow1Dst)), - new SwapFlowPayload(data.flow2.id, flowHelper.toFlowEndpointV2(data.flow2Src), - flowHelper.toFlowEndpointV2(data.flow2Dst))) + new SwapFlowPayload(flow.flowId, data.flow1Src, data.flow1Dst), + new SwapFlowPayload(additionalFlow.flowId,data.flow2Src, data.flow2Dst)) then: "Endpoints are successfully swapped" verifyEndpoints(response, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) - verifyEndpoints(data.flow1.id, data.flow2.id, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) + verifyEndpoints(flow, additionalFlow, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) + and: "Flows validation doesn't show any discrepancies" - validateFlows(data.flow1, data.flow2) + flow.validateAndCollectDiscrepancies().isEmpty() + additionalFlow.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(data.switchPairs[0]) - validateSwitches(data.switchPairs[1]) + List switches = [flow, additionalFlow].collectMany{ it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } + where: data << [{ @@ 
-342,30 +347,31 @@ switches"() { def "Able to swap #endpointsPart (#description) for two flows with different source and the same destination \ switches"() { given: "Two flows with different source and the same destination switches" - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + FlowExtended flow = flow1.create() + FlowExtended additionalFlow = flow2.create() when: "Try to swap #endpointsPart for flows" def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(flow1Src), - flowHelper.toFlowEndpointV2(flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow2Src), - flowHelper.toFlowEndpointV2(flow2Dst))) + new SwapFlowPayload(flow.flowId, flow1Src, flow1Dst), + new SwapFlowPayload(additionalFlow.flowId, flow2Src, flow2Dst)) then: "#endpointsPart.capitalize() are successfully swapped" verifyEndpoints(response, flow1Src, flow1Dst, flow2Src, flow2Dst) - verifyEndpoints(flow1.id, flow2.id, flow1Src, flow1Dst, flow2Src, flow2Dst) + verifyEndpoints(flow, additionalFlow, flow1Src, flow1Dst, flow2Src, flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow.validateAndCollectDiscrepancies().isEmpty() + additionalFlow.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(swPairs[0]) - validateSwitches(swPairs[1]) + List switches = [flow, additionalFlow].collectMany{it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } where: endpointsPart << ["vlans", "ports", "switches"] - proprtyName << ["vlanId", "portNumber", "datapath"] + proprtyName << ["vlanId", "portNumber", "switchId"] description = "src1 <-> dst2, dst1 <-> src2" swPairs = switchPairs.all().nonNeighbouring().getSwitchPairs().inject(null) { result, switchPair -> if (result) return result @@ -375,80 +381,85 @@ switches"() { } flow1 = getFirstFlow(swPairs?.get(0), swPairs?.get(1)) flow2 = getSecondFlow(swPairs?.get(0), swPairs?.get(1), flow1) - flow1Src = changePropertyValue(flow1.source, proprtyName, flow2.destination."$proprtyName") - flow1Dst = changePropertyValue(flow1.destination, proprtyName, flow2.source."$proprtyName") - flow2Src = changePropertyValue(flow2.source, proprtyName, flow1.destination."$proprtyName") - flow2Dst = changePropertyValue(flow2.destination, proprtyName, flow1.source."$proprtyName") + flow1Src = flow1.source.jacksonCopy().tap { endpoint -> endpoint."$proprtyName" = flow2.destination."$proprtyName" } + flow1Dst = flow1.destination.jacksonCopy().tap { endpoint -> endpoint."$proprtyName" = flow2.source."$proprtyName" } + flow2Src = flow2.source.jacksonCopy().tap { endpoint -> endpoint."$proprtyName" = flow1.destination."$proprtyName" } + flow2Dst = flow2.destination.jacksonCopy().tap { endpoint -> endpoint."$proprtyName" = flow1.source."$proprtyName" } + } @IterationTag(tags = [LOW_PRIORITY], iterationNameRegex = /dst1/) def "Able to swap #endpointsPart (#description) for two flows with different source and destination switches"() { given: "Two flows with different source and destination switches" - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + FlowExtended flow = flow1.create() + FlowExtended additionalFlow = flow2.create() when: "Try to swap #endpointsPart for flows" def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, 
flowHelper.toFlowEndpointV2(flow1Src), - flowHelper.toFlowEndpointV2(flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow2Src), - flowHelper.toFlowEndpointV2(flow2Dst))) + new SwapFlowPayload(flow.flowId, flow1Src, flow1Dst), + new SwapFlowPayload(additionalFlow.flowId, flow2Src, flow2Dst)) then: "#endpointsPart.capitalize() are successfully swapped" verifyEndpoints(response, flow1Src, flow1Dst, flow2Src, flow2Dst) - verifyEndpoints(flow1.id, flow2.id, flow1Src, flow1Dst, flow2Src, flow2Dst) + verifyEndpoints(flow, additionalFlow, flow1Src, flow1Dst, flow2Src, flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow.validateAndCollectDiscrepancies().isEmpty() + additionalFlow.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(flow1SwitchPair) - validateSwitches(flow2SwitchPair) + List switches = [flow, additionalFlow].collectMany{it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } where: endpointsPart << ["vlans", "ports", "switches"] - proprtyName << ["vlanId", "portNumber", "datapath"] + proprtyName << ["vlanId", "portNumber", "switchId"] description = "src1 <-> dst2, dst1 <-> src2" flow1SwitchPair = switchPairs.all().nonNeighbouring().random() flow2SwitchPair = getDifferentNotNeighboringSwitchPair(flow1SwitchPair) flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) - flow1Src = changePropertyValue(flow1.source, proprtyName, flow2.destination."$proprtyName") - flow1Dst = changePropertyValue(flow1.destination, proprtyName, flow2.source."$proprtyName") - flow2Src = changePropertyValue(flow2.source, proprtyName, flow1.destination."$proprtyName") - flow2Dst = changePropertyValue(flow2.destination, proprtyName, flow1.source."$proprtyName") + + flow1Src = flow1.source.jacksonCopy().tap { endpoint -> endpoint."$proprtyName" = flow2.destination."$proprtyName" } + flow1Dst = flow1.destination.jacksonCopy().tap { endpoint -> endpoint."$proprtyName" = flow2.source."$proprtyName" } + flow2Src = flow2.source.jacksonCopy().tap { endpoint -> endpoint."$proprtyName" = flow1.destination."$proprtyName" } + flow2Dst = flow2.destination.jacksonCopy().tap { endpoint -> endpoint."$proprtyName" = flow1.source."$proprtyName" } } @Tags(LOW_PRIORITY) def "Able to swap endpoints (#data.description) for two flows with different source and destination switches"() { given: "Two flows with different source and destination switches" - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + FlowExtended flow = flow1.create() + FlowExtended additionalFlow = flow2.create() when: "Try to swap endpoints for flows" def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(data.flow1Src), - flowHelper.toFlowEndpointV2(data.flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(data.flow2Src), - flowHelper.toFlowEndpointV2(data.flow2Dst))) + new SwapFlowPayload(flow.flowId, data.flow1Src, data.flow1Dst), + new SwapFlowPayload(additionalFlow.flowId, data.flow2Src, data.flow2Dst)) then: "Endpoints are successfully swapped" verifyEndpoints(response, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) - verifyEndpoints(flow1.id, flow2.id, data.flow1Src, data.flow1Dst, data.flow2Src, 
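The endpoint permutations in these where: blocks are built by deep-copying an endpoint with jacksonCopy() and mutating a single property via tap, which replaces the removed changePropertyValue helper (an ObjectMapper serialize/deserialize round-trip). A minimal sketch of an equivalent copy-then-mutate helper, assuming a Jackson round-trip is an acceptable stand-in for the framework's jacksonCopy():

    import com.fasterxml.jackson.databind.ObjectMapper

    // Deep copy via a Jackson round-trip, then mutate one field on the copy.
    // The framework's jacksonCopy() is assumed to behave similarly; the removed
    // changePropertyValue helper did exactly this with FlowEndpointPayload.
    class EndpointCopy {
        private static final ObjectMapper MAPPER = new ObjectMapper()

        static <T> T deepCopy(T source, Class<T> type) {
            MAPPER.readValue(MAPPER.writeValueAsString(source), type)
        }
    }

    // Hypothetical usage mirroring the where: block above:
    // def flow1Src = EndpointCopy.deepCopy(flow1.source, FlowEndpointV2)
    //         .tap { it.switchId = flow2.destination.switchId }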
data.flow2Dst) + verifyEndpoints(flow, additionalFlow, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow.validateAndCollectDiscrepancies().isEmpty() + additionalFlow.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(flow1SwitchPair) - validateSwitches(flow2SwitchPair) + List switches = [flow, additionalFlow].collectMany{it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } where: data << [{ it.description = "src1 <-> src2" - it.flow2 = changePropertyValue( - changePropertyValue(getFlowHelper().randomFlow(it.flow2SwitchPair), "source", "portNumber", - it.flow1.source.portNumber), "source", "vlanId", it.flow1.source.vlanId) + it.flow2 = getFlowFactory().getBuilder(it.flow2SwitchPair) + .withSourcePort(it.flow1.source.portNumber) + .withSourceVlan(it.flow1.source.vlanId).build() + it.flow1Src = it.flow2.source it.flow1Dst = it.flow1.destination it.flow2Src = it.flow1.source @@ -456,9 +467,10 @@ switches"() { }, { it.description = "dst1 <-> dst2" - it.flow2 = changePropertyValue( - changePropertyValue(getFlowHelper().randomFlow(it.flow2SwitchPair), "destination", "portNumber", - it.flow1.destination.portNumber), "destination", "vlanId", it.flow1.destination.vlanId) + it.flow2 = getFlowFactory().getBuilder(it.flow2SwitchPair) + .withDestinationPort(it.flow1.destination.portNumber) + .withDestinationVlan(it.flow1.destination.vlanId).build() + it.flow1Src = it.flow1.source it.flow1Dst = it.flow2.destination it.flow2Src = it.flow2.source @@ -466,9 +478,10 @@ switches"() { }, { it.description = "src1 <-> dst2" - it.flow2 = changePropertyValue( - changePropertyValue(getFlowHelper().randomFlow(it.flow2SwitchPair), "destination", "portNumber", - it.flow1.source.portNumber), "destination", "vlanId", it.flow1.source.vlanId) + it.flow2 = getFlowFactory().getBuilder(it.flow2SwitchPair) + .withDestinationPort( it.flow1.source.portNumber) + .withDestinationVlan(it.flow1.source.vlanId).build() + it.flow1Src = it.flow2.destination it.flow1Dst = it.flow1.destination it.flow2Src = it.flow2.source @@ -476,9 +489,9 @@ switches"() { }, { it.description = "dst1 <-> src2" - it.flow2 = changePropertyValue( - changePropertyValue(getFlowHelper().randomFlow(it.flow2SwitchPair), "source", "portNumber", - it.flow1.destination.portNumber), "source", "vlanId", it.flow1.destination.vlanId) + it.flow2 = getFlowFactory().getBuilder(it.flow2SwitchPair).withSourcePort(it.flow1.destination.portNumber) + .withSourceVlan(it.flow1.destination.vlanId).build() + it.flow1Src = it.flow1.source it.flow1Dst = it.flow2.source it.flow2Src = it.flow1.destination @@ -489,56 +502,42 @@ switches"() { def flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) [flow1SwitchPair: flow1SwitchPair, flow2SwitchPair: flow2SwitchPair, flow1: flow1].tap(iterationData) } - flow1 = data.flow1 as FlowCreatePayload - flow2 = data.flow2 as FlowCreatePayload - flow1SwitchPair = data.flow1SwitchPair as SwitchPair - flow2SwitchPair = data.flow2SwitchPair as SwitchPair + flow1 = data.flow1 as FlowExtended + flow2 = data.flow2 as FlowExtended } def "Unable to swap endpoints for existing flow and non-existing flow"() { given: "An active flow" def switchPair = switchPairs.all().neighbouring().random() - def flow1 = 
flowHelper.randomFlow(switchPair) - def flow2 = flowHelper.randomFlow(switchPair) - flowHelper.addFlow(flow1) - flow2.id = NON_EXISTENT_FLOW_ID + def flow1 = flowFactory.getRandom(switchPair) + def flow2 = flowFactory.getBuilder(switchPair).build() when: "Try to swap endpoints for existing flow and non-existing flow" northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(flow1.source), - flowHelper.toFlowEndpointV2(flow2.destination)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow2.source), - flowHelper.toFlowEndpointV2(flow1.destination))) + new SwapFlowPayload(flow1.flowId, flow1.source, flow2.destination), + new SwapFlowPayload(flow2.flowId, flow2.source, flow1.destination)) then: "An error is received (404 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - def error = exc.responseBodyAsString.to(MessageError) - error.errorMessage == "Could not swap endpoints" - error.errorDescription == "Flow ${flow2.id} not found" - def isTestComplete = true + new FlowEndpointsNotSwappedExpectedError(HttpStatus.NOT_FOUND, ~/Flow ${flow2.flowId} not found/).matches(exc) } @Tags(LOW_PRIORITY) def "Unable to swap #data.endpointsPart for two flows: #data.description"() { given: "Three active flows" - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) - flowHelper.addFlow(flow3) + flow1.create() + flow2.create() + flow3.create() when: "Try to swap #endpointsPart for two flows" northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(data.flow1Src), - flowHelper.toFlowEndpointV2(data.flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(data.flow2Src), - flowHelper.toFlowEndpointV2(data.flow2Dst))) + new SwapFlowPayload(flow1.flowId, data.flow1Src, data.flow1Dst), + new SwapFlowPayload(flow2.flowId, data.flow2Src, data.flow2Dst)) then: "An error is received (409 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 409 - def error = exc.responseBodyAsString.to(MessageError) - error.errorMessage == "Could not swap endpoints" - error.errorDescription.contains("Requested flow '$flow1.id' conflicts with existing flow '$flow3.id'.") + new FlowEndpointsNotSwappedExpectedError(HttpStatus.CONFLICT, + ~/Requested flow '${flow1.flowId}' conflicts with existing flow '${flow3.flowId}'./).matches(exc) where: data << [{ @@ -546,29 +545,41 @@ switches"() { endpointsPart = "ports and vlans" flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) - flow3 = getConflictingFlow(flow1SwitchPair, flow1, "source", changePropertyValue( - changePropertyValue(flow1.source, "portNumber", flow2.destination.portNumber), "vlanId", - flow2.destination.vlanId)) - flow1Src = changePropertyValue(changePropertyValue(flow1.source, "portNumber", - flow2.destination.portNumber), "vlanId", flow2.destination.vlanId) + flow3 = getFlowFactory().getBuilder(flow1SwitchPair, false, flow1.occupiedEndpoints()) + .withSourceSwitch(flow1.source.switchId) + .withSourcePort(flow2.destination.portNumber) + .withSourceVlan(flow2.destination.vlanId).build() + + flow1Src = flow1.source.jacksonCopy().tap { endpoint -> + endpoint.portNumber = flow2.destination.portNumber + endpoint.vlanId = flow2.destination.vlanId + } flow1Dst = flow1.destination flow2Src = flow2.source - flow2Dst = changePropertyValue(changePropertyValue(flow2.destination, "portNumber", - flow1.source.portNumber), "vlanId", flow1.source.vlanId) + flow2Dst = 
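The negative tests above now assert failures through FlowEndpointsNotSwappedExpectedError matchers instead of unpacking rawStatusCode and MessageError by hand, so the expected HTTP status and the error description regex are checked in one place. One plausible shape for such a matcher is sketched below; only the constructors and matches() call are taken from this diff, the class name and body are assumptions, not the framework's actual implementation:

    import org.springframework.http.HttpStatus
    import org.springframework.web.client.HttpStatusCodeException

    import java.util.regex.Pattern

    // Hypothetical matcher: verifies the HTTP status and that the response
    // body matches the expected error-description pattern.
    class ExpectedHttpError {
        HttpStatus status
        Pattern descriptionPattern

        ExpectedHttpError(HttpStatus status, Pattern descriptionPattern) {
            this.status = status
            this.descriptionPattern = descriptionPattern
        }

        boolean matches(HttpStatusCodeException exc) {
            exc.statusCode == status && exc.responseBodyAsString =~ descriptionPattern
        }
    }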
flow2.destination.jacksonCopy().tap { endpoint -> + endpoint.portNumber = flow1.source.portNumber + endpoint.vlanId = flow1.source.vlanId + } }, { description = "the same ports and vlans on dst switch" endpointsPart = "ports and vlans" flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) - flow3 = getConflictingFlow(flow1SwitchPair, flow1, "destination", changePropertyValue( - changePropertyValue(flow1.destination, "portNumber", flow2.source.portNumber), "vlanId", - flow2.source.vlanId)) + flow3 = getFlowFactory().getBuilder(flow1SwitchPair, false, flow1.occupiedEndpoints()) + .withDestinationSwitch(flow1.destination.switchId) + .withDestinationPort(flow2.source.portNumber) + .withDestinationVlan(flow2.source.vlanId).build() + flow1Src = flow1.source - flow1Dst = changePropertyValue(changePropertyValue(flow1.destination, "portNumber", - flow2.source.portNumber), "vlanId", flow2.source.vlanId) - flow2Src = changePropertyValue(changePropertyValue(flow2.source, "portNumber", - flow1.destination.portNumber), "vlanId", flow1.destination.vlanId) + flow1Dst = flow1.destination.jacksonCopy().tap { endpoint -> + endpoint.portNumber = flow2.source.portNumber + endpoint.vlanId = flow2.source.vlanId + } + flow2Src = flow2.source.jacksonCopy().tap { endpoint -> + endpoint.portNumber = flow1.destination.portNumber + endpoint.vlanId = flow1.destination.vlanId + } flow2Dst = flow2.destination }, { @@ -576,23 +587,36 @@ switches"() { endpointsPart = "vlans" flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) - flow3 = getConflictingFlow(flow1SwitchPair, flow1, "source", - changePropertyValue(flow1.source, "vlanId", flow2.destination.vlanId)) - flow1Src = changePropertyValue(flow1.source, "vlanId", flow2.destination.vlanId) + flow3 = getFlowFactory().getBuilder(flow1SwitchPair, false, flow1.occupiedEndpoints()) + .withSourceSwitch(flow1.source.switchId) + .withSourcePort(flow1.source.portNumber) + .withSourceVlan(flow2.destination.vlanId).build() + + flow1Src = flow1.source.jacksonCopy().tap { endpoint -> + endpoint.vlanId = flow2.destination.vlanId + } flow1Dst = flow1.destination flow2Src = flow2.source - flow2Dst = changePropertyValue(flow2.destination, "vlanId", flow1.source.vlanId) + flow2Dst = flow2.destination.jacksonCopy().tap { endpoint -> + endpoint.vlanId = flow1.source.vlanId + } }, { description = "the same vlans on the same port on dst switch" endpointsPart = "vlans" flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) - flow3 = getConflictingFlow(flow1SwitchPair, flow1, "destination", - changePropertyValue(flow1.destination, "vlanId", flow2.source.vlanId)) + flow3 = getFlowFactory().getBuilder(flow1SwitchPair, false, flow1.occupiedEndpoints()) + .withDestinationSwitch(flow1.destination.switchId) + .withDestinationPort(flow1.destination.portNumber) + .withDestinationVlan(flow2.source.vlanId).build() flow1Src = flow1.source - flow1Dst = changePropertyValue(flow1.destination, "vlanId", flow2.source.vlanId) - flow2Src = changePropertyValue(flow2.source, "vlanId", flow1.destination.vlanId) + flow1Dst = flow1.destination.jacksonCopy().tap { endpoint -> + endpoint.vlanId = flow2.source.vlanId + } + flow2Src = flow2.source.jacksonCopy().tap { endpoint -> + endpoint.vlanId = flow1.destination.vlanId + } flow2Dst = flow2.destination }, { @@ -600,23 +624,35 @@ switches"() { endpointsPart = "ports" flow1 = 
getFirstFlow(flow1SwitchPair, flow2SwitchPair, true) flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1, true) - flow3 = getConflictingFlow(flow1SwitchPair, flow1, "source", - changePropertyValue(flow1.source, "portNumber", flow2.destination.portNumber)) - flow1Src = changePropertyValue(flow1.source, "portNumber", flow2.destination.portNumber) + flow3 = getFlowFactory().getBuilder(flow1SwitchPair, false, flow1.occupiedEndpoints()) + .withSourceSwitch(flow1.source.switchId) + .withSourcePort(flow2.destination.portNumber) + .withSourceVlan(flow1.source.vlanId).build() + flow1Src = flow1.source.jacksonCopy().tap { endpoint -> + endpoint.portNumber = flow2.destination.portNumber + } flow1Dst = flow1.destination flow2Src = flow2.source - flow2Dst = changePropertyValue(flow2.destination, "portNumber", flow1.source.portNumber) + flow2Dst = flow2.destination.jacksonCopy().tap { endpoint -> + endpoint.portNumber = flow1.source.portNumber + } }, { description = "no vlans, both flows are on the same port on dst switch" endpointsPart = "ports" flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair, true) flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1, true) - flow3 = getConflictingFlow(flow1SwitchPair, flow1, "destination", - changePropertyValue(flow1.destination, "portNumber", flow2.source.portNumber)) + flow3 = getFlowFactory().getBuilder(flow1SwitchPair, false, flow1.occupiedEndpoints()) + .withDestinationSwitch(flow1.destination.switchId) + .withDestinationPort(flow2.source.portNumber) + .withDestinationVlan(flow1.destination.vlanId).build() flow1Src = flow1.source - flow1Dst = changePropertyValue(flow1.destination, "portNumber", flow2.source.portNumber) - flow2Src = changePropertyValue(flow2.source, "portNumber", flow1.destination.portNumber) + flow1Dst = flow1.destination.jacksonCopy().tap { endpoint -> + endpoint.portNumber = flow2.source.portNumber + } + flow2Src = flow2.source.jacksonCopy().tap { endpoint -> + endpoint.portNumber = flow1.destination.portNumber + } flow2Dst = flow2.destination }].collect { iterationData -> def flow1SwitchPair = switchPairs.all().nonNeighbouring().random() @@ -624,44 +660,40 @@ switches"() { def flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) [flow1SwitchPair: flow1SwitchPair, flow2SwitchPair: flow2SwitchPair, flow1: flow1].tap(iterationData) } - flow1 = data.flow1 as FlowCreatePayload - flow2 = data.flow2 as FlowCreatePayload - flow3 = data.flow3 as FlowCreatePayload + flow1 = data.flow1 as FlowExtended + flow2 = data.flow2 as FlowExtended + flow3 = data.flow3 as FlowExtended } + //start from here tomorrow @IterationTag(tags = [LOW_PRIORITY], iterationNameRegex = /the same src endpoint for flows/) def "Unable to swap endpoints for two flows (#data.description)"() { given: "Two active flows" - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + flow1.create() + flow2.create() when: "Try to swap endpoints for two flows" northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(data.flow1Src), - flowHelper.toFlowEndpointV2(data.flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(data.flow2Src), - flowHelper.toFlowEndpointV2(data.flow2Dst))) + new SwapFlowPayload(flow1.flowId, data.flow1Src, data.flow1Dst), + new SwapFlowPayload(flow2.flowId, data.flow2Src, data.flow2Dst)) then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def error = exc.responseBodyAsString.to(MessageError) - error.errorMessage == "Could not swap 
endpoints" - error.errorDescription == "New requested endpoint for '$flow2.id' conflicts with existing endpoint for '$flow1.id'" - + new FlowEndpointsNotSwappedExpectedError(HttpStatus.BAD_REQUEST, + ~/New requested endpoint for '${flow2.flowId}' conflicts with existing endpoint for '${flow1.flowId}'/).matches(exc) where: data << [{ description = "the same src endpoint for flows" flow1Src = flow1.source - flow1Dst = changePropertyValue(flow1.destination, "portNumber", flow2.destination.portNumber) + flow1Dst = flow1.destination.jacksonCopy().tap { endpoint -> endpoint.portNumber = flow2.destination.portNumber } flow2Src = flow1.source - flow2Dst = changePropertyValue(flow2.destination, "portNumber", flow1.destination.portNumber) + flow2Dst = flow2.destination.jacksonCopy().tap { endpoint -> endpoint.portNumber = flow1.destination.portNumber } }, { description = "the same dst endpoint for flows" - flow1Src = changePropertyValue(flow1.source, "portNumber", flow2.source.portNumber) + flow1Src = flow1.source.jacksonCopy().tap { endpoint -> endpoint.portNumber = flow2.source.portNumber } flow1Dst = flow1.destination - flow2Src = changePropertyValue(flow2.source, "portNumber", flow1.source.portNumber) + flow2Src = flow2.source.jacksonCopy().tap { endpoint -> endpoint.portNumber = flow1.source.portNumber } flow2Dst = flow1.destination }].collect { iterationData -> def flow1SwitchPair = switchPairs.all().nonNeighbouring().random() @@ -670,10 +702,8 @@ switches"() { def flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) [flow1SwitchPair: flow1SwitchPair, flow2SwitchPair: flow2SwitchPair, flow1: flow1, flow2: flow2].tap(iterationData) } - flow1 = data.flow1 as FlowCreatePayload - flow2 = data.flow2 as FlowCreatePayload - flow1SwitchPair = data.flow1SwitchPair as SwitchPair - flow2SwitchPair = data.flow2SwitchPair as SwitchPair + flow1 = data.flow1 as FlowExtended + flow2 = data.flow2 as FlowExtended } def "Unable to swap ports for two flows (port is occupied by ISL on src switch)"() { @@ -684,27 +714,26 @@ switches"() { islPort = topology.getAllowedPortsForSwitch(it.dst).find { it in busyPorts } } assert islPort - def flow1 = flowHelper.randomFlow(swPair) - def flow2 = changePropertyValue(flowHelper.randomFlow(swPair, false, [flow1]), - "destination", "portNumber", islPort) - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + def flow1 = flowFactory.getRandom(swPair) + def flow2 = flowFactory.getBuilder(swPair, false, flow1.occupiedEndpoints()) + .withDestinationPort(islPort).build() + .create() when: "Try to swap ports for two flows" northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2( - changePropertyValue(flow1.source, "portNumber", flow2.destination.portNumber)), - flowHelper.toFlowEndpointV2(flow1.destination)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow2.source), - flowHelper.toFlowEndpointV2(changePropertyValue( - flow1.destination, "portNumber", flow2.source.portNumber)))) + new SwapFlowPayload( + flow1.flowId, + flow1.source.jacksonCopy().tap { it.portNumber = flow2.destination.portNumber }, + flow1.destination), + new SwapFlowPayload( + flow2.flowId, + flow2.source, + flow1.destination.jacksonCopy().tap { it.portNumber = flow2.source.portNumber })) then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def error = exc.responseBodyAsString.to(MessageError) - error.errorMessage == "Could not swap endpoints" - error.errorDescription == "The port $islPort on the 
switch '${swPair.src.dpId}' is occupied by an ISL (source endpoint collision)." + new FlowEndpointsNotSwappedExpectedError(HttpStatus.BAD_REQUEST, + ~/The port $islPort on the switch \'${swPair.src.dpId}\' is occupied by an ISL \(source endpoint collision\)./).matches(exc) } @Tags(ISL_RECOVER_ON_FAIL) @@ -718,27 +747,21 @@ switches"() { } SwitchPair flow1SwitchPair = switchPairs[0] SwitchPair flow2SwitchPair = switchPairs[1] - def flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) - def flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) - - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + def flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair).create() + def flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1).create() and: "Update the first flow so that it consumes all bandwidth on the link" - def flow1Path = PathHelper.convert(northbound.getFlowPath(flow1.id)) - def flow1Isl = pathHelper.getInvolvedIsls(flow1Path)[0] + def flow1Isl = flow1.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() def flow1IslMaxBw = islUtils.getIslInfo(flow1Isl).get().maxBandwidth - northbound.updateFlow(flow1.id, flow1.tap { it.maximumBandwidth = flow1IslMaxBw }) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { assert northbound.getFlowStatus(flow1.id).status == FlowState.UP } + + flow1.update(flow1.tap { it.maximumBandwidth = flow1IslMaxBw }) and: "Break all alternative paths for the first flow" def broughtDownIsls = topology.getRelatedIsls(flow1SwitchPair.src) - flow1Isl islHelper.breakIsls(broughtDownIsls) - and: "Update max bandwidth for the second flow's link so that it is equal to max bandwidth of the first flow" - def flow2Path = PathHelper.convert(northbound.getFlowPath(flow2.id)) - def flow2Isl = pathHelper.getInvolvedIsls(flow2Path)[0] + def flow2Isl = flow2.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() islHelper.updateLinkMaxBandwidthUsingApi(flow2Isl, flow1IslMaxBw) and: "Break all alternative paths for the second flow" @@ -747,25 +770,26 @@ switches"() { when: "Try to swap endpoints for two flows" def flow1Src = flow2.source - def flow1Dst = changePropertyValue(flow1.destination, "portNumber", flow2.destination.portNumber) + def flow1Dst = flow1.destination.jacksonCopy().tap { it.portNumber = flow2.destination.portNumber} def flow2Src = flow1.source - def flow2Dst = changePropertyValue(flow2.destination, "portNumber", flow1.destination.portNumber) + def flow2Dst = flow2.destination.jacksonCopy().tap { it.portNumber = flow1.destination.portNumber} def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(flow1Src), - flowHelper.toFlowEndpointV2(flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow2Src), - flowHelper.toFlowEndpointV2(flow2Dst))) + new SwapFlowPayload(flow1.flowId, flow1Src, flow1Dst), + new SwapFlowPayload(flow2.flowId, flow2Src, flow2Dst)) then: "Endpoints are successfully swapped" verifyEndpoints(response, flow1Src, flow1Dst, flow2Src, flow2Dst) - verifyEndpoints(flow1.id, flow2.id, flow1Src, flow1Dst, flow2Src, flow2Dst) + verifyEndpoints(flow1, flow2, flow1Src, flow1Dst, flow2Src, flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow1.validateAndCollectDiscrepancies().isEmpty() + flow2.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(flow1SwitchPair) - validateSwitches(flow2SwitchPair) + List switches = [flow1, flow2].collectMany{ 
it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } } @Tags(ISL_RECOVER_ON_FAIL) @@ -779,26 +803,21 @@ switches"() { } SwitchPair flow1SwitchPair = switchPairs[0] SwitchPair flow2SwitchPair = switchPairs[1] - def flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) - def flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) - - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + def flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair).create() + def flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1).create() and: "Update the first flow so that it consumes all bandwidth on the link" - def flow1Path = PathHelper.convert(northbound.getFlowPath(flow1.id)) - def flow1Isl = pathHelper.getInvolvedIsls(flow1Path)[0] + def flow1Isl = flow1.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() def flow1IslMaxBw = islUtils.getIslInfo(flow1Isl).get().maxBandwidth - northbound.updateFlow(flow1.id, flow1.tap { it.maximumBandwidth = flow1IslMaxBw }) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { assert northbound.getFlowStatus(flow1.id).status == FlowState.UP } + + flow1.update(flow1.tap { it.maximumBandwidth = flow1IslMaxBw }) and: "Break all alternative paths for the first flow" def broughtDownIsls = topology.getRelatedIsls(flow1SwitchPair.src) - flow1Isl islHelper.breakIsls(broughtDownIsls) and: "Update max bandwidth for the second flow's link so that it is not enough bandwidth for the first flow" - def flow2Path = PathHelper.convert(northbound.getFlowPath(flow2.id)) - def flow2Isl = pathHelper.getInvolvedIsls(flow2Path)[0] + def flow2Isl = flow2.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() islHelper.updateLinkMaxBandwidthUsingApi(flow2Isl, flow1IslMaxBw - 1) and: "Break all alternative paths for the second flow" @@ -807,21 +826,16 @@ switches"() { when: "Try to swap endpoints for two flows" def flow1Src = flow2.source - def flow1Dst = changePropertyValue(flow1.destination, "portNumber", flow2.destination.portNumber) + def flow1Dst = flow1.destination.jacksonCopy().tap { it.portNumber = flow2.destination.portNumber } def flow2Src = flow1.source - def flow2Dst = changePropertyValue(flow2.destination, "portNumber", flow1.destination.portNumber) + def flow2Dst = flow2.destination.jacksonCopy().tap { it.portNumber = flow1.destination.portNumber} northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(flow1Src), - flowHelper.toFlowEndpointV2(flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow2Src), - flowHelper.toFlowEndpointV2(flow2Dst))) + new SwapFlowPayload(flow1.flowId, flow1Src, flow1Dst), + new SwapFlowPayload(flow2.flowId, flow2Src, flow2Dst)) then: "An error is received (500 code)" def exc = thrown(HttpServerErrorException) - exc.rawStatusCode == 500 - def error = exc.responseBodyAsString.to(MessageError) - error.errorMessage == "Could not swap endpoints" - error.errorDescription.contains("Not enough bandwidth or no path found") + new FlowEndpointsNotSwappedExpectedError(~/Not enough bandwidth or no path found/).matches(exc) } @Tags([LOW_PRIORITY, ISL_RECOVER_ON_FAIL]) @@ -835,26 +849,24 @@ switches"() { } SwitchPair flow1SwitchPair = switchPairs[0] SwitchPair flow2SwitchPair = switchPairs[1] - def flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) - def flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) + def flow1 = 
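Switch validation right after a swap can transiently report missing or excess rules because rule removal and installation are asynchronous, which is why the refactored "Switch validation" steps collect the switches involved in both flows and poll until validation comes back clean. A compact restatement of that pattern, with helper names taken from this spec (their exact signatures are assumed):

    // Poll until every switch touched by either flow validates cleanly;
    // RULES_DELETION_TIME + RULES_INSTALLATION_TIME bounds how long rule
    // re-installation after the endpoint swap is allowed to take.
    def involvedSwitches = [flow1, flow2]
            .collectMany { it.retrieveAllEntityPaths().getInvolvedSwitches() }
            .unique()
    Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) {
        assert switchHelper.validateAndCollectFoundDiscrepancies(involvedSwitches).isEmpty()
    }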
getFirstFlow(flow1SwitchPair, flow2SwitchPair).tap { + it.ignoreBandwidth = true + }.create() - flowHelper.addFlow(flow1.tap { it.ignoreBandwidth = true }) - flowHelper.addFlow(flow2) + def flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1).create() and: "Update the first flow so that it consumes all bandwidth on the link" - def flow1Path = PathHelper.convert(northbound.getFlowPath(flow1.id)) - def flow1Isl = pathHelper.getInvolvedIsls(flow1Path)[0] + def flow1Isl = flow1.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() def flow1IslMaxBw = islUtils.getIslInfo(flow1Isl).get().maxBandwidth - northbound.updateFlow(flow1.id, flow1.tap { it.maximumBandwidth = flow1IslMaxBw }) - Wrappers.wait(FLOW_CRUD_TIMEOUT) { assert northbound.getFlowStatus(flow1.id).status == FlowState.UP } + + flow1.update(flow1.tap { it.maximumBandwidth = flow1IslMaxBw }) and: "Break all alternative paths for the first flow" def broughtDownIsls = topology.getRelatedIsls(flow1SwitchPair.src) - flow1Isl islHelper.breakIsls(broughtDownIsls) and: "Update max bandwidth for the second flow's link so that it is not enough bandwidth for the first flow" - def flow2Path = PathHelper.convert(northbound.getFlowPath(flow2.id)) - def flow2Isl = pathHelper.getInvolvedIsls(flow2Path)[0] + def flow2Isl = flow2.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() islHelper.updateLinkMaxBandwidthUsingApi(flow2Isl, flow1IslMaxBw - 1) and: "Break all alternative paths for the second flow" @@ -863,25 +875,26 @@ switches"() { when: "Try to swap endpoints for two flows" def flow1Src = flow2.source - def flow1Dst = changePropertyValue(flow1.destination, "portNumber", flow2.destination.portNumber) + def flow1Dst = flow1.destination.jacksonCopy().tap { it.portNumber = flow2.destination.portNumber } def flow2Src = flow1.source - def flow2Dst = changePropertyValue(flow2.destination, "portNumber", flow1.destination.portNumber) + def flow2Dst = flow2.destination.jacksonCopy().tap { it.portNumber = flow1.destination.portNumber } def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(flow1Src), - flowHelper.toFlowEndpointV2(flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow2Src), - flowHelper.toFlowEndpointV2(flow2Dst))) + new SwapFlowPayload(flow1.flowId, flow1Src, flow1Dst), + new SwapFlowPayload(flow2.flowId, flow2Src, flow2Dst)) then: "Endpoints are successfully swapped" verifyEndpoints(response, flow1Src, flow1Dst, flow2Src, flow2Dst) - verifyEndpoints(flow1.id, flow2.id, flow1Src, flow1Dst, flow2Src, flow2Dst) + verifyEndpoints(flow1, flow2, flow1Src, flow1Dst, flow2Src, flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow1.validateAndCollectDiscrepancies().isEmpty() + flow2.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(flow1SwitchPair) - validateSwitches(flow2SwitchPair) + List switches = [flow1, flow2].collectMany{ it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } } @Tags(ISL_RECOVER_ON_FAIL) @@ -896,36 +909,27 @@ switches"() { } SwitchPair flow1SwitchPair = switchPairs[0] SwitchPair flow2SwitchPair = switchPairs[1] - def flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair) - def flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1) + def 
flow1 = getFirstFlow(flow1SwitchPair, flow2SwitchPair).create() + def flow2 = getSecondFlow(flow1SwitchPair, flow2SwitchPair, flow1).create() - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) - def flow1Path = PathHelper.convert(northbound.getFlowPath(flow1.id)) - def flow2Path = PathHelper.convert(northbound.getFlowPath(flow2.id)) - def involvedSwIds = ( - pathHelper.getInvolvedSwitches(flow1Path)*.dpId + pathHelper.getInvolvedSwitches(flow2Path)*.dpId - ).unique() + List involvedSwIds = [flow1, flow2].collectMany { it.retrieveAllEntityPaths().getInvolvedSwitches() }.unique() and: "Break all paths for the first flow" def broughtDownIsls = topology.getRelatedIsls(flow1SwitchPair.src) islHelper.breakIsls(broughtDownIsls) - when: "Try to swap endpoints for two flows" northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(flow2.source), - flowHelper.toFlowEndpointV2(flow1.destination)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow1.source), - flowHelper.toFlowEndpointV2(flow2.destination))) + new SwapFlowPayload(flow1.flowId, flow2.source, flow1.destination), + new SwapFlowPayload(flow2.flowId, flow1.source, flow2.destination)) then: "An error is received (500 code)" def exc = thrown(HttpServerErrorException) new FlowEndpointsNotSwappedExpectedError(~/Not enough bandwidth or no path found/).matches(exc) when: "Get actual data of flow1 and flow2" - def actualFlow1Details = northboundV2.getFlow(flow1.id) - def actualFlow2Details = northboundV2.getFlow(flow2.id) + def actualFlow1Details = flow1.retrieveDetailsV1() + def actualFlow2Details = flow2.retrieveDetailsV1() then: "Actual flow1, flow2 sources are different" assert actualFlow1Details.source != actualFlow2Details.source @@ -937,26 +941,27 @@ switches"() { @Tags(LOW_PRIORITY) def "Able to swap endpoints (#data.description) for two protected flows"() { given: "Two protected flows with different source and destination switches" - flowHelper.addFlow(flow1.tap { it.allocateProtectedPath = true }) - flowHelper.addFlow(flow2.tap { it.allocateProtectedPath = true }) + flow1.create() + flow2.create() when: "Try to swap endpoints for flows" def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(data.flow1Src), - flowHelper.toFlowEndpointV2(data.flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(data.flow2Src), - flowHelper.toFlowEndpointV2(data.flow2Dst))) + new SwapFlowPayload(flow1.flowId, data.flow1Src, data.flow1Dst), + new SwapFlowPayload(flow2.flowId, data.flow2Src, data.flow2Dst)) then: "Endpoints are successfully swapped" verifyEndpoints(response, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) - verifyEndpoints(flow1.id, flow2.id, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) + verifyEndpoints(flow1, flow2, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow1.validateAndCollectDiscrepancies().isEmpty() + flow2.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(flow1SwitchPair) - validateSwitches(flow2SwitchPair) + List switches = [flow1, flow2].collectMany{ it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } where: data << [{ @@ -989,17 +994,16 @@ 
switches"() { }].collect { iterationData -> def flow1SwitchPair = switchPairs.all().nonNeighbouring().random() def flow2SwitchPair = getDifferentNotNeighboringSwitchPair(flow1SwitchPair) - def flow1 = getFlowHelper().randomFlow(flow1SwitchPair) - def flow2 = getFlowHelper().randomFlow(flow2SwitchPair, false, [flow1]).tap { - it.source.portNumber = getFreePort(flow2SwitchPair.src, [flow1SwitchPair.src, flow1SwitchPair.dst]) - it.destination.portNumber = getFreePort(flow2SwitchPair.dst, [flow1SwitchPair.src, flow1SwitchPair.dst]) - } + def flow1 = flowFactory.getBuilder(flow1SwitchPair) + .withProtectedPath(true).build() + def flow2 = flowFactory.getBuilder(flow2SwitchPair, false, flow1.occupiedEndpoints()) + .withProtectedPath(true) + .withSourcePort(getFreePort(flow2SwitchPair.src, [flow1SwitchPair.src, flow1SwitchPair.dst])) + .withDestinationPort(getFreePort(flow2SwitchPair.dst, [flow1SwitchPair.src, flow1SwitchPair.dst])).build() [flow1SwitchPair: flow1SwitchPair, flow2SwitchPair: flow2SwitchPair, flow1: flow1, flow2: flow2].tap(iterationData) } - flow1 = data.flow1 as FlowCreatePayload - flow2 = data.flow2 as FlowCreatePayload - flow1SwitchPair = data.flow1SwitchPair as SwitchPair - flow2SwitchPair = data.flow2SwitchPair as SwitchPair + flow1 = data.flow1 as FlowExtended + flow2 = data.flow2 as FlowExtended } def "A protected flow with swapped endpoint allows traffic on main and protected paths"() { @@ -1013,35 +1017,36 @@ switches"() { .excludeSwitches([flow1SwitchPair.getSrc(), flow1SwitchPair.getDst()]) .withAtLeastNTraffgensOnSource(1) .random() - def flow1 = flowHelper.randomFlow(flow1SwitchPair) - def flow2 = flowHelper.randomFlow(flow2SwitchPair) - - flowHelper.addFlow(flow1.tap { it.allocateProtectedPath = true }) - flowHelper.addFlow(flow2.tap { it.allocateProtectedPath = true }) + def flow1 = flowFactory.getBuilder(flow1SwitchPair) + .withProtectedPath(true).build() + .create() + def flow2 = flowFactory.getBuilder(flow2SwitchPair) + .withProtectedPath(true).build() + .create() when: "Try to swap endpoints for flows" def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(flow2.source), - flowHelper.toFlowEndpointV2(flow1.destination)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow1.source), - flowHelper.toFlowEndpointV2(flow2.destination))) + new SwapFlowPayload(flow1.flowId, flow2.source, flow1.destination), + new SwapFlowPayload(flow2.flowId, flow1.source, flow2.destination)) then: "Endpoints are successfully swapped" verifyEndpoints(response, flow2.source, flow1.destination, flow1.source, flow2.destination) - verifyEndpoints(flow1.id, flow2.id, flow2.source, flow1.destination, flow1.source, flow2.destination) + verifyEndpoints(flow1, flow2, flow2.source, flow1.destination, flow1.source, flow2.destination) + def flow1AfterSwapEndpoints = flow1.retrieveDetails() and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow1.validateAndCollectDiscrepancies().isEmpty() + flow2.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(flow1SwitchPair) - validateSwitches(flow2SwitchPair) - def isSwitchValid = true + List switches = [flow1, flow2].collectMany{ it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } and: "The first flow allows traffic on the 
main path" def traffExam = traffExamProvider.get() - def exam = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(northbound.getFlow(flow1.id), 0, 5) + def exam = flow1AfterSwapEndpoints.traffExam(traffExam, 0, 5) [exam.forward, exam.reverse].each { direction -> def resources = traffExam.startExam(direction) direction.setResources(resources) @@ -1049,11 +1054,12 @@ switches"() { } and: "The first flow allows traffic on the protected path" - northbound.swapFlowPath(flow1.id) - Wrappers.wait(WAIT_OFFSET) { assert northbound.getFlowStatus(flow1.id).status == FlowState.UP } + flow1.swapFlowPath() + Wrappers.wait(WAIT_OFFSET) { assert flow1AfterSwapEndpoints.retrieveFlowStatus().status == FlowState.UP } + flow1.validateAndCollectDiscrepancies().isEmpty() - def newExam = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(northbound.getFlow(flow1.id), 0, 5) + def flow1AfterSwapPath = flow1.retrieveDetails() + def newExam = flow1AfterSwapPath.traffExam(traffExam, 0, 5) [newExam.forward, newExam.reverse].each { direction -> def resources = traffExam.startExam(direction) direction.setResources(resources) @@ -1064,27 +1070,27 @@ switches"() { @Tags(LOW_PRIORITY) def "Able to swap endpoints (#data.description) for two vxlan flows with the same source and destination switches"() { given: "Two flows with the same source and destination switches" - flow1.encapsulationType = FlowEncapsulationType.VXLAN - flow2.encapsulationType = FlowEncapsulationType.VXLAN - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + flow1.tap { it.encapsulationType = FlowEncapsulationType.VXLAN }.create() + flow2.tap { it.encapsulationType = FlowEncapsulationType.VXLAN }.create() when: "Try to swap endpoints for flows" def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(data.flow1Src), - flowHelper.toFlowEndpointV2(data.flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(data.flow2Src), - flowHelper.toFlowEndpointV2(data.flow2Dst))) + new SwapFlowPayload(flow1.flowId, data.flow1Src, data.flow1Dst), + new SwapFlowPayload(flow2.flowId, data.flow2Src, data.flow2Dst)) then: "Endpoints are successfully swapped" verifyEndpoints(response, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) - verifyEndpoints(flow1.id, flow2.id, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) + verifyEndpoints(flow1, flow2, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow1.validateAndCollectDiscrepancies().isEmpty() + flow2.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(switchPair) + List switches = [flow1, flow2].collectMany{ it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } where: data << [{ @@ -1106,9 +1112,8 @@ switches"() { def flow2 = getSecondFlow(switchPair, switchPair, flow1) [switchPair: switchPair, flow1: flow1, flow2: flow2].tap(iterationData) } - switchPair = data.switchPair as SwitchPair - flow1 = data.flow1 as FlowCreatePayload - flow2 = data.flow2 as FlowCreatePayload + flow1 = data.flow1 as FlowExtended + flow2 = data.flow2 as FlowExtended } def "Able to swap endpoints (#data.description) for two qinq flows with the same source and destination 
switches"() { @@ -1117,26 +1122,27 @@ switches"() { flow1.destination.innerVlanId = 400 flow2.source.innerVlanId = 500 flow2.destination.innerVlanId = 600 - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + flow1.createV1() + flow2.createV1() when: "Try to swap endpoints for flows" def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(data.flow1Src), - flowHelper.toFlowEndpointV2(data.flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(data.flow2Src), - flowHelper.toFlowEndpointV2(data.flow2Dst))) + new SwapFlowPayload(flow1.flowId, data.flow1Src, data.flow1Dst), + new SwapFlowPayload(flow2.flowId, data.flow2Src, data.flow2Dst)) then: "Endpoints are successfully swapped" verifyEndpoints(response, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) - verifyEndpoints(flow1.id, flow2.id, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) + verifyEndpoints(flow1, flow2, data.flow1Src, data.flow1Dst, data.flow2Src, data.flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow1.validateAndCollectDiscrepancies().isEmpty() + flow2.validateAndCollectDiscrepancies().isEmpty() and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches(switchPair) - + List switches = [flow1, flow2].collectMany{ it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } where: data << [{ description = "src1 <-> src2" @@ -1164,8 +1170,8 @@ switches"() { [switchPair: switchPair, flow1: flow1, flow2: flow2].tap(iterationData) } switchPair = data.switchPair as SwitchPair - flow1 = data.flow1 as FlowCreatePayload - flow2 = data.flow2 as FlowCreatePayload + flow1 = data.flow1 as FlowExtended + flow2 = data.flow2 as FlowExtended } @Tags(SWITCH_RECOVER_ON_FAIL) @@ -1177,14 +1183,13 @@ switches"() { .includeSourceSwitch(swPair1.getDst()) .random() .getReversed() - def flow1 = flowHelperV2.randomFlow(swPair1).tap { - it.source.portNumber = getFreePort(swPair1.src, [swPair2.src]) - } - def flow2 = flowHelperV2.randomFlow(swPair2).tap { - it.source.portNumber = getFreePort(swPair2.src, [swPair1.src]) - } - flowHelperV2.addFlow(flow1) - flowHelperV2.addFlow(flow2) + def flow1 = flowFactory.getBuilder(swPair1) + .withSourcePort(getFreePort(swPair1.src, [swPair2.src])).build() + .create() + + def flow2 = flowFactory.getBuilder(swPair2) + .withSourcePort(getFreePort(swPair2.src, [swPair1.src])).build() + .create() when: "Try to swap flow src endoints, but flow1 src switch does not respond" switchHelper.knockoutSwitch(swPair1.src, RW) @@ -1194,48 +1199,45 @@ switches"() { then: "Receive error response" def exc = thrown(HttpServerErrorException) - exc.rawStatusCode == 500 - def error = exc.responseBodyAsString.to(MessageError) - error.errorMessage == "Could not swap endpoints" - error.errorDescription == sprintf("Reverted flows: [%s, %s]", flow2.flowId, flow1.flowId) + new FlowEndpointsNotSwappedExpectedError(~/Reverted flows: \[${flow2.flowId}, ${flow1.flowId}\]/).matches(exc) and: "First flow is reverted to Down" Wrappers.wait(PATH_INSTALLATION_TIME + WAIT_OFFSET * 2) { // sometimes it takes more time on jenkins - assert northboundV2.getFlowStatus(flow1.flowId).status == FlowState.DOWN - def flowHistory = northbound.getFlowHistory(flow1.flowId) + assert flow1.retrieveFlowStatus().status == FlowState.DOWN + def 
flowHistory = flow1.retrieveFlowHistory().getEntriesByType(REROUTE) /* '||' due to instability on jenkins * locally: it always retry to reroute (reason of failed reroute: 'No bandwidth or path...') * jenkins: - reroute(ISL_1 become INACTIVE) + retry or reroute(ISL_1) + reroute(ISL_2); * - or one reroute only. (reason of failed reroute: 'Failed to allocate flow resources...') */ assert flowHistory.findAll { - it.action == REROUTE_ACTION && it.payload.last().action == REROUTE_FAIL + it.action == REROUTE.value && it.payload.last().action == REROUTE_FAILED.payloadLastAction }.size() > 1 || flowHistory.find { - it.action == REROUTE_ACTION && it.payload.last().action == REROUTE_FAIL && + it.action == REROUTE.value && it.payload.last().action == REROUTE_FAILED.payloadLastAction && it.payload.last().details.contains("Failed to allocate flow resources.") } } - with(northboundV2.getFlow(flow1.flowId)) { + with(flow1.retrieveDetails()) { source == flow1.source destination == flow1.destination } and: "Second flow is reverted to UP" Wrappers.wait(PATH_INSTALLATION_TIME) { - assert northboundV2.getFlowStatus(flow2.flowId).status == FlowState.UP + assert flow2.retrieveFlowStatus().status == FlowState.UP } - with(northboundV2.getFlow(flow2.flowId)) { + with(flow2.retrieveDetails()) { source == flow2.source destination == flow2.destination } when: "Delete both flows" - def switches = (pathHelper.getInvolvedSwitches(flow1.flowId) + - pathHelper.getInvolvedSwitches(flow2.flowId)).unique().findAll { it.dpId != swPair1.src.dpId } - [flow1, flow2].collect { flowHelperV2.deleteFlow(it.flowId) } + List switches = [flow1, flow2].collectMany{ it.retrieveAllEntityPaths().getInvolvedSwitches()}.unique() + .findAll { it != swPair1.src.dpId } + [flow1, flow2].collect {flow -> flow.delete() } then: "Related switches have no rule anomalies" - switchHelper.synchronizeAndCollectFixedDiscrepancies(switches*.getDpId()).isEmpty() + switchHelper.synchronizeAndCollectFixedDiscrepancies(switches).isEmpty() } def "Able to swap endpoints for a flow with flowLoop"() { @@ -1245,17 +1247,15 @@ switches"() { .includeSourceSwitch(flow1SwitchPair.getSrc()) .excludeDestinationSwitches([flow1SwitchPair.getDst()]) .random() - def flow1 = flowHelper.randomFlow(flow1SwitchPair) - def flow2 = flowHelper.randomFlow(flow2SwitchPair, true, [flow1]) - - flowHelper.addFlow(flow1) - flowHelper.addFlow(flow2) + def flow1 = flowFactory.getBuilder(flow1SwitchPair).build() + .create() + def flow2 = flowFactory.getBuilder(flow2SwitchPair, true, flow1.occupiedEndpoints()).build() + .create() and: "FlowLoop is created for the second flow on the dst switch" - northboundV2.createFlowLoop(flow2.id, new FlowLoopPayload(flow2SwitchPair.dst.dpId)) - Wrappers.wait(WAIT_OFFSET) { - assert northbound.getFlowStatus(flow2.id).status == FlowState.UP - } + flow2.createFlowLoop(flow2SwitchPair.dst.dpId) + flow2.waitForBeingInState(FlowState.UP) + assert flow2.retrieveDetails().loopSwitchId == flow2SwitchPair.dst.dpId when: "Try to swap dst endpoints for two flows" def flow1Dst = flow2.destination @@ -1263,22 +1263,19 @@ switches"() { def flow2Dst = flow1.destination def flow2Src = flow2.source def response = northbound.swapFlowEndpoint( - new SwapFlowPayload(flow1.id, flowHelper.toFlowEndpointV2(flow1Src), - flowHelper.toFlowEndpointV2(flow1Dst)), - new SwapFlowPayload(flow2.id, flowHelper.toFlowEndpointV2(flow2Src), - flowHelper.toFlowEndpointV2(flow2Dst))) + new SwapFlowPayload(flow1.flowId, flow1Src, flow1Dst), + new SwapFlowPayload(flow2.flowId, flow2Src, 
flow2Dst)) then: "Endpoints are successfully swapped" verifyEndpoints(response, flow1Src, flow1Dst, flow2Src, flow2Dst) - verifyEndpoints(flow1.id, flow2.id, flow1Src, flow1Dst, flow2Src, flow2Dst) + verifyEndpoints(flow1, flow2, flow1Src, flow1Dst, flow2Src, flow2Dst) and: "Flows validation doesn't show any discrepancies" - validateFlows(flow1, flow2) + flow1.validateAndCollectDiscrepancies().isEmpty() + flow2.validateAndCollectDiscrepancies().isEmpty() and: "FlowLoop is still created for the second flow but on the new dst switch" - with(northbound.getFlow(flow2.id)) { - it.loopSwitchId == flow1SwitchPair.dst.dpId - } + assert flow2.retrieveDetails().loopSwitchId == flow1SwitchPair.dst.dpId and: "FlowLoop rules are created on the new dst switch" Wrappers.wait(RULES_INSTALLATION_TIME) { @@ -1294,13 +1291,17 @@ switches"() { } and: "Switch validation doesn't show any missing/excess rules and meters" - validateSwitches([flow1SwitchPair.src, flow1SwitchPair.dst, flow2SwitchPair.src, flow2SwitchPair.dst].unique()) - def switchesAreValid = true + List switches = [flow1SwitchPair.src.dpId, flow1SwitchPair.dst.dpId, + flow2SwitchPair.src.dpId, flow2SwitchPair.dst.dpId].unique() + Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(switches).isEmpty() + } + when: "Send traffic via flow2" def traffExam = traffExamProvider.get() - def exam = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(northbound.getFlow(flow2.id), 1000, 5) + def flowAfterSwap = flow2.retrieveDetails() + def exam = flowAfterSwap.traffExam(traffExam, 1000, 5) then: "Flow doesn't allow traffic, because it is grubbed by flowLoop rules" withPool { @@ -1315,55 +1316,26 @@ switches"() { getFlowLoopRules(flow1SwitchPair.dst.dpId)*.packetCount.every { it > 0 } } - void verifyEndpoints(response, FlowEndpointPayload flow1SrcExpected, FlowEndpointPayload flow1DstExpected, - FlowEndpointPayload flow2SrcExpected, FlowEndpointPayload flow2DstExpected) { - verifyEndpoints(response, flowHelper.toFlowEndpointV2(flow1SrcExpected), - flowHelper.toFlowEndpointV2(flow1DstExpected), flowHelper.toFlowEndpointV2(flow2SrcExpected), - flowHelper.toFlowEndpointV2(flow2DstExpected)) - } - - void verifyEndpoints(response, FlowEndpointV2 flow1SrcExpected, FlowEndpointV2 flow1DstExpected, + void verifyEndpoints(SwapFlowEndpointPayload response, FlowEndpointV2 flow1SrcExpected, FlowEndpointV2 flow1DstExpected, FlowEndpointV2 flow2SrcExpected, FlowEndpointV2 flow2DstExpected) { - assert response.firstFlow.source == flow1SrcExpected - assert response.firstFlow.destination == flow1DstExpected - assert response.secondFlow.source == flow2SrcExpected - assert response.secondFlow.destination == flow2DstExpected + SoftAssertions assertions = new SoftAssertions() + assertions.checkSucceeds { assert response.firstFlow.source == flow1SrcExpected } + assertions.checkSucceeds { assert response.firstFlow.destination == flow1DstExpected } + assertions.checkSucceeds { assert response.secondFlow.source == flow2SrcExpected } + assertions.checkSucceeds { assert response.secondFlow.destination == flow2DstExpected } + assertions.verify() } - void verifyEndpoints(flow1Id, flow2Id, FlowEndpointV2 flow1SrcExpected, FlowEndpointV2 flow1DstExpected, + void verifyEndpoints(FlowExtended flow, FlowExtended additionalFlow, FlowEndpointV2 flow1SrcExpected, FlowEndpointV2 flow1DstExpected, FlowEndpointV2 flow2SrcExpected, FlowEndpointV2 flow2DstExpected) { - def flow1Updated = 
northbound.getFlow(flow1Id) - def flow2Updated = northbound.getFlow(flow2Id) - - assert flowHelper.toFlowEndpointV2(flow1Updated.source) == flow1SrcExpected - assert flowHelper.toFlowEndpointV2(flow1Updated.destination) == flow1DstExpected - assert flowHelper.toFlowEndpointV2(flow2Updated.source) == flow2SrcExpected - assert flowHelper.toFlowEndpointV2(flow2Updated.destination) == flow2DstExpected - } - - void verifyEndpoints(flow1Id, flow2Id, FlowEndpointPayload flow1SrcExpected, FlowEndpointPayload flow1DstExpected, - FlowEndpointPayload flow2SrcExpected, FlowEndpointPayload flow2DstExpected) { - verifyEndpoints(flow1Id, flow2Id, flowHelper.toFlowEndpointV2(flow1SrcExpected), - flowHelper.toFlowEndpointV2(flow1DstExpected), flowHelper.toFlowEndpointV2(flow2SrcExpected), - flowHelper.toFlowEndpointV2(flow2DstExpected)) - } - - void validateFlows(flow1, flow2) { - Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { - [flow1, flow2].each { - assert northbound.validateFlow(it.id).each { direction -> assert direction.asExpected } - } - } - } - - void validateSwitches(SwitchPair switchPair) { - validateSwitches(switchPair.toList()) - } - - void validateSwitches(List switches) { - Wrappers.wait(RULES_DELETION_TIME + RULES_INSTALLATION_TIME) { - assert switchHelper.validateAndCollectFoundDiscrepancies(switches*.getDpId()).isEmpty() - } + def flow1Updated = flow.retrieveDetailsV1() + def flow2Updated = additionalFlow.retrieveDetailsV1() + SoftAssertions assertions = new SoftAssertions() + assertions.checkSucceeds { assert flow1Updated.source == flow1SrcExpected } + assertions.checkSucceeds { assert flow1Updated.destination == flow1DstExpected } + assertions.checkSucceeds { assert flow2Updated.source == flow2SrcExpected } + assertions.checkSucceeds { assert flow2Updated.destination == flow2DstExpected } + assertions.verify() } /** @@ -1385,26 +1357,28 @@ switches"() { /** * Get a free vlan which is not used in any of the given flows. */ - def getFreeVlan(SwitchId swId, List existingFlows = []) { - def r = new Random() - def vlans = (flowHelper.KILDA_ALLOWED_VLANS - existingFlows.collectMany { [it.source, it.destination] }.findAll { - it.datapath == swId - }.collect { it.vlanId }) - return vlans[r.nextInt(vlans.size())] + def getFreeVlan(SwitchId swId, List busyEndpoints = []) { + def vlans = KILDA_ALLOWED_VLANS - busyEndpoints.findAll{ it.sw == swId }.vlan + return vlans.shuffled().first() } /** - * Get a FlowCreatePayload instance for the flow. The instance is built considering ISL ports on source and + * Get a FlowExtended instance for the flow. The instance is built considering ISL ports on source and * destination switches of the first and second switch pairs. So no ISL port conflicts should appear while creating * the flow and swapping flow endpoints. 
* * @param firstFlowSwitchPair Switch pair for the first flow * @param secondFlowSwitchPair Switch pair for the second flow - * @return a FlowCreatePayload instance + * @return a FlowExtended instance for further creation */ - def getFirstFlow(SwitchPair firstFlowSwitchPair, SwitchPair secondFlowSwitchPair, noVlans = false) { + def getFirstFlow(SwitchPair firstFlowSwitchPair, SwitchPair secondFlowSwitchPair, Boolean noVlans = false) { assumeTrue(firstFlowSwitchPair && secondFlowSwitchPair, "Required conditions for switch-pairs for this test are not met") - def firstFlow = flowHelper.randomFlow(firstFlowSwitchPair) + def firstFlow = flowFactory.getBuilder(firstFlowSwitchPair).tap { + if (noVlans) { + it.withSourceVlan(0) + it.withDestinationVlan(0) + } + }.build() firstFlow.source.portNumber = (topology.getAllowedPortsForSwitch(firstFlowSwitchPair.src) - topology.getBusyPortsForSwitch(secondFlowSwitchPair.src) - topology.getBusyPortsForSwitch(secondFlowSwitchPair.dst) - @@ -1418,11 +1392,6 @@ switches"() { secondFlowSwitchPair.src.prop?.server42Port - secondFlowSwitchPair.dst.prop?.server42Port)[0] - if (noVlans) { - firstFlow.source.vlanId = null - firstFlow.destination.vlanId = null - } - return firstFlow } @@ -1432,7 +1401,7 @@ switches"() { } /** - * Get a FlowCreatePayload instance for the second flow. The instance is built considering ISL ports on source and + * Get a FlowExtendedinstance for the second flow. The instance is built considering ISL ports on source and * destination switches of the first and second switch pairs. Also ports of the first flow are considered as well. * So no conflicts should appear while creating the flow and swapping flow endpoints. * @@ -1442,9 +1411,14 @@ switches"() { * @param noVlans Whether use vlans or not * @return a FlowCreatePayload instance */ - def getSecondFlow(firstFlowSwitchPair, secondFlowSwitchPair, firstFlow, noVlans = false) { + def getSecondFlow(SwitchPair firstFlowSwitchPair, SwitchPair secondFlowSwitchPair, firstFlow, Boolean noVlans = false) { assumeTrue(firstFlowSwitchPair && secondFlowSwitchPair, "Required conditions for switch-pairs for this test are not met") - def secondFlow = flowHelper.randomFlow(secondFlowSwitchPair) + def secondFlow = flowFactory.getBuilder(secondFlowSwitchPair).tap{ + if (noVlans) { + it.withSourceVlan(0) + it.withDestinationVlan(0) + } + }.build() secondFlow.source.portNumber = (topology.getAllowedPortsForSwitch(secondFlowSwitchPair.src) - topology.getBusyPortsForSwitch(firstFlowSwitchPair.src) - topology.getBusyPortsForSwitch(firstFlowSwitchPair.dst) - @@ -1460,37 +1434,9 @@ switches"() { firstFlowSwitchPair.src.prop?.server42Port - firstFlowSwitchPair.dst.prop?.server42Port)[0] - if (noVlans) { - secondFlow.source.vlanId = null - secondFlow.destination.vlanId = null - } - return secondFlow } - def getConflictingFlow(switchPair, neighboringFlow, conflictingEndpointName, conflictingEndpointValue) { - def conflictingFlow = flowHelper.randomFlow(switchPair, false, [neighboringFlow]) - conflictingFlow."$conflictingEndpointName" = conflictingEndpointValue - - return conflictingFlow - } - - def changePropertyValue(flowEndpoint, propertyName, newValue) { - // Deep copy of object - def mapper = new ObjectMapper() - return mapper.readValue(mapper.writeValueAsString(flowEndpoint), FlowEndpointPayload).tap { - it."$propertyName" = newValue - } - } - - def changePropertyValue(flow, endpointName, propertyName, newValue) { - // Deep copy of object - def mapper = new ObjectMapper() - return 
mapper.readValue(mapper.writeValueAsString(flow), FlowCreatePayload).tap { - it."$endpointName"."$propertyName" = newValue - } - } - def getHalfDifferentNotNeighboringSwitchPair(switchPairToAvoid, equalEndpoint) { def differentEndpoint = (equalEndpoint == "src" ? "dst" : "src") switchPairs.all().nonNeighbouring().getSwitchPairs().find { @@ -1506,10 +1452,6 @@ switches"() { } } - def findSw(SwitchId swId) { - topology.switches.find { it.dpId == swId } - } - def getHalfDifferentNeighboringSwitchPair(switchPairToAvoid, equalEndpoint) { def differentEndpoint = (equalEndpoint == "src" ? "dst" : "src") switchPairs.all().neighbouring().getSwitchPairs().find { @@ -1519,7 +1461,7 @@ switches"() { } def getFlowLoopRules(SwitchId switchId) { - northbound.getSwitchRules(switchId).flowEntries.findAll { + switchRulesFactory.get(switchId).getRules().findAll { def hexCookie = Long.toHexString(it.cookie) hexCookie.startsWith("20080000") || hexCookie.startsWith("40080000") } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ThrottlingRerouteSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ThrottlingRerouteSpec.groovy index f1148face4e..4a35058d482 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ThrottlingRerouteSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/ThrottlingRerouteSpec.groovy @@ -1,31 +1,33 @@ package org.openkilda.functionaltests.spec.flows -import groovy.util.logging.Slf4j +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.VIRTUAL +import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowActionType +import org.openkilda.functionaltests.helpers.model.FlowEntityPath import org.openkilda.messaging.info.event.IslChangeType -import org.openkilda.messaging.payload.flow.FlowPathPayload import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.testing.model.topology.TopologyDefinition.Isl import org.openkilda.testing.model.topology.TopologyDefinition.Switch + +import groovy.util.logging.Slf4j +import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Value import spock.lang.Ignore import spock.lang.Narrative +import spock.lang.Shared import java.util.concurrent.TimeUnit -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.VIRTUAL -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_ACTION -import static 
org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_FAIL -import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME -import static org.openkilda.testing.Constants.RULES_DELETION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET - @Narrative(""" This test verifies that we do not perform a reroute as soon as we receive a reroute request (we talk only about automatic reroutes here; manual reroutes are still performed instantly). Instead, system waits for 'reroute.delay' @@ -38,6 +40,11 @@ for each flowId). @Tags([VIRTUAL]) //may be unstable on hardware. not tested class ThrottlingRerouteSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + + @Value('${reroute.hardtimeout}') int rerouteHardTimeout @@ -50,55 +57,53 @@ class ThrottlingRerouteSpec extends HealthCheckSpecification { assumeTrue(swPairs.size() > 4, "Topology is too small to run this test") def flows = swPairs.take(5).collect { switchPair -> - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) flow } - def flowPaths = flows.collect { northbound.getFlowPath(it.flowId) } + def flowPaths = flows.collect { it.retrieveAllEntityPaths() } when: "All flows break one by one" - def timeBeforeBreak = new Date().time - def brokenIsls = flowPaths.collect { - breakFlow(it, false) - //don't sleep here, since there is already an antiFlapMin delay between actual port downs - } + flowPaths.collect { breakFlow(it, false) } + // Don't sleep here, since there is already an antiFlapMin delay between actual port downs def rerouteTriggersEnd = new Date().time /*At this point all reroute triggers have happened. Save this time in order to calculate when the actual reroutes will happen (time triggers stopped + reroute delay seconds)*/ then: "The oldest broken flow is still not rerouted before rerouteDelay run out" Wrappers.wait(rerouteDelay * 3) { - assert flowHelper.getLatestHistoryEntry(flows.first().flowId).action == "Flow rerouting" + assert flows.first().retrieveFlowHistory().entries.last().action == FlowActionType.REROUTE.value // wait till reroute starts } - def rerouteTimestamp = flowHelper.getLatestHistoryEntry(flows.first().flowId).timestampIso + def rerouteTimestamp = flows.first().retrieveFlowHistory().entries.last().timestampIso // check time diff between the time when reroute was triggered and the first action of reroute in history def differenceInMillis = flowHelper.convertStringTimestampIsoToLong(rerouteTimestamp) - rerouteTriggersEnd // reroute starts not earlier than the expected reroute delay assert differenceInMillis > (rerouteDelay) * 1000 // reroute starts not later than 2 seconds later than the expected delay - assert differenceInMillis < (rerouteDelay + 2) * 1000 + assert differenceInMillis < (rerouteDelay + 2.5) * 1000 and: "The oldest broken flow is rerouted when the rerouteDelay runs out" def untilReroutesBegin = { rerouteTriggersEnd + rerouteDelay * 1000 - new Date().time } def waitTime = untilReroutesBegin() / 1000.0 + PATH_INSTALLATION_TIME * 2 Wrappers.wait(waitTime) { - //Flow should go DOWN or change path on reroute. In our case it doesn't matter which of these happen. 
- assert (northboundV2.getFlowStatus(flows.first().flowId).status == FlowState.DOWN && - flowHelper.getHistoryEntriesByAction(flows.first().flowId, REROUTE_ACTION).find { - it.taskId =~ (/.+ : retry #1/) })|| - northbound.getFlowPath(flows.first().flowId) != flowPaths.first() + // Flow should go DOWN or change path on reroute. + // In our case it doesn't matter which of these happen. + def flow1 = flows.first() + assert (flow1.retrieveFlowStatus().status == FlowState.DOWN && + flow1.retrieveFlowHistory().getEntriesByType(FlowActionType.REROUTE) + .find { it.taskId =~ (/.+ : retry #1/) })|| + flow1.retrieveAllEntityPaths().getPathNodes() != flowPaths.find{ it.flowPath.flowId == flow1.flowId} } and: "The rest of the flows are rerouted too" Wrappers.wait(rerouteDelay + WAIT_OFFSET) { - flowPaths[1..-1].each { flowPath -> - assert (northboundV2.getFlowStatus(flowPath.id).status == FlowState.DOWN && - flowHelper.getHistoryEntriesByAction(flowPath.id, REROUTE_ACTION).find { - it.taskId =~ (/.+ : retry #1/) - }) || - (northbound.getFlowPath(flowPath.id) != flowPath && - northboundV2.getFlowStatus(flowPath.id).status == FlowState.UP) + flows.subList(1, flows.size()).each { flow -> + def currentFlowStatus = flow.retrieveFlowStatus().status + assert (currentFlowStatus == FlowState.DOWN && + flow.retrieveFlowHistory().getEntriesByType(FlowActionType.REROUTE) + .find { it.taskId =~ (/.+ : retry #1/)}) || + (flow.retrieveAllEntityPaths().getPathNodes() != flowPaths.find{ it.flowPath.flowId == flow.flowId} && + currentFlowStatus == FlowState.UP) } } } @@ -117,11 +122,10 @@ class ThrottlingRerouteSpec extends HealthCheckSpecification { int minFlowsRequired = (int) Math.min(rerouteHardTimeout / antiflapMin, antiflapCooldown / antiflapMin + 1) + 1 assumeTrue(switchPairs.size() >= minFlowsRequired, "Topology is too small to run this test") def flows = switchPairs.collect { switchPair -> - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) flow } - def flowPaths = flows.collect { northbound.getFlowPath(it.flowId) } + def flowPaths = flows.collect { it.retrieveAllEntityPaths() } when: "All flows begin to continuously reroute in a loop" def stop = false //flag to abort all reroute triggers @@ -152,20 +156,22 @@ class ThrottlingRerouteSpec extends HealthCheckSpecification { then: "Right until hard timeout should run out no flow reroutes happen" //check until 80% of hard timeout runs out while (System.currentTimeMillis() < rerouteTriggersStart.time + rerouteHardTimeout * 1000 * 0.8) { - flowPaths.each { flowPath -> - assert northboundV2.getFlowStatus(flowPath.id).status == FlowState.UP && - northbound.getFlowPath(flowPath.id) == flowPath + flows.each { flow -> + def initialFlowPath = flowPaths.find { it.flowPath.flowId == flow.flowId } + assert flow.retrieveFlowStatus().status == FlowState.UP && + flow.retrieveAllEntityPaths().getPathNodes() == initialFlowPath } } - and: "Flows should start to reroute after hard timeout, eventhough reroutes are still being triggered" + and: "Flows should start to reroute after hard timeout, even though reroutes are still being triggered" rerouteTriggers.any { it.alive } def flowPathsClone = flowPaths.collect() Wrappers.wait(untilHardTimeoutEnds() + WAIT_OFFSET) { flowPathsClone.removeAll { flowPath -> - (northboundV2.getFlowStatus(flowPath.id).status == FlowState.DOWN && northbound - .getFlowHistory(flowPath.id).last().payload.find { it.action == REROUTE_FAIL }) || - northbound.getFlowPath(flowPath.id) != 
flowPath + def flow = flows.find { it.flowId == flowPath.flowPath.flowId } + def lastFlowAction = flow.retrieveFlowHistory().getEntriesByType(FlowActionType.REROUTE_FAILED).last() + (flow.retrieveFlowStatus().status == FlowState.DOWN && lastFlowAction.payload.last().action == FlowActionType.REROUTE_FAILED.payloadLastAction) + || flow.retrieveAllEntityPaths().getPathNodes() != flowPath } assert flowPathsClone.empty } @@ -175,45 +181,38 @@ class ThrottlingRerouteSpec extends HealthCheckSpecification { def "Flow can be safely deleted while it is in the reroute window waiting for reroute"() { given: "A flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flowHelperV2.addFlow(flow) - def path = northbound.getFlowPath(flow.flowId) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch) + def path = flow.retrieveAllEntityPaths() when: "Init a flow reroute by breaking current path" - def brokenIsl = breakFlow(path) + breakFlow(path) and: "Immediately remove the flow before reroute delay runs out and flow is actually rerouted" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "The flow is not present in NB" !northboundV2.getAllFlows().find { it.flowId == flow.flowId} and: "Related switches have no excess rules, though need to wait until server42 rules are deleted" Wrappers.wait(RULES_DELETION_TIME) { - switchHelper.validateAndCollectFoundDiscrepancies( - pathHelper.getInvolvedSwitches(PathHelper.convert(path))*.getDpId()).isEmpty() + switchHelper.validateAndCollectFoundDiscrepancies(path.getInvolvedSwitches()).isEmpty() } } /** * Breaks certain flow path. Ensures that the flow is indeed broken by waiting for ISL to actually get FAILED. - * @param flowpath path to break + * @param flowPath path to break * @return ISL which 'src' was brought down in order to break the path */ - Isl breakFlow(FlowPathPayload flowpath, boolean waitForBrokenIsl = true) { - def sw = flowpath.forwardPath.first().switchId - def port = flowpath.forwardPath.first().outputPort - def brokenIsl = (topology.islsForActiveSwitches + - topology.islsForActiveSwitches.collect { it.reversed }).find { - it.srcSwitch.dpId == sw && it.srcPort == port - } - assert brokenIsl, "This should not be possible. Trying to switch port on ISL which is not present in config?" - antiflap.portDown(sw, port) + Isl breakFlow(FlowEntityPath flowPath, boolean waitForBrokenIsl = true) { + def islToBreak = flowPath.flowPath.path.forward.getInvolvedIsls().first() + assert islToBreak, "This should not be possible. Trying to switch port on ISL which is not present in config?" 
+ antiflap.portDown(islToBreak.srcSwitch.dpId, islToBreak.srcPort) if (waitForBrokenIsl) { Wrappers.wait(WAIT_OFFSET, 0) { - assert northbound.getLink(brokenIsl).state == IslChangeType.FAILED + assert northbound.getLink(islToBreak).state == IslChangeType.FAILED } } - return brokenIsl + return islToBreak } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/VxlanFlowSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/VxlanFlowSpec.groovy index faae32a66a7..8e8ed566850 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/VxlanFlowSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/VxlanFlowSpec.groovy @@ -1,45 +1,43 @@ package org.openkilda.functionaltests.spec.flows +import static groovyx.gpars.GParsPool.withPool +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.flow.FlowNotCreatedExpectedError +import org.openkilda.functionaltests.error.flow.FlowNotUpdatedExpectedError import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.IterationTags import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType import org.openkilda.functionaltests.helpers.model.SwitchPair -import org.openkilda.messaging.error.MessageError +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.info.event.PathNode -import org.openkilda.messaging.payload.flow.DetectConnectedDevicesPayload -import org.openkilda.messaging.payload.flow.FlowEndpointPayload -import org.openkilda.messaging.payload.flow.FlowPayload import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.cookie.Cookie -import org.openkilda.northbound.dto.v1.flows.PingInput import org.openkilda.northbound.dto.v1.switches.SwitchPropertiesDto -import org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.traffexam.TraffExamService -import org.openkilda.testing.tools.FlowTrafficExamBuilder + import org.slf4j.Logger import org.slf4j.LoggerFactory import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative +import spock.lang.Shared -import javax.inject.Provider import java.time.Instant - -import static groovyx.gpars.GParsPool.withPool -import static 
org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT -import static org.openkilda.model.FlowEncapsulationType.VXLAN -import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME -import static org.openkilda.testing.Constants.RULES_DELETION_TIME -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET +import javax.inject.Provider @Narrative("""This spec checks basic functionality(simple flow(rules, ping, traffic, validate), pinned flow, flow with protected path, default flow) for a flow with VXLAN encapsulation. @@ -51,7 +49,14 @@ class VxlanFlowSpec extends HealthCheckSpecification { static Logger logger = LoggerFactory.getLogger(VxlanFlowSpec.class) @Autowired + @Shared Provider traffExamProvider + @Autowired + @Shared + FlowFactory flowFactory + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory @IterationTags([ @IterationTag(tags = [SMOKE_SWITCHES], iterationNameRegex = /TRANSIT_VLAN -> VXLAN/) @@ -60,21 +65,20 @@ class VxlanFlowSpec extends HealthCheckSpecification { [#data.encapsulationCreate.toString() -> #data.encapsulationUpdate.toString(), #swPair.hwSwString()]"(Map data, SwitchPair swPair) { when: "Create a flow with #encapsulationCreate.toString() encapsulation type" sleep(10000) //subsequent test fails due to traffexam. Was not able to track down the reason - def flow = flowHelperV2.randomFlow(swPair) - flow.encapsulationType = data.encapsulationCreate - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withEncapsulationType(data.encapsulationCreate) + .build().create() then: "Flow is created with the #encapsulationCreate.toString() encapsulation type" - def flowInfo = northboundV2.getFlow(flow.flowId) - flowInfo.encapsulationType == data.encapsulationCreate.toString().toLowerCase() + flow.encapsulationType == data.encapsulationCreate and: "Correct rules are installed" - def vxlanRule = (flowInfo.encapsulationType == VXLAN.toString().toLowerCase()) - def flowInfoFromDb = database.getFlow(flow.flowId) + def vxlanRule = (flow.encapsulationType == FlowEncapsulationType.VXLAN) + def flowInfoFromDb = flow.retrieveDetailsFromDB() // ingressRule should contain "pushVxlan" // egressRule should contain "tunnel-id" Wrappers.wait(RULES_INSTALLATION_TIME) { - verifyAll(northbound.getSwitchRules(swPair.src.dpId).flowEntries) { rules -> + verifyAll( switchRulesFactory.get(swPair.src.dpId).getRules() ) { rules -> rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.instructions.applyActions.pushVxlan as boolean == vxlanRule @@ -83,7 +87,7 @@ class VxlanFlowSpec extends HealthCheckSpecification { }.match.tunnelId as boolean == vxlanRule } - verifyAll(northbound.getSwitchRules(swPair.dst.dpId).flowEntries) { rules -> + verifyAll(switchRulesFactory.get(swPair.dst.dpId).getRules()) { rules -> rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.match.tunnelId as boolean == vxlanRule @@ -95,12 +99,11 @@ class VxlanFlowSpec extends HealthCheckSpecification { and: "Flow is valid" Wrappers.wait(PATH_INSTALLATION_TIME) { - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() } - //todo remove in case no traffic on jenkins and: "Flow is pingable" - 
verifyAll(northbound.pingFlow(flow.flowId, new PingInput())) { + verifyAll(flow.ping()) { forward.pingSuccess reverse.pingSuccess } @@ -109,7 +112,7 @@ class VxlanFlowSpec extends HealthCheckSpecification { def traffExam = traffExamProvider.get() def exam if (swPair.isTraffExamCapable()) { - exam = new FlowTrafficExamBuilder(topology, traffExam).buildBidirectionalExam(toFlowPayload(flow), 50, 5) + exam = flow.traffExam(traffExam,50, 5) withPool { assert [exam.forward, exam.reverse].collectParallel { direction -> def resources = traffExam.startExam(direction) @@ -117,45 +120,47 @@ class VxlanFlowSpec extends HealthCheckSpecification { traffExam.waitExam(direction) }.every { it.hasTraffic() - }, northbound.getSwitchRules(swPair.getSrc().getDpId()) + }, switchRulesFactory.get(swPair.getSrc().getDpId()) } } and: "Flow is pingable" - verifyAll(northbound.pingFlow(flow.flowId, new PingInput())) { + verifyAll(flow.ping()) { forward.pingSuccess reverse.pingSuccess } when: "Try to update the encapsulation type to #encapsulationUpdate.toString()" - northboundV2.updateFlow(flowInfo.flowId, - flowHelperV2.toRequest(flowInfo.tap { it.encapsulationType = data.encapsulationUpdate })) + def updateEntity = flow.deepCopy().tap { + it.encapsulationType = data.encapsulationUpdate + } + flow.update(updateEntity) then: "The encapsulation type is changed to #encapsulationUpdate.toString()" - def flowInfo2 = northboundV2.getFlow(flow.flowId) - flowInfo2.encapsulationType == data.encapsulationUpdate.toString().toLowerCase() + def flowInfo2 = flow.retrieveDetails() + flowInfo2.encapsulationType == data.encapsulationUpdate and: "Flow is valid" Wrappers.wait(PATH_INSTALLATION_TIME) { - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() } and: "Flow is pingable (though sometimes we have to wait)" Wrappers.wait(WAIT_OFFSET) { - verifyAll(northbound.pingFlow(flow.flowId, new PingInput())) { + verifyAll(flow.ping()) { forward.pingSuccess reverse.pingSuccess } } and: "Rules are recreated" - def flowInfoFromDb2 = database.getFlow(flow.flowId) + def flowInfoFromDb2 = flow.retrieveDetailsFromDB() [flowInfoFromDb.forwardPath.cookie.value, flowInfoFromDb.reversePath.cookie.value].sort() != [flowInfoFromDb2.forwardPath.cookie.value, flowInfoFromDb2.reversePath.cookie.value].sort() and: "New rules are installed correctly" Wrappers.wait(RULES_INSTALLATION_TIME) { - verifyAll(northbound.getSwitchRules(swPair.src.dpId).flowEntries) { rules -> + verifyAll(switchRulesFactory.get(swPair.src.dpId).getRules()) { rules -> rules.find { it.cookie == flowInfoFromDb2.forwardPath.cookie.value }.instructions.applyActions.pushVxlan as boolean == !vxlanRule @@ -164,7 +169,7 @@ class VxlanFlowSpec extends HealthCheckSpecification { }.match.tunnelId as boolean == !vxlanRule } - verifyAll(northbound.getSwitchRules(swPair.dst.dpId).flowEntries) { rules -> + verifyAll(switchRulesFactory.get(swPair.dst.dpId).getRules()) { rules -> rules.find { it.cookie == flowInfoFromDb2.forwardPath.cookie.value }.match.tunnelId as boolean == !vxlanRule @@ -181,7 +186,7 @@ class VxlanFlowSpec extends HealthCheckSpecification { def resources = traffExam.startExam(direction) direction.setResources(resources) traffExam.waitExam(direction) - }.every {it.hasTraffic()}, northbound.getSwitchRules(swPair.getSrc().getDpId()) + }.every {it.hasTraffic()}, switchRulesFactory.get(swPair.getSrc().getDpId()) } } @@ -190,10 +195,10 @@ class VxlanFlowSpec extends HealthCheckSpecification { [ [ 
encapsulationCreate: FlowEncapsulationType.TRANSIT_VLAN, - encapsulationUpdate: VXLAN + encapsulationUpdate: FlowEncapsulationType.VXLAN ], [ - encapsulationCreate: VXLAN, + encapsulationCreate: FlowEncapsulationType.VXLAN, encapsulationUpdate: FlowEncapsulationType.TRANSIT_VLAN ] ], getUniqueVxlanSwitchPairs() @@ -203,24 +208,26 @@ class VxlanFlowSpec extends HealthCheckSpecification { def "Able to CRUD a pinned flow with 'VXLAN' encapsulation"() { when: "Create a flow" def switchPair = switchPairs.all().neighbouring().withBothSwitchesVxLanEnabled().random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.encapsulationType = VXLAN - flow.pinned = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .withPinned(true) + .build().create() then: "Flow is created" - def flowInfo = northboundV2.getFlow(flow.flowId) - flowInfo.pinned + flow.pinned when: "Update the flow (pinned=false)" - northboundV2.updateFlow(flowInfo.flowId, flowHelperV2.toRequest(flowInfo.tap { it.pinned = false })) + def updateEntity = flow.deepCopy().tap { + it.pinned = false + } + flow.update(updateEntity) then: "The pinned option is disabled" - def newFlowInfo = northboundV2.getFlow(flow.flowId) + def newFlowInfo = flow.retrieveDetails() !newFlowInfo.pinned - Instant.parse(flowInfo.lastUpdated) < Instant.parse(newFlowInfo.lastUpdated) + Instant.parse(flow.lastUpdated) < Instant.parse(newFlowInfo.lastUpdated) Wrappers.wait(PATH_INSTALLATION_TIME) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + assert flow.retrieveFlowStatus().status == FlowState.UP } } @@ -236,27 +243,27 @@ class VxlanFlowSpec extends HealthCheckSpecification { assumeTrue(availablePaths.size() >= 2, "Unable to find required paths between switches") when: "Create a flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flow.encapsulationType = VXLAN - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .build().create() then: "Flow is created with protected path" - def flowPathInfo = northbound.getFlowPath(flow.flowId) - flowPathInfo.protectedPath - northboundV2.getFlow(flow.flowId).statusDetails + def flowPathInfo = flow.retrieveDetails() + flowPathInfo.allocateProtectedPath + flow.retrieveDetails().statusDetails and: "Rules for main and protected paths are created" Wrappers.wait(WAIT_OFFSET) { flowHelper.verifyRulesOnProtectedFlow(flow.flowId) } - def flowInfoFromDb = database.getFlow(flow.flowId) + def flowInfoFromDb = flow.retrieveDetailsFromDB() // ingressRule should contain "pushVxlan" // egressRule should contain "tunnel-id" // protected path creates engressRule def protectedForwardCookie = flowInfoFromDb.protectedForwardPath.cookie.value def protectedReverseCookie = flowInfoFromDb.protectedReversePath.cookie.value Wrappers.wait(RULES_INSTALLATION_TIME) { - verifyAll(northbound.getSwitchRules(switchPair.src.dpId).flowEntries) { rules -> + verifyAll(switchRulesFactory.get(switchPair.src.dpId).getRules()) { rules -> rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.instructions.applyActions.pushVxlan @@ -268,7 +275,7 @@ class VxlanFlowSpec extends HealthCheckSpecification { }.match.tunnelId } - verifyAll(northbound.getSwitchRules(switchPair.dst.dpId).flowEntries) { rules -> + verifyAll(switchRulesFactory.get(switchPair.dst.dpId).getRules()) { rules -> 
rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.match.tunnelId @@ -282,36 +289,35 @@ class VxlanFlowSpec extends HealthCheckSpecification { } and: "Validation of flow must be successful" - northbound.validateFlow(flow.flowId).each { direction -> - assert direction.discrepancies.empty - } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Update flow: disable protected path(allocateProtectedPath=false)" - def flowData = northboundV2.getFlow(flow.flowId) - def protectedFlowPath = northbound.getFlowPath(flow.flowId).protectedPath.forwardPath - flowHelperV2.updateFlow(flowData.flowId, flowHelperV2.toRequest(flowData.tap { it.allocateProtectedPath = false })) + def flowData = flow.retrieveDetails() + def protectedFlowPath = flow.retrieveAllEntityPaths().getPathNodes(Direction.FORWARD, true) + def updateEntity = flow.deepCopy().tap { it.allocateProtectedPath = false } + flow.update(updateEntity) then: "Protected path is disabled" - !northbound.getFlowPath(flow.flowId).protectedPath - !northboundV2.getFlow(flow.flowId).statusDetails + !flow.retrieveAllEntityPaths().flowPath.protectedPath + !flow.retrieveDetails().statusDetails and: "Rules for protected path are deleted" Wrappers.wait(RULES_DELETION_TIME) { protectedFlowPath.each { sw -> - def rules = northbound.getSwitchRules(sw.switchId).flowEntries.findAll { + def rules = switchRulesFactory.get(sw.switchId).getRules().findAll { !new Cookie(it.cookie).serviceFlag } - assert rules.every { it != protectedForwardCookie && it != protectedReverseCookie } + assert rules.every { it.cookie != protectedForwardCookie && it.cookie != protectedReverseCookie } } } and: "And rules for main path are recreacted" Wrappers.wait(RULES_INSTALLATION_TIME) { - def flowInfoFromDb2 = database.getFlow(flow.flowId) + def flowInfoFromDb2 = flow.retrieveDetailsFromDB() assert [flowInfoFromDb.forwardPath.cookie.value, flowInfoFromDb.reversePath.cookie.value].sort() != [flowInfoFromDb2.forwardPath.cookie.value, flowInfoFromDb2.reversePath.cookie.value].sort() - verifyAll(northbound.getSwitchRules(switchPair.src.dpId).flowEntries) { rules -> + verifyAll(switchRulesFactory.get(switchPair.src.dpId).getRules()) { rules -> rules.find { it.cookie == flowInfoFromDb2.forwardPath.cookie.value }.instructions.applyActions.pushVxlan @@ -320,7 +326,7 @@ class VxlanFlowSpec extends HealthCheckSpecification { }.match.tunnelId } - verifyAll(northbound.getSwitchRules(switchPair.dst.dpId).flowEntries) { rules -> + verifyAll(switchRulesFactory.get(switchPair.dst.dpId).getRules()) { rules -> rules.find { it.cookie == flowInfoFromDb2.forwardPath.cookie.value }.match.tunnelId @@ -331,9 +337,7 @@ class VxlanFlowSpec extends HealthCheckSpecification { } and: "Validation of flow must be successful" - northbound.validateFlow(flow.flowId).each { direction -> - assert direction.discrepancies.empty - } + flow.validateAndCollectDiscrepancies().isEmpty() } @Tags([SMOKE_SWITCHES]) @@ -345,19 +349,20 @@ class VxlanFlowSpec extends HealthCheckSpecification { .withTraffgensOnBothEnds() .random() when: "Create a default flow" - def defaultFlow = flowHelperV2.randomFlow(switchPair) - defaultFlow.source.vlanId = 0 - defaultFlow.destination.vlanId = 0 - defaultFlow.encapsulationType = VXLAN - flowHelperV2.addFlow(defaultFlow) + def defaultFlow = flowFactory.getBuilder(switchPair) + .withSourceVlan(0) + .withDestinationVlan(0) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .build().create() - def flow = flowHelperV2.randomFlow(switchPair) - flow.source.vlanId = 10 - 
flow.destination.vlanId = 10 + def flow = flowFactory.getBuilder(switchPair) + .withSourceVlan(10) + .withDestinationVlan(10) + .build().create() then: "System allows tagged traffic on the default flow" def traffExam = traffExamProvider.get() - def exam = new FlowTrafficExamBuilder(topology, traffExam).buildBidirectionalExam(toFlowPayload(flow), 1000, 5) + def exam = flow.traffExam(traffExam, 1000, 5) withPool { [exam.forward, exam.reverse].eachParallel { direction -> def resources = traffExam.startExam(direction) @@ -380,32 +385,34 @@ class VxlanFlowSpec extends HealthCheckSpecification { } when: "Try to create a VXLAN flow" - def flow = flowHelperV2.randomFlow(switchPair) - flow.encapsulationType = VXLAN.toString() - flowHelperV2.addFlow(flow) + def flowEntity = flowFactory.getBuilder(switchPair) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .build() + flowEntity.create() then: "Human readable error is returned" def createError = thrown(HttpClientErrorException) - createError.rawStatusCode == 400 - def createErrorDetails = createError.responseBodyAsString.to(MessageError) - createErrorDetails.errorMessage == "Could not create flow" - createErrorDetails.errorDescription == getUnsupportedVxlanErrorDescription("source", switchPair.src.dpId, - [FlowEncapsulationType.TRANSIT_VLAN]) + def actualCreateError= getUnsupportedVxlanErrorDescription( + "source", switchPair.src.dpId, [FlowEncapsulationType.TRANSIT_VLAN]) + new FlowNotCreatedExpectedError(~/$actualCreateError/).matches(createError) when: "Create a VLAN flow" - flow.encapsulationType = FlowEncapsulationType.TRANSIT_VLAN.toString() - flowHelperV2.addFlow(flow) + flowEntity.tap { + it.encapsulationType = FlowEncapsulationType.TRANSIT_VLAN + } + def flow = flowEntity.create() and: "Try updated its encap type to VXLAN" - northboundV2.updateFlow(flow.flowId, flow.tap { it.encapsulationType = VXLAN.toString() }) + def updateFlowEntity = flow.tap { + it.encapsulationType = FlowEncapsulationType.VXLAN + } + flow.update(updateFlowEntity) then: "Human readable error is returned" def updateError = thrown(HttpClientErrorException) - updateError.rawStatusCode == 400 - def updateErrorDetails = updateError.responseBodyAsString.to(MessageError) - updateErrorDetails.errorMessage == "Could not update flow" - createErrorDetails.errorDescription == getUnsupportedVxlanErrorDescription("source", switchPair.src.dpId, - [FlowEncapsulationType.TRANSIT_VLAN]) + def actualUpdateDesc= getUnsupportedVxlanErrorDescription( + "source", switchPair.src.dpId, [FlowEncapsulationType.TRANSIT_VLAN]) + new FlowNotUpdatedExpectedError(~/$actualUpdateDesc/).matches(updateError) } @Tags(TOPOLOGY_DEPENDENT) @@ -440,12 +447,12 @@ class VxlanFlowSpec extends HealthCheckSpecification { } when: "Create a VXLAN flow" - def flow = flowHelperV2.randomFlow(switchPair) - flow.encapsulationType = VXLAN - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .build().create() then: "Flow is built through vxlan-enabled path, even though it is not the shortest" - pathHelper.convert(northbound.getFlowPath(flow.flowId)) != noVxlanPath + flow.retrieveAllEntityPaths().getPathNodes() != noVxlanPath } @Tags([LOW_PRIORITY, TOPOLOGY_DEPENDENT]) @@ -460,16 +467,16 @@ class VxlanFlowSpec extends HealthCheckSpecification { .supportedTransitEncapsulation.collect { it.toUpperCase() } when: "Try to create a flow" - def flow = flowHelperV2.randomFlow(switchPair).tap {it.encapsulationType = VXLAN} - 
flowHelperV2.addFlow(flow) + def flowEntity = flowFactory.getBuilder(switchPair) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .build() + flowEntity.create() then: "Human readable error is returned" - def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Could not create flow" - errorDetails.errorDescription == getUnsupportedVxlanErrorDescription("destination", switchPair.dst.dpId, + def err = thrown(HttpClientErrorException) + def actualErrDesc= getUnsupportedVxlanErrorDescription("destination", switchPair.dst.dpId, dstSupportedEncapsulationTypes) + new FlowNotCreatedExpectedError(~/$actualErrDesc/).matches(err) } def "System allows to create/update encapsulation type for a one-switch flow\ @@ -477,18 +484,19 @@ class VxlanFlowSpec extends HealthCheckSpecification { when: "Try to create a one-switch flow" def sw = topology.activeSwitches.find { switchHelper.isVxlanEnabled(it.dpId) } assumeTrue(sw as boolean, "Require at least 1 VXLAN supported switch") - def flow = flowHelperV2.singleSwitchFlow(sw).tap {it.encapsulationType = encapsulationCreate} - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(sw, sw) + .withEncapsulationType(encapsulationCreate) + .build().create() then: "Flow is created with the #encapsulationCreate.toString() encapsulation type" - def flowInfo1 = northboundV2.getFlow(flow.flowId) - flowInfo1.encapsulationType == encapsulationCreate.toString().toLowerCase() + def flowInfo1 = flow.retrieveDetails() + flowInfo1.encapsulationType == encapsulationCreate and: "Correct rules are installed" - def flowInfoFromDb = database.getFlow(flow.flowId) + def flowInfoFromDb = flow.retrieveDetailsFromDB() // vxlan rules are not creating for a one-switch flow Wrappers.wait(RULES_INSTALLATION_TIME) { - verifyAll(northbound.getSwitchRules(sw.dpId).flowEntries) { rules -> + verifyAll(switchRulesFactory.get(sw.dpId).getRules()) { rules -> !rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.instructions.applyActions.pushVxlan @@ -499,36 +507,38 @@ class VxlanFlowSpec extends HealthCheckSpecification { } and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() and: "Unable to ping a one-switch vxlan flow" - verifyAll(northbound.pingFlow(flow.flowId, new PingInput())) { + verifyAll(flow.ping()) { !forward !reverse error == "Flow ${flow.flowId} should not be one-switch flow" } when: "Try to update the encapsulation type to #encapsulationUpdate.toString()" - northboundV2.updateFlow(flowInfo1.flowId, - flowHelperV2.toRequest(flowInfo1.tap { it.encapsulationType = encapsulationUpdate })) + def updateEntity = flowInfo1.tap { + it.encapsulationType = encapsulationUpdate + } + flow.update(updateEntity) then: "The encapsulation type is changed to #encapsulationUpdate.toString()" - def flowInfo2 = northboundV2.getFlow(flow.flowId) - flowInfo2.encapsulationType == encapsulationUpdate.toString().toLowerCase() + def flowInfo2 = flow.retrieveDetails() + flowInfo2.encapsulationType == encapsulationUpdate and: "Flow is valid" Wrappers.wait(PATH_INSTALLATION_TIME) { - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() } and: "Rules are recreated" - def flowInfoFromDb2 = database.getFlow(flow.flowId) + def flowInfoFromDb2 = flow.retrieveDetailsFromDB() 
[flowInfoFromDb.forwardPath.cookie.value, flowInfoFromDb.reversePath.cookie.value].sort() != [flowInfoFromDb2.forwardPath.cookie.value, flowInfoFromDb2.reversePath.cookie.value].sort() and: "New rules are installed correctly" Wrappers.wait(RULES_INSTALLATION_TIME) { - verifyAll(northbound.getSwitchRules(sw.dpId).flowEntries) { rules -> + verifyAll(switchRulesFactory.get(sw.dpId).getRules()) { rules -> !rules.find { it.cookie == flowInfoFromDb2.forwardPath.cookie.value }.instructions.applyActions.pushVxlan @@ -540,24 +550,9 @@ class VxlanFlowSpec extends HealthCheckSpecification { where: encapsulationCreate | encapsulationUpdate - FlowEncapsulationType.TRANSIT_VLAN | VXLAN - VXLAN | FlowEncapsulationType.TRANSIT_VLAN - - } + FlowEncapsulationType.TRANSIT_VLAN | FlowEncapsulationType.VXLAN + FlowEncapsulationType.VXLAN | FlowEncapsulationType.TRANSIT_VLAN - FlowPayload toFlowPayload(FlowRequestV2 flow) { - FlowEndpointV2 source = flow.source - FlowEndpointV2 destination = flow.destination - - FlowPayload.builder() - .id(flow.flowId) - .source(new FlowEndpointPayload(source.switchId, source.portNumber, source.vlanId, - new DetectConnectedDevicesPayload(false, false))) - .destination(new FlowEndpointPayload(destination.switchId, destination.portNumber, destination.vlanId, - new DetectConnectedDevicesPayload(false, false))) - .maximumBandwidth(flow.maximumBandwidth) - .ignoreBandwidth(flow.ignoreBandwidth) - .build() } /** @@ -578,8 +573,11 @@ class VxlanFlowSpec extends HealthCheckSpecification { } def getUnsupportedVxlanErrorDescription(endpointName, dpId, supportedEncapsulationTypes) { + String supportedEncTypes = supportedEncapsulationTypes.collect { it.toString().toUpperCase() } + .toString().replace('[', '\\[').replace(']', '\\]') + String vxlan = FlowEncapsulationType.VXLAN.toString().toUpperCase() return "Flow's $endpointName endpoint $dpId doesn't support requested encapsulation type " + - "$VXLAN. Choose one of the supported encapsulation types " + - "$supportedEncapsulationTypes or update switch properties and add needed encapsulation type." + "$vxlan. Choose one of the supported encapsulation types " + + "$supportedEncTypes or update switch properties and add needed encapsulation type." 
} } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowDiversitySpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowDiversitySpec.groovy index cb38fca42b9..027ad9ad51b 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowDiversitySpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowDiversitySpec.groovy @@ -6,10 +6,10 @@ import static org.openkilda.functionaltests.extension.tags.Tag.HA_FLOW import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.model.HaFlowExtended import org.openkilda.functionaltests.helpers.HaFlowFactory +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.HaFlowExtended import org.openkilda.functionaltests.helpers.model.YFlowFactory -import org.openkilda.messaging.payload.flow.FlowState import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired @@ -28,6 +28,10 @@ class HaFlowDiversitySpec extends HealthCheckSpecification { @Autowired HaFlowFactory haFlowFactory + @Autowired + @Shared + FlowFactory flowFactory + def "Able to create diverse HA-Flows"() { given: "Switches with three not overlapping paths at least" def swT = topologyHelper.switchTriplets.findAll { @@ -94,12 +98,13 @@ class HaFlowDiversitySpec extends HealthCheckSpecification { def haFlow1 = haFlowFactory.getRandom(swT) and: "Create a regular multiSwitch Flow diverse with previously created HA-Flow" - def flowRequest = flowHelperV2.randomFlow(swT.shared, swT.ep1, false) - .tap { diverseFlowId = haFlow1.getHaFlowId() } - def flow = flowHelperV2.addFlow(flowRequest) + def flow = flowFactory.getBuilder(swT.shared, swT.ep1, false) + .withDiverseFlow(haFlow1.haFlowId).build() + .create() and: "Create an additional HA-Flow diverse with simple flow that has another HA-Flow in diverse group" - def haFlow2 = haFlowFactory.getBuilder(swT, false, haFlow1.occupiedEndpoints()).withDiverseFlow(flow.flowId) + def haFlow2 = haFlowFactory.getBuilder(swT, false, haFlow1.occupiedEndpoints()) + .withDiverseFlow(flow.flowId) .build().create() then: "Create response contains correct info about diverse flows" @@ -116,7 +121,7 @@ class HaFlowDiversitySpec extends HealthCheckSpecification { when: "Get Flow and Ha-Flows details" def haFlow1Details = haFlow1.retrieveDetails() def haFlow2Details = haFlow2.retrieveDetails() - def regularFlowDetails = northboundV2.getFlow(flow.flowId) + def regularFlowDetails = flow.retrieveDetails() then: "All get Flow responses have correct diverse flow IDs" haFlow1Details.diverseWithHaFlows == [haFlow2.haFlowId] as Set diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowPathSwapSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowPathSwapSpec.groovy index 0cda3a954da..f2541b7d5dd 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowPathSwapSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowPathSwapSpec.groovy @@ -11,6 +11,7 @@ import static 
org.openkilda.testing.Constants.PROTECTED_PATH_INSTALLATION_TIME import static org.openkilda.testing.Constants.STATS_LOGGING_TIMEOUT import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.haflow.HaFlowPathNotSwappedExpectedError import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.HaFlowFactory import org.openkilda.functionaltests.helpers.Wrappers @@ -20,6 +21,7 @@ import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.testing.service.traffexam.TraffExamService import org.springframework.beans.factory.annotation.Autowired +import org.springframework.http.HttpStatus import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative import spock.lang.Shared @@ -112,10 +114,8 @@ class HaFlowPathSwapSpec extends HealthCheckSpecification { then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - - def errorDescription = exc.responseBodyAsString.to(MessageError).errorDescription - errorDescription == "Could not swap paths: HA-flow ${haFlow.haFlowId} doesn't have protected path" + new HaFlowPathNotSwappedExpectedError(HttpStatus.BAD_REQUEST, + ~/Could not swap paths: HA-flow ${haFlow.haFlowId} doesn't have protected path/).matches(exc) } @Tags(LOW_PRIORITY) @@ -125,8 +125,7 @@ class HaFlowPathSwapSpec extends HealthCheckSpecification { then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - exc.responseBodyAsString.to(MessageError).errorDescription == - "Could not swap paths: HA-flow $NON_EXISTENT_FLOW_ID not found" + new HaFlowPathNotSwappedExpectedError(HttpStatus.NOT_FOUND, + ~/Could not swap paths: HA-flow $NON_EXISTENT_FLOW_ID not found/).matches(exc) } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowRerouteSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowRerouteSpec.groovy index 9312df935bd..e304704647c 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowRerouteSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/haflows/HaFlowRerouteSpec.groovy @@ -11,6 +11,7 @@ import org.openkilda.model.history.DumpType import org.openkilda.testing.service.northbound.model.HaFlowActionType import org.openkilda.testing.service.traffexam.TraffExamService import org.springframework.beans.factory.annotation.Autowired +import spock.lang.Issue import spock.lang.Narrative import spock.lang.Shared @@ -45,6 +46,7 @@ class HaFlowRerouteSpec extends HealthCheckSpecification { Provider traffExamProvider @Tags([TOPOLOGY_DEPENDENT, ISL_RECOVER_ON_FAIL]) + @Issue("https://github.com/telstra/open-kilda/issues/5647 (hardware)") def "Valid HA-flow can be rerouted"() { given: "An HA-flow" def swT = topologyHelper.findSwitchTripletWithAlternativePaths() diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowDiversitySpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowDiversitySpec.groovy index 8ba0492b589..5acd668e8bc 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowDiversitySpec.groovy +++ 
b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowDiversitySpec.groovy @@ -1,5 +1,7 @@ package org.openkilda.functionaltests.spec.flows.yflows +import org.openkilda.functionaltests.helpers.factory.FlowFactory + import groovy.util.logging.Slf4j import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags @@ -26,6 +28,9 @@ class YFlowDiversitySpec extends HealthCheckSpecification { @Autowired @Shared YFlowFactory yFlowFactory + @Autowired + @Shared + FlowFactory flowFactory def "Able to create diverse Y-Flows"() { given: "Switches with three not overlapping paths at least" @@ -99,8 +104,7 @@ class YFlowDiversitySpec extends HealthCheckSpecification { def yFlow = yFlowFactory.getRandom(swT, false) and: "Simple multiSwitch flow on the same path as first sub-flow" - def flow = flowHelperV2.randomFlow(swT.shared, swT.ep1, false) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swT.shared, swT.ep1, false) def subFlowId = yFlow.subFlows.first().flowId def involvedIslSubFlow = yFlow.retrieveAllEntityPaths().subFlowPaths.find { it.flowId == subFlowId }.getInvolvedIsls() def involvedIslSimpleFlow = pathHelper.getInvolvedIsls(PathHelper.convert(northbound.getFlowPath(flow.flowId))) @@ -122,7 +126,7 @@ class YFlowDiversitySpec extends HealthCheckSpecification { } and: "Simple multi switch flow has the 'diverse_with' field" - with(northboundV2.getFlow(flow.flowId)) { + with(flow.retrieveDetails()) { it.diverseWithYFlows.sort() == [yFlow.yFlowId].sort() it.diverseWith.empty } @@ -141,7 +145,7 @@ class YFlowDiversitySpec extends HealthCheckSpecification { assert yFlow.validate().asExpected and: "Simple flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Partially update Y-Flow to become not diverse with simple multiSwitch flow" def patchRequest = YFlowPatchPayload.builder().diverseFlowId("").build() @@ -155,7 +159,7 @@ class YFlowDiversitySpec extends HealthCheckSpecification { yFlow.diverseWithFlows.empty and: "Simple multi switch flow doesn't have the 'diverse_with' field" - northboundV2.getFlow(flow.flowId).diverseWithYFlows.empty + flow.retrieveDetails().diverseWithYFlows.empty } def "Able to create Y-Flow with one switch sub flow and diverse with simple multiSwitch flow"() { @@ -163,8 +167,7 @@ class YFlowDiversitySpec extends HealthCheckSpecification { def switchPair = switchPairs.all().neighbouring().withAtLeastNNonOverlappingPaths(2).random() and: "Simple multiSwitch flow" - def flow = flowHelperV2.randomFlow(switchPair.src, switchPair.dst, false) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair.src, switchPair.dst, false) when: "Create a Y-Flow with one switch sub flow and diversity with simple flow" def swT = topologyHelper.getSwitchTriplet(switchPair.src.dpId, switchPair.src.dpId, switchPair.dst.dpId) @@ -179,7 +182,7 @@ class YFlowDiversitySpec extends HealthCheckSpecification { yFlow.diverseWithFlows == [flow.flowId] as Set and: "Flow is diverse with Y-Flow" - with(northboundV2.getFlow(flow.flowId)) { + with(flow.retrieveDetails()) { it.diverseWithYFlows == [yFlow.yFlowId] as Set it.diverseWith.empty it.diverseWithHaFlows.empty @@ -189,7 +192,7 @@ class YFlowDiversitySpec extends HealthCheckSpecification { yFlow.validate().asExpected and: "Simple Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert 
direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() } def "Able to get Y-Flow paths with correct overlapping segments stats"() { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowPathSwapSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowPathSwapSpec.groovy index 0cb93437134..ddf20f3bc6e 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowPathSwapSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/flows/yflows/YFlowPathSwapSpec.groovy @@ -1,12 +1,12 @@ package org.openkilda.functionaltests.spec.flows.yflows import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.yflow.YFlowPathNotSwappedExpectedError import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers import org.openkilda.functionaltests.helpers.model.SwitchTriplet import org.openkilda.functionaltests.helpers.model.YFlowFactory import org.openkilda.functionaltests.model.stats.FlowStats -import org.openkilda.messaging.error.MessageError import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.testing.model.topology.TopologyDefinition.Isl import org.openkilda.testing.service.traffexam.TraffExamService @@ -15,6 +15,7 @@ import org.openkilda.testing.service.traffexam.model.ExamReport import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired +import org.springframework.http.HttpStatus import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative import spock.lang.Shared @@ -228,11 +229,8 @@ class YFlowPathSwapSpec extends HealthCheckSpecification { then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - - def errorDescription = exc.responseBodyAsString.to(MessageError).errorDescription - errorDescription == "Could not swap y-flow paths: sub-flow ${yFlow.subFlows[0].flowId} doesn't have a protected path" || - errorDescription == "Could not swap y-flow paths: sub-flow ${yFlow.subFlows[1].flowId} doesn't have a protected path" + new YFlowPathNotSwappedExpectedError(HttpStatus.BAD_REQUEST, + ~/Could not swap y-flow paths: sub-flow S\d.${yFlow.yFlowId} doesn't have a protected path/).matches(exc) } @Tags(LOW_PRIORITY) @@ -242,9 +240,7 @@ class YFlowPathSwapSpec extends HealthCheckSpecification { then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - exc.responseBodyAsString.to(MessageError).errorDescription == - "Y-flow $NON_EXISTENT_FLOW_ID not found" + new YFlowPathNotSwappedExpectedError(HttpStatus.NOT_FOUND, ~/Y-flow $NON_EXISTENT_FLOW_ID not found/).matches(exc) } @Tags([LOW_PRIORITY, ISL_RECOVER_ON_FAIL, ISL_PROPS_DB_RESET]) @@ -305,9 +301,9 @@ class YFlowPathSwapSpec extends HealthCheckSpecification { then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorDescription == - "Could not swap y-flow paths: the protected path of sub-flow ${initialPath.subFlowPaths.first().flowId} is not in ACTIVE state, but in INACTIVE/INACTIVE (forward/reverse) state" + new YFlowPathNotSwappedExpectedError(HttpStatus.BAD_REQUEST, + ~/Could not swap y-flow paths: the protected path of sub-flow 
${initialPath.subFlowPaths.first().flowId} \ +is not in ACTIVE state, but in INACTIVE\\/INACTIVE \(forward\\/reverse\) state/).matches(exc) when: "Restore port status" islHelper.restoreIsl(islToBreak) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/LinkMaintenanceSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/LinkMaintenanceSpec.groovy index 3b4e60c9491..71f123ec571 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/LinkMaintenanceSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/LinkMaintenanceSpec.groovy @@ -1,19 +1,27 @@ package org.openkilda.functionaltests.spec.links -import org.openkilda.functionaltests.HealthCheckSpecification -import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper -import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.messaging.payload.flow.FlowState - import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE import static org.openkilda.testing.Constants.DEFAULT_COST import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME import static org.openkilda.testing.Constants.WAIT_OFFSET +import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowEntityPath +import org.openkilda.messaging.payload.flow.FlowState + +import org.springframework.beans.factory.annotation.Autowired +import spock.lang.Shared + class LinkMaintenanceSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + @Tags(SMOKE) def "Maintenance mode can be set/unset for a particular link"() { given: "An active link" @@ -45,42 +53,39 @@ class LinkMaintenanceSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNPaths(2).random() and: "Create a couple of flows going through these switches" - def flow1 = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow1) - def flow1Path = PathHelper.convert(northbound.getFlowPath(flow1.flowId)) + def flow1 = flowFactory.getRandom(switchPair) + def flow1Path = flow1.retrieveAllEntityPaths() - def flow2 = flowHelperV2.randomFlow(switchPair, false, [flow1]) - flowHelperV2.addFlow(flow2) - def flow2Path = PathHelper.convert(northbound.getFlowPath(flow2.flowId)) - - assert flow1Path == flow2Path + def flow2 = flowFactory.getRandom(switchPair, false, FlowState.UP, flow1.occupiedEndpoints()) + def flow2Path = flow2.retrieveAllEntityPaths() + assert flow1Path.getPathNodes() == flow2Path.getPathNodes() when: "Set maintenance mode without flows evacuation flag for the first link involved in flow paths" - def isl = pathHelper.getInvolvedIsls(flow1Path).first() + def isl = flow1Path.flowPath.getInvolvedIsls().first() islHelper.setLinkMaintenance(isl, true, false) then: "Flows are not evacuated (rerouted) and have the same paths" - PathHelper.convert(northbound.getFlowPath(flow1.flowId)) == flow1Path - PathHelper.convert(northbound.getFlowPath(flow2.flowId)) == flow2Path + flow1.retrieveAllEntityPaths() == flow1Path + flow2.retrieveAllEntityPaths() == 
flow2Path when: "Set maintenance mode again with flows evacuation flag for the same link" northbound.setLinkMaintenance(islUtils.toLinkUnderMaintenance(isl, true, true)) then: "Flows are evacuated (rerouted)" - def flow1PathUpdated, flow2PathUpdated + FlowEntityPath flow1PathUpdated, flow2PathUpdated Wrappers.wait(PATH_INSTALLATION_TIME + WAIT_OFFSET) { - [flow1, flow2].each { assert northboundV2.getFlowStatus(it.flowId).status == FlowState.UP } + [flow1, flow2].each { flow -> assert flow.retrieveFlowStatus().status == FlowState.UP } - flow1PathUpdated = PathHelper.convert(northbound.getFlowPath(flow1.flowId)) - flow2PathUpdated = PathHelper.convert(northbound.getFlowPath(flow2.flowId)) + flow1PathUpdated = flow1.retrieveAllEntityPaths() + flow2PathUpdated = flow2.retrieveAllEntityPaths() assert flow1PathUpdated != flow1Path assert flow2PathUpdated != flow2Path } and: "Link under maintenance is not involved in new flow paths" - !(isl in pathHelper.getInvolvedIsls(flow1PathUpdated)) - !(isl in pathHelper.getInvolvedIsls(flow2PathUpdated)) + !flow1PathUpdated.flowPath.getInvolvedIsls().contains(isl) + !flow2PathUpdated.flowPath.getInvolvedIsls().contains(isl) } @Tags(ISL_RECOVER_ON_FAIL) @@ -89,18 +94,15 @@ class LinkMaintenanceSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNPaths(2).random() and: "Create a couple of flows going through these switches" - def flow1 = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow1) - def flow1Path = PathHelper.convert(northbound.getFlowPath(flow1.flowId)) - - def flow2 = flowHelperV2.randomFlow(switchPair, false, [flow1]) - flowHelperV2.addFlow(flow2) - def flow2Path = PathHelper.convert(northbound.getFlowPath(flow2.flowId)) + def flow1 = flowFactory.getRandom(switchPair) + def flow1Path = flow1.retrieveAllEntityPaths() - assert flow1Path == flow2Path + def flow2 = flowFactory.getRandom(switchPair, false, FlowState.UP, flow1.occupiedEndpoints()) + def flow2Path = flow2.retrieveAllEntityPaths() + assert flow1Path.getPathNodes() == flow2Path.getPathNodes() and: "Make only one alternative path available for both flows" - def flow1ActualIsl = pathHelper.getInvolvedIsls(flow1Path).first() + def flow1ActualIsl = flow1Path.flowPath.getInvolvedIsls().first() def altIsls = topology.getRelatedIsls(switchPair.src) - flow1ActualIsl /* altIsls can have only 1 element (the only one alt ISL). 
In this case it will be set under maintenance mode, and breaking the other @@ -115,17 +117,16 @@ class LinkMaintenanceSpec extends HealthCheckSpecification { islHelper.breakIsl(flow1ActualIsl) then: "Flows are rerouted to alternative path with link under maintenance" - Wrappers.wait(rerouteDelay + WAIT_OFFSET*2) { - [flow1, flow2].each { assert northboundV2.getFlowStatus(it.flowId).status == FlowState.UP } + Wrappers.wait(rerouteDelay + WAIT_OFFSET * 2) { + [flow1, flow2].each { flow -> assert flow.retrieveFlowStatus().status == FlowState.UP } - def flow1PathUpdated = PathHelper.convert(northbound.getFlowPath(flow1.flowId)) - def flow2PathUpdated = PathHelper.convert(northbound.getFlowPath(flow2.flowId)) + def flow1PathUpdated = flow1.retrieveAllEntityPaths() + def flow2PathUpdated = flow2.retrieveAllEntityPaths() assert flow1PathUpdated != flow1Path assert flow2PathUpdated != flow2Path - - assert islUnderMaintenance in pathHelper.getInvolvedIsls(flow1PathUpdated) - assert islUnderMaintenance in pathHelper.getInvolvedIsls(flow2PathUpdated) + assert flow1PathUpdated.flowPath.getInvolvedIsls().contains(islUnderMaintenance) + assert flow2PathUpdated.flowPath.getInvolvedIsls().contains(islUnderMaintenance) } } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/LinkSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/LinkSpec.groovy index 33c2e1dbf80..849c6ca59cb 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/LinkSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/LinkSpec.groovy @@ -1,20 +1,5 @@ package org.openkilda.functionaltests.spec.links -import org.openkilda.functionaltests.HealthCheckSpecification -import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper -import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.messaging.error.MessageError -import org.openkilda.messaging.info.event.IslInfoData -import org.openkilda.messaging.info.event.SwitchChangeType -import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.SwitchId -import org.openkilda.northbound.dto.v1.links.LinkParametersDto -import org.openkilda.testing.model.topology.TopologyDefinition.Isl -import org.springframework.beans.factory.annotation.Value -import org.springframework.web.client.HttpClientErrorException -import spock.lang.See - import static org.junit.jupiter.api.Assumptions.assumeTrue import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL import static org.openkilda.functionaltests.extension.tags.Tag.LOCKKEEPER @@ -29,9 +14,40 @@ import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME import static org.openkilda.testing.Constants.WAIT_OFFSET import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW +import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.InvalidRequestParametersExpectedError +import org.openkilda.functionaltests.error.MissingServletRequestParameterException +import org.openkilda.functionaltests.error.UnableToParseRequestArgumentsException +import org.openkilda.functionaltests.error.link.LinkIsInIllegalStateExpectedError +import org.openkilda.functionaltests.error.link.LinkNotFoundExpectedError +import 
org.openkilda.functionaltests.error.link.LinkPropertiesNotUpdatedExpectedError +import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan +import org.openkilda.messaging.error.MessageError +import org.openkilda.messaging.info.event.IslInfoData +import org.openkilda.messaging.info.event.SwitchChangeType +import org.openkilda.messaging.payload.flow.FlowState +import org.openkilda.model.SwitchId +import org.openkilda.northbound.dto.v1.links.LinkParametersDto +import org.openkilda.testing.model.topology.TopologyDefinition.Isl + +import org.springframework.beans.factory.annotation.Autowired + +import org.springframework.beans.factory.annotation.Value +import org.springframework.web.client.HttpClientErrorException +import spock.lang.See +import spock.lang.Shared + @See("https://github.com/telstra/open-kilda/tree/develop/docs/design/network-discovery") class LinkSpec extends HealthCheckSpecification { + + @Autowired + @Shared + FlowFactory flowFactory + @Value('${antiflap.cooldown}') int antiflapCooldown @@ -120,23 +136,26 @@ class LinkSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().random() and: "Forward flow from source switch to destination switch" - def flow1 = flowHelperV2.randomFlow(switchPair).tap { it.pinned = true } - flowHelperV2.addFlow(flow1) + def flow1 = flowFactory.getBuilder(switchPair).withPinned(true).build().create() + List busyEndpoints = flow1.occupiedEndpoints() and: "Reverse flow from destination switch to source switch" - def flow2 = flowHelperV2.randomFlow(switchPair, false, [flow1]).tap { it.pinned = true } - flowHelperV2.addFlow(flow2) + def flow2 = flowFactory.getBuilder(switchPair, false, busyEndpoints) + .withPinned(true).build() + .create() + busyEndpoints.addAll(flow2.occupiedEndpoints()) and: "Forward flow from source switch to some 'internal' switch" - def islToInternal = pathHelper.getInvolvedIsls(PathHelper.convert(northbound.getFlowPath(flow1.flowId))).first() - def flow3 = flowHelperV2.randomFlow(islToInternal.srcSwitch, islToInternal.dstSwitch, false, [flow1, flow2]) - .tap { it.pinned = true } - flowHelperV2.addFlow(flow3) + def islToInternal = flow1.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() + def flow3 = flowFactory.getBuilder(islToInternal.srcSwitch, islToInternal.dstSwitch, false, busyEndpoints) + .withPinned(true).build() + .create() + busyEndpoints.addAll(flow3.occupiedEndpoints()) and: "Reverse flow from 'internal' switch to source switch" - def flow4 = flowHelperV2.randomFlow(islToInternal.dstSwitch, islToInternal.srcSwitch, false, - [flow1, flow2, flow3]).tap { it.pinned = true } - flowHelperV2.addFlow(flow4) + def flow4 = flowFactory.getBuilder(islToInternal.dstSwitch, islToInternal.srcSwitch, false, busyEndpoints) + .withPinned(true).build() + .create() when: "Get all flows going through the link from source switch to 'internal' switch" def linkFlows = northbound.getLinkFlows(islToInternal.srcSwitch.dpId, islToInternal.srcPort, @@ -146,7 +165,7 @@ class LinkSpec extends HealthCheckSpecification { [flow1, flow2, flow3, flow4].each { assert it.flowId in linkFlows*.id } when: "Get all flows going through the link from some 'internal' switch to destination switch" - def islFromInternal = pathHelper.getInvolvedIsls(PathHelper.convert(northbound.getFlowPath(flow1.flowId))).last() + def islFromInternal = 
flow1.retrieveAllEntityPaths().flowPath.getInvolvedIsls().last() linkFlows = northbound.getLinkFlows(islFromInternal.srcSwitch.dpId, islFromInternal.srcPort, islFromInternal.dstSwitch.dpId, islFromInternal.dstPort) @@ -160,9 +179,9 @@ class LinkSpec extends HealthCheckSpecification { then: "All flows go to 'Down' status" Wrappers.wait(rerouteDelay + WAIT_OFFSET) { - [flow1, flow2, flow3, flow4].each { - assert northboundV2.getFlowStatus(it.flowId).status == FlowState.DOWN - def isls = pathHelper.getInvolvedIsls(northbound.getFlowPath(it.flowId)) + [flow1, flow2, flow3, flow4].each { flow -> + assert flow.retrieveFlowStatus().status == FlowState.DOWN + def isls = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() assert isls.contains(islToInternal) || isls.contains(islToInternal.reversed) } @@ -188,7 +207,7 @@ class LinkSpec extends HealthCheckSpecification { then: "All flows go to 'Up' status" Wrappers.wait(rerouteDelay + PATH_INSTALLATION_TIME) { - [flow1, flow2, flow3, flow4].each { assert northboundV2.getFlowStatus(it.flowId).status == FlowState.UP } + [flow1, flow2, flow3, flow4].each { flow -> assert flow.retrieveFlowStatus().status == FlowState.UP } } } @@ -236,9 +255,7 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (404 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - exc.responseBodyAsString.to(MessageError).errorMessage == - "There is no ISL between $srcSwId-$srcSwPort and $dstSwId-$dstSwPort." + new LinkNotFoundExpectedError("There is no ISL between $srcSwId-$srcSwPort and $dstSwId-$dstSwPort.").matches(exc) where: srcSwId | srcSwPort | dstSwId | dstSwPort | item @@ -254,14 +271,14 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorMessage.contains("Invalid portId:") + new UnableToParseRequestArgumentsException("Invalid portId: ${invalidValue}", + ~/Can not parse arguments when create "get flows for link" request/).matches(exc) where: - srcSwId | srcSwPort | dstSwId | dstSwPort | item - getIsl().srcSwitch.dpId | -1 | getIsl().dstSwitch.dpId | getIsl().dstPort | "src_port" - getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | -2 | "dst_port" - getIsl().srcSwitch.dpId | -3 | getIsl().dstSwitch.dpId | -4 | "src_port & dst_port" + srcSwId | srcSwPort | dstSwId | dstSwPort | item | invalidValue + getIsl().srcSwitch.dpId | -1 | getIsl().dstSwitch.dpId | getIsl().dstPort | "src_port" | -1 + getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | -2 | "dst_port" | -2 + getIsl().srcSwitch.dpId | -3 | getIsl().dstSwitch.dpId | -4 | "src_port & dst_port" | -3 } def "Unable to get flows without full specifying a particular link (#item is missing)"() { @@ -270,15 +287,14 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorMessage.contains("parameter '$item' is not present") + new MissingServletRequestParameterException("Required $itemType parameter \'$item\' is not present").matches(exc) where: - srcSwId | srcSwPort | dstSwId | dstSwPort | item - null | null | null | null | "src_switch" - getIsl().srcSwitch.dpId | null | null | null | "src_port" - getIsl().srcSwitch.dpId | getIsl().srcPort | null | null | "dst_switch" - getIsl().srcSwitch.dpId | getIsl().srcPort | 
getIsl().dstSwitch.dpId | null | "dst_port" + srcSwId | srcSwPort | dstSwId | dstSwPort | item | itemType + null | null | null | null | "src_switch" | "SwitchId" + getIsl().srcSwitch.dpId | null | null | null | "src_port" | "Integer" + getIsl().srcSwitch.dpId | getIsl().srcPort | null | null | "dst_switch" | "SwitchId" + getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | null | "dst_port" | "Integer" } def "Unable to delete a nonexistent link"() { @@ -290,8 +306,8 @@ class LinkSpec extends HealthCheckSpecification { then: "Get 404 NotFound error" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - exc.responseBodyAsString.contains("ISL was not found") + new LinkNotFoundExpectedError("There is no ISL between $parameters.srcSwitch-$parameters.srcPort " + + "and $parameters.dstSwitch-$parameters.dstPort.").matches(exc) } def "Unable to delete an active link"() { @@ -303,8 +319,9 @@ class LinkSpec extends HealthCheckSpecification { then: "Get 400 BadRequest error because the link is active" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.contains("ISL must NOT be in active state") + new LinkIsInIllegalStateExpectedError("Link with following parameters is in illegal state: " + + "source \'${isl.srcSwitch.dpId}_${isl.srcPort}\', destination \'${isl.dstSwitch.dpId}_${isl.dstPort}\'. " + + "ISL must NOT be in active state.").matches(exc) } @Tags(ISL_RECOVER_ON_FAIL) @@ -348,16 +365,14 @@ class LinkSpec extends HealthCheckSpecification { switchPair.paths[1..-1].each { pathHelper.makePathMorePreferable(switchPair.paths.first(), it) } and: "Create a couple of flows going through these switches" - def flow1 = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow1) - def flow1Path = PathHelper.convert(northbound.getFlowPath(flow1.flowId)) + def flow1 = flowFactory.getRandom(switchPair) + def flow1Path = flow1.retrieveAllEntityPaths() - def flow2 = flowHelperV2.randomFlow(switchPair, false, [flow1]) - flowHelperV2.addFlow(flow2) - def flow2Path = PathHelper.convert(northbound.getFlowPath(flow2.flowId)) + def flow2 = flowFactory.getRandom(switchPair, false, FlowState.UP, flow1.occupiedEndpoints()) + def flow2Path = flow2.retrieveAllEntityPaths() - assert flow1Path == switchPair.paths.first() - assert flow2Path == switchPair.paths.first() + assert flow1Path.getPathNodes() == switchPair.paths.first() + assert flow2Path.getPathNodes() == switchPair.paths.first() and: "Delete link props from all links of alternative paths to allow rerouting flows" northbound.deleteLinkProps(northbound.getLinkProps(topology.isls)) @@ -366,15 +381,15 @@ class LinkSpec extends HealthCheckSpecification { switchPair.paths[1..-1].each { pathHelper.makePathMorePreferable(it, switchPair.paths.first()) } when: "Submit request for rerouting flows" - def isl = pathHelper.getInvolvedIsls(flow1Path).first() + def isl = flow1Path.flowPath.getInvolvedIsls().first() def response = northbound.rerouteLinkFlows(isl.srcSwitch.dpId, isl.srcPort, isl.dstSwitch.dpId, isl.dstPort) then: "Flows are rerouted" response.containsAll([flow1, flow2]*.flowId) Wrappers.wait(PATH_INSTALLATION_TIME + WAIT_OFFSET) { - [flow1, flow2].each { assert northboundV2.getFlowStatus(it.flowId).status == FlowState.UP } - assert PathHelper.convert(northbound.getFlowPath(flow1.flowId)) != flow1Path - assert PathHelper.convert(northbound.getFlowPath(flow2.flowId)) != flow2Path + [flow1, flow2].each { flow -> assert flow.retrieveFlowStatus().status == FlowState.UP } + assert 
flow1.retrieveAllEntityPaths() != flow1Path + assert flow2.retrieveAllEntityPaths() != flow2Path } } @@ -384,9 +399,7 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (404 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - exc.responseBodyAsString.to(MessageError).errorMessage == - "There is no ISL between $srcSwId-$srcSwPort and $dstSwId-$dstSwPort." + new LinkNotFoundExpectedError("There is no ISL between $srcSwId-$srcSwPort and $dstSwId-$dstSwPort.").matches(exc) where: srcSwId | srcSwPort | dstSwId | dstSwPort | item @@ -402,14 +415,14 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorMessage.contains("Invalid portId:") + new UnableToParseRequestArgumentsException("Invalid portId: ${invalidValue}", + ~/Can not parse arguments when create "reroute flows for link" request/).matches(exc) where: - srcSwId | srcSwPort | dstSwId | dstSwPort | item - getIsl().srcSwitch.dpId | -1 | getIsl().dstSwitch.dpId | getIsl().dstPort | "src_port" - getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | -2 | "dst_port" - getIsl().srcSwitch.dpId | -3 | getIsl().dstSwitch.dpId | -4 | "src_port & dst_port" + srcSwId | srcSwPort | dstSwId | dstSwPort | item | invalidValue + getIsl().srcSwitch.dpId | -1 | getIsl().dstSwitch.dpId | getIsl().dstPort | "src_port" | -1 + getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | -2 | "dst_port" | -2 + getIsl().srcSwitch.dpId | -3 | getIsl().dstSwitch.dpId | -4 | "src_port & dst_port" | -3 } def "Unable to reroute flows without full specifying a particular link (#item is missing)"() { @@ -418,15 +431,14 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorMessage.contains("parameter '$item' is not present") + new MissingServletRequestParameterException("Required $itemType parameter \'$item\' is not present").matches(exc) where: - srcSwId | srcSwPort | dstSwId | dstSwPort | item - null | null | null | null | "src_switch" - getIsl().srcSwitch.dpId | null | null | null | "src_port" - getIsl().srcSwitch.dpId | getIsl().srcPort | null | null | "dst_switch" - getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | null | "dst_port" + srcSwId | srcSwPort | dstSwId | dstSwPort | item | itemType + null | null | null | null | "src_switch" | "SwitchId" + getIsl().srcSwitch.dpId | null | null | null | "src_port" | "Integer" + getIsl().srcSwitch.dpId | getIsl().srcPort | null | null | "dst_switch" | "SwitchId" + getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | null | "dst_port" | "Integer" } def "Get links with specifying query parameters: #description"() { @@ -473,14 +485,14 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorMessage.contains("Invalid portId:") + new UnableToParseRequestArgumentsException("Invalid portId: ${invalidValue}", + ~/Can not parse arguments when create 'get links' request/).matches(exc) where: - srcSwId | srcSwPort | dstSwId | dstSwPort | item - getIsl().srcSwitch.dpId | -1 | getIsl().dstSwitch.dpId | getIsl().dstPort | "src_port" - 
getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | -2 | "dst_port" - getIsl().srcSwitch.dpId | -3 | getIsl().dstSwitch.dpId | -4 | "src_port & dst_port" + srcSwId | srcSwPort | dstSwId | dstSwPort | item | invalidValue + getIsl().srcSwitch.dpId | -1 | getIsl().dstSwitch.dpId | getIsl().dstPort | "src_port" | -1 + getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | -2 | "dst_port" | -2 + getIsl().srcSwitch.dpId | -3 | getIsl().dstSwitch.dpId | -4 | "src_port & dst_port" | -3 } @Tags([SMOKE]) @@ -515,13 +527,13 @@ class LinkSpec extends HealthCheckSpecification { when: "Create a flow going through this ISL" def flowMaxBandwidth = 12345 - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(isl.srcSwitch, isl.dstSwitch).tap { it.maximumBandwidth = flowMaxBandwidth}) + flowFactory.getBuilder(isl.srcSwitch, isl.dstSwitch).withBandwidth(flowMaxBandwidth).build().create() and: "Update max bandwidth for the link" def offset = 10000 def newMaxBandwidth = initialMaxBandwidth - offset - northbound.updateLinkMaxBandwidth(isl.srcSwitch.dpId, isl.srcPort, isl.dstSwitch.dpId, isl.dstPort, - newMaxBandwidth) + islHelper.updateLinkMaxBandwidthUsingApi(isl, newMaxBandwidth) + def links = northbound.getActiveLinks() def linkProps = northbound.getLinkProps(topology.isls) @@ -543,9 +555,7 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorMessage == "Can't create/update link props" - exc.responseBodyAsString.to(MessageError).errorDescription == "Not enough available bandwidth for operation" + new LinkPropertiesNotUpdatedExpectedError(~/Not enough available bandwidth for operation/).matches(exc) when: "Update max bandwidth to the value equal to max bandwidth of the created flow" northbound.updateLinkMaxBandwidth(isl.srcSwitch.dpId, isl.srcPort, isl.dstSwitch.dpId, isl.dstPort, @@ -597,14 +607,14 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorMessage.matches("Invalid value of (source|destination) port") + new InvalidRequestParametersExpectedError("Invalid value of $invalidEndpoint port", + ~/Port number can't be negative/).matches(exc) where: - srcSwId | srcSwPort | dstSwId | dstSwPort | item - getIsl().srcSwitch.dpId | -1 | getIsl().dstSwitch.dpId | getIsl().dstPort | "src_port" - getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | -2 | "dst_port" - getIsl().srcSwitch.dpId | -3 | getIsl().dstSwitch.dpId | -4 | "src_port & dst_port" + srcSwId | srcSwPort | dstSwId | dstSwPort | item | invalidEndpoint + getIsl().srcSwitch.dpId | -1 | getIsl().dstSwitch.dpId | getIsl().dstPort | "src_port" | "source" + getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | -2 | "dst_port" | "destination" + getIsl().srcSwitch.dpId | -3 | getIsl().dstSwitch.dpId | -4 | "src_port & dst_port" | "source" } def "Unable to update max bandwidth without full specifying a particular link (#item is missing)"() { @@ -613,37 +623,33 @@ class LinkSpec extends HealthCheckSpecification { then: "An error is received (400 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorMessage.contains("parameter '$item' is not present") + new MissingServletRequestParameterException("Required 
$itemType parameter \'$item\' is not present").matches(exc) where: - srcSwId | srcSwPort | dstSwId | dstSwPort | item - null | null | null | null | "src_switch" - getIsl().srcSwitch.dpId | null | null | null | "src_port" - getIsl().srcSwitch.dpId | getIsl().srcPort | null | null | "dst_switch" - getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | null | "dst_port" + srcSwId | srcSwPort | dstSwId | dstSwPort | item | itemType + null | null | null | null | "src_switch" | "SwitchId" + getIsl().srcSwitch.dpId | null | null | null | "src_port" | "Integer" + getIsl().srcSwitch.dpId | getIsl().srcPort | null | null | "dst_switch" | "SwitchId" + getIsl().srcSwitch.dpId | getIsl().srcPort | getIsl().dstSwitch.dpId | null | "dst_port" | "Integer" } @Tags(ISL_RECOVER_ON_FAIL) def "Unable to delete inactive link with flowPath"() { given: "An inactive link with flow on it" def switchPair = switchPairs.all().neighbouring().random() - def flow = flowHelperV2.randomFlow(switchPair) - flow.pinned = true - flowHelperV2.addFlow(flow) - def flowPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) + def flow = flowFactory.getBuilder(switchPair).withPinned(true).build().create() - def isl = pathHelper.getInvolvedIsls(flowPath)[0] + def isl = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() islHelper.breakIsl(isl) when: "Try to delete the link" northbound.deleteLink(islUtils.toLinkParameters(isl)) - def linkIsActive = false then: "Get 400 BadRequest error because the link with flow path" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.contains("This ISL is busy by flow paths.") + new LinkIsInIllegalStateExpectedError("Link with following parameters is in illegal state: " + + "source \'${isl.srcSwitch.dpId}_${isl.srcPort}\', destination \'${isl.dstSwitch.dpId}_${isl.dstPort}\'. 
" + + "This ISL is busy by flow paths.").matches(exc) } @Tags(ISL_RECOVER_ON_FAIL) @@ -652,10 +658,9 @@ class LinkSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().neighbouring().withAtLeastNNonOverlappingPaths(2).random() and: "An active link with flow on it" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) - def flowPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) - def isl = pathHelper.getInvolvedIsls(flowPath)[0] + def flow = flowFactory.getRandom(switchPair) + def flowPath = flow.retrieveAllEntityPaths() + def isl = flowPath.flowPath.getInvolvedIsls().first() when: "Delete the link using force" def response = islHelper.deleteIsl(isl, true) @@ -666,11 +671,11 @@ class LinkSpec extends HealthCheckSpecification { !islUtils.getIslInfo(isl.reversed) and: "Flow is not rerouted and UP" - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath - northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + flow.retrieveAllEntityPaths() == flowPath + flow.retrieveFlowStatus().status == FlowState.UP and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Removed link becomes active again (port brought DOWN/UP)" antiflap.portDown(isl.srcSwitch.dpId, isl.srcPort) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/UnstableIslSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/UnstableIslSpec.groovy index cb12ac778f7..73bdd10af9e 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/UnstableIslSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/links/UnstableIslSpec.groovy @@ -1,14 +1,5 @@ package org.openkilda.functionaltests.spec.links -import org.openkilda.functionaltests.HealthCheckSpecification -import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper -import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.model.SwitchFeature -import org.springframework.beans.factory.annotation.Value - -import java.time.Instant - import static org.junit.jupiter.api.Assumptions.assumeTrue import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL @@ -17,8 +8,24 @@ import static org.openkilda.messaging.info.event.IslChangeType.FAILED import static org.openkilda.testing.Constants.WAIT_OFFSET import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW +import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.model.SwitchFeature + +import org.springframework.beans.factory.annotation.Autowired +import org.springframework.beans.factory.annotation.Value +import spock.lang.Shared + +import java.time.Instant + class UnstableIslSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + @Value('${pce.isl.cost.when.unstable}') int islUnstableCost @@ -123,12 +130,11 @@ class UnstableIslSpec extends HealthCheckSpecification { Wrappers.wait(WAIT_OFFSET) { assert 
northbound.getLink(islToUpdate).cost == newCost.toInteger() } when: "Create a flow" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) then: "Flow is created on the stable path(secondPath)" Wrappers.wait(rerouteDelay + WAIT_OFFSET) { - assert PathHelper.convert(northbound.getFlowPath(flow.flowId)) == secondPath + assert flow.retrieveAllEntityPaths().getPathNodes() == secondPath } when: "Mark first path as stable(update the 'time_unstable' field in db)" @@ -137,14 +143,14 @@ class UnstableIslSpec extends HealthCheckSpecification { and: "Reroute the flow" Wrappers.wait(rerouteDelay + WAIT_OFFSET) { - with(northboundV2.rerouteFlow(flow.flowId)) { + with(flow.reroute()) { it.rerouted } } then: "Flow is rerouted" Wrappers.wait(rerouteDelay + WAIT_OFFSET) { - assert PathHelper.convert(northbound.getFlowPath(flow.flowId)) == firstPath + assert flow.retrieveAllEntityPaths().getPathNodes() == firstPath } } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/logging/CheckLoggingSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/logging/CheckLoggingSpec.groovy index 669f9c7839a..ca4ea877a5d 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/logging/CheckLoggingSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/logging/CheckLoggingSpec.groovy @@ -1,22 +1,24 @@ package org.openkilda.functionaltests.spec.logging -import groovy.util.logging.Slf4j +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID + import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.SwitchNotFoundExpectedError +import org.openkilda.functionaltests.error.flow.FlowNotFoundExpectedError import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.messaging.error.MessageError import org.openkilda.testing.service.elastic.ElasticQueryBuilder import org.openkilda.testing.service.elastic.ElasticService import org.openkilda.testing.service.elastic.model.KildaTags + +import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Value import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative import spock.lang.Shared -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID - @Slf4j @Tags([SMOKE]) @Narrative("This specification ensures that all logging facilities are up and running after Kilda deployment") @@ -53,8 +55,7 @@ class CheckLoggingSpec extends HealthCheckSpecification { then: "An error is received (404 code)" def switchExc = thrown(HttpClientErrorException) - switchExc.rawStatusCode == 404 - switchExc.responseBodyAsString.to(MessageError).errorMessage.contains(switchErrorMsg) + new SwitchNotFoundExpectedError(NON_EXISTENT_SWITCH_ID).matches(switchExc) and: "Northbound should log these actions within 30 seconds" int timeout = 31 @@ -75,8 +76,7 @@ class CheckLoggingSpec extends HealthCheckSpecification { then: "An error is received (404 code)" def flowExc = thrown(HttpClientErrorException) - flowExc.rawStatusCode == 404 - 
flowExc.responseBodyAsString.to(MessageError).errorMessage.contains(flowErrorMsg(flowId)) + new FlowNotFoundExpectedError("Can not get flow: Flow ${flowId} not found", ~/Flow not found/).matches(flowExc) and: "Storm should log these actions within 30 seconds" int timeout = 31 diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathCheckSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathCheckSpec.groovy index 68fca9ac681..71f26b164b8 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathCheckSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathCheckSpec.groovy @@ -1,23 +1,33 @@ package org.openkilda.functionaltests.spec.network -import org.openkilda.functionaltests.HealthCheckSpecification -import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.model.Path -import org.openkilda.messaging.info.event.PathNode -import spock.lang.See - import static groovyx.gpars.GParsPool.withPool import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE import static org.openkilda.model.FlowEncapsulationType.TRANSIT_VLAN import static org.openkilda.model.FlowEncapsulationType.VXLAN -import static org.openkilda.model.PathComputationStrategy.COST + +import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.Path +import org.openkilda.functionaltests.helpers.model.PathComputationStrategy +import org.openkilda.functionaltests.model.stats.Direction +import org.openkilda.messaging.info.event.PathNode + +import org.springframework.beans.factory.annotation.Autowired +import spock.lang.See +import spock.lang.Shared @See("https://github.com/telstra/open-kilda/tree/develop/docs/design/solutions/path-validation/path-validation.md") class PathCheckSpec extends HealthCheckSpecification { private static final String PCE_PATH_COMPUTATION_SUCCESS_MESSAGE = "The path has been computed successfully" + + @Autowired + @Shared + FlowFactory flowFactory + @Tags(SMOKE) def "No path validation errors for valid path without limitations"() { given: "Path for non-neighbouring switches" @@ -74,11 +84,12 @@ class PathCheckSpec extends HealthCheckSpecification { withPool { switchPair.paths.findAll { it != path }.eachParallel { pathHelper.makePathMorePreferable(path, it) } } - def flow = flowHelperV2.addFlow( - flowHelperV2.randomFlow(switchPair, false).tap { it.pathComputationStrategy = COST }) + def flow = flowFactory.getBuilder(switchPair, false) + .withPathComputationStrategy(PathComputationStrategy.COST).build() + .create() when: "Check the path (equal to the flow) if the computation strategy would be LATENCY and max_latency would be too low" - def pathCheckResult = pathHelper.getPathCheckResult(path, flow.getFlowId(), 1, 2) + def pathCheckResult = pathHelper.getPathCheckResult(path, flow.flowId, 1, 2) then: "Path check result returns latency validation errors (1 per tier1 and tier 2, per forward and revers paths)" verifyAll(pathCheckResult) { @@ -93,26 +104,26 @@ class PathCheckSpec extends HealthCheckSpecification { def "Path intersection check errors are returned for each segment of existing 
flow"() { given: "Flow has been created successfully" def switchPair = switchPairs.all().nonNeighbouring().first() - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(switchPair, false)) - def flowPathDetails = northbound.getFlowPath(flow.flowId) + def flow = flowFactory.getRandom(switchPair, false) + def flowPathDetails = flow.retrieveAllEntityPaths() and: "Path with intersected segment(s) for verification has been collected" - def flowForwardPath = pathHelper.getPathNodes(flowPathDetails.forwardPath) + def flowForwardPath = flowPathDetails.getPathNodes() //at least one common ISl def intersectingPath = switchPair.paths.findAll { it.size() > 4 && it.intersect(flowForwardPath).size() > 1 }.first() and: "Involved ISls have been collected" - def flowInvolvedISLs = new Path(flowPathDetails.forwardPath + flowPathDetails.reversePath, topology).getInvolvedIsls() + def flowInvolvedISLs = flowPathDetails.flowPath.getInvolvedIsls(Direction.FORWARD) + flowPathDetails.flowPath.getInvolvedIsls(Direction.REVERSE) def intersectedPathInvolvedISLs = new Path(pathHelper.convertToPathNodePayload(intersectingPath), topology).getInvolvedIsls() def commonISLs = flowInvolvedISLs.intersect(intersectedPathInvolvedISLs) assert !commonISLs.isEmpty(), "Path for verification has no intersected segment(s) with the flow." when: "Check if the potential path has intersections with existing one" - def pathCheckResult = pathHelper.getPathCheckResult(intersectingPath, flow.getFlowId()) + def pathCheckResult = pathHelper.getPathCheckResult(intersectingPath, flow.flowId) then: "Path check reports expected amount of intersecting segments" verifyAll (pathCheckResult) { - getValidationMessages().findAll { it.contains("The following segment intersects with the flow ${flow.getFlowId()}") }.size() + getValidationMessages().findAll { it.contains("The following segment intersects with the flow ${flow.flowId}") }.size() == commonISLs.size() getPceResponse() == PCE_PATH_COMPUTATION_SUCCESS_MESSAGE } @@ -126,15 +137,16 @@ class PathCheckSpec extends HealthCheckSpecification { .includeSwitch(firstSwitchPair.dst).random() and:"Two flows in one diverse group have been created" - def flow1 = flowHelperV2.addFlow(flowHelperV2.randomFlow(firstSwitchPair, false)) - def flow2 = flowHelperV2.addFlow(flowHelperV2.randomFlow(secondSwitchPair, false) - .tap {it.diverseFlowId = flow1.flowId}) + def flow1 = flowFactory.getRandom(firstSwitchPair, false) + def flow2 = flowFactory.getBuilder(secondSwitchPair, false) + .withDiverseFlow(flow1.flowId).build() + .create() and: "Paths for both flows have been collected" - def flow1Path = pathHelper.getPathNodes(northbound.getFlowPath(flow1.flowId).forwardPath) + def flow1Path = flow1.retrieveAllEntityPaths().getPathNodes() def flow2Path = flow2.source.switchId == flow1.destination.switchId ? 
- pathHelper.getPathNodes(northbound.getFlowPath(flow2.flowId).forwardPath) : - pathHelper.getPathNodes(northbound.getFlowPath(flow2.flowId).reversePath) + flow2.retrieveAllEntityPaths().getPathNodes(Direction.FORWARD) : + flow2.retrieveAllEntityPaths().getPathNodes(Direction.REVERSE) when: "Check potential path that has NO intersection with both flows from diverse group" LinkedList pathToCheck = switchPairs.all().neighbouring().excludePairs([firstSwitchPair, secondSwitchPair]) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathComputationSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathComputationSpec.groovy index 7f2bf446c70..49fc3eb69e9 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathComputationSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathComputationSpec.groovy @@ -1,22 +1,32 @@ package org.openkilda.functionaltests.spec.network +import static org.openkilda.functionaltests.helpers.model.PathComputationStrategy.* + +import org.openkilda.functionaltests.helpers.factory.FlowFactory + import org.junit.jupiter.api.parallel.ResourceLock import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.ResourceLockConstants import org.openkilda.functionaltests.helpers.Wrappers import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.PathComputationStrategy import org.openkilda.northbound.dto.v1.flows.FlowPatchDto import static org.openkilda.testing.Constants.WAIT_OFFSET +import org.springframework.beans.factory.annotation.Autowired +import spock.lang.Shared + @ResourceLock(ResourceLockConstants.DEFAULT_PATH_COMPUTATION) class PathComputationSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + def "Default path computation strategy is used when flow does not specify it"() { given: "Default path computation strategy is COST" - kildaConfiguration.updatePathComputationStrategy(PathComputationStrategy.COST) + kildaConfiguration.updatePathComputationStrategy(COST.toString()) and: "Switch pair with two paths at least" def swPair = switchPairs.all().withAtLeastNPaths(2).random() @@ -29,38 +39,40 @@ class PathComputationSpec extends HealthCheckSpecification { latencyIsls.each { islHelper.updateIslLatency(it, 1) } when: "Create flow without selecting path strategy" - def flow = flowHelperV2.randomFlow(swPair).tap { it.pathComputationStrategy = null } - def createResponse = flowHelperV2.addFlow(flow) + def flowCreateResponse = flowFactory.getBuilder(swPair) + .withPathComputationStrategy(null).build().sendCreateRequest() + def flow = flowCreateResponse.waitForBeingInState(FlowState.UP) then: "Flow is created with 'Cost' strategy (current default)" - createResponse.pathComputationStrategy == PathComputationStrategy.COST.toString().toLowerCase() - northboundV2.getFlow(flow.flowId).pathComputationStrategy == PathComputationStrategy.COST.toString().toLowerCase() + flowCreateResponse.pathComputationStrategy == COST + flow.pathComputationStrategy == COST and: "Flow is actually built on the path with the least cost" - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == costEffectivePath + flow.retrieveAllEntityPaths().getPathNodes() == costEffectivePath when: "Update default strategy to LATENCY" - 
kildaConfiguration.updatePathComputationStrategy(PathComputationStrategy.LATENCY) + kildaConfiguration.updatePathComputationStrategy(LATENCY.toString()) then: "Existing flow remains with COST strategy and on the same path" - northboundV2.getFlow(flow.flowId).pathComputationStrategy == PathComputationStrategy.COST.toString().toLowerCase() - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == costEffectivePath + flow.retrieveDetails().pathComputationStrategy == COST + flow.retrieveAllEntityPaths().getPathNodes() == costEffectivePath and: "Manual reroute of the flow responds that flow is already on the best path" - !northboundV2.rerouteFlow(flow.flowId).rerouted + !flow.reroute().rerouted when: "Create a new flow without specifying path computation strategy" - def flow2 = flowHelperV2.randomFlow(swPair).tap { it.pathComputationStrategy = null } + def flow2 = flowFactory.getBuilder(swPair).withPathComputationStrategy(null).build() //re-set latencies in DB one more time in case they were recalculated automatically to higher values latencyIsls.each { database.updateIslLatency(it, 1) } - def createResponse2 = flowHelperV2.addFlow(flow2) + def createResponse2 = flow2.sendCreateRequest() + flow2 = flow2.waitForBeingInState(FlowState.UP) then: "New flow is created with 'Latency' strategy (current default)" - createResponse2.pathComputationStrategy == PathComputationStrategy.LATENCY.toString().toLowerCase() - northboundV2.getFlow(flow2.flowId).pathComputationStrategy == PathComputationStrategy.LATENCY.toString().toLowerCase() + createResponse2.pathComputationStrategy == LATENCY + flow2.pathComputationStrategy == LATENCY and: "New flow actually uses path with the least latency (ignoring cost)" - pathHelper.convert(northbound.getFlowPath(flow2.flowId)) == latencyEffectivePath + flow2.retrieveAllEntityPaths().getPathNodes() == latencyEffectivePath } def "Flow path computation strategy can be updated from LATENCY to COST"() { @@ -75,19 +87,18 @@ class PathComputationSpec extends HealthCheckSpecification { latencyIsls.each { islHelper.updateIslLatency(it, 1) } when: "Create flow using Latency strategy" - def flow = flowHelperV2.randomFlow(swPair) - .tap { it.pathComputationStrategy = PathComputationStrategy.LATENCY.toString() } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withPathComputationStrategy(LATENCY).build() + .create() then: "Flow is built on the least-latency path" - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == latencyEffectivePath + flow.retrieveAllEntityPaths().getPathNodes() == latencyEffectivePath when: "Update flow path strategy to 'Cost'" - flowHelperV2.updateFlow(flow.flowId, - flow.tap { it.pathComputationStrategy = PathComputationStrategy.COST.toString() }) + flow.update(flow.tap{ it.pathComputationStrategy = COST }) then: "Flow path has changed to the least-cost path" - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == costEffectivePath + flow.retrieveAllEntityPaths().getPathNodes() == costEffectivePath } def "Target flow path computation strategy is not applied immediately in case flow was updated partially"() { @@ -95,32 +106,29 @@ class PathComputationSpec extends HealthCheckSpecification { def swPair = switchPairs.all().withAtLeastNPaths(2).random() and: "A flow with cost strategy" - def latencyStrategy = PathComputationStrategy.LATENCY.toString().toLowerCase() - def costStrategy = PathComputationStrategy.COST.toString().toLowerCase() - def flow = flowHelperV2.randomFlow(swPair).tap { it.pathComputationStrategy = 
costStrategy } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withPathComputationStrategy(COST).build() + .create() when: "Update path computation strategy(cost -> latency) via partialUpdate" - northbound.partialUpdate(flow.flowId, new FlowPatchDto().tap { - it.targetPathComputationStrategy = latencyStrategy - }) + flow.partialUpdateV1(new FlowPatchDto().tap{ it.targetPathComputationStrategy = LATENCY.toString()}) then: "Path computation strategy is not changed" - with(northbound.getFlow(flow.flowId)) { - pathComputationStrategy == costStrategy - targetPathComputationStrategy == latencyStrategy + with(flow.retrieveDetailsV1()) { + pathComputationStrategy == COST + targetPathComputationStrategy == LATENCY.toString().toLowerCase() } and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Sync the flow" - northbound.synchronizeFlow(flow.flowId) - Wrappers.wait(WAIT_OFFSET / 2) { assert northbound.getFlowStatus(flow.flowId).status == FlowState.UP } + flow.sync() + Wrappers.wait(WAIT_OFFSET / 2) { assert flow.retrieveFlowStatus().status == FlowState.UP } then: "Path computation strategy is updated and targetPathComputationStrategy is deleted" - with(northbound.getFlow(flow.flowId)) { - pathComputationStrategy == latencyStrategy + with(flow.retrieveDetails()) { + pathComputationStrategy == LATENCY !targetPathComputationStrategy } } @@ -130,37 +138,36 @@ class PathComputationSpec extends HealthCheckSpecification { def swPair = switchPairs.all().withAtLeastNPaths(2).random() and: "A flow with cost strategy" - def latencyStrategy = PathComputationStrategy.LATENCY.toString().toLowerCase() - def costStrategy = PathComputationStrategy.COST.toString().toLowerCase() - def flow = flowHelperV2.randomFlow(swPair).tap { it.pathComputationStrategy = costStrategy } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withPathComputationStrategy(COST).build() + .create() when: "Update path computation strategy(cost -> latency) via partialUpdate" - northbound.partialUpdate(flow.flowId, new FlowPatchDto().tap { - it.targetPathComputationStrategy = latencyStrategy + flow.partialUpdateV1(new FlowPatchDto().tap { + it.targetPathComputationStrategy = LATENCY.toString() }) and: "Reroute the flow" - northboundV2.rerouteFlow(flow.flowId) - Wrappers.wait(WAIT_OFFSET / 2) { assert northbound.getFlowStatus(flow.flowId).status == FlowState.UP } + flow.reroute() + Wrappers.wait(WAIT_OFFSET / 2) { assert flow.retrieveFlowStatus().status == FlowState.UP } then: "Path computation strategy is updated and targetPathComputationStrategy is deleted" - with(northbound.getFlow(flow.flowId)) { - pathComputationStrategy == latencyStrategy + with(flow.retrieveDetails()) { + pathComputationStrategy == LATENCY !targetPathComputationStrategy } when: "Update path computation strategy(latency -> cost) via partialUpdate" - northbound.partialUpdate(flow.flowId, new FlowPatchDto().tap { - it.targetPathComputationStrategy = costStrategy + flow.partialUpdateV1(new FlowPatchDto().tap { + it.targetPathComputationStrategy = COST.toString() }) and: "Update the flow" - flowHelperV2.updateFlow(flow.flowId, flow.tap { it.pathComputationStrategy = null }) + flow.update(flow.tap { it.pathComputationStrategy = null }) then: "Path computation strategy is updated and targetPathComputationStrategy is deleted" - with(northbound.getFlow(flow.flowId)) { - pathComputationStrategy == costStrategy + 
with(flow.retrieveDetails()) { + pathComputationStrategy == COST !targetPathComputationStrategy } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathsSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathsSpec.groovy index 84530f57910..d44f45d67c1 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathsSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/network/PathsSpec.groovy @@ -1,16 +1,5 @@ package org.openkilda.functionaltests.spec.network -import org.openkilda.functionaltests.HealthCheckSpecification -import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.functionaltests.helpers.model.SwitchPair -import org.openkilda.messaging.error.MessageError -import org.openkilda.model.PathComputationStrategy -import org.openkilda.northbound.dto.v1.switches.SwitchPropertiesDto -import org.openkilda.testing.model.topology.TopologyDefinition.Switch -import org.openkilda.testing.service.northbound.NorthboundService -import org.springframework.web.client.HttpClientErrorException - import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE import static org.openkilda.model.FlowEncapsulationType.TRANSIT_VLAN @@ -22,11 +11,28 @@ import static org.openkilda.testing.service.northbound.payloads.PathRequestParam import static org.openkilda.testing.service.northbound.payloads.PathRequestParameter.MAX_PATH_COUNT import static org.openkilda.testing.service.northbound.payloads.PathRequestParameter.PATH_COMPUTATION_STRATEGY import static org.openkilda.testing.service.northbound.payloads.PathRequestParameter.PROTECTED -import static org.springframework.http.HttpStatus.BAD_REQUEST -import static org.springframework.http.HttpStatus.NOT_FOUND + +import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.PathsNotReturnedExpectedError +import org.openkilda.functionaltests.error.SwitchNotFoundExpectedError +import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.SwitchPair +import org.openkilda.model.PathComputationStrategy +import org.openkilda.northbound.dto.v1.switches.SwitchPropertiesDto +import org.openkilda.testing.model.topology.TopologyDefinition.Switch + +import org.springframework.beans.factory.annotation.Autowired +import org.springframework.web.client.HttpClientErrorException +import spock.lang.Shared class PathsSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + @Tags(SMOKE) def "Get paths between not neighboring switches"() { given: "Two active not neighboring switches" @@ -35,7 +41,7 @@ class PathsSpec extends HealthCheckSpecification { .random() and: "Create a flow to reduce available bandwidth on some path between these two switches" - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(switchPair)) + flowFactory.getRandom(switchPair) when: "Get paths between switches" def paths = switchPair.getPathsFromApi() @@ -64,7 +70,7 @@ class PathsSpec extends HealthCheckSpecification { .random() and: "Create a flow to reduce available bandwidth on some path between these 
two switches" - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(switchPair)) + flowFactory.getRandom(switchPair) when: "Get paths between switches using the LATENCY strategy" def paths = switchPair.getPathsFromApi([(PATH_COMPUTATION_STRATEGY): LATENCY]) @@ -80,20 +86,28 @@ class PathsSpec extends HealthCheckSpecification { } @Tags(LOW_PRIORITY) - def "Unable to get paths for #problemDescription"() { + def "Unable to get paths for non-existing switch"() { when: "Try to get paths between #problemDescription" - switchPair(topology.getSwitches().first(), northbound).getPathsFromApi() + def switchPair = SwitchPair.withNonExistingDstSwitch(topology.getSwitches().first(), northbound) + switchPair.getPathsFromApi() + + then: + "Get error because request is invalid" + def exc = thrown(HttpClientErrorException) + new SwitchNotFoundExpectedError(switchPair.dst.dpId, ~/Switch not found./).matches(exc) + } + @Tags(LOW_PRIORITY) + def "Unable to get paths for one switch"() { + when: "Try to get paths between #problemDescription" + def switchPair = SwitchPair.singleSwitchInstance(topology.getSwitches().first(), northbound) + switchPair.getPathsFromApi() then: "Get error because request is invalid" def exc = thrown(HttpClientErrorException) - exc.statusCode == expectedStatus + new PathsNotReturnedExpectedError("Source and destination switch IDs are equal: '${switchPair.src.dpId}'", ~/Bad request./).matches(exc) - where: - problemDescription | switchPair |expectedStatus - "one switch" | { Switch sw, NorthboundService nb -> SwitchPair.singleSwitchInstance(sw, nb)}| BAD_REQUEST - "non-existing switch" |{ Switch sw, NorthboundService nb -> SwitchPair.withNonExistingDstSwitch(sw, nb)} | NOT_FOUND } def "Unable to get paths with max_latency strategy without max latency parameter"() { @@ -107,11 +121,8 @@ class PathsSpec extends HealthCheckSpecification { then: "Human readable error is returned" def error = thrown(HttpClientErrorException) - error.statusCode == BAD_REQUEST - def errorDetails = error.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Missed max_latency parameter." - errorDetails.errorDescription == "MAX_LATENCY path computation strategy requires non null max_latency " + - "parameter. If max_latency will be equal to 0 LATENCY strategy will be used instead of MAX_LATENCY." + new PathsNotReturnedExpectedError("Missed max_latency parameter.", ~/MAX_LATENCY path computation strategy requires non null max_latency \ +parameter. If max_latency will be equal to 0 LATENCY strategy will be used instead of MAX_LATENCY./).matches(error) } @Tags(LOW_PRIORITY) @@ -134,11 +145,10 @@ class PathsSpec extends HealthCheckSpecification { then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.statusCode == BAD_REQUEST - def errorDetails = exc.responseBodyAsString.to(MessageError) - errorDetails.errorMessage == "Switch $switchPair.src.dpId doesn't support $VXLAN " + + new PathsNotReturnedExpectedError("Switch $switchPair.src.dpId doesn't support $VXLAN " + "encapsulation type. Choose one of the supported encapsulation types $encapsTypesWithoutVxlan or " + - "update switch properties and add needed encapsulation type." 
+ "update switch properties and add needed encapsulation type.", ~/Bad request./).matches(exc) + } @Tags(LOW_PRIORITY) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42FlowRttSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42FlowRttSpec.groovy index 22512673d9e..b6c0a34c033 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42FlowRttSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42FlowRttSpec.groovy @@ -1,9 +1,34 @@ package org.openkilda.functionaltests.spec.server42 +import static java.util.concurrent.TimeUnit.SECONDS +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.ResourceLockConstants.S42_TOGGLE +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.functionaltests.helpers.model.FlowEncapsulationType.VXLAN +import static org.openkilda.functionaltests.model.stats.Direction.FORWARD +import static org.openkilda.functionaltests.model.stats.Direction.REVERSE +import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RTT +import static org.openkilda.functionaltests.model.stats.Origin.FLOW_MONITORING +import static org.openkilda.functionaltests.model.stats.Origin.SERVER_42 +import static org.openkilda.functionaltests.model.switches.Manufacturer.WB5164 +import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE +import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.SERVER42_STATS_LAG +import static org.openkilda.testing.Constants.STATS_FROM_SERVER42_LOGGING_TIMEOUT +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.builder.FlowBuilder +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowDirection +import org.openkilda.functionaltests.helpers.model.FlowExtended import org.openkilda.functionaltests.helpers.model.SwitchPair import org.openkilda.functionaltests.helpers.model.SwitchPairs import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory @@ -13,7 +38,6 @@ import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType import org.openkilda.northbound.dto.v2.flows.FlowPatchEndpoint import org.openkilda.northbound.dto.v2.flows.FlowPatchV2 -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.northbound.dto.v2.flows.SwapFlowPayload import groovy.time.TimeCategory @@ -26,27 +50,6 @@ import spock.lang.ResourceLock import spock.lang.Shared import spock.util.mop.Use -import static java.util.concurrent.TimeUnit.SECONDS -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static 
org.openkilda.functionaltests.ResourceLockConstants.S42_TOGGLE -import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT -import static org.openkilda.functionaltests.model.stats.Direction.FORWARD -import static org.openkilda.functionaltests.model.stats.Direction.REVERSE -import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RTT -import static org.openkilda.functionaltests.model.stats.Origin.FLOW_MONITORING -import static org.openkilda.functionaltests.model.stats.Origin.SERVER_42 -import static org.openkilda.functionaltests.model.switches.Manufacturer.WB5164 -import static org.openkilda.model.FlowEncapsulationType.VXLAN -import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_OUTPUT_VLAN_COOKIE -import static org.openkilda.model.cookie.Cookie.SERVER_42_FLOW_RTT_OUTPUT_VXLAN_COOKIE -import static org.openkilda.testing.Constants.RULES_DELETION_TIME -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.Constants.SERVER42_STATS_LAG -import static org.openkilda.testing.Constants.STATS_FROM_SERVER42_LOGGING_TIMEOUT -import static org.openkilda.testing.Constants.WAIT_OFFSET - @Use(TimeCategory) @Narrative("Verify that statistic is collected from server42 Rtt") /* On local environment these tests will use stubs without sending real rtt packets across the network. @@ -58,6 +61,11 @@ switch timestamps, thus we may see no stats in otsdb if time on switch is incorr @Isolated //s42 toggle affects all switches in the system, may lead to excess rules during sw validation in other tests class Server42FlowRttSpec extends HealthCheckSpecification { + + @Autowired + @Shared + FlowFactory flowFactory + @Shared @Autowired FlowStats flowStats @@ -73,10 +81,10 @@ class Server42FlowRttSpec extends HealthCheckSpecification { @IterationTag(tags = [HARDWARE], iterationNameRegex = /(NS|WB)/) def "Create a #flowDescription flow with server42 Rtt feature and check datapoints in tsdb"() { given: "Two active switches, src has server42 connected" - def switchPair = switchPairFilter(switchPairs.all().withBothSwitchesConnectedToServer42()).random() + SwitchPair switchPair = switchPairFilter(switchPairs.all().withBothSwitchesConnectedToServer42()).random() when: "Set server42FlowRtt toggle to true" - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on src and dst switches" @@ -84,40 +92,30 @@ class Server42FlowRttSpec extends HealthCheckSpecification { [server42Switch, switchPair.dst].each { switchHelper.setServer42FlowRttForSwitch(it, true) } and: "Create a flow" - def flow = flowHelperV2.randomFlow(switchPair) - flow.tap(flowTap) - flowHelperV2.addFlow(flow) + FlowExtended flow = expectedFlowEntity(flowFactory.getBuilder(switchPair)).create() then: "Check if stats for forward are available" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() } where: - flowDescription | switchPairFilter | flowTap - "default flow" | { SwitchPairs swPairs -> swPairs } | { FlowRequestV2 fl -> - fl.source.vlanId = 0 - fl.destination.vlanId = 0 - } + flowDescription | 
switchPairFilter | expectedFlowEntity + "default flow" | { SwitchPairs swPairs -> swPairs } | { FlowBuilder builder -> builder.withSourceVlan(0).withDestinationVlan(0).build() } + "protected flow" | { SwitchPairs swPairs -> swPairs - .withAtLeastNNonOverlappingPaths(2)}| { FlowRequestV2 fl -> - fl.allocateProtectedPath = true } + .withAtLeastNNonOverlappingPaths(2) } | { FlowBuilder builder -> builder.withProtectedPath(true).build() } + "vxlan flow on NS switch" | { SwitchPairs swPairs -> swPairs - .withBothSwitchesVxLanEnabled() - .withSourceSwitchNotManufacturedBy(WB5164) - } | { FlowRequestV2 fl -> - fl.encapsulationType = VXLAN } - "qinq flow" | { SwitchPairs swPairs -> swPairs } | { FlowRequestV2 fl -> - fl.source.vlanId = 10 - fl.source.innerVlanId = 100 - fl.destination.vlanId = 20 - fl.destination.innerVlanId = 200 - } + .withBothSwitchesVxLanEnabled() + .withSourceSwitchNotManufacturedBy(WB5164) } | { FlowBuilder builder -> builder.withEncapsulationType(VXLAN).build() } + + "qinq flow" | { SwitchPairs swPairs -> swPairs } | { FlowBuilder builder -> builder.withSourceVlan(10).withSourceInnerVlan(100) + .withDestinationVlan(20).withDestinationInnerVlan(200).build() } "vxlan flow on WB switch" | { SwitchPairs swPairs -> swPairs - .withBothSwitchesVxLanEnabled() - .withSourceSwitchManufacturedBy(WB5164) - } | { FlowRequestV2 fl -> - fl.encapsulationType = VXLAN } + .withBothSwitchesVxLanEnabled() + .withSourceSwitchManufacturedBy(WB5164) } | { FlowBuilder builder -> builder.withEncapsulationType(VXLAN).build() } + } def "Flow rtt stats are available in forward and reverse directions for new flows"() { @@ -125,7 +123,7 @@ class Server42FlowRttSpec extends HealthCheckSpecification { SwitchPair switchPair = switchPairs.all().withBothSwitchesConnectedToServer42().random() and: "server42FlowRtt feature toggle is set to true" - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on src and dst switches" @@ -133,16 +131,14 @@ class Server42FlowRttSpec extends HealthCheckSpecification { [server42Switch, switchPair.dst].each { switchHelper.setServer42FlowRttForSwitch(it, true) } when: "Create a flow for forward metric" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) and: "Create a reversed flow for backward metric" - def reversedFlow = flowHelperV2.randomFlow(switchPair.reversed, false, [flow]).tap { - //don't pick same ports as flow1 in order to get expected amount of s42_input rules - source.portNumber = (topology.getAllowedPortsForSwitch(switchPair.dst) - flow.destination.portNumber)[0] - destination.portNumber = (topology.getAllowedPortsForSwitch(switchPair.src) - flow.source.portNumber)[0] - } - flowHelperV2.addFlow(reversedFlow) + def reversedFlow = flowFactory.getBuilder(switchPair.reversed, false, flow.occupiedEndpoints()) + //don't pick same ports as flow1 in order to get expected amount of s42_input rules + .withSourcePort((topology.getAllowedPortsForSwitch(switchPair.dst) - flow.destination.portNumber)[0]) + .withDestinationPort((topology.getAllowedPortsForSwitch(switchPair.src) - flow.source.portNumber)[0]).build() + .create() then: "Server42 input/ingress rules are installed" Wrappers.wait(RULES_INSTALLATION_TIME) { @@ -158,101 +154,13 @@ class Server42FlowRttSpec extends HealthCheckSpecification { } and: "Involved switches pass switch validation" - 
switchHelper.validateAndCollectFoundDiscrepancies(pathHelper.getInvolvedSwitches(flow.flowId)*.getDpId()).isEmpty() + switchHelper.validateAndCollectFoundDiscrepancies(flow.retrieveAllEntityPaths().getInvolvedSwitches()).isEmpty() and: "Check if stats for forward and reverse flows are available" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() - assert flowStats.of(reversedFlow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() - } - } - - def "Stats are available only if both global and switch toggles are 'on' on both endpoints"() { - /*This test runs the last (by alphabet) on jenkins, because if it runs before other test, - switchHelper.waitForS42SwRulesSetup() call in the next tests fails. No idea why.*/ - given: "Two active switches with having server42" - def switchPair = switchPairs.all().withBothSwitchesConnectedToServer42().random() - def statsWaitSeconds = 4 - - and: "server42FlowRtt toggle is turned off" - featureToggles.server42FlowRtt(false) - switchHelper.waitForS42SwRulesSetup(false) - - and: "server42FlowRtt is turned off on src and dst" - [switchPair.src, switchPair.dst].each{ sw -> switchHelper.setServer42FlowRttForSwitch(sw, false, false) } - - and: "Flow for forward metric is created" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) - - and: "Reversed flow for backward metric is created" - def reversedFlow = flowHelperV2.randomFlow(switchPair.reversed, false, [flow]) - flowHelperV2.addFlow(reversedFlow) - - expect: "Involved switches pass switch validation" - Wrappers.wait(RULES_INSTALLATION_TIME) { //wait for s42 rules - switchHelper.synchronizeAndCollectFixedDiscrepancies(pathHelper.getInvolvedSwitches(flow.flowId)*.getDpId()).isEmpty() - } - - when: "Wait for several seconds" - SECONDS.sleep(statsWaitSeconds) - - then: "Expect no flow rtt stats for forward flow" - flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() - - and: "Expect no flow rtt stats for reversed flow" - flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() - - when: "Enable global rtt toggle" - featureToggles.server42FlowRtt(true) - switchHelper.waitForS42SwRulesSetup() - - and: "Wait for several seconds" - def checkpointTime = new Date().getTime() - SECONDS.sleep(statsWaitSeconds) - - then: "Expect no flow rtt stats for forward flow" - flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() - - and: "Expect no flow rtt stats for reversed flow" - flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() - - when: "Enable switch rtt toggle on src and dst" - switchHelper.setServer42FlowRttForSwitch(switchPair.src, true) - switchHelper.setServer42FlowRttForSwitch(switchPair.dst, true) - checkpointTime = new Date().getTime() - - then: "Stats for forward and reverse flow are available" - Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + SERVER42_STATS_LAG, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) - //https://github.com/telstra/open-kilda/issues/4678 - //assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert flowStats.of(reversedFlow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() } - - when: "Disable switch rtt toggle on dst (still enabled on src)" 
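The reversed-flow setup above illustrates the broader refactor running through this spec: the old flowHelperV2.randomFlow(...) plus addFlow(...) pair is replaced by the FlowFactory/FlowBuilder chain, and the resulting FlowExtended carries its own lifecycle and validation calls. A condensed, illustrative sketch of that idiom, using only helpers that appear in this diff (the surrounding spec wiring is assumed):

    // sketch only: the FlowExtended-centric idiom used throughout the refactored specs
    FlowExtended flow = flowFactory.getRandom(switchPair)                      // create the flow and wait for it to come UP
    def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() // replaces pathHelper.getInvolvedSwitches(flow.flowId)*.getDpId()
    assert switchHelper.validateAndCollectFoundDiscrepancies(involvedSwitches).isEmpty()
    assert flow.validateAndCollectDiscrepancies().isEmpty()                    // replaces the northbound.validateFlow(...) direction loop
    with(flow.sync()) { !it.rerouted }                                         // replaces northbound.synchronizeFlow(flow.flowId)
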
- switchHelper.setServer42FlowRttForSwitch(switchPair.dst, false) - checkpointTime = new Date().getTime() - - then: "Stats for forward and reverse flow are available" - Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + SERVER42_STATS_LAG, 1) { - def stats = flowStats.of(flow.getFlowId()) - assert stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) - //https://github.com/telstra/open-kilda/issues/4678 - //assert stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) - } - - when: "Disable global toggle" - featureToggles.server42FlowRtt(false) - - and: "Wait for several seconds" - SECONDS.sleep(statsWaitSeconds) - checkpointTime = new Date().getTime() - - then: "Expect no flow rtt stats for forward flow" - !flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) - - and: "Expect no flow rtt stats for reversed flow" - !flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) } @Tags([TOPOLOGY_DEPENDENT]) @@ -261,25 +169,24 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().withBothSwitchesConnectedToSameServer42Instance().random() and: "server42FlowRtt feature enabled globally and on src/dst switch" - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() [switchPair.src, switchPair.dst].each { sw -> switchHelper.setServer42FlowRttForSwitch(sw, true) } when: "Create a flow" def checkpointTime = new Date() - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) then: "Involved switches pass switch validation" Wrappers.wait(RULES_INSTALLATION_TIME) { //wait for s42 rules - switchHelper.validateAndCollectFoundDiscrepancies(pathHelper.getInvolvedSwitches(flow.flowId)*.getDpId()).isEmpty() + switchHelper.validateAndCollectFoundDiscrepancies(flow.retrieveAllEntityPaths().getInvolvedSwitches()).isEmpty() } and: "Stats for both directions are available" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() } when: "Disable flow rtt on dst switch" @@ -291,12 +198,12 @@ class Server42FlowRttSpec extends HealthCheckSpecification { then: "Stats are available in forward direction" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) } and: "Stats are not available in reverse direction" - !flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + !flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) } @Tags(HARDWARE) //not supported on a local env (the 'stub' service doesn't send real traffic through a switch) @@ -304,19 +211,18 @@ class Server42FlowRttSpec extends HealthCheckSpecification { given: "A switch pair connected to server42" def switchPair = 
switchPairs.all().withBothSwitchesConnectedToServer42().random() //enable server42 in featureToggle and on the switches - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() def server42Switch = switchPair.src - [server42Switch, switchPair.dst].collectEntries { sw -> switchHelper.setServer42FlowRttForSwitch(sw, true) } + [server42Switch, switchPair.dst].each { sw -> switchHelper.setServer42FlowRttForSwitch(sw, true) } and: "A flow on the given switch pair" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() } when: "Delete ingress server42 rule related to the flow on the src switch" @@ -332,28 +238,22 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def timeWhenMissingRuleIsDetected = new Date().getTime() and: "Flow is valid and UP" - northbound.validateFlow(flow.flowId).each { validationInfo -> - - if (validationInfo.direction == "forward") { - assert !validationInfo.asExpected - } - else { - assert validationInfo.asExpected - } - } + def existingDiscrepancies = flow.validateAndCollectDiscrepancies() + existingDiscrepancies.containsKey(FlowDirection.FORWARD) + !existingDiscrepancies.containsKey(FlowDirection.REVERSE) - northbound.getFlowStatus(flow.flowId).status == FlowState.UP + flow.retrieveFlowStatus().status == FlowState.UP and: "server42 stats for forward direction are not increased" - !flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + !flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) and: "server42 stats for reverse direction are increased" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + WAIT_OFFSET) { - flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) } when: "Synchronize the flow" - with(northbound.synchronizeFlow(flow.flowId)) { !it.rerouted } + with(flow.sync()) { !it.rerouted } then: "Missing ingress server42 rule is reinstalled on the src switch" Wrappers.wait(RULES_INSTALLATION_TIME) { @@ -364,7 +264,7 @@ class Server42FlowRttSpec extends HealthCheckSpecification { then: "server42 stats for forward direction are available again" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + WAIT_OFFSET, 1) { - flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsReinstalled) + flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsReinstalled) } } @@ -380,21 +280,19 @@ class Server42FlowRttSpec extends HealthCheckSpecification { .getReversed() and: "server42 is enabled on the src sw of the first switch pair" - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) 
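The guarded toggle line just above recurs in every given-block of this spec: the global server42FlowRtt toggle is written only when it is not already enabled, so specs holding the S42_TOGGLE resource lock avoid rewriting the toggle and re-waiting for server42 rule setup. Expanded into an equivalent, more explicit form (same names as in the spec):

    // equivalent expansion of: !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true)
    if (!featureToggles.getFeatureToggles().server42FlowRtt) {
        featureToggles.server42FlowRtt(true)   // flip the global toggle only when it is currently off
    }
    switchHelper.waitForS42SwRulesSetup()      // then wait for the s42 switch rules to settle
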
switchHelper.waitForS42SwRulesSetup() switchHelper.setServer42FlowRttForSwitch(fl1SwPair.src, true) and: "Two flows on the given switch pairs" - def flow1 = flowHelperV2.randomFlow(fl1SwPair) - def flow2 = flowHelperV2.randomFlow(fl2SwPair) - flowHelperV2.addFlow(flow1) - flowHelperV2.addFlow(flow2) + def flow1 = flowFactory.getRandom(fl1SwPair) + def flow2 = flowFactory.getRandom(fl2SwPair) //make sure stats for the flow1 in forward directions are available and not available for the flow2 Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - assert flowStats.of(flow1.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() - assert flowStats.of(flow2.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() + assert flowStats.of(flow1.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow2.flowId).get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() } when: "Try to swap src endpoints for two flows" @@ -415,17 +313,16 @@ class Server42FlowRttSpec extends HealthCheckSpecification { it.secondFlow.destination == flow2Dst } - def flow1Updated = northboundV2.getFlow(flow1.flowId) - def flow2Updated = northboundV2.getFlow(flow2.flowId) + def flow1Updated = flow1.retrieveDetails() + def flow2Updated = flow2.retrieveDetails() flow1Updated.source == flow1Src flow1Updated.destination == flow1Dst flow2Updated.source == flow2Src flow2Updated.destination == flow2Dst and: "Flows validation doesn't show any discrepancies" - [flow1, flow2].each { - northbound.validateFlow(it.flowId).each { direction -> assert direction.asExpected } - } + flow1.validateAndCollectDiscrepancies().isEmpty() + flow2.validateAndCollectDiscrepancies().isEmpty() and: "All switches are valid" def involvedSwitches = [fl1SwPair.src, fl1SwPair.dst, fl2SwPair.src, fl2SwPair.dst]*.dpId.unique() @@ -435,12 +332,12 @@ class Server42FlowRttSpec extends HealthCheckSpecification { and: "server42 stats are available for the flow2 in the forward direction" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - assert flowStats.of(flow2.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow2.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() } and: "server42 stats are not available any more for the flow1 in the forward direction" //give one second extra after swap - !flowStats.of(flow1.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42) + !flowStats.of(flow1.flowId).get(FLOW_RTT, FORWARD, SERVER_42) .hasNonZeroValuesAfter(timeWhenEndpointWereSwapped + 1000) } @@ -449,7 +346,7 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().withOnlySourceSwitchConnectedToServer42().random() when: "Set server42FlowRtt toggle to true" - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on src switch" @@ -457,19 +354,18 @@ class Server42FlowRttSpec extends HealthCheckSpecification { switchHelper.setServer42FlowRttForSwitch(switchPair.src, true) and: "Create a flow" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) then: "Stats from server42 only for forward direction are available" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() 
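The validation rewrite earlier in this hunk swaps the per-direction loop over northbound.validateFlow for FlowExtended.validateAndCollectDiscrepancies(), which returns a map keyed by FlowDirection, so a test can assert that exactly one direction is broken. A short sketch of that usage, assuming the same helpers as above:

    // sketch: after deleting the server42 ingress rule, only the FORWARD direction should report discrepancies
    def discrepancies = flow.validateAndCollectDiscrepancies()
    assert discrepancies.containsKey(FlowDirection.FORWARD)     // forward direction reports the missing-rule discrepancy
    assert !discrepancies.containsKey(FlowDirection.REVERSE)    // reverse direction is still as expected
    assert flow.retrieveFlowStatus().status == FlowState.UP     // the flow itself stays UP
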
+ assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() } and: "Stats from flow monitoring feature for reverse direction only are available" Wrappers.wait(flowSlaCheckIntervalSeconds * 3, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, FLOW_MONITORING).isEmpty() - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, FLOW_MONITORING).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, FLOW_MONITORING).isEmpty() + assert flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, FLOW_MONITORING).hasNonZeroValues() } when: "Disable server42FlowRtt on the src switch" @@ -477,18 +373,18 @@ class Server42FlowRttSpec extends HealthCheckSpecification { then: "Stats from flow monitoring feature for forward direction are available" Wrappers.wait(flowSlaCheckIntervalSeconds + WAIT_OFFSET * 2, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, FLOW_MONITORING).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, FLOW_MONITORING).hasNonZeroValues() } } @Tags(HARDWARE) //not supported on a local env (the 'stub' service doesn't send real traffic through a switch) def "Flow rtt stats are still available after updating a #data.flowDescription flow"() { given: "Two active switches, connected to the server42" - def switchPair = data.switchPair() + SwitchPair switchPair = data.switchPair() assumeTrue(switchPair != null, "Was not able to find a switchPair with a server42 connection") and: "server42FlowRtt toggle is set to true" - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on src and dst switches" @@ -496,9 +392,7 @@ class Server42FlowRttSpec extends HealthCheckSpecification { [server42Switch, switchPair.dst].each { sw -> switchHelper.setServer42FlowRttForSwitch(sw, true) } and: "A flow" - def flow = flowHelperV2.randomFlow(switchPair) - flow.tap(data.flowTap) - flowHelperV2.addFlow(flow) + FlowExtended flow = data.flowEntity(switchPair).create() when: "Update the flow(vlan/innerVlan) via partialUpdate on the src/dst endpoint" def newSrcInnerVlanId = (flow.source.innerVlanId == 0) ? 
0 : flow.source.innerVlanId + 1 @@ -513,17 +407,17 @@ class Server42FlowRttSpec extends HealthCheckSpecification { innerVlanId = newDstInnerVlanId } } - flowHelperV2.partialUpdate(flow.flowId, updateRequest) + flow.partialUpdate(updateRequest) def flowUpdateTime = new Date().getTime() then: "Check if stats for forward/reverse directions are available" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + WAIT_OFFSET, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(flowUpdateTime) - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(flowUpdateTime) + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(flowUpdateTime) + assert flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(flowUpdateTime) } and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() and: "The src switch is valid" switchHelper.synchronizeAndCollectFixedDiscrepancies(switchPair.toList()*.getDpId()).isEmpty() @@ -537,16 +431,17 @@ class Server42FlowRttSpec extends HealthCheckSpecification { .withBothSwitchesVxLanEnabled() .withSourceSwitchNotManufacturedBy(WB5164) .random()} , - flowTap : { FlowRequestV2 fl -> fl.encapsulationType = VXLAN } + flowEntity : { SwitchPair swPair -> + flowFactory.getBuilder(swPair).withEncapsulationType(VXLAN).build() } + ], [ flowDescription: "qinq", switchPair : {switchPairs.all().withBothSwitchesConnectedToServer42().random()}, - flowTap : { FlowRequestV2 fl -> - fl.source.vlanId = 10 - fl.source.innerVlanId = 100 - fl.destination.vlanId = 20 - fl.destination.innerVlanId = 200 + flowEntity : { SwitchPair swPair -> + flowFactory.getBuilder(swPair).withSourceVlan(10).withSourceInnerVlan(100) + .withDestinationVlan(20).withDestinationInnerVlan(200).build() + } ] ] @@ -558,7 +453,7 @@ class Server42FlowRttSpec extends HealthCheckSpecification { given: "Two active switches, src has server42 connected with incorrect config in swProps" def switchPair = switchPairs.all().withOnlySourceSwitchConnectedToServer42().random() - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() switchHelper.setServer42FlowRttForSwitch(switchPair.src, true) @@ -587,13 +482,12 @@ class Server42FlowRttSpec extends HealthCheckSpecification { !switchHelper.synchronizeAndCollectFixedDiscrepancies(switchPair.src.dpId).isPresent() when: "Create a flow on the given switch pair" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) then: "Flow rtt stats are not available due to incorrect s42 port on the src switch" Wrappers.timedLoop(STATS_FROM_SERVER42_LOGGING_TIMEOUT / 2) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() + assert flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() } when: "Set correct config for the server42 on the src switch" @@ -619,8 +513,8 @@ class Server42FlowRttSpec extends HealthCheckSpecification { and: "Flow rtt stats are available" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, 
FORWARD, SERVER_42).hasNonZeroValues() - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() } } @@ -630,7 +524,7 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().withBothSwitchesConnectedToServer42().random() and: "server42FlowRtt toggle is set to true" - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on src/dst switches" @@ -641,15 +535,99 @@ class Server42FlowRttSpec extends HealthCheckSpecification { def lagPort = switchHelper.createLagLogicalPort(switchPair.src.dpId, portsForLag as Set).logicalPortNumber and: "Create a flow" - def flow = flowHelperV2.randomFlow(switchPair).tap { - it.source.portNumber = lagPort - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withSourcePort(lagPort).build().create() then: "Stats from server42 for forward/reverse directions are available" Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() - assert flowStats.of(flow.getFlowId()).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + } + + def "Stats are available only if both global and switch toggles are 'on' on both endpoints"() { + /*This test runs the last (by alphabet) on jenkins, because if it runs before other test, + switchHelper.waitForS42SwRulesSetup() call in the next tests fails. 
No idea why.*/ + given: "Two active switches with having server42" + def switchPair = switchPairs.all().withBothSwitchesConnectedToServer42().random() + def statsWaitSeconds = 4 + + and: "server42FlowRtt toggle is turned off" + featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(false) + switchHelper.waitForS42SwRulesSetup(false) + + and: "server42FlowRtt is turned off on src and dst" + [switchPair.src, switchPair.dst].each{ sw -> switchHelper.setServer42FlowRttForSwitch(sw, false, false) } + + and: "Flow for forward metric is created" + def flow = flowFactory.getRandom(switchPair) + + and: "Reversed flow for backward metric is created" + def reversedFlow = flowFactory.getRandom(switchPair.reversed, false, FlowState.UP, flow.occupiedEndpoints()) + + + expect: "Involved switches pass switch validation" + Wrappers.wait(RULES_INSTALLATION_TIME) { //wait for s42 rules + switchHelper.synchronizeAndCollectFixedDiscrepancies(flow.retrieveAllEntityPaths().getInvolvedSwitches()).isEmpty() } + + when: "Wait for several seconds" + SECONDS.sleep(statsWaitSeconds) + + then: "Expect no flow rtt stats for forward flow" + flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() + + and: "Expect no flow rtt stats for reversed flow" + flowStats.of(reversedFlow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() + + when: "Enable global rtt toggle" + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) + switchHelper.waitForS42SwRulesSetup() + + and: "Wait for several seconds" + def checkpointTime = new Date().getTime() + SECONDS.sleep(statsWaitSeconds) + + then: "Expect no flow rtt stats for forward flow" + flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() + + and: "Expect no flow rtt stats for reversed flow" + flowStats.of(reversedFlow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() + + when: "Enable switch rtt toggle on src and dst" + switchHelper.setServer42FlowRttForSwitch(switchPair.src, true) + switchHelper.setServer42FlowRttForSwitch(switchPair.dst, true) + checkpointTime = new Date().getTime() + + then: "Stats for forward and reverse flow are available" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + SERVER42_STATS_LAG, 1) { + assert flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + //https://github.com/telstra/open-kilda/issues/4678 + //assert flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + } + + when: "Disable switch rtt toggle on dst (still enabled on src)" + switchHelper.setServer42FlowRttForSwitch(switchPair.dst, false) + checkpointTime = new Date().getTime() + + then: "Stats for forward and reverse flow are available" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + SERVER42_STATS_LAG, 1) { + def stats = flowStats.of(flow.flowId) + assert stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + //https://github.com/telstra/open-kilda/issues/4678 + //assert stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + } + + when: "Disable global toggle" + featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(false) + + and: "Wait for several seconds" + SECONDS.sleep(statsWaitSeconds) + checkpointTime = new Date().getTime() + + then: "Expect no flow rtt stats for forward flow" + !flowStats.of(flow.flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + + and: "Expect no flow rtt 
stats for reversed flow" + !flowStats.of(flow.flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42HaFlowRttSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42HaFlowRttSpec.groovy index 6125f89627c..ed79c38a9c9 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42HaFlowRttSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42HaFlowRttSpec.groovy @@ -1,6 +1,7 @@ package org.openkilda.functionaltests.spec.server42 import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.ResourceLockConstants.S42_TOGGLE import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT import static org.openkilda.functionaltests.helpers.model.FlowEncapsulationType.VXLAN @@ -25,8 +26,12 @@ import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.model.cookie.CookieBase.CookieType import org.springframework.beans.factory.annotation.Autowired +import spock.lang.Isolated +import spock.lang.ResourceLock import spock.lang.Shared +@ResourceLock(S42_TOGGLE) +@Isolated //s42 toggle affects all switches in the system, may lead to excess rules during sw validation in other tests class Server42HaFlowRttSpec extends HealthCheckSpecification { @Shared @@ -50,7 +55,7 @@ class Server42HaFlowRttSpec extends HealthCheckSpecification { assert swT, "There is no switch triplet for the further ha-flow creation" when: "Set server42FlowRtt toggle to true" - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on all switches" @@ -96,7 +101,7 @@ class Server42HaFlowRttSpec extends HealthCheckSpecification { assert swT, "There is no switch triplet for the ha-flow creation" and: "Set server42FlowRtt toggle to true" - featureToggles.server42FlowRtt(true) + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) switchHelper.waitForS42SwRulesSetup() and: "server42FlowRtt is enabled on all switches" diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42YFlowRttSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42YFlowRttSpec.groovy new file mode 100644 index 00000000000..e2733f1482f --- /dev/null +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/server42/Server42YFlowRttSpec.groovy @@ -0,0 +1,661 @@ +package org.openkilda.functionaltests.spec.server42 + +import static groovyx.gpars.GParsPool.withPool +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.ResourceLockConstants.S42_TOGGLE +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.functionaltests.helpers.Wrappers.timedLoop +import static 
org.openkilda.functionaltests.helpers.model.FlowEncapsulationType.VXLAN +import static org.openkilda.functionaltests.model.stats.Direction.FORWARD +import static org.openkilda.functionaltests.model.stats.Direction.REVERSE +import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RTT +import static org.openkilda.functionaltests.model.stats.Origin.FLOW_MONITORING +import static org.openkilda.functionaltests.model.stats.Origin.SERVER_42 +import static org.openkilda.testing.Constants.PROTECTED_PATH_INSTALLATION_TIME +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.SERVER42_STATS_LAG +import static org.openkilda.testing.Constants.STATS_FROM_SERVER42_LOGGING_TIMEOUT +import static org.openkilda.testing.Constants.WAIT_OFFSET + +import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.builder.YFlowBuilder +import org.openkilda.functionaltests.helpers.model.FlowWithSubFlowsEntityPath +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.helpers.model.SwitchTriplet +import org.openkilda.functionaltests.helpers.model.YFlowExtended +import org.openkilda.functionaltests.helpers.model.YFlowFactory +import org.openkilda.functionaltests.model.stats.FlowStats +import org.openkilda.messaging.payload.flow.FlowState +import org.openkilda.model.SwitchId +import org.openkilda.model.cookie.Cookie +import org.openkilda.model.cookie.CookieBase.CookieType +import org.openkilda.northbound.dto.v2.flows.FlowPatchEndpoint +import org.openkilda.northbound.dto.v2.yflows.SubFlowPatchPayload +import org.openkilda.northbound.dto.v2.yflows.YFlowPatchPayload +import org.openkilda.northbound.dto.v2.yflows.YFlowPatchSharedEndpointEncapsulation + +import org.springframework.beans.factory.annotation.Autowired +import org.springframework.beans.factory.annotation.Value +import spock.lang.Isolated +import spock.lang.ResourceLock +import spock.lang.Shared + +@ResourceLock(S42_TOGGLE) +@Isolated //s42 toggle affects all switches in the system, may lead to excess rules during sw validation in other tests +class Server42YFlowRttSpec extends HealthCheckSpecification { + @Shared + @Autowired + FlowStats flowStats + + @Autowired + @Shared + YFlowFactory yFlowFactory + + @Shared + @Value('${flow.sla.check.interval.seconds}') + Integer flowSlaCheckIntervalSeconds + + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory + + @Shared + SwitchTriplet switchTripletWithYPointOnSharedEp + + @Shared + SwitchTriplet switchTripletWithYPointOnSubFlowEnd + + def setupSpec() { + switchTripletWithYPointOnSharedEp = topologyHelper.findSwitchTripletWithSharedEpInTheMiddleOfTheChainServer42Support() + switchTripletWithYPointOnSubFlowEnd = topologyHelper.findSwitchTripletWithSharedEpEp1Ep2InChainServer42Support() + } + + @Tags(TOPOLOGY_DEPENDENT) + def "Create an Y-Flow (#description) with server42 Rtt feature and check datapoints in tsdb"() { + given: "Three active switches with server42 connected" + assumeTrue((topology.getActiveServer42Switches().size() >= 3), "Unable to find active server42") + + def swT = isSharedEndpointYPoint ? 
switchTripletWithYPointOnSharedEp : switchTripletWithYPointOnSubFlowEnd + assert swT, "There is no switch triplet for the further Y-Flow creation" + + when: "Set server42FlowRtt toggle to true" + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) + switchHelper.waitForS42SwRulesSetup() + + and: "server42FlowRtt is enabled on all switches" + def initialSwitchesProps = [swT.shared, swT.ep1, swT.ep2].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true, true)] } + + and: "Create a Y-Flow" + YFlowExtended yFlow = setupRequiredParams(yFlowFactory.getBuilder(swT)).create() + assert isSharedEndpointYPoint ? yFlow.sharedEndpoint.switchId == yFlow.yPoint : yFlow.sharedEndpoint.switchId != yFlow.yPoint + + then: "Check if stats for FORWARD and REVERSE directions are available for the first sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + SERVER42_STATS_LAG, 1) { + def subFlow1Stats = flowStats.of(yFlow.subFlows.first().flowId) + assert subFlow1Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + + and: "Check if stats for FORWARD and REVERSE directions are available for the second sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + SERVER42_STATS_LAG, 1) { + def subFlow2Stats = flowStats.of(yFlow.subFlows.last().flowId) + assert subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + + when: "Delete Y-Flow" + yFlow && yFlow.delete() + + then: "All Server42 flow-related rules are deleted" + withPool { + Wrappers.wait(RULES_INSTALLATION_TIME) { + initialSwitchesProps.keySet().eachParallel { sw -> + assert switchRulesFactory.get(sw.dpId).getRules().findAll { + new Cookie(it.cookie).getType() in [CookieType.SERVER_42_FLOW_RTT_INPUT, + CookieType.SERVER_42_FLOW_RTT_INGRESS] + }.empty + } + } + } + + where: + description | isSharedEndpointYPoint | setupRequiredParams + "ep1 and ep2 default port, shared ep is y-point, encapsulation TRANSIT_VLAN" | true | { YFlowBuilder builder -> builder.withEp1Vlan(0).withEp2Vlan(0).build() } + "ep1 and ep2 default port, ep1/ep2 is y-point, encapsulation VXLAN" | false | { YFlowBuilder builder -> builder.withEp1Vlan(0).withEp2Vlan(0).withEncapsulationType(VXLAN).build() } + "shared ep qnq, shared ep is y-point, encapsulation VXLAN" | true | { YFlowBuilder builder -> builder.withSharedEpQnQ().withEncapsulationType(VXLAN).build() } + "tagged flow, shared ep is y-point, encapsulation VXLAN" | true | { YFlowBuilder builder -> builder.withEncapsulationType(VXLAN).build() } + "ep1 and ep2 are same switch+port, ep1/ep2 is y-point, encapsulation TRANSIT_VLAN" | false | { YFlowBuilder builder -> builder.withEp1AndEp2SameSwitchAndPort().build() } + "ep1 is the full port, ep1/ep2 is y-point, encapsulation TRANSIT_VLAN" | false | { YFlowBuilder builder -> builder.withEp1Vlan(0).build() } + "all endpoints qnq, shared ep is y-point, encapsulation TRANSIT_VLAN" | true | { YFlowBuilder builder -> builder.withSharedEpQnQ().withEp1QnQ().withEp2QnQ().build() } + "tagged flow, shared ep is y-point, protected path, encapsulation VXLAN" | true | { YFlowBuilder builder -> builder.withProtectedPath(true).withEncapsulationType(VXLAN).build() } + "ep1+ep2 qnq, ep1/ep2 is y-point, encapsulation TRANSIT_VLAN" | false | { YFlowBuilder builder -> builder.withEp1QnQ().withEp2QnQ().build() } + } + + @Tags([TOPOLOGY_DEPENDENT]) + def 
"Y-Flow rtt stats are available if both endpoints are connected to the same server42(same pop)"() { + given: "Three active switches with server42 connected" + assumeTrue((topology.getActiveServer42Switches().size() >= 3), "Unable to find active server42") + + def swT = isSharedEndpointYPoint ? switchTripletWithYPointOnSharedEp : switchTripletWithYPointOnSubFlowEnd + + and: "server42FlowRtt feature enabled globally and on src/dst switch" + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) + switchHelper.waitForS42SwRulesSetup() + + [swT.shared, swT.ep1, swT.ep2].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } + + when: "Create a Y-Flow" + def yFlow = yFlowFactory.getRandom(swT) + assert isSharedEndpointYPoint ? yFlow.sharedEndpoint.switchId == yFlow.yPoint : yFlow.sharedEndpoint.switchId != yFlow.yPoint + + + then: "Involved switches pass switch validation" + List involvedSwitches = yFlow.retrieveAllEntityPaths().getInvolvedSwitches() + Wrappers.wait(RULES_INSTALLATION_TIME) { + switchHelper.validateAndCollectFoundDiscrepancies(involvedSwitches).isEmpty() + } + + and: "Stats for both directions are available for the first sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert subFlow1Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + } + + and: "Stats for both directions are available for the second sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + assert subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + } + + when: "Disable flow rtt on shared switch" + //for y-flow shared switch is src sw + switchHelper.setServer42FlowRttForSwitch(swT.shared, false) + Wrappers.wait(RULES_INSTALLATION_TIME, 3) { + assert !switchHelper.validateAndCollectFoundDiscrepancies(swT.shared.dpId).isPresent() + } + + then: "Stats are available in REVERSE direction for both sub-flows" + def checkpointTime = new Date().getTime() + SERVER42_STATS_LAG * 1000 + + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll() { + assert flowStats.of(yFlow.subFlows.first().flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + assert flowStats.of(yFlow.subFlows.last().flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + } + } + + and: "Stats are absent in FORWARD direction for both sub-flows" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll() { + assert !flowStats.of(yFlow.subFlows.first().flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + assert !flowStats.of(yFlow.subFlows.first().flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + } + } + + where: + isSharedEndpointYPoint << [true, false] + } + + @Tags(LOW_PRIORITY) + def "Rtt statistic is available for a Y-Flow in case switch is not connected to server42"() { + given: "Three active switches with server42 connected" + assumeTrue((topology.getActiveServer42Switches().size() >= 3), "Unable to find active server42") + + and: "Switches triplet with ONLY shared switch that supports server42 feature" + def swT = topologyHelper.findSwitchTripletWithOnlySharedSwServer42Support() + 
assumeTrue(swT as boolean, "Unable to find requested switchTriplet") + + and: "server42FlowRtt feature enabled globally and switch ON for appropriate switches(swT)" + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) + switchHelper.waitForS42SwRulesSetup() + + [swT.shared].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } + + when: "Create a Y-Flow" + def yFlow = yFlowFactory.getRandom(swT) + + then: "Stats from server42 only for FORWARD direction are available for the first sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert subFlow1Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() + } + } + + and: "Stats from server42 only for FORWARD direction are available for the second sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + assert subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() + } + } + + and: "Flow monitoring stats for REVERSE direction are available for the first sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert subFlow1Stats.get(FLOW_RTT, REVERSE, FLOW_MONITORING).hasNonZeroValues() + assert subFlow1Stats.get(FLOW_RTT, FORWARD, FLOW_MONITORING).isEmpty() + } + } + + and: "Flow monitoring stats for REVERSE direction are available for the second sub-flow" + Wrappers.wait(flowSlaCheckIntervalSeconds * 3, 1) { + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + assert subFlow2Stats.get(FLOW_RTT, REVERSE, FLOW_MONITORING).hasNonZeroValues() + assert subFlow2Stats.get(FLOW_RTT, FORWARD, FLOW_MONITORING).isEmpty() + } + } + + when: "Disable server42FlowRtt on the src switch" + switchHelper.setServer42FlowRttForSwitch(swT.shared, false) + + then: "Flow monitoring stats for FORWARD direction are available for both sub-flows" + Wrappers.wait(flowSlaCheckIntervalSeconds * 3, 1) { + verifyAll { + assert flowStats.of(yFlow.subFlows.first().flowId).get(FLOW_RTT, FORWARD, FLOW_MONITORING).hasNonZeroValues() + assert flowStats.of(yFlow.subFlows.last().flowId).get(FLOW_RTT, FORWARD, FLOW_MONITORING).hasNonZeroValues() + } + } + } + + @Tags(LOW_PRIORITY) + def "Able to swapEndpoint for a Y-Flow with enabled server42 on it"() { + given: "Three active switches with server42 connected" + assumeTrue((topology.getActiveServer42Switches().size() >= 3), "Unable to find active server42") + + and: "Switches triplet doesn't contain WB164 switch" + def swT = switchTripletWithYPointOnSharedEp + + and: "server42FlowRtt feature enabled globally and switch ON for appropriate switches(swT)" + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) + switchHelper.waitForS42SwRulesSetup() + + def initialSwitchesProps = [swT.shared, swT.ep1, swT.ep2].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } + + and: "Create a Y-Flow" + def yFlow = yFlowFactory.getBuilder(swT).withProtectedPath(true).build().create() + assert yFlow.protectedPathYPoint + String subFlow1 = yFlow.getSubFlows().first().flowId + String subFlow2 = yFlow.getSubFlows().last().flowId + + def yFlowPathBeforeSwap = 
yFlow.retrieveAllEntityPaths() + def subFlow1ProtectedPathBeforeSwap = yFlowPathBeforeSwap.subFlowPaths.find { it.flowId == subFlow1 }.protectedPath.forward.retrieveNodes() + def subFlow2ProtectedPathBeforeSwap = yFlowPathBeforeSwap.subFlowPaths.find { it.flowId == subFlow2 }.protectedPath.forward.retrieveNodes() + + and: "Stats are available for both FORWARD and REVERSE directions are available for the first sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert subFlow1Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + } + + and: "Stats are available for both FORWARD and REVERSE directions are available for the second sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + assert subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + } + + when: "Try to swap src endpoints for two flows" + yFlow.swap() + + then: "Endpoints are successfully swapped" + FlowWithSubFlowsEntityPath yFlowPathAfterSwap + Wrappers.wait(PROTECTED_PATH_INSTALLATION_TIME) { + yFlowPathAfterSwap = yFlow.retrieveAllEntityPaths() + assert yFlowPathAfterSwap.subFlowPaths.find { it.flowId == subFlow1 }.path.forward.retrieveNodes() == subFlow1ProtectedPathBeforeSwap + assert yFlowPathAfterSwap.subFlowPaths.find { it.flowId == subFlow2 }.path.forward.retrieveNodes() == subFlow2ProtectedPathBeforeSwap + assert yFlow.retrieveDetails().status == FlowState.UP + } + + and: "Y-Flow validation passes" + yFlow.validate().asExpected + + and: "All switches are valid" + def involvedSwitches = yFlowPathAfterSwap.getInvolvedSwitches() + Wrappers.wait(RULES_INSTALLATION_TIME) { + involvedSwitches.each { swId -> + switchHelper.validate(swId).isAsExpected() + } + } + + and: "Stats are available for both FORWARD and REVERSE directions are available for the first sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert subFlow1Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + } + + and: "Stats are available for both FORWARD and REVERSE directions are available for the second sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + assert subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + } + + when: "Delete Y-Flow" + yFlow && yFlow.delete() + + then: "All Server42 flow-related rules are deleted" + withPool { + Wrappers.wait(RULES_INSTALLATION_TIME) { + initialSwitchesProps.keySet().eachParallel { sw -> + assert switchRulesFactory.get(sw.dpId).getRules().findAll { + new Cookie(it.cookie).getType() in [CookieType.SERVER_42_FLOW_RTT_INPUT, + CookieType.SERVER_42_FLOW_RTT_INGRESS] + }.empty + } + } + } + } + + @Tags(HARDWARE) + //not supported on a local env (the 'stub' service doesn't send real traffic through a switch) + def "Able to synchronize a Y-Flow (install missing server42 rules)"() { + given: "Three active switches with server42 connected" + assumeTrue((topology.getActiveServer42Switches().size() >= 3), 
"Unable to find active server42") + + def swT = isSharedEndpointYPoint ? switchTripletWithYPointOnSharedEp : switchTripletWithYPointOnSubFlowEnd + + and: "server42FlowRtt feature enabled globally and switch ON for appropriate switches(swT)" + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) + switchHelper.waitForS42SwRulesSetup() + + [swT.shared, swT.ep1, swT.ep2].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } + + and: "Create a Y-Flow" + def yFlow = yFlowFactory.getRandom(swT) + assert isSharedEndpointYPoint ? yFlow.sharedEndpoint.switchId == yFlow.yPoint : yFlow.sharedEndpoint.switchId != yFlow.yPoint + + and: "Stats for both directions are available for the first sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert subFlow1Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + } + + and: "Stats for both directions are available for the second sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + assert subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValues() + assert subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValues() + } + } + + when: "Delete ingress server42 rule related to the flow on the shared switches" + def switchRules = switchRulesFactory.get(swT.shared.dpId) + def cookiesToDelete = switchRules.getRulesByCookieType(CookieType.SERVER_42_FLOW_RTT_INGRESS).cookie + cookiesToDelete.each { cookie -> switchRules.delete(cookie) } + def timeWhenMissingRuleIsDetected = new Date().getTime() + SERVER42_STATS_LAG * 1000 + + then: "System detects missing rule on the shared switch" + Wrappers.wait(RULES_DELETION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(swT.shared.dpId).get() + .rules.missing*.getCookie().sort() == cookiesToDelete.sort() + } + + and: "Y-Flow is valid and UP" + verifyAll(yFlow.validate()) { validationResult -> + assert !validationResult.asExpected + validationResult.getSubFlowValidationResults().findAll { it.direction == "FORWARD" }.each { + assert !it.asExpected + } + validationResult.getSubFlowValidationResults().findAll { it.direction == "REVERSE" }.each { + assert it.asExpected + } + } + + yFlow.retrieveDetails().status == FlowState.UP + + then: "Stats are available in REVERSE direction for both sub-flows" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll() { + assert flowStats.of(yFlow.subFlows.first().flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + assert flowStats.of(yFlow.subFlows.last().flowId).get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + } + } + + and: "Stats are absent in FORWARD direction for both sub-flows" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll() { + assert !flowStats.of(yFlow.subFlows.first().flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + assert !flowStats.of(yFlow.subFlows.last().flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsDetected) + } + } + + when: "Synchronize the Y-Flow" + yFlow.sync() + + then: "Missing ingress server42 rule is reinstalled on the shared switch" + Wrappers.wait(RULES_INSTALLATION_TIME) { + assert 
!switchHelper.validateAndCollectFoundDiscrepancies(swT.shared.dpId).isPresent() + assert switchRules.getRulesByCookieType(CookieType.SERVER_42_FLOW_RTT_INGRESS).cookie.size() == 2 + } + def timeWhenMissingRuleIsReinstalled = new Date().getTime() + + then: "Server42 stats for FORWARD direction are available again" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll() { + assert flowStats.of(yFlow.subFlows.first().flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsReinstalled) + assert flowStats.of(yFlow.subFlows.last().flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(timeWhenMissingRuleIsReinstalled) + } + } + + where: + isSharedEndpointYPoint << [true, false] + } + + @Tags(HARDWARE) + //not supported on a local env (the 'stub' service doesn't send real traffic through a switch) + def "Y-Flow rtt stats are still available after updating Y-Flow: #description"() { + given: "Three active switches with server42 connected" + assumeTrue((topology.getActiveServer42Switches().size() >= 3), "Unable to find active server42") + + and: "Switches triplet doesn't contain WB164 switch" + def swT = topologyHelper.findSwitchTripletServer42SupportWithSharedEpInTheMiddleOfTheChainExceptWBSw() + + and: "server42FlowRtt feature enabled globally and switch ON for appropriate switches(swT)" + !featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(true) + switchHelper.waitForS42SwRulesSetup() + + [swT.shared, swT.ep1, swT.ep2].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, true)] } + + and: "Create a Y-Flow" + def yFlow = yFlowFactory.getRandom(swT) + + when: "Update the Y-Flow: #description" + //Y-Flow has been modified in the scope of an update request preparation + YFlowPatchPayload yFlowUpdateParams = updateRequest(yFlow) + yFlow.partialUpdate(yFlowUpdateParams) + + then: "Y-Flow is 'Up' after updating" + verifyAll(yFlow.retrieveDetails()) { yFlowAfterUpdate -> + assert yFlowAfterUpdate.encapsulationType == yFlow.encapsulationType + assert yFlowAfterUpdate.subFlows.endpoint.sort() == yFlow.subFlows.endpoint.sort() + assert yFlowAfterUpdate.subFlows.sharedEndpoint.sort() == yFlow.subFlows.sharedEndpoint.sort() + } + + and: "Stats for both directions are available for the first sub-flow" + def flowUpdateTime = new Date().getTime() + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert subFlow1Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(flowUpdateTime) + assert subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(flowUpdateTime) + } + } + + and: "Stats for both directions are available for the second sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + assert subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(flowUpdateTime) + assert subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(flowUpdateTime) + } + } + + and: "Y-Flow is valid" + yFlow.validate().asExpected + + and: "Each switch from triplet is valid" + [swT.shared, swT.ep1, swT.ep2].each { + !switchHelper.synchronizeAndCollectFixedDiscrepancies(it.dpId).isPresent() + } + + where: + description | updateRequest + "update to VXLAN" | { YFlowExtended flow -> + flow.tap { it.encapsulationType = VXLAN } + return YFlowPatchPayload.builder().encapsulationType(VXLAN.toString()).build() + } + + 
"update to qnq" | { YFlowExtended flow -> + def updateRequest = YFlowPatchPayload.builder() + .subFlows([SubFlowPatchPayload.builder() + .endpoint(FlowPatchEndpoint.builder() + .innerVlanId(new Random().nextInt(4095)) + .build()) + .sharedEndpoint(YFlowPatchSharedEndpointEncapsulation.builder() + .innerVlanId(new Random().nextInt(4095)) + .build()) + .flowId(flow.subFlows.flowId.first()) + .build(), + SubFlowPatchPayload.builder() + .endpoint(FlowPatchEndpoint.builder() + .innerVlanId(new Random().nextInt(4095)) + .build()) + .sharedEndpoint(YFlowPatchSharedEndpointEncapsulation.builder() + .innerVlanId(new Random().nextInt(4095)) + .build()) + .flowId(flow.subFlows.flowId.last()) + .build()]) + .build() + updateRequest.subFlows.each { newParam -> + flow.subFlows.find { subFlow -> subFlow.flowId == newParam.flowId }.tap { + it.endpoint.innerVlanId = newParam.endpoint.innerVlanId + it.sharedEndpoint.innerVlanId = newParam.sharedEndpoint.innerVlanId + } + } + return updateRequest + + } + } + + @Tags(LOW_PRIORITY) + def "Y-Flow rtt stats are available only if both global and switch toggles are 'ON' on both endpoints"() { + given: "Three active switches with server42 connected" + assumeTrue((topology.getActiveServer42Switches().size() >= 3), "Unable to find active server42") + + def swT = isSharedEndpointYPoint ? switchTripletWithYPointOnSharedEp : switchTripletWithYPointOnSubFlowEnd + def statsWaitSeconds = 4 + + and: "server42FlowRtt toggle is turned off" + featureToggles.getFeatureToggles().server42FlowRtt && featureToggles.server42FlowRtt(false) + switchHelper.waitForS42SwRulesSetup(false) + + and: "server42FlowRtt is turned off on all switches" + def initialSwitchesProps = [swT.shared, swT.ep1, swT.ep2].collectEntries { sw -> [sw, switchHelper.setServer42FlowRttForSwitch(sw, false, false)] } + + when: "Create a Y-Flow" + def yFlow = yFlowFactory.getRandom(swT) + assert isSharedEndpointYPoint ? 
yFlow.sharedEndpoint.switchId == yFlow.yPoint : yFlow.sharedEndpoint.switchId != yFlow.yPoint + + then: "Involved switches pass switch validation" + List involvedSwitches = yFlow.retrieveAllEntityPaths().getInvolvedSwitches() + Wrappers.wait(RULES_INSTALLATION_TIME) { + assert switchHelper.validateAndCollectFoundDiscrepancies(involvedSwitches).isEmpty() + } + + and: "Expect no Y-Flow rtt stats for FORWARD and REVERSE directions" + timedLoop(statsWaitSeconds) { + [flowStats.of(yFlow.subFlows.first().flowId), flowStats.of(yFlow.subFlows.last().flowId)].each { stats -> + assert stats.get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() + assert stats.get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() + } + } + + when: "Enable global rtt toggle" + featureToggles.server42FlowRtt(true) + switchHelper.waitForS42SwRulesSetup() + + then: "Expect no flow rtt stats for FORWARD and REVERSE direction for the first sub-flow" + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert subFlow1Stats.get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() + assert subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() + } + + and: "Expect no flow rtt stats for FORWARD and REVERSE direction for the second sub-flow" + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + assert subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).isEmpty() + assert subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).isEmpty() + } + + when: "Enable switch rtt toggle on src and dst" + [swT.shared, swT.ep1, swT.ep2].each { + switchHelper.setServer42FlowRttForSwitch(it, true, true) + } + def checkpointTime = new Date().getTime() + + then: "Stats for FORWARD and REVERSE directions are available for the first sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert subFlow1Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + assert subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + } + } + + and: "Stats for FORWARD and REVERSE directions are available for the second sub-flow" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT, 1) { + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + assert subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + assert subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + } + } + + when: "Disable switch rtt toggle on ep1 and ep2 ends" + switchHelper.setServer42FlowRttForSwitch(swT.ep1, false, true) + switchHelper.setServer42FlowRttForSwitch(swT.ep2, false, true) + checkpointTime = new Date().getTime() + + then: "Stats for FORWARD direction are available for both sub-flows" + Wrappers.wait(STATS_FROM_SERVER42_LOGGING_TIMEOUT + WAIT_OFFSET, 1) { + assert flowStats.of(yFlow.subFlows.first().flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + assert flowStats.of(yFlow.subFlows.last().flowId).get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + } + + when: "Disable global toggle" + featureToggles.server42FlowRtt(false) + switchHelper.waitForS42SwRulesSetup(false) + + and: "Wait for several seconds" + checkpointTime = new Date().getTime() + + then: "Expect no flow rtt stats for FORWARD and REVERSE direction for the first sub-flow" + verifyAll(flowStats.of(yFlow.subFlows.first().flowId)) { subFlow1Stats -> + assert !subFlow1Stats.get(FLOW_RTT, FORWARD, 
SERVER_42).hasNonZeroValuesAfter(checkpointTime) + assert !subFlow1Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + } + + and: "Expect no flow rtt stats for FORWARD and REVERSE direction for the second sub-flow" + verifyAll(flowStats.of(yFlow.subFlows.last().flowId)) { subFlow2Stats -> + !subFlow2Stats.get(FLOW_RTT, FORWARD, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + !subFlow2Stats.get(FLOW_RTT, REVERSE, SERVER_42).hasNonZeroValuesAfter(checkpointTime) + } + + where: + isSharedEndpointYPoint << [true, false] + } + +} diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/FlowStatSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/FlowStatSpec.groovy index 9dcf52e61c1..1a636cc8a19 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/FlowStatSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/FlowStatSpec.groovy @@ -1,15 +1,17 @@ package org.openkilda.functionaltests.spec.stats +import static org.openkilda.functionaltests.model.stats.Direction.* +import static org.openkilda.messaging.payload.flow.FlowState.* + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.model.stats.FlowStats -import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.northbound.dto.v2.flows.FlowPatchEndpoint import org.openkilda.northbound.dto.v2.flows.FlowPatchV2 import org.openkilda.testing.service.traffexam.TraffExamService import org.openkilda.testing.service.traffexam.model.Exam -import org.openkilda.testing.tools.FlowTrafficExamBuilder import org.springframework.beans.factory.annotation.Autowired import spock.lang.Narrative import spock.lang.Shared @@ -27,6 +29,10 @@ import static org.openkilda.testing.Constants.WAIT_OFFSET @Narrative("Verify that statistic is collected for different type of flow") class FlowStatSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + @Autowired Provider traffExamProvider @@ -49,17 +55,16 @@ class FlowStatSpec extends HealthCheckSpecification { def srcSwitchId = switchPair.getSrc().getDpId() and: "Flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) - def flowPathInfo = northbound.getFlowPath(flow.flowId) - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build() + .create() + + def flowPathInfo = flow.retrieveAllEntityPaths() + def mainPath = flowPathInfo.getPathNodes(FORWARD, false) + def protectedPath = flowPathInfo.getPathNodes(FORWARD, true) when: "Generate traffic on the given flow" def traffExam = traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam).buildExam(flowHelperV2.toV1(flow), - (int) flow.maximumBandwidth, 5).tap { udp = true } + Exam exam = flow.traffExam(traffExam, flow.maximumBandwidth, 5).forward.tap { udp = true } //generate two points of stat just to be sure that stat is not collected for protected path 2.times { count -> exam.setResources(traffExam.startExam(exam)) @@ -68,10 
+73,10 @@ class FlowStatSpec extends HealthCheckSpecification { } then: "Stats collects stat for main path cookies" - def flowInfo = database.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetailsFromDB() def mainForwardCookie = flowInfo.forwardPath.cookie.value def mainReverseCookie = flowInfo.reversePath.cookie.value - def stats = flowStats.of(flow.getFlowId()) + def stats = flowStats.of(flow.flowId) stats.get(FLOW_RAW_BYTES, srcSwitchId, mainForwardCookie).hasNonZeroValues() stats.get(FLOW_RAW_BYTES, srcSwitchId, mainReverseCookie).hasNonZeroValues() @@ -80,19 +85,19 @@ class FlowStatSpec extends HealthCheckSpecification { !stats.get(FLOW_RAW_BYTES, srcSwitchId, protectedReverseCookie).hasNonZeroValues() when: "Swap main and protected path" - northbound.swapFlowPath(flow.flowId) + flow.swapFlowPath() Wrappers.wait(PROTECTED_PATH_INSTALLATION_TIME) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - def newFlowPathInfo = northbound.getFlowPath(flow.flowId) - assert pathHelper.convert(newFlowPathInfo) == currentProtectedPath - assert pathHelper.convert(newFlowPathInfo.protectedPath) == currentPath + assert flow.retrieveFlowStatus().status == UP + def newFlowPathInfo = flow.retrieveAllEntityPaths() + assert newFlowPathInfo.getPathNodes(FORWARD, false) == protectedPath + assert newFlowPathInfo.getPathNodes(FORWARD, true) == mainPath } and: "Wait till stats from old main path are collected" Wrappers.wait(statsRouterRequestInterval, 3) { def oldStats = stats statsHelper."force kilda to collect stats"() - stats = flowStats.of(flow.getFlowId()) + stats = flowStats.of(flow.flowId) assert oldStats.get(FLOW_RAW_BYTES, srcSwitchId, mainForwardCookie).getNewestTimeStamp() == stats.get(FLOW_RAW_BYTES, srcSwitchId, mainForwardCookie).getNewestTimeStamp() @@ -109,7 +114,7 @@ class FlowStatSpec extends HealthCheckSpecification { } then: "System collects stats for previous egress cookie of protected path with non zero value" - def newFlowStats = stats.of(flow.getFlowId()) + def newFlowStats = stats.of(flow.flowId) newFlowStats.get(FLOW_RAW_BYTES, srcSwitchId, protectedReverseCookie).hasNonZeroValues() } @@ -123,30 +128,28 @@ class FlowStatSpec extends HealthCheckSpecification { def srcSwitchId = switchPair.getSrc().getDpId() and: "A flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build() + .create() - def flowPathInfo = northbound.getFlowPath(flow.flowId) - assert flowPathInfo.protectedPath - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) + def flowPathInfo = flow.retrieveAllEntityPaths() + assert !flowPathInfo.flowPath.protectedPath.isPathAbsent() + def mainPath = flowPathInfo.getPathNodes(FORWARD, false) + def protectedPath = flowPathInfo.getPathNodes(FORWARD, true) when: "Generate traffic on the given flow" def traffExam = traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam).buildExam(flowHelperV2.toV1(flow), - (int) flow.maximumBandwidth, 3).tap { udp = true } + Exam exam = flow.traffExam(traffExam, flow.maximumBandwidth, 3).forward.tap{ udp = true } exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() statsHelper."force kilda to collect stats"() then: "Stats is not empty for main path cookies" - def flowInfo = database.getFlow(flow.flowId) + def flowInfo = 
flow.retrieveDetailsFromDB() def mainForwardCookie = flowInfo.forwardPath.cookie.value def mainReverseCookie = flowInfo.reversePath.cookie.value def stats Wrappers.wait(statsRouterInterval) { - stats = flowStats.of(flow.getFlowId()) + stats = flowStats.of(flow.flowId) stats.get(FLOW_RAW_BYTES, srcSwitchId, mainForwardCookie).hasNonZeroValues() stats.get(FLOW_RAW_BYTES, srcSwitchId, mainReverseCookie).hasNonZeroValues() } @@ -156,19 +159,19 @@ class FlowStatSpec extends HealthCheckSpecification { !stats.get(FLOW_RAW_BYTES, srcSwitchId, protectedReverseCookie).hasNonZeroValues() when: "Make the current and protected path less preferable than alternatives" - def alternativePaths = switchPair.paths.findAll { it != currentPath && it != currentProtectedPath } - alternativePaths.each { pathHelper.makePathMorePreferable(it, currentPath) } - alternativePaths.each { pathHelper.makePathMorePreferable(it, currentProtectedPath) } + def alternativePaths = switchPair.paths.findAll { it != mainPath && it != protectedPath } + alternativePaths.each { pathHelper.makePathMorePreferable(it, mainPath) } + alternativePaths.each { pathHelper.makePathMorePreferable(it, protectedPath) } and: "Init intentional reroute" - def rerouteResponse = northbound.rerouteFlow(flow.flowId) - rerouteResponse.rerouted - Wrappers.wait(WAIT_OFFSET) { assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + def rerouteResponse = flow.reroute() + assert rerouteResponse.rerouted + Wrappers.wait(WAIT_OFFSET) { assert flow.retrieveFlowStatus().status == UP } - def flowPathInfoAfterRerouting = northbound.getFlowPath(flow.flowId) - def newCurrentPath = pathHelper.convert(flowPathInfoAfterRerouting) - newCurrentPath != currentPath - newCurrentPath != currentProtectedPath + def flowPathInfoAfterRerouting = flow.retrieveAllEntityPaths() + def newMainPath = flowPathInfoAfterRerouting.getPathNodes(FORWARD, false) + newMainPath != mainPath + newMainPath != protectedPath and: "Generate traffic on the flow" exam.setResources(traffExam.startExam(exam)) @@ -177,12 +180,12 @@ class FlowStatSpec extends HealthCheckSpecification { then: "Stats is not empty for new main path cookies" - def newFlowInfo = database.getFlow(flow.flowId) + def newFlowInfo = flow.retrieveDetailsFromDB() def newMainForwardCookie = newFlowInfo.forwardPath.cookie.value def newMainReverseCookie = newFlowInfo.reversePath.cookie.value def newFlowStats Wrappers.wait(statsRouterInterval) { - newFlowStats = flowStats.of(flow.getFlowId()) + newFlowStats = flowStats.of(flow.flowId) newFlowStats.get(FLOW_RAW_BYTES, srcSwitchId, newMainForwardCookie).hasNonZeroValues() newFlowStats.get(FLOW_RAW_BYTES, srcSwitchId, newMainReverseCookie).hasNonZeroValues() } @@ -203,30 +206,27 @@ class FlowStatSpec extends HealthCheckSpecification { def srcSwitchId = switchPair.getSrc().getDpId() and: "A flow with protected path" - def flow = flowHelper.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build() + .createV1() - def flowPathInfo = northbound.getFlowPath(flow.id) - assert flowPathInfo.protectedPath - def currentPath = pathHelper.convert(flowPathInfo) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) + def flowPathInfo = flow.retrieveAllEntityPaths() + assert !flowPathInfo.flowPath.protectedPath.isPathAbsent() + def protectedPath = flowPathInfo.getPathNodes(FORWARD, true) when: "Generate traffic on the given flow" def traffExam = 
traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam).buildExam(flow, (int) flow.maximumBandwidth, 3) - .tap { udp = true} + Exam exam = flow.traffExam(traffExam, flow.maximumBandwidth, 3).forward.tap { udp = true } exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() statsHelper."force kilda to collect stats"() then: "System collects stats for main path cookies" - def flowInfo = database.getFlow(flow.getId()) + def flowInfo = flow.retrieveDetailsFromDB() def mainForwardCookie = flowInfo.forwardPath.cookie.value def mainReverseCookie = flowInfo.reversePath.cookie.value def stats Wrappers.wait(statsRouterInterval) { - stats = flowStats.of(flow.getId()) + stats = flowStats.of(flow.flowId) stats.get(FLOW_RAW_BYTES, srcSwitchId, mainForwardCookie).hasNonZeroValues() stats.get(FLOW_RAW_BYTES, srcSwitchId, mainReverseCookie).hasNonZeroValues() } @@ -236,11 +236,11 @@ class FlowStatSpec extends HealthCheckSpecification { !stats.get(FLOW_RAW_BYTES, srcSwitchId, protectedReverseCookie).hasNonZeroValues() when: "Break ISL on the main path (bring port down) to init auto swap" - def islToBreak = pathHelper.getInvolvedIsls(currentPath)[0] + def islToBreak = flowPathInfo.flowPath.path.forward.getInvolvedIsls().first() islHelper.breakIsl(islToBreak) Wrappers.wait(PROTECTED_PATH_INSTALLATION_TIME) { - assert northboundV2.getFlowStatus(flow.id).status == FlowState.UP - assert pathHelper.convert(northbound.getFlowPath(flow.id)) == currentProtectedPath + assert flow.retrieveFlowStatus().status == UP + assert flow.retrieveAllEntityPaths().getPathNodes(FORWARD, false) == protectedPath } def timeAfterSwap = new Date().getTime() @@ -250,9 +250,9 @@ class FlowStatSpec extends HealthCheckSpecification { statsHelper."force kilda to collect stats"() then: "System collects stats for previous egress cookie of protected path with non zero value" - def newFlowStats = flowStats.of(flow.getId()) + def newFlowStats = flowStats.of(flow.flowId) Wrappers.wait(statsRouterInterval) { - flowStats.of(flow.getId()).get(FLOW_RAW_BYTES, srcSwitchId, protectedReverseCookie).hasNonZeroValues() + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitchId, protectedReverseCookie).hasNonZeroValues() } and: "System doesn't collect stats for previous main path cookies due to main path is broken" @@ -271,25 +271,21 @@ class FlowStatSpec extends HealthCheckSpecification { def srcSwitchId = switchPair.getSrc().getDpId() and: "A flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withProtectedPath(true).build() + .create() and: "All alternative paths are unavailable (bring ports down on the source switch)" - def flowPathInfo = northbound.getFlowPath(flow.flowId) - def currentProtectedPath = pathHelper.convert(flowPathInfo.protectedPath) - def protectedIsls = pathHelper.getInvolvedIsls(currentProtectedPath) - def altIsls = topology.getRelatedIsls(switchPair.src) - - pathHelper.getInvolvedIsls(pathHelper.convert(flowPathInfo.forwardPath)) - - protectedIsls.first() + def flowPathInfo = flow.retrieveAllEntityPaths() + def mainPathIsls = flowPathInfo.flowPath.path.forward.getInvolvedIsls() + def protectedPathIsls = flowPathInfo.flowPath.protectedPath.forward.getInvolvedIsls() + def altIsls = topology.getRelatedIsls(switchPair.src) - mainPathIsls - protectedPathIsls.first() islHelper.breakIsls(altIsls) - when: "Break ISL on a protected path (bring 
port down) for changing the flow state to DEGRADED" - islHelper.breakIsl(protectedIsls.first()) + islHelper.breakIsl(protectedPathIsls.first()) Wrappers.wait(WAIT_OFFSET) { - verifyAll(northboundV2.getFlow(flow.flowId)) { - status == "Degraded" + verifyAll(flow.retrieveDetails()) { + status == DEGRADED statusDetails.mainPath == "Up" statusDetails.protectedPath == "Down" } @@ -297,18 +293,17 @@ class FlowStatSpec extends HealthCheckSpecification { and: "Generate traffic on the given flow" def traffExam = traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam).buildExam(flowHelperV2.toV1(flow), - (int) flow.maximumBandwidth, 3).tap { udp = true } + Exam exam = flow.traffExam(traffExam, flow.maximumBandwidth, 3).forward.tap { udp = true } exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() statsHelper."force kilda to collect stats"() then: "System collects stats for main path cookies" - def flowInfo = database.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetailsFromDB() def mainForwardCookie = flowInfo.forwardPath.cookie.value def mainReverseCookie = flowInfo.reversePath.cookie.value Wrappers.wait(statsRouterInterval) { - def stats = flowStats.of(flow.getFlowId()) + def stats = flowStats.of(flow.flowId) stats.get(FLOW_RAW_BYTES, srcSwitchId, mainForwardCookie).hasNonZeroValues() stats.get(FLOW_RAW_BYTES, srcSwitchId, mainReverseCookie).hasNonZeroValues() } @@ -321,28 +316,27 @@ class FlowStatSpec extends HealthCheckSpecification { def srcSwitchId = switchPair.getSrc().getDpId() and: "An unmetered flow" - def flow = flowHelperV2.randomFlow(switchPair) - flow.maximumBandwidth = 0 - flow.ignoreBandwidth = true - flow.pinned = true - flow.periodicPings = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(0) + .withIgnoreBandwidth(true) + .withPinned(true) + .withPeriodicPing(true).build() + .create() when: "Generate traffic on the given flow" Date startTime = new Date() def traffExam = traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam).buildExam(flowHelperV2.toV1(flow), - (int) flow.maximumBandwidth, 3).tap { udp = true } + Exam exam = flow.traffExam(traffExam, flow.maximumBandwidth, 3).forward.tap { udp = true } exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() statsHelper."force kilda to collect stats"() then: "System collects stats for egress/ingress cookies" - def flowInfo = database.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetailsFromDB() def mainForwardCookie = flowInfo.forwardPath.cookie.value def mainReverseCookie = flowInfo.reversePath.cookie.value Wrappers.wait(statsRouterInterval) { - def stats = flowStats.of(flow.getFlowId()) + def stats = flowStats.of(flow.flowId) stats.get(FLOW_RAW_BYTES, srcSwitchId, mainForwardCookie).hasNonZeroValues() stats.get(FLOW_RAW_BYTES, srcSwitchId, mainReverseCookie).hasNonZeroValues() } @@ -358,27 +352,24 @@ class FlowStatSpec extends HealthCheckSpecification { def srcFlowPort = (topology.getAllowedPortsForSwitch( topology.find(switchPair.src.dpId)) - traffgenPortOnSrcSw).last() - def flow = flowHelperV2.randomFlow(switchPair).tap { it.source.portNumber = srcFlowPort } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withSourcePort(srcFlowPort).build() + .create() - flowHelperV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { - source = new FlowPatchEndpoint().tap {portNumber = traffgenPortOnSrcSw } - }) + flow = 
flow.partialUpdate(new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { portNumber = traffgenPortOnSrcSw } }) when: "Generate traffic on the flow" def traffExam = traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam).buildExam(northbound.getFlow(flow.flowId), - (int) flow.maximumBandwidth, 5).tap { udp = true } + Exam exam = flow.traffExam(traffExam, flow.maximumBandwidth, 5).forward.tap { udp = true } exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() statsHelper."force kilda to collect stats"() then: "System collects stats for ingress/egress cookies" - def flowInfo = database.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetailsFromDB() def mainForwardCookie = flowInfo.forwardPath.cookie.value def mainReverseCookie = flowInfo.reversePath.cookie.value Wrappers.wait(statsRouterInterval) { - def stats = flowStats.of(flow.getFlowId()) + def stats = flowStats.of(flow.flowId) stats.get(FLOW_RAW_BYTES, srcSwitchId, mainForwardCookie).hasNonZeroValues() stats.get(FLOW_RAW_BYTES, srcSwitchId, mainReverseCookie).hasNonZeroValues() } @@ -392,28 +383,26 @@ class FlowStatSpec extends HealthCheckSpecification { and: "A flow with updated vlan on src endpoint via partial update" def traffgenPortOnSrcSw = topology.activeTraffGens.find { it.switchConnected == switchPair.src}.switchPort - def flow = flowHelperV2.randomFlow(switchPair).tap { it.source.portNumber = traffgenPortOnSrcSw; it.source.vlanId = 100} - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withSourcePort(traffgenPortOnSrcSw) + .withSourceVlan(100).build() + .create() - flowHelperV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { - source = new FlowPatchEndpoint().tap { vlanId = vlanId ?: 100 + 1 } - }) + flow = flow.partialUpdate(new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { vlanId = vlanId ?: 100 + 1 } }) when: "Generate traffic on the flow" - Date startTime = new Date() def traffExam = traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam).buildExam(northbound.getFlow(flow.flowId), - (int) flow.maximumBandwidth, 5).tap { udp = true } + Exam exam =flow.traffExam(traffExam, flow.maximumBandwidth, 5).forward.tap { udp = true } exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() statsHelper."force kilda to collect stats"() then: "System collects stats for ingress/egress cookies" - def flowInfo = database.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetailsFromDB() def mainForwardCookie = flowInfo.forwardPath.cookie.value def mainReverseCookie = flowInfo.reversePath.cookie.value Wrappers.wait(statsRouterInterval) { - def stats = flowStats.of(flow.getFlowId()) + def stats = flowStats.of(flow.flowId) stats.get(FLOW_RAW_BYTES, srcSwitchId, mainForwardCookie).hasNonZeroValues() stats.get(FLOW_RAW_BYTES, srcSwitchId, mainReverseCookie).hasNonZeroValues() } @@ -425,28 +414,23 @@ class FlowStatSpec extends HealthCheckSpecification { and: "A flow with updated inner vlan on src endpoint via partial update" - def flow = flowHelperV2.randomFlow(switchPair, true) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair, true) - flowHelperV2.partialUpdate(flow.flowId, new FlowPatchV2().tap { - source = new FlowPatchEndpoint().tap { innerVlanId = flow.source.vlanId - 1 } - }) + flow = flow.partialUpdate( new FlowPatchV2().tap { source = new FlowPatchEndpoint().tap { innerVlanId = flow.source.vlanId - 1 } }) when: "Generate traffic 
on the flow" - Date startTime = new Date() def traffExam = traffExamProvider.get() - def exam = new FlowTrafficExamBuilder(topology, traffExam).buildExam(northbound.getFlow(flow.flowId), - (int) flow.maximumBandwidth, 5) + def exam = flow.traffExam(traffExam, flow.maximumBandwidth, 5).forward exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() statsHelper."force kilda to collect stats"() then: "System collects stats for ingress/egress cookies" - def flowInfo = database.getFlow(flow.flowId) + def flowInfo = flow.retrieveDetailsFromDB() def mainForwardCookie = flowInfo.forwardPath.cookie.value def mainReverseCookie = flowInfo.reversePath.cookie.value Wrappers.wait(statsRouterInterval) { - def stats = flowStats.of(flow.getFlowId()) + def stats = flowStats.of(flow.flowId) stats.get(FLOW_RAW_BYTES, switchPair.getSrc().getDpId(), mainForwardCookie).hasNonZeroValues() stats.get(FLOW_RAW_BYTES, switchPair.getSrc().getDpId(), mainReverseCookie).hasNonZeroValues() } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/HaFlowStatSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/HaFlowStatSpec.groovy index 751880eccf6..af956ba731f 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/HaFlowStatSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/HaFlowStatSpec.groovy @@ -101,10 +101,10 @@ class HaFlowStatSpec extends HealthCheckSpecification { where: description | direction | subFlow - "sub-flow-a" | FORWARD | haFlow.subFlows.flowId.find { it.contains("haflow-a") } - "sub-flow-a" | REVERSE | haFlow.subFlows.flowId.find { it.contains("haflow-a") } - "sub-flow-b" | FORWARD | haFlow.subFlows.flowId.find { it.contains("haflow-b") } - "sub-flow-b" | REVERSE | haFlow.subFlows.flowId.find { it.contains("haflow-b") } + "sub-flow-a" | FORWARD | haFlow.subFlows.haSubFlowId.find { it.contains("ha-flow-a") } + "sub-flow-a" | REVERSE | haFlow.subFlows.haSubFlowId.find { it.contains("ha-flow-a") } + "sub-flow-b" | FORWARD | haFlow.subFlows.haSubFlowId.find { it.contains("ha-flow-b") } + "sub-flow-b" | REVERSE | haFlow.subFlows.haSubFlowId.find { it.contains("ha-flow-b") } } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/MflStatSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/MflStatSpec.groovy index bee1d23a4cf..8df9de8fecb 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/MflStatSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/MflStatSpec.groovy @@ -1,8 +1,17 @@ package org.openkilda.functionaltests.spec.stats +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RAW_BYTES +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RO +import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW + import org.openkilda.functionaltests.HealthCheckSpecification import 
org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.model.stats.FlowStats import org.openkilda.messaging.info.event.IslChangeType import org.openkilda.messaging.payload.flow.FlowState @@ -13,6 +22,7 @@ import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.traffexam.TraffExamService import org.openkilda.testing.service.traffexam.model.Exam import org.openkilda.testing.tools.FlowTrafficExamBuilder + import org.springframework.beans.factory.annotation.Autowired import spock.lang.Narrative import spock.lang.See @@ -20,14 +30,6 @@ import spock.lang.Shared import javax.inject.Provider -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT -import static org.openkilda.functionaltests.model.stats.FlowStatsMetric.FLOW_RAW_BYTES -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RO -import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW - @See("https://github.com/telstra/open-kilda/tree/develop/docs/design/fl-statistics") @Narrative("""Now we have two FL instances: Management and Statistics. - FL Stats: collect statistics only from the switches. @@ -40,8 +42,11 @@ class MflStatSpec extends HealthCheckSpecification { FlowStats flowStats @Autowired - Provider traffExamProvider + @Shared + FlowFactory flowFactory + @Autowired + Provider traffExamProvider //TODO: split these long tests into set of the smaller ones after https://github.com/telstra/open-kilda/pull/5256 // is merged into development @Tags([LOW_PRIORITY]) @@ -49,27 +54,26 @@ class MflStatSpec extends HealthCheckSpecification { given: "A flow" assumeTrue(topology.activeTraffGens.size() > 1, "Require at least 2 switches with connected traffgen") def (Switch srcSwitch, Switch dstSwitch) = topology.activeTraffGens*.switchConnected - def flow = flowHelper.randomFlow(srcSwitch, dstSwitch) - flow.maximumBandwidth = 100 - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch).withBandwidth(100).build() + .createV1() + def waitInterval = 10 //seconds when: "Generate traffic on the given flow" def startTime = new Date().getTime() def traffExam = traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam).buildExam(flow, (int) flow.maximumBandwidth, 5) - .tap{ udp = true } + Exam exam = flow.traffExam(traffExam, flow.maximumBandwidth, 5).forward.tap{ udp = true } exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() statsHelper."force kilda to collect stats"() then: "Stat in TSDB is created" Wrappers.wait(statsRouterRequestInterval + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()).hasNonZeroValuesAfter(startTime) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()).hasNonZeroValuesAfter(startTime) } when: "Leave src switch only with management controller and disconnect from stats" - def statsBlockData = lockKeeper.knockoutSwitch(srcSwitch, RO) + def statsBlockData = switchHelper.knockoutSwitchFromStatsController(srcSwitch) switchIsConnectedToFl(srcSwitch.dpId, true, false) def 
timeWhenSwitchWasDisconnectedFromFloodlight = new Date().getTime() @@ -81,14 +85,14 @@ class MflStatSpec extends HealthCheckSpecification { def statFromMgmtController //first 60 seconds - trying to retrieve stats from management controller, next 60 seconds from stat controller Wrappers.wait(statsRouterRequestInterval * 2 + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(timeWhenSwitchWasDisconnectedFromFloodlight) } when: "Leave src switch only with stats controller and disconnect from management" lockKeeper.reviveSwitch(srcSwitch, statsBlockData) switchIsConnectedToFl(srcSwitch.dpId, true, true) - def mgmtBlockData = lockKeeper.knockoutSwitch(srcSwitch, RW) + def mgmtBlockData = switchHelper.knockoutSwitch(srcSwitch, RW) switchIsConnectedToFl(srcSwitch.dpId, false, true) def timeWhenSwitchWasDisconnectedFromManagement = new Date().getTime() @@ -99,7 +103,7 @@ class MflStatSpec extends HealthCheckSpecification { then: "Stat on the src switch should be collected because statistic controller is set" def statFromStatsController Wrappers.wait(statsRouterRequestInterval + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(timeWhenSwitchWasDisconnectedFromManagement) } @@ -116,7 +120,7 @@ class MflStatSpec extends HealthCheckSpecification { then: "Stat on the src switch should not be collected because it is disconnected from controllers" def statAfterDeletingControllers Wrappers.timedLoop(statsRouterRequestInterval) { - assert !flowStats.of(flow.getId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + assert !flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(timeWhenSwitchWasDisconnectedFromBoth) sleep((waitInterval * 1000).toLong()) } @@ -132,7 +136,7 @@ class MflStatSpec extends HealthCheckSpecification { then: "Old statistic should be collected" Wrappers.wait(statsRouterRequestInterval + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(startTime) } } @@ -143,27 +147,26 @@ class MflStatSpec extends HealthCheckSpecification { given: "A flow" assumeTrue(topology.activeTraffGens.size() > 1, "Require at least 2 switches with connected traffgen") def (Switch srcSwitch, Switch dstSwitch) = topology.activeTraffGens*.switchConnected - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flow.maximumBandwidth = 100 - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withBandwidth(100).build() + .create() when: "Generate traffic on the given flow" def startTime = new Date().getTime() def traffExam = traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam) - .buildExam(flowHelperV2.toV1(flow), (int) flow.maximumBandwidth, 5).tap { udp = true } + Exam exam = flow.traffExam(traffExam, flow.maximumBandwidth, 5).forward.tap { udp = true } exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() then: "Stat in TSDB is created" def waitInterval = 10 Wrappers.wait(statsRouterRequestInterval + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, 
srcSwitch.getDpId()) .hasNonZeroValuesAfter(startTime) } when: "Src switch is only left with management controller (no stats controller)" - def statsBlockData = lockKeeper.knockoutSwitch(srcSwitch, RO) + def statsBlockData = switchHelper.knockoutSwitchFromStatsController(srcSwitch) def needToRestoreConnectionToStats = true switchIsConnectedToFl(srcSwitch.dpId, true, false) def timeWhenSwitchWasDisconnectedFromFloodlight = new Date().getTime() @@ -175,7 +178,7 @@ class MflStatSpec extends HealthCheckSpecification { then: "Stat on the src switch should be collected because management controller is still set" //first 60 seconds - trying to retrieve stats from management controller, next 60 seconds from stat controller Wrappers.wait(statsRouterRequestInterval * 2 + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(timeWhenSwitchWasDisconnectedFromFloodlight) } @@ -194,7 +197,7 @@ class MflStatSpec extends HealthCheckSpecification { then: "Stat on the src switch should be collected because statistic controller is set" Wrappers.wait(statsRouterRequestInterval + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(timeWhenSwitchWasDisconnectedFromManagement) } @@ -212,7 +215,7 @@ class MflStatSpec extends HealthCheckSpecification { then: "Stat on the src switch should not be collected because it is disconnected from controllers" Wrappers.timedLoop(statsRouterRequestInterval) { sleep((waitInterval * 1000).toLong()) - assert !flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + assert !flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(timeWhenSwitchWasDisconnectedFromBoth) } @@ -225,12 +228,12 @@ class MflStatSpec extends HealthCheckSpecification { Wrappers.wait(discoveryInterval + WAIT_OFFSET) { assert srcSwitch.dpId in northbound.getActiveSwitches()*.switchId assert northbound.getAllLinks().findAll { it.state == IslChangeType.FAILED }.empty - assert northbound.getFlowStatus(flow.flowId).status != FlowState.DOWN + assert flow.retrieveFlowStatus().status != FlowState.DOWN } then: "Old statistic should be collected" Wrappers.wait(statsRouterRequestInterval + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(startTime) } @@ -244,13 +247,13 @@ class MflStatSpec extends HealthCheckSpecification { flHelper.filterRegionsByMode(it.regions, RO).size() == 2 } assumeTrue(srcSwitch != null, "This test requires a tg switch in 2 RW regions and 2 RO regions") def dstSwitch = topology.activeTraffGens*.switchConnected.find { it.dpId != srcSwitch.dpId } - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flow.maximumBandwidth = 100 - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withBandwidth(100).build() + .create() when: "Src switch is only left with 1 management controller (no stats controllers)" def regionToStay = findMgmtFls(northboundV2.getSwitchConnections(srcSwitch.dpId))*.regionName.first() - def blockData = lockKeeper.knockoutSwitch(srcSwitch, srcSwitch.regions - regionToStay) + def blockData = switchHelper.knockoutSwitch(srcSwitch, srcSwitch.regions - regionToStay) 
Wrappers.wait(WAIT_OFFSET / 2) { with (northboundV2.getSwitchConnections(srcSwitch.dpId).connections) { it*.regionName == [regionToStay] it*.connectMode == [SwitchConnectMode.READ_WRITE.toString()] @@ -259,10 +262,7 @@ class MflStatSpec extends HealthCheckSpecification { and: "Generate traffic on the given flow" def startTime = new Date().getTime() def traffExam = traffExamProvider.get() - Exam exam = new FlowTrafficExamBuilder(topology, traffExam) - .buildExam(flowHelperV2.toV1(flow), (int) flow.maximumBandwidth, 5).tap { - udp = true - } + Exam exam = flow.traffExam(traffExam, flow.maximumBandwidth, 5).forward.tap { udp = true } exam.setResources(traffExam.startExam(exam)) assert traffExam.waitExam(exam).hasTraffic() @@ -270,7 +270,7 @@ class MflStatSpec extends HealthCheckSpecification { def waitInterval = 10 //first 60 seconds - trying to retrieve stats from management controller, next 60 seconds from stat controller Wrappers.wait(statsRouterRequestInterval * 2 + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(startTime) } @@ -281,7 +281,7 @@ class MflStatSpec extends HealthCheckSpecification { } // '.first' in the line below, just for getting String instead of Array. regionToStay = (findMgmtFls(northboundV2.getSwitchConnections(srcSwitch.dpId))*.regionName - regionToStay).first() - blockData = lockKeeper.knockoutSwitch(srcSwitch, srcSwitch.regions - regionToStay) + blockData = switchHelper.knockoutSwitch(srcSwitch, srcSwitch.regions - regionToStay) Wrappers.wait(WAIT_OFFSET / 2) { assert northboundV2.getSwitchConnections(srcSwitch.dpId).connections*.regionName == [regionToStay] } @@ -294,7 +294,7 @@ class MflStatSpec extends HealthCheckSpecification { then: "Stat on the src switch should be collected (second RW switch available)" //first 60 seconds - trying to retrieve stats from management controller, next 60 seconds from stat controller Wrappers.wait(statsRouterRequestInterval * 2 + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(timeWhenSwitchLeftWithoutStatsControllers) } @@ -304,7 +304,7 @@ class MflStatSpec extends HealthCheckSpecification { assert northboundV2.getSwitchConnections(srcSwitch.dpId).connections.size() == 4 } regionToStay = findStatFls(northboundV2.getSwitchConnections(srcSwitch.dpId))*.regionName.first() - blockData = lockKeeper.knockoutSwitch(srcSwitch, srcSwitch.regions - regionToStay) + blockData = switchHelper.knockoutSwitch(srcSwitch, srcSwitch.regions - regionToStay) Wrappers.wait(WAIT_OFFSET / 2) { assert northboundV2.getSwitchConnections(srcSwitch.dpId).connections*.regionName == [regionToStay] } @@ -316,7 +316,7 @@ class MflStatSpec extends HealthCheckSpecification { then: "Stat on the src switch should be collected (first RO switch available)" Wrappers.wait(statsRouterRequestInterval + WAIT_OFFSET, waitInterval) { - flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(timeWhenSwitchLeftWithoutManagementControllers) } @@ -338,7 +338,7 @@ class MflStatSpec extends HealthCheckSpecification { then: "Stat on the src switch should be collected (second RO switch available)" Wrappers.wait(statsRouterRequestInterval + WAIT_OFFSET, waitInterval) { - 
flowStats.of(flow.getFlowId()).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) + flowStats.of(flow.flowId).get(FLOW_RAW_BYTES, srcSwitch.getDpId()) .hasNonZeroValuesAfter(timeWhenSwitchLeftWithForeginStatsController) } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/SimulateStatsSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/SimulateStatsSpec.groovy index 47aa024a885..f097f2e5dd3 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/SimulateStatsSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/SimulateStatsSpec.groovy @@ -1,5 +1,8 @@ package org.openkilda.functionaltests.spec.stats +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended + import groovy.time.TimeCategory import org.apache.kafka.clients.producer.KafkaProducer import org.apache.kafka.clients.producer.ProducerRecord @@ -59,7 +62,7 @@ class SimulateStatsSpec extends HealthCheckSpecification { @Shared FlowStats stats @Shared - FlowRequestV2 flow + FlowExtended flow @Shared KafkaProducer producer @Shared @@ -70,12 +73,14 @@ class SimulateStatsSpec extends HealthCheckSpecification { final int tableId = 0 @Shared Switch sw + @Autowired + @Shared + FlowFactory flowFactory @Override def setupSpec() { def (Switch src, Switch dst) = topology.activeSwitches - flow = flowHelperV2.randomFlow(src, dst) - flowHelperV2.addFlow(flow) + flow = flowFactory.getRandom(src, dst) def srcRules = northbound.getSwitchRules(src.dpId).flowEntries.findAll { !new Cookie(it.cookie).serviceFlag } producer = new KafkaProducer(producerProps) sw = topology.activeSwitches.first() @@ -98,7 +103,7 @@ class SimulateStatsSpec extends HealthCheckSpecification { producer.send(new ProducerRecord(statsTopic, sw.dpId.toString(), buildMessage(data).toJson())).get() producer.flush() wait(statsRouterRequestInterval + WAIT_OFFSET) { - stats = flowStats.of(flow.getFlowId()) + stats = flowStats.of(flow.flowId) assert stats.get(FLOW_RAW_PACKETS, inPort, outPort).hasValue(NOVI_MAX_PACKET_COUNT) } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/YFlowStatSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/YFlowStatSpec.groovy index 59ce3cfa4c5..43c9e805815 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/YFlowStatSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/stats/YFlowStatSpec.groovy @@ -77,6 +77,8 @@ class YFlowStatSpec extends HealthCheckSpecification { } statsHelper."force kilda to collect stats"() assert yFlowStats.of(yFlow.yFlowId).get(Y_FLOW_SHARED_BITS).getDataPoints().size() > 2 + assert flowStats.of(yFlow.getSubFlows().get(0).getFlowId()).get(FLOW_INGRESS_BITS).getDataPoints().size() > 2 + assert flowStats.of(yFlow.getSubFlows().get(1).getFlowId()).get(FLOW_INGRESS_BITS).getDataPoints().size() > 2 } stats = yFlowStats.of(yFlow.yFlowId) subflow1Stats = flowStats.of(yFlow.getSubFlows().get(0).getFlowId()) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/FlowRulesSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/FlowRulesSpec.groovy index 
feaa36f4dd5..31a108d3dc4 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/FlowRulesSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/FlowRulesSpec.groovy @@ -1,23 +1,42 @@ package org.openkilda.functionaltests.spec.switches +import static com.shazam.shazamcrest.matcher.Matchers.sameBeanAs +import static groovyx.gpars.GParsPool.withPool +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.functionaltests.extension.tags.Tag.VIRTUAL +import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW +import static spock.util.matcher.HamcrestSupport.expect + import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.SwitchNotFoundExpectedError import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory +import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.command.switches.DeleteRulesAction -import org.openkilda.messaging.error.MessageError import org.openkilda.messaging.info.event.PathNode import org.openkilda.messaging.info.rule.FlowEntry import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.model.FlowEncapsulationType +import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType -import org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.traffexam.TraffExamService -import org.openkilda.testing.tools.FlowTrafficExamBuilder + import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative @@ -25,23 +44,6 @@ import spock.lang.Shared import javax.inject.Provider -import static com.shazam.shazamcrest.matcher.Matchers.sameBeanAs -import static groovyx.gpars.GParsPool.withPool -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static 
org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT -import static org.openkilda.functionaltests.extension.tags.Tag.VIRTUAL -import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID -import static org.openkilda.testing.Constants.RULES_DELETION_TIME -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW -import static spock.util.matcher.HamcrestSupport.expect - @Narrative("""Verify how Kilda behaves with switch rules (either flow rules or default rules) under different circumstances: e.g. persisting rules on newly connected switch, installing default rules on new switch etc.""") @@ -49,7 +51,12 @@ class FlowRulesSpec extends HealthCheckSpecification { @Autowired Provider traffExamProvider - + @Autowired + @Shared + FlowFactory flowFactory + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory @Shared Switch srcSwitch, dstSwitch @Shared @@ -82,8 +89,7 @@ class FlowRulesSpec extends HealthCheckSpecification { @Tags([VIRTUAL, SMOKE, SWITCH_RECOVER_ON_FAIL]) def "Pre-installed flow rules are not deleted from a new switch connected to the controller"() { given: "A switch with proper flow rules installed (including default) and not connected to the controller" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flowHelperV2.addFlow(flow) + flowFactory.getRandom(srcSwitch, dstSwitch) def defaultPlusFlowRules = [] Wrappers.wait(RULES_INSTALLATION_TIME) { @@ -105,8 +111,7 @@ class FlowRulesSpec extends HealthCheckSpecification { @IterationTag(tags = [SMOKE_SWITCHES], iterationNameRegex = /delete-action=DROP_ALL\)/) def "Able to delete rules from a switch (delete-action=#data.deleteRulesAction)"() { given: "A switch with some flow rules installed" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flowHelperV2.addFlow(flow) + flowFactory.getRandom(srcSwitch, dstSwitch) when: "Delete rules from the switch" def expectedRules = data.getExpectedRules(srcSwitch, srcSwDefaultRules) @@ -185,8 +190,7 @@ class FlowRulesSpec extends HealthCheckSpecification { @Tags([SMOKE, SMOKE_SWITCHES]) def "Able to delete switch rules by cookie/priority #data.identifier"() { given: "A switch with some flow rules installed" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flowHelperV2.addFlow(flow) + flowFactory.getRandom(srcSwitch, dstSwitch) when: "Delete switch rules by #data.identifier" //exclude the "SERVER_42_INPUT" rule, this rule has less priority than usual flow rule @@ -217,9 +221,7 @@ class FlowRulesSpec extends HealthCheckSpecification { def "Attempt to delete switch rules by supplying non-existing #data.description leaves all rules intact"() { given: "A switch with some flow rules installed" assumeTrue(data.description != "priority", "https://github.com/telstra/open-kilda/issues/1701") - - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flowHelperV2.addFlow(flow) + flowFactory.getRandom(srcSwitch, dstSwitch) def ingressRule = (northbound.getSwitchRules(srcSwitch.dpId).flowEntries - data.defaultRules).find { new Cookie(it.cookie).serviceFlag @@ -228,7 +230,6 @@ class FlowRulesSpec extends HealthCheckSpecification { data.defaultRules = 
(data.defaultRules + ingressRule + sharedRulesCount) } - when: "Delete switch rules by non-existing #data.description" def deletedRules = switchHelper.deleteSwitchRules(data.switch.dpId, data.value) @@ -254,7 +255,7 @@ class FlowRulesSpec extends HealthCheckSpecification { @IterationTag(tags = [SMOKE], iterationNameRegex = /inPort/) def "Able to delete switch rules by #data.description"() { given: "A switch with some flow rules installed" - flowHelperV2.addFlow(flow) + flow.create() def cookiesBefore = northbound.getSwitchRules(data.switch.dpId).flowEntries*.cookie.sort() def s42IsEnabled = switchHelper.getCachedSwProps(data.switch.dpId).server42FlowRtt @@ -276,7 +277,7 @@ class FlowRulesSpec extends HealthCheckSpecification { where: data << [[description : "inPort", - flow : buildFlow(), + flow : flowFactory.getBuilder(srcSwitch, dstSwitch).build(), switch : srcSwitch, inVlan : null, encapsulationType: null, @@ -284,7 +285,7 @@ class FlowRulesSpec extends HealthCheckSpecification { removedRules : 3 ].tap { inPort = flow.source.portNumber }, [description : "inVlan", - flow : buildFlow(), + flow : flowFactory.getBuilder(srcSwitch, dstSwitch).build(), switch : srcSwitch, inPort : null, encapsulationType: "TRANSIT_VLAN", @@ -292,14 +293,14 @@ class FlowRulesSpec extends HealthCheckSpecification { removedRules : 1 ].tap { inVlan = flow.source.vlanId }, [description : "inPort and inVlan", - flow : buildFlow(), + flow : flowFactory.getBuilder(srcSwitch, dstSwitch).build(), switch : srcSwitch, encapsulationType: "TRANSIT_VLAN", outPort : null, removedRules : 1 ].tap { inVlan = flow.source.vlanId; inPort = flow.source.portNumber }, [description : "outPort", - flow : buildFlow(), + flow : flowFactory.getBuilder(srcSwitch, dstSwitch).build(), switch : dstSwitch, inPort : null, inVlan : null, @@ -307,14 +308,13 @@ class FlowRulesSpec extends HealthCheckSpecification { removedRules : 1 ].tap { outPort = flow.destination.portNumber }, ] - flow = data.flow as FlowRequestV2 + flow = data.flow as FlowExtended } @IterationTag(tags = [SMOKE], iterationNameRegex = /inVlan/) def "Attempt to delete switch rules by supplying non-existing #data.description keeps all rules intact"() { given: "A switch with some flow rules installed" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flowHelperV2.addFlow(flow) + flowFactory.getRandom(srcSwitch, dstSwitch) def originalRules = northbound.getSwitchRules(data.switch.dpId).flowEntries*.cookie.sort() when: "Delete switch rules by non-existing #data.description" @@ -369,13 +369,13 @@ class FlowRulesSpec extends HealthCheckSpecification { switchPair.paths.findAll { it != longPath }.each { pathHelper.makePathMorePreferable(longPath, it) } and: "Create a transit-switch flow going through these switches" - def flow = flowHelperV2.randomFlow(switchPair) - flow.maximumBandwidth = maximumBandwidth - flow.ignoreBandwidth = maximumBandwidth ? false : true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withBandwidth(maximumBandwidth) + .withIgnoreBandwidth(maximumBandwidth ? 
false : true).build() + .create() and: "Remove flow rules so that they become 'missing'" - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId)*.dpId + def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() def defaultPlusFlowRulesMap = involvedSwitches.collectEntries { switchId -> [switchId, northbound.getSwitchRules(switchId).flowEntries] } @@ -384,12 +384,13 @@ def swProps = switchHelper.getCachedSwProps(switchId) def switchIdInSrcOrDst = (switchId in [switchPair.src.dpId, switchPair.dst.dpId]) def defaultAmountOfFlowRules = 2 // ingress + egress - def amountOfServer42Rules = (switchIdInSrcOrDst && swProps.server42FlowRtt ? 1 : 0) - if (swProps.server42FlowRtt) { - if ((flow.destination.getSwitchId() == switchId && flow.destination.vlanId) || ( - flow.source.getSwitchId() == switchId && flow.source.vlanId)) - amountOfServer42Rules += 1 + def amountOfServer42Rules = 0 + if(swProps.server42FlowRtt && switchIdInSrcOrDst) { + amountOfServer42Rules +=1 + switchId == switchPair.src.dpId && flow.source.vlanId && ++amountOfServer42Rules + switchId == switchPair.dst.dpId && flow.destination.vlanId && ++amountOfServer42Rules } + def rulesCount = defaultAmountOfFlowRules + amountOfServer42Rules + (switchIdInSrcOrDst ? 1 : 0) [switchId, (rulesCount)] @@ -435,8 +436,7 @@ class FlowRulesSpec extends HealthCheckSpecification { then: "An error is received (404 code)" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 - exc.responseBodyAsString.to(MessageError).errorMessage == "Switch '$NON_EXISTENT_SWITCH_ID' not found" + new SwitchNotFoundExpectedError("Switch '${NON_EXISTENT_SWITCH_ID}' not found", ~/Error in switch validation/).matches(exc) where: action | method @@ -450,18 +450,22 @@ def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(2).random() and: "Create a flow with protected path" - def flow = flowHelperV2.randomFlow(switchPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) - - flowHelper.verifyRulesOnProtectedFlow(flow.flowId) - def flowPathInfo = northbound.getFlowPath(flow.flowId) - def mainFlowPath = flowPathInfo.forwardPath - def protectedFlowPath = flowPathInfo.protectedPath.forwardPath - def commonNodeIds = mainFlowPath*.switchId.intersect(protectedFlowPath*.switchId) - def uniqueNodes = (protectedFlowPath.findAll { !commonNodeIds.contains(it.switchId) } + mainFlowPath.findAll { + def flow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true).build() + .create() + + def flowPathInfo = flow.retrieveAllEntityPaths() + + HashMap<SwitchId, List<FlowEntry>> flowInvolvedSwitchesWithRules = flowPathInfo.getInvolvedSwitches() + .collectEntries{ [(it): switchRulesFactory.get(it).getRules()] } as HashMap<SwitchId, List<FlowEntry>> + flow.verifyRulesForProtectedFlowOnSwitches(flowInvolvedSwitchesWithRules) + + def mainFlowPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def protectedFlowPath = flowPathInfo.getPathNodes(Direction.FORWARD, true) + List<SwitchId> commonNodeIds = mainFlowPath*.switchId.intersect(protectedFlowPath*.switchId) + List<SwitchId> uniqueNodes = (protectedFlowPath.findAll { !commonNodeIds.contains(it.switchId) } + mainFlowPath.findAll { !commonNodeIds.contains(it.switchId) - })*.switchId + })*.switchId.unique() def rulesOnSwitchesBefore = (commonNodeIds + uniqueNodes).collectEntries { [it, northbound.getSwitchRules(it).flowEntries.sort { it.cookie }] } @@ -525,9 +529,8 @@ class FlowRulesSpec extends
HealthCheckSpecification { def (srcSwitch, dstSwitch) = [isl.srcSwitch, isl.dstSwitch] and: "Create a flow going through these switches" - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flowHelperV2.addFlow(flow) - def flowInfo = database.getFlow(flow.flowId) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch) + def flowInfo = flow.retrieveDetailsFromDB() def flowRulesSrcSw = getFlowRules(srcSwitch) def flowRulesDstSw = getFlowRules(dstSwitch) def sharedRuleSrcSw = flowRulesSrcSw.find { new Cookie(it.cookie).getType() == CookieType.SHARED_OF_FLOW && @@ -543,9 +546,7 @@ class FlowRulesSpec extends HealthCheckSpecification { when: "Start traffic examination" def traffExam = traffExamProvider.get() - def examVlanFlow = new FlowTrafficExamBuilder(topology, traffExam).buildBidirectionalExam( - flowHelperV2.toV1(flow), 100, 2 - ) + def examVlanFlow = flow.traffExam(traffExam, 100, 2) withPool { [examVlanFlow.forward, examVlanFlow.reverse].eachParallel { direction -> def resources = traffExam.startExam(direction) @@ -591,19 +592,19 @@ class FlowRulesSpec extends HealthCheckSpecification { } when: "Break the flow ISL (bring switch port down) to cause flow rerouting" - def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def actualFlowPath = flow.retrieveAllEntityPaths() // Switches may have parallel links, so we need to get involved ISLs. - def islToFail = pathHelper.getInvolvedIsls(flowPath).first() + def islToFail = actualFlowPath.flowPath.getInvolvedIsls().first() islHelper.breakIsl(islToFail) then: "The flow was rerouted after reroute timeout" def flowInfoAfterReroute - def rulesAfterRerouteSrcSw - def rulesAfterRerouteDstSw + List<FlowEntry> rulesAfterRerouteSrcSw + List<FlowEntry> rulesAfterRerouteDstSw Wrappers.wait(rerouteDelay + WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - assert PathHelper.convert(northbound.getFlowPath(flow.flowId)) != flowPath - flowInfoAfterReroute = database.getFlow(flow.flowId) + assert flow.retrieveFlowStatus().status == FlowState.UP + assert flow.retrieveAllEntityPaths() != actualFlowPath + flowInfoAfterReroute = flow.retrieveDetailsFromDB() rulesAfterRerouteSrcSw = getFlowRules(srcSwitch) rulesAfterRerouteDstSw = getFlowRules(dstSwitch) //system doesn't reinstall shared rule @@ -662,13 +663,13 @@ class FlowRulesSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().withBothSwitchesVxLanEnabled().random() and: "Create a flow with vxlan encapsulation" - def flow = flowHelperV2.randomFlow(switchPair) - flow.encapsulationType = FlowEncapsulationType.VXLAN - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withEncapsulationType(FlowEncapsulationType.VXLAN).build() + .create() and: "Delete flow rules so that they become 'missing'" - def flowInfoFromDb = database.getFlow(flow.flowId) - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId)*.dpId + def flowInfoFromDb = flow.retrieveDetailsFromDB() + def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() def transitSwitchIds = involvedSwitches[1..-2] def defaultPlusFlowRulesMap = involvedSwitches.collectEntries { switchId -> [switchId, northbound.getSwitchRules(switchId).flowEntries] @@ -678,12 +679,13 @@ class FlowRulesSpec extends HealthCheckSpecification { def swProps = switchHelper.getCachedSwProps(switchId) def switchIdInSrcOrDst = (switchId in [switchPair.src.dpId, switchPair.dst.dpId]) def defaultAmountOfFlowRules = 2 // ingress + egress - def
amountOfServer42Rules = (switchIdInSrcOrDst && swProps.server42FlowRtt ? 1 : 0) - if (swProps.server42FlowRtt) { - if ((flow.destination.getSwitchId() == switchId && flow.destination.vlanId) || ( - flow.source.getSwitchId() == switchId && flow.source.vlanId)) - amountOfServer42Rules += 1 + def amountOfServer42Rules = 0 + if(swProps.server42FlowRtt && switchIdInSrcOrDst) { + amountOfServer42Rules +=1 + switchId == switchPair.src.dpId && flow.source.vlanId && ++amountOfServer42Rules + switchId == switchPair.dst.dpId && flow.destination.vlanId && ++amountOfServer42Rules } + def rulesCount = defaultAmountOfFlowRules + amountOfServer42Rules + (switchIdInSrcOrDst ? 1 : 0) [switchId, rulesCount] } @@ -758,9 +760,6 @@ class FlowRulesSpec extends HealthCheckSpecification { .ignoring("durationSeconds")) } - FlowRequestV2 buildFlow() { - flowHelperV2.randomFlow(srcSwitch, dstSwitch) - } List filterRules(List rules, inPort, inVlan, outPort) { if (inPort) { @@ -775,24 +774,7 @@ class FlowRulesSpec extends HealthCheckSpecification { return rules } - void checkTrafficCountersInRules(FlowEndpointV2 flowEndpoint, isTrafficThroughRuleExpected) { - def rules = northbound.getSwitchRules(flowEndpoint.switchId).flowEntries - def ingressRule = filterRules(rules, flowEndpoint.portNumber, flowEndpoint.vlanId, null)[0] - def egressRule = filterRules(rules, null, null, flowEndpoint.portNumber).find { - it.instructions.applyActions.setFieldActions*.fieldValue.contains(flowEndpoint.vlanId.toString()) - } - - assert ingressRule.flags.contains("RESET_COUNTS") - assert isTrafficThroughRuleExpected == (ingressRule.packetCount > 0) - assert isTrafficThroughRuleExpected == (ingressRule.byteCount > 0) - - assert !egressRule.flags.contains("RESET_COUNTS") - assert isTrafficThroughRuleExpected == (egressRule.packetCount > 0) - assert isTrafficThroughRuleExpected == (egressRule.byteCount > 0) - - } - List getFlowRules(Switch sw) { - northbound.getSwitchRules(sw.dpId).flowEntries.findAll { !(it.cookie in sw.defaultCookies) }.sort() + switchRulesFactory.get(sw.dpId).getRules().findAll { !(it.cookie in sw.defaultCookies) }.sort() } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy index 1643795db02..8e0f180648e 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/LagPortSpec.groovy @@ -1,5 +1,14 @@ package org.openkilda.functionaltests.spec.switches +import static groovyx.gpars.GParsPool.withPool +import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL +import static org.openkilda.model.MeterId.LACP_REPLY_METER_ID +import static org.openkilda.model.cookie.Cookie.DROP_SLOW_PROTOCOLS_LOOP_COOKIE +import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID +import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.LagNotCreatedExpectedError import org.openkilda.functionaltests.error.LagNotDeletedExpectedError @@ -7,21 +16,17 @@ import 
org.openkilda.functionaltests.error.LagNotDeletedWithNotFoundExpectedErro import org.openkilda.functionaltests.error.LagNotUpdatedExpectedError import org.openkilda.functionaltests.error.flow.FlowNotCreatedExpectedError import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.grpc.speaker.model.LogicalPortDto -import org.openkilda.messaging.error.MessageError import org.openkilda.messaging.model.grpc.LogicalPortType -import org.openkilda.model.FlowPathDirection import org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType import org.openkilda.model.cookie.PortColourCookie -import org.openkilda.northbound.dto.v1.flows.PingInput -import org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 -import org.openkilda.northbound.dto.v2.flows.FlowMirrorPointPayload import org.openkilda.northbound.dto.v2.switches.LagPortRequest import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.grpc.GrpcService import org.openkilda.testing.service.traffexam.TraffExamService -import org.openkilda.testing.tools.FlowTrafficExamBuilder + import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative @@ -30,13 +35,6 @@ import spock.lang.Shared import javax.inject.Provider -import static groovyx.gpars.GParsPool.withPool -import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE -import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL -import static org.openkilda.model.MeterId.LACP_REPLY_METER_ID -import static org.openkilda.model.cookie.Cookie.DROP_SLOW_PROTOCOLS_LOOP_COOKIE -import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID -import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW @See("https://github.com/telstra/open-kilda/blob/develop/docs/design/LAG-for-ports/README.md") @Narrative("Verify that flow can be created on a LAG port.") @@ -48,11 +46,12 @@ class LagPortSpec extends HealthCheckSpecification { @Autowired @Shared GrpcService grpc - @Autowired @Shared Provider traffExamProvider - + @Autowired + @Shared + FlowFactory flowFactory @Shared Integer lagOffset = 2000 @@ -152,20 +151,21 @@ class LagPortSpec extends HealthCheckSpecification { def lagPort = switchHelper.createLagLogicalPort(switchPair.src.dpId, portsArray as Set).logicalPortNumber when: "Create a flow" - def flow = flowHelperV2.randomFlow(switchPair, true).tap { source.portNumber = lagPort } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withSourcePort(lagPort) + .build().create() then: "Flow is valid and pingable" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } - verifyAll(northbound.pingFlow(flow.flowId, new PingInput())) { + flow.validateAndCollectDiscrepancies().isEmpty() + verifyAll(flow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } and: "System allows traffic on the flow" def traffExam = traffExamProvider.get() - def exam = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(flow.tap { source.portNumber = traffgenSrcSwPort }), 1000, 3) + //the physical port with traffGen used for LAG port creation should be specified + def exam = flow.deepCopy().tap{ source.portNumber = traffgenSrcSwPort }.traffExam(traffExam, 1000, 3) withPool { [exam.forward, exam.reverse].eachParallel { 
direction -> def resources = traffExam.startExam(direction) @@ -182,23 +182,21 @@ class LagPortSpec extends HealthCheckSpecification { .withAtLeastNTraffgensOnSource(2).random() def traffgenSrcSwPort = swPair.src.traffGens[0].switchPort def traffgenDstSwPort = swPair.src.traffGens[1].switchPort - def payload = new LagPortRequest(portNumbers: [traffgenSrcSwPort]) def lagPort = switchHelper.createLagLogicalPort(swPair.src.dpId, [traffgenSrcSwPort] as Set).logicalPortNumber when: "Create a flow" - def flow = flowHelperV2.singleSwitchFlow(swPair).tap { - source.portNumber = lagPort - destination.portNumber = traffgenDstSwPort - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withSourcePort(lagPort) + .withDestinationPort(traffgenDstSwPort) + .build().create() then: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() and: "System allows traffic on the flow" def traffExam = traffExamProvider.get() - def exam = new FlowTrafficExamBuilder(topology, traffExam) - .buildBidirectionalExam(flowHelperV2.toV1(flow.tap { source.portNumber = traffgenSrcSwPort }), 1000, 3) + //the physical port with traffGen used for LAG port creation should be specified + def exam = flow.deepCopy().tap { source.portNumber = traffgenSrcSwPort }.traffExam(traffExam, 1000, 3) withPool { [exam.forward, exam.reverse].eachParallel { direction -> def resources = traffExam.startExam(direction) @@ -235,15 +233,13 @@ class LagPortSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().random() def portsArray = topology.getAllowedPortsForSwitch(switchPair.src)[-2, -1] def lagPort = switchHelper.createLagLogicalPort(switchPair.src.dpId, portsArray as Set).logicalPortNumber - def flow = flowHelperV2.randomFlow(switchPair).tap { source.portNumber = lagPort } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair).withSourcePort(lagPort).build().create() when: "When delete LAG port" northboundV2.deleteLagLogicalPort(switchPair.src.dpId, lagPort) then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - def errorDetails = exc.responseBodyAsString.to(MessageError) new LagNotDeletedExpectedError(~/Couldn\'t delete LAG port \'$lagPort\' from switch $switchPair.src.dpId \ because flows \'\[$flow.flowId\]\' use it as endpoint/).matches(exc) } @@ -251,8 +247,7 @@ because flows \'\[$flow.flowId\]\' use it as endpoint/).matches(exc) def "Unable to create LAG on a port with flow on it"() { given: "Active switch with flow on it" def sw = topology.activeSwitches.first() - def flow = flowHelperV2.singleSwitchFlow(sw) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(sw, sw) when: "Create a LAG port with flow's port" switchHelper.createLagLogicalPort(sw.dpId, [flow.source.portNumber] as Set) @@ -267,19 +262,18 @@ because flows \'\[$flow.flowId\]\' use it as endpoint/).matches(exc) given: "An active switch with LAG port on it" def sw = topology.activeSwitches.first() def portsArray = topology.getAllowedPortsForSwitch(sw)[-2, -1] - def payload = new LagPortRequest(portNumbers: portsArray) + def flowSourcePort = portsArray[0] def lagPort = switchHelper.createLagLogicalPort(sw.dpId, portsArray as Set).logicalPortNumber when: "Create flow on ports which are in inside LAG group" - def flow = flowHelperV2.singleSwitchFlow(sw).tap { - source.portNumber = portsArray[0] - destination.portNumber = portsArray[1] - } - flowHelperV2.addFlow(flow) + 
flowFactory.getBuilder(sw, sw) + .withSourcePort(flowSourcePort) + .withDestinationPort(portsArray[1]) + .build().sendCreateRequest() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - new FlowNotCreatedExpectedError("Could not create flow", ~/Port $flow.source.portNumber \ + new FlowNotCreatedExpectedError(~/Port $flowSourcePort \ on switch $sw.dpId is used as part of LAG port $lagPort/).matches(exc) } @@ -287,19 +281,9 @@ on switch $sw.dpId is used as part of LAG port $lagPort/).matches(exc) def "Unable to create a LAG port with port which is used as mirrorPort"() { given: "A flow with mirrorPoint" def swP = switchPairs.all().neighbouring().random() - def flow = flowHelperV2.randomFlow(swP, false) - flowHelperV2.addFlow(flow) - + def flow = flowFactory.getRandom(swP, false) def mirrorPort = topology.getAllowedPortsForSwitch(swP.src).last() - def mirrorEndpoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.FORWARD.toString().toLowerCase()) - .mirrorPointSwitchId(swP.src.dpId) - .sinkEndpoint(FlowEndpointV2.builder().switchId(swP.src.dpId).portNumber(mirrorPort) - .vlanId(flowHelperV2.randomVlan()) - .build()) - .build() - flowHelperV2.createMirrorPoint(flow.flowId, mirrorEndpoint) + def mirrorEndpoint = flow.createMirrorPoint(swP.src.dpId, mirrorPort, randomVlan()) when: "Create a LAG port with port which is used as mirrorPort" switchHelper.createLagLogicalPort(swP.src.dpId, [mirrorPort] as Set) @@ -864,11 +848,10 @@ occupied by other LAG group\(s\)./).matches(exc) assert testPorts.size > 1 def maximumBandwidth = testPorts.sum { northbound.getPort(switchPair.src.dpId, it).currentSpeed } def lagPort = switchHelper.createLagLogicalPort(switchPair.src.dpId, testPorts as Set).logicalPortNumber - def flow = flowHelperV2.randomFlow(switchPair).tap { - source.portNumber = lagPort - it.maximumBandwidth = maximumBandwidth - } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withSourcePort(lagPort) + .withBandwidth(maximumBandwidth as Long) + .build().create() when: "Decrease LAG port bandwidth by deleting one port to make it lower than connected flows bandwidth sum" def updatePayload = new LagPortRequest(portNumbers: [testPorts.get(0)]) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/MetersSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/MetersSpec.groovy index b530548e56a..3388f3072a2 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/MetersSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/MetersSpec.groovy @@ -22,10 +22,12 @@ import static org.openkilda.testing.Constants.WAIT_OFFSET import static spock.util.matcher.HamcrestSupport.expect import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.MeterExpectedError import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers -import org.openkilda.messaging.error.MessageError +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.messaging.info.meter.MeterEntry import org.openkilda.messaging.info.rule.FlowEntry 
import org.openkilda.messaging.info.rule.SwitchFlowEntries @@ -36,9 +38,11 @@ import org.openkilda.testing.Constants import org.openkilda.testing.model.topology.TopologyDefinition.Switch import groovy.transform.Memoized +import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Value import org.springframework.web.client.HttpClientErrorException import spock.lang.Narrative +import spock.lang.Shared import java.math.RoundingMode @@ -54,6 +58,14 @@ class MetersSpec extends HealthCheckSpecification { static CENTEC_MAX_BURST = 32000 // Driven by the Centec specification static final String NOT_OVS_REGEX = /^(?!.*\bOVS\b).*/ + @Autowired + @Shared + FlowFactory flowFactory + + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory + @Value('${burst.coefficient}') double burstCoefficient @@ -71,7 +83,7 @@ class MetersSpec extends HealthCheckSpecification { def defaultMeters = northbound.getAllMeters(sw.dpId) when: "A flow is created and its meter is deleted" - def flow = flowHelperV2.addFlow(flowHelperV2.singleSwitchFlow(sw)) + def flow = flowFactory.getRandom(sw, sw) def meterToDelete = northbound.getAllMeters(sw.dpId).meterEntries.find { !defaultMeters.meterEntries*.meterId.contains(it.meterId) }.meterId @@ -82,7 +94,7 @@ class MetersSpec extends HealthCheckSpecification { !northbound.getAllMeters(sw.dpId).meterEntries.find { it.meterId == meterToDelete } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "No excessive meters are installed on the switch" Wrappers.wait(WAIT_OFFSET) { @@ -103,11 +115,12 @@ class MetersSpec extends HealthCheckSpecification { assumeTrue(switches as boolean, "Unable to find required switches in topology") when: "Try to delete meter with invalid ID" - northbound.deleteMeter(switches[0].dpId, meterId) + SwitchId swId = switches[0].dpId + northbound.deleteMeter(swId, meterId) then: "Got BadRequest because meter ID is invalid" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 + new MeterExpectedError("Meter id must be positive.", ~/$swId/).matches(exc) where: meterId | switches | switchType @@ -209,9 +222,9 @@ on a #switchType switch"() { assert defaultMeters and: "Create a single-switch flow" - def flow = flowHelperV2.singleSwitchFlow(sw) - flow.ignoreBandwidth = ignoreBandwidth - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(sw, sw) + .withIgnoreBandwidth(ignoreBandwidth).build() + .create() then: "New meters should appear after flow setup" def newMeters = northbound.getAllMeters(sw.dpId) @@ -228,10 +241,10 @@ on a #switchType switch"() { !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() and: "Flow validation shows no discrepancies in meters" - northbound.validateFlow(flow.flowId).each { assert it.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "New meters should disappear from the switch" Wrappers.wait(WAIT_OFFSET) { @@ -265,10 +278,10 @@ on a #switchType switch"() { assert defaultMeters and: "Create a single-switch flow with maximum_bandwidth=0" - def flow = flowHelperV2.singleSwitchFlow(sw) - flow.maximumBandwidth = 0 - flow.ignoreBandwidth = true - flowHelperV2.addFlow(flow) + flowFactory.getBuilder(sw, sw) + .withBandwidth(0) + .withIgnoreBandwidth(true).build() + .create() then: "Ony default meters should be present on the switch and new meters should not appear after flow setup" def newMeters = 
northbound.getAllMeters(sw.dpId) @@ -291,8 +304,7 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { .withSwitchesManufacturedBy(srcSwitch, dstSwitch).random() when: "Create a flow between given switches" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) then: "The source and destination switches have only one meter in the flow's ingress rule" def srcSwFlowMeters = northbound.getAllMeters(flow.source.switchId).meterEntries.findAll(flowMeters) @@ -301,8 +313,8 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { srcSwFlowMeters.size() == 1 dstSwFlowMeters.size() == 1 - def srcSwitchRules = northbound.getSwitchRules(flow.source.switchId).flowEntries.findAll { !Cookie.isDefaultRule(it.cookie) } - def dstSwitchRules = northbound.getSwitchRules(flow.destination.switchId).flowEntries.findAll { !Cookie.isDefaultRule(it.cookie) } + def srcSwitchRules = switchRulesFactory.get(flow.source.switchId).getRules().findAll { !Cookie.isDefaultRule(it.cookie) } + def dstSwitchRules = switchRulesFactory.get(flow.destination.switchId).getRules().findAll { !Cookie.isDefaultRule(it.cookie) } def srcSwIngressFlowRules = srcSwitchRules.findAll { it.match.inPort == flow.source.portNumber.toString() } assert srcSwIngressFlowRules.size() == 2 //shared + simple ingress @@ -336,9 +348,12 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { !dstSwFlowEgressRule.instructions.goToMeter and: "Intermediate switches don't have meters in flow rules at all" - pathHelper.getInvolvedSwitches(flow.flowId)[1..-2].findAll { it.ofVersion != "OF_12" }.each { sw -> + List flowInvolvedSwitches = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() + .collect { [it.srcSwitch, it.dstSwitch] }.flatten().unique() as List + + flowInvolvedSwitches[1..-2].findAll { it.ofVersion != "OF_12" }.each { sw -> assert northbound.getAllMeters(sw.dpId).meterEntries.findAll(flowMeters).empty - def flowRules = northbound.getSwitchRules(sw.dpId).flowEntries.findAll { !(it.cookie in sw.defaultCookies) } + def flowRules = switchRulesFactory.get(sw.dpId).getRules().findAll { !(it.cookie in sw.defaultCookies) } flowRules.each { assert !it.instructions.goToMeter } } @@ -360,13 +375,12 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { def sw = switches.first() def defaultMeters = northbound.getAllMeters(sw.dpId) - def flow = flowHelperV2.singleSwitchFlow(sw) - flow.setMaximumBandwidth(100) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(sw, sw) + .withBandwidth(100).build() + .create() when: "Update flow bandwidth to #flowRate kbps" - flow.setMaximumBandwidth(flowRate) - flowHelperV2.updateFlow(flow.flowId, flow) + flow.update(flow.tap { it.maximumBandwidth = flowRate as Long }) then: "New meters should be installed on the switch" def newMeters = northbound.getAllMeters(sw.dpId).meterEntries.findAll { @@ -384,7 +398,7 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() and: "Flow validation shows no discrepancies in meters" - northbound.validateFlow(flow.flowId).each { assert it.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() where: [flowRate, data] << [ @@ -408,13 +422,12 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { def sw = switches.first() def expectedBurstSize = switchHelper.getExpectedBurst(sw.dpId, flowRate) def defaultMeters = northbound.getAllMeters(sw.dpId) - def flow = 
flowHelperV2.singleSwitchFlow(sw) - flow.setMaximumBandwidth(100) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(sw, sw) + .withBandwidth(100).build() + .create() when: "Update flow bandwidth to #flowRate kbps" - flow.setMaximumBandwidth(flowRate) - flowHelperV2.updateFlow(flow.flowId, flow) + flow.update(flow.tap{ it.maximumBandwidth = flowRate}) then: "Meters with updated rate should be installed on the switch" def newMeters = null @@ -433,7 +446,7 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() and: "Flow validation shows no discrepancies in meters" - northbound.validateFlow(flow.flowId).each { assert it.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() where: flowRate << [ @@ -455,13 +468,12 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { def sw = switches.first() def defaultMeters = northbound.getAllMeters(sw.dpId) - def flow = flowHelperV2.singleSwitchFlow(sw) - flow.setMaximumBandwidth(100) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(sw, sw) + .withBandwidth(100).build() + .create() when: "Update flow bandwidth to #flowRate kbps" - flow.setMaximumBandwidth(flowRate) - flowHelperV2.updateFlow(flow.flowId, flow) + flow.update(flow.tap { it.maximumBandwidth = flowRate }) then: "New meters should be installed on the switch" def newMeters = northbound.getAllMeters(sw.dpId).meterEntries.findAll { @@ -485,7 +497,7 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { !switchHelper.synchronizeAndCollectFixedDiscrepancies(sw.dpId).isPresent() and: "Flow validation shows no discrepancies in meters" - northbound.validateFlow(flow.flowId).each { assert it.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() where: flowRate << [150, 1000, 1024, 5120, 10240, 2480, 960000] @@ -500,13 +512,13 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { def dst = data.switches[1] and: "A flow with custom meter rate and burst, that differ from defaults" - def flow = flowHelperV2.randomFlow(src, dst) - flow.maximumBandwidth = 1000 - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(src, dst) + .withBandwidth(1000).build() + .create() /*at this point meters are set for given flow. 
Now update flow bandwidth directly via DB, so that existing meter rate and burst is no longer correspond to the flow bandwidth*/ def newBandwidth = 2000 - database.updateFlowBandwidth(flow.flowId, newBandwidth) + flow.updateFlowBandwidthInDB(newBandwidth) //at this point existing meters do not correspond with the flow //now save some original data for further comparison before resetting meters Map originalRules = [src.dpId, dst.dpId].collectEntries { @@ -517,7 +529,7 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { } when: "Ask system to reset meters for the flow" - def response = northbound.resetMeters(flow.flowId) + def response = flow.resetMeters() then: "Response contains correct info about new meter values" [response.srcMeter, response.dstMeter].each { switchMeterEntries -> @@ -593,24 +605,19 @@ meters in flow rules at all (#srcSwitch - #dstSwitch flow)"() { def src = availableSwitches[0] def dst = availableSwitches[1] - def flow = flowHelperV2.randomFlow(src, dst) - flow.ignoreBandwidth = true - flow.maximumBandwidth = 0 - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(src, dst) + .withBandwidth(0) + .withIgnoreBandwidth(true).build() + .create() when: "Resetting meter burst and rate to default" - northbound.resetMeters(flow.flowId) + flow.resetMeters() then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.to(MessageError).errorMessage == "Can't update meter: Flow '$flow.flowId' is unmetered" + new MeterExpectedError("Can't update meter: Flow '$flow.flowId' is unmetered", ~/Modify meters in FlowMeterModifyFsm/).matches(exc) } - @Memoized - String getSwitchDescription(SwitchId sw) { - northbound.activeSwitches.find { it.switchId == sw }.description - } @Memoized List getNoviflowSwitches() { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/PortHistorySpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/PortHistorySpec.groovy index c5c6ea6de9e..b1845187db6 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/PortHistorySpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/PortHistorySpec.groovy @@ -172,7 +172,7 @@ class PortHistorySpec extends HealthCheckSpecification { def timestampBefore = System.currentTimeMillis() when: "Execute port DOWN on the src switch for activating antiflap" - cleanupManager.addAction(CleanupActionType.PORT_UP, {northbound.portUp(isl.srcSwitch.dpId, isl.srcPort)}) + cleanupManager.addAction(CleanupActionType.PORT_UP, { northbound.portUp(isl.srcSwitch.dpId, isl.srcPort) }) northbound.portDown(isl.srcSwitch.dpId, isl.srcPort) Wrappers.wait(WAIT_OFFSET) { assert islUtils.getIslInfo(isl).get().state == IslChangeType.FAILED @@ -218,6 +218,10 @@ class PortHistorySpec extends HealthCheckSpecification { @Isolated class PortHistoryIsolatedSpec extends HealthCheckSpecification { + + @Autowired @Shared + CleanupManager cleanupManager + @Shared def antiflapDumpingInterval = 60 @@ -243,6 +247,7 @@ class PortHistoryIsolatedSpec extends HealthCheckSpecification { } when: "Blink port to generate antiflap statistic" + cleanupManager.addAction(CleanupActionType.PORT_UP, { northbound.portUp(isl.srcSwitch.dpId, isl.srcPort) }) Wrappers.timedLoop(antiflapDumpingInterval - antiflapCooldown + 1) { northbound.portUp(isl.srcSwitch.dpId, 
isl.srcPort) northbound.portDown(isl.srcSwitch.dpId, isl.srcPort) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchActivationSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchActivationSpec.groovy index 665d1677b21..a832c26f45e 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchActivationSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchActivationSpec.groovy @@ -1,10 +1,23 @@ package org.openkilda.functionaltests.spec.switches -import com.google.common.collect.Sets -import org.apache.kafka.clients.producer.KafkaProducer -import org.apache.kafka.clients.producer.ProducerRecord +import static org.openkilda.functionaltests.extension.tags.Tag.LOCKKEEPER +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_SWITCH_PROPERTIES +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH +import static org.openkilda.messaging.info.event.SwitchChangeType.ACTIVATED +import static org.openkilda.messaging.info.event.SwitchChangeType.DEACTIVATED +import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID +import static org.openkilda.model.MeterId.MIN_FLOW_METER_ID +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW +import static org.openkilda.testing.tools.KafkaUtils.buildCookie +import static org.openkilda.testing.tools.KafkaUtils.buildMessage + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.helpers.Wrappers import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.messaging.command.switches.DeleteRulesAction @@ -17,25 +30,15 @@ import org.openkilda.rulemanager.MeterFlag import org.openkilda.rulemanager.MeterSpeakerData import org.openkilda.rulemanager.OfTable import org.openkilda.rulemanager.OfVersion + +import com.google.common.collect.Sets +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.ProducerRecord import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier import org.springframework.beans.factory.annotation.Value import spock.lang.Shared -import static org.openkilda.functionaltests.extension.tags.Tag.LOCKKEEPER -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_SWITCH_PROPERTIES -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH -import static org.openkilda.messaging.info.event.SwitchChangeType.ACTIVATED -import static org.openkilda.messaging.info.event.SwitchChangeType.DEACTIVATED -import static 
org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID -import static org.openkilda.model.MeterId.MIN_FLOW_METER_ID -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW -import static org.openkilda.testing.tools.KafkaUtils.buildCookie -import static org.openkilda.testing.tools.KafkaUtils.buildMessage class SwitchActivationSpec extends HealthCheckSpecification { @Value("#{kafkaTopicsConfig.getSpeakerSwitchManagerTopic()}") @@ -43,15 +46,18 @@ class SwitchActivationSpec extends HealthCheckSpecification { @Autowired @Qualifier("kafkaProducerProperties") Properties producerProps - @Autowired @Shared + @Autowired + @Shared CleanupManager cleanupManager + @Autowired + @Shared + FlowFactory flowFactory @Tags([SMOKE, SMOKE_SWITCHES, LOCKKEEPER, SWITCH_RECOVER_ON_FAIL]) def "Missing flow rules/meters are installed on a new switch before connecting to the controller"() { given: "A switch with missing flow rules/meters and not connected to the controller" def switchPair = switchPairs.all().neighbouring().random() - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) def originalMeterIds = northbound.getAllMeters(switchPair.src.dpId).meterEntries*.meterId assert originalMeterIds.size() == 1 + switchPair.src.defaultMeters.size() diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchDeleteSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchDeleteSpec.groovy index e717196d013..b1d9be90142 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchDeleteSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchDeleteSpec.groovy @@ -1,15 +1,29 @@ package org.openkilda.functionaltests.spec.switches +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_SWITCH_PROPERTIES +import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW + import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.error.SwitchIsInIllegalStateExpectedError +import org.openkilda.functionaltests.error.SwitchNotFoundExpectedError import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.IterationTags import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.testing.service.traffexam.TraffExamService import org.openkilda.testing.service.traffexam.model.ArpData import org.openkilda.testing.service.traffexam.model.LldpData import org.openkilda.testing.tools.ConnectedDevice + import 
org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Shared @@ -17,22 +31,19 @@ import spock.lang.Shared import javax.inject.Provider import java.util.concurrent.TimeUnit -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.OTHER -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_SWITCH_PROPERTIES -import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW class SwitchDeleteSpec extends HealthCheckSpecification { - @Autowired @Shared + @Autowired + @Shared Provider traffExamProvider - @Autowired @Shared + @Autowired + @Shared CleanupManager cleanupManager + @Autowired + @Shared + FlowFactory flowFactory + def "Unable to delete a nonexistent switch"() { when: "Try to delete a nonexistent switch" @@ -40,7 +51,8 @@ class SwitchDeleteSpec extends HealthCheckSpecification { then: "Get 404 NotFound error" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 404 + new SwitchNotFoundExpectedError("Could not delete switch '$NON_EXISTENT_SWITCH_ID': 'Switch $NON_EXISTENT_SWITCH_ID not found.'", + ~/Switch is not found./).matches(exc) } @Tags(SMOKE) @@ -53,8 +65,9 @@ class SwitchDeleteSpec extends HealthCheckSpecification { then: "Get 400 BadRequest error because the switch must be deactivated first" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.contains("Switch '$switchId' is in 'Active' state") + new SwitchIsInIllegalStateExpectedError("Could not delete switch '$switchId': " + + "'Switch '$switchId' is in illegal state. " + + "Switch '$switchId' is in 'Active' state.'").matches(exc) } @Tags(SWITCH_RECOVER_ON_FAIL) @@ -69,9 +82,9 @@ class SwitchDeleteSpec extends HealthCheckSpecification { then: "Get 400 BadRequest error because the switch has ISLs" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.matches(".*Switch '$sw.dpId' has ${swIsls.size() * 2} active links\\. " + - "Unplug and remove them first.*") + new SwitchIsInIllegalStateExpectedError("Could not delete switch '${sw.dpId}': " + + "'Switch '${sw.dpId}' is in illegal state. " + + "Switch '${sw.dpId}' has ${swIsls.size() * 2} active links. Unplug and remove them first.'").matches(exc) } @Tags(SWITCH_RECOVER_ON_FAIL) @@ -87,16 +100,16 @@ class SwitchDeleteSpec extends HealthCheckSpecification { then: "Get 400 BadRequest error because the switch has ISLs" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.matches(".*Switch '$sw.dpId' has ${swIsls.size() * 2} inactive links\\. " + - "Remove them first.*") + new SwitchIsInIllegalStateExpectedError("Could not delete switch '${sw.dpId}': " + + "'Switch '${sw.dpId}' is in illegal state. " + + "Switch '${sw.dpId}' has ${swIsls.size() * 2} inactive links. 
Remove them first.'").matches(exc) } @Tags(SWITCH_RECOVER_ON_FAIL) @IterationTags([@IterationTag(tags = [LOW_PRIORITY], take = 1)]) def "Unable to delete an inactive switch with a #flowType flow assigned"() { given: "A flow going through a switch" - flowHelperV2.addFlow(flow) + flow.create() when: "Deactivate the switch" def swToDeactivate = topology.switches.find { it.dpId == flow.source.switchId } @@ -107,13 +120,14 @@ class SwitchDeleteSpec extends HealthCheckSpecification { then: "Got 400 BadRequest error because the switch has the flow assigned" def exc = thrown(HttpClientErrorException) - exc.rawStatusCode == 400 - exc.responseBodyAsString.matches(".*Switch '${flow.source.switchId}' has 1 assigned flows: \\[${flow.flowId}\\].*") + new SwitchIsInIllegalStateExpectedError("Could not delete switch '${swToDeactivate.dpId}': " + + "'Switch '${swToDeactivate.dpId}' is in illegal state. " + + "Switch '${swToDeactivate.dpId}' has 1 assigned flows: [${flow.flowId}].'").matches(exc) where: flowType | flow - "single-switch" | getFlowHelperV2().singleSwitchFlow(getTopology().getActiveSwitches()[0]) - "casual" | getFlowHelperV2().randomFlow(*getTopology().getActiveSwitches()[0..1]) + "single-switch" | flowFactory.getBuilder(switchPairs.singleSwitch().random()).build() + "casual" | flowFactory.getBuilder(switchPairs.all().neighbouring().random()).build() } @Tags(SWITCH_RECOVER_ON_FAIL) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchFailuresSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchFailuresSpec.groovy index 26f420a4aed..e7f8b2c3767 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchFailuresSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchFailuresSpec.groovy @@ -1,33 +1,34 @@ package org.openkilda.functionaltests.spec.switches +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.LOCKKEEPER +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.REVIVE_SWITCH + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.flow.FlowNotValidatedExpectedError import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowActionType import org.openkilda.functionaltests.model.cleanup.CleanupManager +import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.info.event.IslChangeType import org.openkilda.messaging.info.event.SwitchChangeType import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.lockkeeper.model.TrafficControlData + import org.springframework.beans.factory.annotation.Autowired 
import org.springframework.web.client.HttpClientErrorException import spock.lang.Ignore import spock.lang.Narrative import spock.lang.Shared -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.LOCKKEEPER -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_ACTION -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_FAIL -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_SUCCESS -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.REVIVE_SWITCH -import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW @Narrative(""" This spec verifies different situations when Kilda switches suddenly disconnect from the controller. @@ -35,16 +36,19 @@ Note: For now it is only runnable on virtual env due to no ability to disconnect """) class SwitchFailuresSpec extends HealthCheckSpecification { - @Autowired @Shared + @Autowired + @Shared CleanupManager cleanupManager + @Autowired + @Shared + FlowFactory flowFactory @Tags([SMOKE, SMOKE_SWITCHES, LOCKKEEPER]) def "ISL is still able to properly fail even if switches have reconnected"() { given: "A flow" def isl = topology.getIslsForActiveSwitches().find { it.aswitch && it.dstSwitch } assumeTrue(isl.asBoolean(), "No a-switch ISL found for the test") - def flow = flowHelperV2.randomFlow(isl.srcSwitch, isl.dstSwitch) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(isl.srcSwitch, isl.dstSwitch) when: "Two neighbouring switches of the flow go down simultaneously" def srcBlockData = switchHelper.knockoutSwitch(isl.srcSwitch, RW) @@ -71,12 +75,12 @@ class SwitchFailuresSpec extends HealthCheckSpecification { //depends whether there are alt paths available and: "The flow goes down OR changes path to avoid failed ISL after reroute timeout" Wrappers.wait(rerouteDelay + WAIT_OFFSET) { - def currentIsls = pathHelper.getInvolvedIsls(PathHelper.convert(northbound.getFlowPath(flow.flowId))) + def currentIsls = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() def pathChanged = !currentIsls.contains(isl) && !currentIsls.contains(isl.reversed) - assert pathChanged || (northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN && - flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION).find { + assert pathChanged || (flow.retrieveFlowStatus().status == FlowState.DOWN && + flow.retrieveFlowHistory().getEntriesByType(FlowActionType.REROUTE_FAILED).find { it.taskId =~ (/.+ : retry #1 ignore_bw true/) - }?.payload?.last()?.action == REROUTE_FAIL) + }?.payload?.last()?.action == FlowActionType.REROUTE_FAILED.payloadLastAction) } } @@ -84,92 +88,86 @@ class SwitchFailuresSpec extends HealthCheckSpecification { def "System is able to finish the reroute if switch blinks in the middle of it"() { given: "A flow" def swPair = switchPairs.all().nonNeighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(swPair)) + def flow = flowFactory.getRandom(swPair) when: "Current path breaks and reroute starts" switchHelper.shapeSwitchesTraffic([swPair.dst], new TrafficControlData(3000)) - def islToBreak = 
pathHelper.getInvolvedIsls(northbound.getFlowPath(flow.flowId)).first() + def islToBreak = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() antiflap.portDown(islToBreak.srcSwitch.dpId, islToBreak.srcPort) and: "Switch reconnects in the middle of reroute" Wrappers.wait(WAIT_OFFSET, 0) { - def reroute = flowHelper.getEarliestHistoryEntryByAction(flow.flowId, REROUTE_ACTION) + def reroute = flow.retrieveFlowHistory().getEntriesByType(FlowActionType.REROUTE).first() assert reroute.payload.last().action == "Started validation of installed non ingress rules" } lockKeeper.reviveSwitch(swPair.src, lockKeeper.knockoutSwitch(swPair.src, RW)) then: "Flow reroute is successful" Wrappers.wait(PATH_INSTALLATION_TIME * 2) { //double timeout since rerouted is slowed by delay - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - assert flowHelper.getLatestHistoryEntry(flow.flowId).payload.last().action == REROUTE_SUCCESS + assert flow.retrieveFlowStatus().status == FlowState.UP + assert flow.retrieveFlowHistory().getEntriesByType(FlowActionType.REROUTE).last().payload.last().action == FlowActionType.REROUTE.payloadLastAction } and: "Blinking switch has no rule anomalies" !switchHelper.validateAndCollectFoundDiscrepancies(swPair.src.dpId).isPresent() and: "Flow validation is OK" - northbound.validateFlow(flow.flowId).each { assert it.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() } def "System can handle situation when switch reconnects while flow is being created"() { when: "Start creating a flow between switches and lose connection to src before rules are set" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flowHelperV2.attemptToAddFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch).build().sendCreateRequest() sleep(50) - def blockData = lockKeeper.knockoutSwitch(srcSwitch, RW) - cleanupManager.addAction(REVIVE_SWITCH, {switchHelper.reviveSwitch(srcSwitch, blockData)}) + def blockData = switchHelper.knockoutSwitch(srcSwitch, RW) then: "Flow eventually goes DOWN" Wrappers.wait(WAIT_OFFSET) { - assert northbound.getSwitch(srcSwitch.dpId).state == SwitchChangeType.DEACTIVATED - } - Wrappers.wait(WAIT_OFFSET) { - def flowInfo = northboundV2.getFlow(flow.flowId) - assert flowInfo.status == FlowState.DOWN.toString() + def flowInfo = flow.retrieveDetails() + assert flowInfo.status == FlowState.DOWN assert flowInfo.statusInfo == "Failed to create flow $flow.flowId" } and: "Flow has no path associated" - with(northbound.getFlowPath(flow.flowId)) { - forwardPath.empty - reversePath.empty + with(flow.retrieveAllEntityPaths()) { + getPathNodes(Direction.FORWARD).empty + getPathNodes(Direction.REVERSE).empty } and: "Dst switch validation shows no missing rules" !switchHelper.validateAndCollectFoundDiscrepancies(dstSwitch.dpId).isPresent() when: "Try to validate flow" - northbound.validateFlow(flow.flowId) + flow.validate() then: "Error is returned, explaining that this is impossible for DOWN flows" def e = thrown(HttpClientErrorException) new FlowNotValidatedExpectedError(~/Could not validate flow: Flow $flow.flowId is in DOWN state/).matches(e) when: "Switch returns back UP" switchHelper.reviveSwitch(srcSwitch, blockData) - def swIsOnline = true then: "Flow is still down, because ISLs had not enough time to fail, so no ISLs are discovered and no reroute happen" - northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN + flow.retrieveFlowStatus().status == 
FlowState.DOWN when: "Reroute the flow" - def rerouteResponse = northboundV2.rerouteFlow(flow.flowId) + def rerouteResponse = flow.reroute() then: "Flow is rerouted and in UP state" rerouteResponse.rerouted - Wrappers.wait(WAIT_OFFSET) { northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } + Wrappers.wait(WAIT_OFFSET) { flow.retrieveFlowStatus().status == FlowState.UP } and: "Has a path now" - with(northbound.getFlowPath(flow.flowId)) { - !forwardPath.empty - !reversePath.empty + with(flow.retrieveAllEntityPaths()) { + !getPathNodes(Direction.FORWARD).empty + !getPathNodes(Direction.REVERSE).empty } and: "Can be validated" - northbound.validateFlow(flow.flowId).each { assert it.discrepancies.empty } + flow.validateAndCollectDiscrepancies().isEmpty() and: "Flow can be removed" - flowHelper.deleteFlow(flow.flowId) + flow.delete() } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchMaintenanceSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchMaintenanceSpec.groovy index 9dccf32ec70..429d15c3abc 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchMaintenanceSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchMaintenanceSpec.groovy @@ -1,23 +1,30 @@ package org.openkilda.functionaltests.spec.switches +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.testing.Constants.DEFAULT_COST +import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.messaging.info.event.IslChangeType import org.openkilda.messaging.info.event.PathNode import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.testing.model.topology.TopologyDefinition -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.testing.Constants.DEFAULT_COST -import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET +import org.springframework.beans.factory.annotation.Autowired +import spock.lang.Shared class SwitchMaintenanceSpec extends HealthCheckSpecification { + @Shared + @Autowired + FlowFactory flowFactory + @Tags(SMOKE) def "Maintenance mode can be set/unset for a particular switch"() { given: "An active switch" @@ -76,19 +83,17 @@ class SwitchMaintenanceSpec extends HealthCheckSpecification { switchPair.paths.findAll { it != path }.each { pathHelper.makePathMorePreferable(path, it) } and: "Create a couple of flows going through these switches" - def flow1 = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow1) - def flow2 = flowHelperV2.randomFlow(switchPair, false, [flow1]) - flowHelperV2.addFlow(flow2) - assert 
PathHelper.convert(northbound.getFlowPath(flow1.flowId)) == path - assert PathHelper.convert(northbound.getFlowPath(flow2.flowId)) == path + def flow1 = flowFactory.getRandom(switchPair) + def flow2 = flowFactory.getRandom(switchPair, false, FlowState.UP, flow1.occupiedEndpoints()) + flow1.retrieveAllEntityPaths().getPathNodes() == path + flow2.retrieveAllEntityPaths().getPathNodes() == path when: "Set maintenance mode without flows evacuation flag for some intermediate switch involved in flow paths" switchHelper.setSwitchMaintenance(sw.dpId, true, false) then: "Flows are not evacuated (rerouted) and have the same paths" - PathHelper.convert(northbound.getFlowPath(flow1.flowId)) == path - PathHelper.convert(northbound.getFlowPath(flow2.flowId)) == path + flow1.retrieveAllEntityPaths().getPathNodes() == path + flow2.retrieveAllEntityPaths().getPathNodes() == path when: "Set maintenance mode again with flows evacuation flag for the same switch" northbound.setSwitchMaintenance(sw.dpId, true, true) @@ -96,18 +101,18 @@ class SwitchMaintenanceSpec extends HealthCheckSpecification { then: "Flows are evacuated (rerouted)" def flow1PathUpdated, flow2PathUpdated Wrappers.wait(PATH_INSTALLATION_TIME + WAIT_OFFSET) { - [flow1, flow2].each { assert northboundV2.getFlowStatus(it.flowId).status == FlowState.UP } + [flow1, flow2].each { assert it.retrieveFlowStatus().status == FlowState.UP } - flow1PathUpdated = PathHelper.convert(northbound.getFlowPath(flow1.flowId)) - flow2PathUpdated = PathHelper.convert(northbound.getFlowPath(flow2.flowId)) + flow1PathUpdated = flow1.retrieveAllEntityPaths() + flow2PathUpdated = flow2.retrieveAllEntityPaths() - assert flow1PathUpdated != path - assert flow2PathUpdated != path + assert flow1PathUpdated.getPathNodes() != path + assert flow2PathUpdated.getPathNodes() != path } and: "Switch under maintenance is not involved in new flow paths" - !(sw in pathHelper.getInvolvedSwitches(flow1PathUpdated)) - !(sw in pathHelper.getInvolvedSwitches(flow2PathUpdated)) + !(sw in flow1PathUpdated.getInvolvedSwitches()) + !(sw in flow2PathUpdated.getInvolvedSwitches()) } @Tags(ISL_RECOVER_ON_FAIL) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchPropertiesSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchPropertiesSpec.groovy index 6c1b7eaec5f..a86ebae89e6 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchPropertiesSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchPropertiesSpec.groovy @@ -125,7 +125,7 @@ class SwitchPropertiesSpec extends HealthCheckSpecification { then: "Human readable error is returned" def exc = thrown(HttpClientErrorException) new SwitchPropertiesNotUpdatedExpectedError(String.format(data.error, sw.dpId), - data.description ?: SwitchPropertiesNotUpdatedExpectedError.getDescriptionPattern()).matches(exc) + data.description ?: ~/Failed to update switch properties./).matches(exc) where: data << [ diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchSyncSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchSyncSpec.groovy index c4d8dc6d980..835f2ff2a42 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchSyncSpec.groovy +++ 
b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchSyncSpec.groovy @@ -1,44 +1,49 @@ package org.openkilda.functionaltests.spec.switches -import com.google.common.collect.Sets -import org.apache.kafka.clients.producer.KafkaProducer -import org.apache.kafka.clients.producer.ProducerRecord +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.extension.tags.Tag.VIRTUAL +import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH +import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID +import static org.openkilda.model.MeterId.MIN_FLOW_METER_ID +import static org.openkilda.model.cookie.Cookie.VERIFICATION_BROADCAST_RULE_COOKIE +import static org.openkilda.rulemanager.OfTable.EGRESS +import static org.openkilda.rulemanager.OfTable.INPUT +import static org.openkilda.rulemanager.OfTable.TRANSIT +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.tools.KafkaUtils.buildMessage + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.messaging.command.switches.DeleteRulesAction -import org.openkilda.model.FlowEncapsulationType +import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType import org.openkilda.model.MeterId import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.rulemanager.FlowSpeakerData import org.openkilda.rulemanager.Instructions import org.openkilda.rulemanager.MeterFlag import org.openkilda.rulemanager.MeterSpeakerData import org.openkilda.rulemanager.OfVersion +import org.openkilda.testing.model.topology.TopologyDefinition.Switch + +import com.google.common.collect.Sets +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.ProducerRecord import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier import org.springframework.beans.factory.annotation.Value +import spock.lang.Issue import spock.lang.See import spock.lang.Shared -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.extension.tags.Tag.VIRTUAL -import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH -import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID -import static org.openkilda.model.MeterId.MIN_FLOW_METER_ID -import static org.openkilda.model.cookie.Cookie.VERIFICATION_BROADCAST_RULE_COOKIE -import static 
org.openkilda.rulemanager.OfTable.EGRESS -import static org.openkilda.rulemanager.OfTable.INPUT -import static org.openkilda.rulemanager.OfTable.TRANSIT -import static org.openkilda.testing.Constants.RULES_DELETION_TIME -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.tools.KafkaUtils.buildMessage @See(["https://github.com/telstra/open-kilda/tree/develop/docs/design/hub-and-spoke/switch-sync", "https://github.com/telstra/open-kilda/blob/develop/docs/design/network-discovery/switch-FSM.png"]) @@ -51,8 +56,12 @@ class SwitchSyncSpec extends HealthCheckSpecification { @Autowired @Qualifier("kafkaProducerProperties") Properties producerProps - @Autowired @Shared + @Autowired + @Shared CleanupManager cleanupManager + @Autowired + @Shared + FlowFactory flowFactory def "Able to synchronize switch without any rule and meter discrepancies (removeExcess=#removeExcess)"() { given: "An active switch" @@ -79,16 +88,17 @@ class SwitchSyncSpec extends HealthCheckSpecification { removeExcess << [false, true] } + @Issue("Noviflow WB5164 Only: https://github.com/telstra/open-kilda/issues/5638") def "Able to synchronize switch (install missing rules and meters)"() { given: "Two active not neighboring switches" def switchPair = switchPairs.all().nonNeighbouring().random() and: "Create an intermediate-switch flow" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) and: "Drop all rules and meters from related switches (both default and non-default)" - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId) + List involvedSwitches = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() + .collect { [it.srcSwitch, it.dstSwitch] }.flatten().unique() as List def involvedSwitchIds = involvedSwitches*.getDpId() def cookiesMap = involvedSwitches.collectEntries { sw -> [sw.dpId, northbound.getSwitchRules(sw.dpId).flowEntries.findAll { @@ -155,8 +165,7 @@ class SwitchSyncSpec extends HealthCheckSpecification { def "Able to synchronize #switchKind switch (delete excess rules and meters)"() { given: "Flow with intermediate switches" def switchPair = switchPairs.all().nonNeighbouring().random() - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) def switchId = getSwitch(flow) def ofVersion = topology.getActiveSwitches().find{it.getDpId() == switchId}.getOfVersion() assert !switchHelper.synchronizeAndCollectFixedDiscrepancies(switchId).isPresent() @@ -180,8 +189,8 @@ class SwitchSyncSpec extends HealthCheckSpecification { .switchId(switchId) .ofVersion(OfVersion.of(ofVersion)) .meterId(new MeterId(excessMeterId)) - .rate(flow.getMaximumBandwidth()) - .burst(flow.getMaximumBandwidth()) + .rate(flow.maximumBandwidth) + .burst(flow.maximumBandwidth) .flags(Sets.newHashSet(MeterFlag.KBPS, MeterFlag.BURST, MeterFlag.STATS)) .build()]).toJson())).get() Wrappers.wait(RULES_INSTALLATION_TIME) { @@ -205,14 +214,12 @@ class SwitchSyncSpec extends HealthCheckSpecification { } where: - switchKind | getSwitch | table - "source" | { FlowRequestV2 flowRequestV2 -> flowRequestV2.getSource().getSwitchId()} | INPUT - "destination"| {FlowRequestV2 flowRequestV2 -> flowRequestV2.getDestination().getSwitchId()}| EGRESS - "transit"| {FlowRequestV2 flowRequestV2 -> def allSwitches = pathHelper.getInvolvedSwitches(flowRequestV2.getFlowId()).collect {it.getDpId()} def transitSwitches = allSwitches - 
[flowRequestV2.getDestination().getSwitchId(), - flowRequestV2.getSource().getSwitchId()] - return transitSwitches.shuffled().first() }| TRANSIT + switchKind | getSwitch | table + "source" | { FlowExtended flowExtended -> flowExtended.source.switchId } | INPUT + "destination" | { FlowExtended flowExtended -> flowExtended.destination.switchId } | EGRESS + "transit" | { FlowExtended flowExtended -> return flowExtended.retrieveAllEntityPaths() + .flowPath.path.forward.getTransitInvolvedSwitches().shuffled().first() + } | TRANSIT } def "Able to synchronize switch with 'vxlan' rule(install missing rules and meters)"() { @@ -220,13 +227,14 @@ class SwitchSyncSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().withBothSwitchesVxLanEnabled().random() and: "Create a flow with vxlan encapsulation" - def flow = flowHelperV2.randomFlow(switchPair) - flow.encapsulationType = FlowEncapsulationType.VXLAN - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPair) + .withEncapsulationType(FlowEncapsulationType.VXLAN) + .build().create() and: "Reproduce situation when switches have missing rules and meters" - def flowInfoFromDb = database.getFlow(flow.flowId) - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId) + def flowInfoFromDb = flow.retrieveDetailsFromDB() + List involvedSwitches = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() + .collect { [it.srcSwitch, it.dstSwitch] }.flatten().unique() as List def involvedSwitchIds = involvedSwitches*.getDpId() def transitSwitchIds = involvedSwitches[1..-2]*.dpId def cookiesMap = involvedSwitches.collectEntries { sw -> @@ -248,11 +256,11 @@ class SwitchSyncSpec extends HealthCheckSpecification { def swProps = switchHelper.getCachedSwProps(it.dpId) def switchIdInSrcOrDst = (it.dpId in [switchPair.src.dpId, switchPair.dst.dpId]) def defaultAmountOfFlowRules = 2 // ingress + egress - def amountOfServer42Rules = (switchIdInSrcOrDst && swProps.server42FlowRtt ? 1 : 0) - if (swProps.server42FlowRtt) { - if ((flow.destination.getSwitchId() == it.dpId && flow.destination.vlanId) || ( - flow.source.getSwitchId() == it.dpId && flow.source.vlanId)) - amountOfServer42Rules += 1 + def amountOfServer42Rules = 0 + if(swProps.server42FlowRtt && it.dpId in [switchPair.src.dpId, switchPair.dst.dpId]) { + amountOfServer42Rules +=1 + it.dpId == switchPair.src.dpId && flow.source.vlanId && ++amountOfServer42Rules + it.dpId == switchPair.dst.dpId && flow.destination.vlanId && ++amountOfServer42Rules } def rulesCount = defaultAmountOfFlowRules + amountOfServer42Rules + (switchIdInSrcOrDst ? 
1 : 0) @@ -357,8 +365,7 @@ class SwitchSyncSpec extends HealthCheckSpecification { def "Able to synchronize misconfigured flow meter"() { given: "An active switch with flow on it" def sw = topology.activeSwitches.first() - def flow = flowHelperV2.singleSwitchFlow(sw) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(sw, sw) when: "Update flow's meter" def flowMeterIdToManipulate = northbound.getAllMeters(sw.dpId).meterEntries.find { @@ -369,10 +376,10 @@ class SwitchSyncSpec extends HealthCheckSpecification { lockKeeper.updateBurstSizeAndRate(sw, flowMeterIdToManipulate.meterId, newBurstSize, newRate) then: "Flow is not valid" - def responseValidateFlow = northbound.validateFlow(flow.flowId).findAll { !it.discrepancies.empty }*.discrepancies - assert responseValidateFlow.size() == 1 - def meterRateDiscrepancies = responseValidateFlow[0].find { it.field.toString() == "meterRate" } - def meterBurstSizeDiscrepancies = responseValidateFlow[0].find { it.field.toString() == "meterBurstSize" } + def flowDiscrepancies = flow.validateAndCollectDiscrepancies().values() + assert flowDiscrepancies.size() == 1 + def meterRateDiscrepancies = flowDiscrepancies[0].find { it.field.toString() == "meterRate" } + def meterBurstSizeDiscrepancies = flowDiscrepancies[0].find { it.field.toString() == "meterBurstSize" } meterRateDiscrepancies.actualValue == newRate.toString() meterRateDiscrepancies.expectedValue == flowMeterIdToManipulate.rate.toString() meterBurstSizeDiscrepancies.actualValue == newBurstSize.toString() @@ -405,6 +412,6 @@ class SwitchSyncSpec extends HealthCheckSpecification { } and: "Flow is valid" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationSingleSwFlowSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationSingleSwFlowSpec.groovy index 2347d768889..55bd0ee2e78 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationSingleSwFlowSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationSingleSwFlowSpec.groovy @@ -1,13 +1,24 @@ package org.openkilda.functionaltests.spec.switches -import com.google.common.collect.Sets -import groovy.transform.Memoized -import org.apache.kafka.clients.producer.KafkaProducer -import org.apache.kafka.clients.producer.ProducerRecord +import static org.junit.jupiter.api.Assumptions.assumeTrue +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH +import static org.openkilda.functionaltests.spec.switches.MetersSpec.NOT_OVS_REGEX +import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID +import static org.openkilda.model.MeterId.MIN_FLOW_METER_ID +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.openkilda.testing.tools.KafkaUtils.buildMessage + import 
org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.IterationTag import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowDirection import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.messaging.command.switches.DeleteRulesAction import org.openkilda.messaging.model.FlowDirectionType @@ -21,6 +32,11 @@ import org.openkilda.rulemanager.MeterSpeakerData import org.openkilda.rulemanager.OfTable import org.openkilda.rulemanager.OfVersion import org.openkilda.testing.model.topology.TopologyDefinition.Switch + +import com.google.common.collect.Sets +import groovy.transform.Memoized +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.ProducerRecord import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier import org.springframework.beans.factory.annotation.Value @@ -28,18 +44,6 @@ import spock.lang.Narrative import spock.lang.See import spock.lang.Shared -import static org.junit.jupiter.api.Assumptions.assumeTrue -import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT -import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH -import static org.openkilda.functionaltests.spec.switches.MetersSpec.NOT_OVS_REGEX -import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID -import static org.openkilda.model.MeterId.MIN_FLOW_METER_ID -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.openkilda.testing.tools.KafkaUtils.buildMessage @See("https://github.com/telstra/open-kilda/tree/develop/docs/design/hub-and-spoke/switch-validate") @Narrative("""This test suite checks the switch validate feature on a single flow switch. 
@@ -56,8 +60,12 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { @Autowired @Qualifier("kafkaProducerProperties") Properties producerProps - @Autowired @Shared + @Autowired + @Shared CleanupManager cleanupManager + @Autowired + @Shared + FlowFactory flowFactory def setupSpec() { deleteAnyFlowsLeftoversIssue5480() @@ -72,7 +80,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { def sw = switches.first() when: "Create a flow" - def flow = flowHelperV2.addFlow(flowHelperV2.singleSwitchFlow(sw)) + def flow = flowFactory.getRandom(sw, sw) def meterIds = getCreatedMeterIds(sw.dpId) Long burstSize = switchHelper.getExpectedBurst(sw.dpId, flow.maximumBandwidth) @@ -103,7 +111,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { switchValidateInfo.verifyRuleSectionsAreEmpty(["missing", "excess"]) when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -134,7 +142,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { def amountOfRules = amountOfSwRules + amountOfFlowRules + amountOfMultiTableFlRules def amountOfMeters = northbound.getAllMeters(sw.dpId).meterEntries.size() def amountOfFlowMeters = 2 - def flow = flowHelperV2.addFlow(flowHelperV2.singleSwitchFlow(sw).tap { it.maximumBandwidth = 5000 }) + def flow = flowFactory.getBuilder(sw, sw, false).withBandwidth(5000).build().create() def meterIds = getCreatedMeterIds(sw.dpId) Long burstSize = switchHelper.getExpectedBurst(sw.dpId, flow.maximumBandwidth) @@ -144,7 +152,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { cleanupManager.addAction(SYNCHRONIZE_SWITCH, {switchHelper.synchronize(sw.dpId)}) /** at this point meters are set for given flow. 
Now update flow bandwidth directly via DB, it is done just for moving meters from the 'proper' section into the 'misconfigured'*/ - database.updateFlowBandwidth(flow.flowId, newBandwidth) + flow.updateFlowBandwidthInDB(newBandwidth) //at this point existing meters do not correspond with the flow then: "Meters info is moved into the 'misconfigured' section" @@ -174,10 +182,12 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { switchValidateInfo.rules.proper.containsAll(createdCookies) and: "Flow validation shows discrepancies" - def flowValidateResponse = northbound.validateFlow(flow.flowId) + def flowValidateResponse = flow.validate() + // check isServer42 only for src switch because it is single switch pair, src equal to dst + def isSwitchServer42 = switchHelper.isServer42Supported(flow.source.switchId) def expectedRulesCount = [ - flowHelperV2.getFlowRulesCountBySwitch(flow, true, 1), - flowHelperV2.getFlowRulesCountBySwitch(flow, false, 1)] + flow.getFlowRulesCountBySwitch(FlowDirection.FORWARD, 1, isSwitchServer42), + flow.getFlowRulesCountBySwitch(FlowDirection.REVERSE, 1, isSwitchServer42)] flowValidateResponse.eachWithIndex { direction, i -> assert direction.discrepancies.size() == 2 @@ -200,7 +210,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { } when: "Reset meters for the flow" - northbound.resetMeters(flow.flowId) + flow.resetMeters() then: "Misconfigured meters are reinstalled according to the new bandwidth and moved into the 'proper' section" with(switchHelper.validateV1(sw.dpId)) { @@ -209,13 +219,10 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { } and: "Flow validation shows no discrepancies" - northbound.validateFlow(flow.flowId).each { direction -> - assert direction.discrepancies.empty - assert direction.asExpected - } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -240,7 +247,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { def sw = switches.first() when: "Create a flow" - def flow = flowHelperV2.addFlow(flowHelperV2.singleSwitchFlow(sw)) + def flow = flowFactory.getRandom(sw, sw) def meterIds = getCreatedMeterIds(sw.dpId) and: "Remove created meter" @@ -281,7 +288,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { syncResponse.meters.installed*.meterId.containsAll(meterIds) when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -306,7 +313,8 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { def sw = switches.first() when: "Create a flow" - def flow = flowHelperV2.addFlow(flowHelperV2.singleSwitchFlow(sw)) + // No TraffGens because the Single switch flow is created at the same port, and no traffic is checked + def flow = flowFactory.getRandom(sw, sw, false) def metersIds = getCreatedMeterIds(sw.dpId) Long burstSize = switchHelper.getExpectedBurst(sw.dpId, flow.maximumBandwidth) @@ -320,7 +328,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { when: "Update meterId for created flow directly via db" long newMeterId = 100; cleanupManager.addAction(SYNCHRONIZE_SWITCH, {switchHelper.synchronize(sw.dpId)}) - database.updateFlowMeterId(flow.flowId, newMeterId) + 
flow.updateFlowMeterIdInDB(newMeterId) then: "Origin meters are moved into the 'excess' section" def switchValidateInfo = switchHelper.validateV1(sw.dpId) @@ -347,7 +355,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { switchValidateInfo.verifyRuleSectionsAreEmpty(["missing", "excess"]) when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() and: "Delete excess meters" metersIds.each { northbound.deleteMeter(sw.dpId, it) } @@ -375,7 +383,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { def sw = switches.first() when: "Create a flow" - def flow = flowHelperV2.addFlow(flowHelperV2.singleSwitchFlow(sw)) + def flow = flowFactory.getRandom(sw, sw) def createdCookies = getCookiesWithMeter(sw.dpId) def createdHexCookies = createdCookies.collect { Long.toHexString(it) } @@ -402,7 +410,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { syncResponse.rules.installed.containsAll(createdCookies) when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -528,7 +536,8 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { def "Able to validate and sync a #switchType switch having missing rules of single-port single-switch flow"() { assumeTrue(sw as boolean, "Unable to find $switchType switch in topology") given: "A single-port single-switch flow" - def flow = flowHelperV2.addFlow(flowHelperV2.singleSwitchSinglePortFlow(sw)) + // No TraffGens because the Single switch flow is created at the same port, and no traffic is checked + def flow = flowFactory.getRandom(sw, sw, false) when: "Remove flow rules from the switch, so that they become missing" switchHelper.deleteSwitchRules(sw.dpId, DeleteRulesAction.IGNORE_DEFAULTS) @@ -553,7 +562,7 @@ class SwitchValidationSingleSwFlowSpec extends HealthCheckSpecification { } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Switch validation returns empty sections" with(switchHelper.validateV1(sw.dpId)) { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationSpec.groovy index 11526f9f324..eb25f4be2fd 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationSpec.groovy @@ -1,18 +1,29 @@ package org.openkilda.functionaltests.spec.switches -import com.google.common.collect.Sets -import org.apache.kafka.clients.producer.KafkaProducer -import org.apache.kafka.clients.producer.ProducerRecord +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT +import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter +import static org.openkilda.functionaltests.helpers.SwitchHelper.isServer42Supported +import static org.openkilda.functionaltests.helpers.model.FlowEncapsulationType.VXLAN +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH +import static 
org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID +import static org.openkilda.model.MeterId.MIN_FLOW_METER_ID +import static org.openkilda.testing.Constants.RULES_DELETION_TIME +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.openkilda.testing.tools.KafkaUtils.buildMessage + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.SwitchHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowDirection +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.messaging.command.switches.DeleteRulesAction import org.openkilda.messaging.model.FlowDirectionType -import org.openkilda.messaging.payload.flow.DetectConnectedDevicesPayload -import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.MeterId import org.openkilda.model.SwitchId import org.openkilda.model.cookie.Cookie @@ -25,6 +36,10 @@ import org.openkilda.rulemanager.MeterSpeakerData import org.openkilda.rulemanager.OfTable import org.openkilda.rulemanager.OfVersion import org.openkilda.testing.model.topology.TopologyDefinition.Switch + +import com.google.common.collect.Sets +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.ProducerRecord import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier import org.springframework.beans.factory.annotation.Value @@ -32,18 +47,6 @@ import spock.lang.Narrative import spock.lang.See import spock.lang.Shared -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT -import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH -import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID -import static org.openkilda.model.MeterId.MIN_FLOW_METER_ID -import static org.openkilda.testing.Constants.RULES_DELETION_TIME -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.openkilda.testing.tools.KafkaUtils.buildMessage - @See(["https://github.com/telstra/open-kilda/tree/develop/docs/design/hub-and-spoke/switch-validate", "https://github.com/telstra/open-kilda/tree/develop/docs/design/hub-and-spoke/switch-sync"]) @Narrative("""This test suite checks the switch validate feature followed by switch synchronization for different type @@ -64,8 +67,15 @@ class SwitchValidationSpec extends HealthCheckSpecification { @Autowired @Qualifier("kafkaProducerProperties") Properties producerProps - @Autowired @Shared + @Autowired + @Shared CleanupManager cleanupManager + @Autowired + @Shared + FlowFactory flowFactory + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory def setupSpec() { deleteAnyFlowsLeftoversIssue5480() @@ -74,7 +84,7 @@ class SwitchValidationSpec extends HealthCheckSpecification { def "Able to 
validate and sync a terminating switch with proper rules and meters"() { given: "A flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches.findAll { it.ofVersion != "OF_12" } - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(srcSwitch, dstSwitch)) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch) expect: "Validate switch for src and dst contains expected meters data in 'proper' section" def srcSwitchValidateInfo = switchHelper.validateV1(srcSwitch.dpId) @@ -124,7 +134,7 @@ class SwitchValidationSpec extends HealthCheckSpecification { !switchHelper.synchronizeAndCollectFixedDiscrepancies(srcSwitch.dpId).isPresent() when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Switch validate request returns only default rules information" Wrappers.wait(WAIT_OFFSET) { @@ -140,12 +150,14 @@ class SwitchValidationSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().random() when: "Create an intermediate-switch flow" - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(switchPair)) - def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def flow = flowFactory.getRandom(switchPair) + def flowPathInfo = flow.retrieveAllEntityPaths() + List involvedSwitches = flowPathInfo.flowPath.getInvolvedIsls() + .collect { [it.srcSwitch, it.dstSwitch] }.flatten().unique() as List then: "The intermediate switch does not contain any information about meter" - def switchToValidate = flowPath[1..-2].find { !it.switchId.description.contains("OF_12") } - def intermediateSwitchValidateInfo = switchHelper.validateV1(switchToValidate.switchId) + def switchToValidate = involvedSwitches[1..-2].find { !it.dpId.description.contains("OF_12") } + def intermediateSwitchValidateInfo = switchHelper.validateV1(switchToValidate.dpId) intermediateSwitchValidateInfo.verifyMeterSectionsAreEmpty() and: "Rules are stored in the 'proper' section on the transit switch" @@ -153,13 +165,12 @@ class SwitchValidationSpec extends HealthCheckSpecification { intermediateSwitchValidateInfo.verifyRuleSectionsAreEmpty(["missing", "excess"]) and: "Able to perform switch sync which does nothing" - !switchHelper.synchronizeAndCollectFixedDiscrepancies(switchToValidate.switchId).isPresent() + !switchHelper.synchronizeAndCollectFixedDiscrepancies(switchToValidate.dpId).isPresent() when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" - def involvedSwitches = pathHelper.getInvolvedSwitches(flowPath) involvedSwitches.each { sw -> def switchValidateInfo = switchHelper.validateV1(sw.dpId) switchValidateInfo.verifyRuleSectionsAreEmpty() @@ -172,7 +183,7 @@ class SwitchValidationSpec extends HealthCheckSpecification { def "Able to validate switch with 'misconfigured' meters"() { when: "Create a flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches.findAll { it.ofVersion != "OF_12" } - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(srcSwitch, dstSwitch)) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch) def srcSwitchCreatedMeterIds = getCreatedMeterIds(srcSwitch.dpId) def dstSwitchCreatedMeterIds = getCreatedMeterIds(dstSwitch.dpId) @@ -183,7 +194,7 @@ misconfigured" it is done just for moving meter from the 'proper' section into the 'misconfigured'*/ cleanupManager.addAction(SYNCHRONIZE_SWITCH, {switchHelper.synchronize(srcSwitch.dpId)}) cleanupManager.addAction(SYNCHRONIZE_SWITCH, 
{switchHelper.synchronize(dstSwitch.dpId)}) - database.updateFlowBandwidth(flow.flowId, newBandwidth) + flow.updateFlowBandwidthInDB(newBandwidth) //at this point existing meters do not correspond with the flow and: "Validate src and dst switches" @@ -236,17 +247,18 @@ misconfigured" } and: "Flow validation shows discrepancies" - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId)*.dpId + def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() def totalSwitchRules = 0 def totalSwitchMeters = 0 involvedSwitches.each { swId -> - totalSwitchRules += northbound.getSwitchRules(swId).flowEntries.size() + totalSwitchRules += switchRulesFactory.get(swId).getRules().size() totalSwitchMeters += northbound.getAllMeters(swId).meterEntries.size() } - def flowValidateResponse = northbound.validateFlow(flow.flowId) + def flowValidateResponse = flow.validate() def expectedRulesCount = [ - flowHelperV2.getFlowRulesCountBySwitch(flow, true, involvedSwitches.size()), - flowHelperV2.getFlowRulesCountBySwitch(flow, false, involvedSwitches.size())] + flow.getFlowRulesCountBySwitch(FlowDirection.FORWARD, involvedSwitches.size(), isServer42Supported(srcSwitch.dpId)), + flow.getFlowRulesCountBySwitch(FlowDirection.REVERSE, involvedSwitches.size(), isServer42Supported(dstSwitch.dpId))] + flowValidateResponse.eachWithIndex { direction, i -> assert direction.discrepancies.size() == 2 @@ -274,7 +286,7 @@ misconfigured" } when: "Restore correct bandwidth via DB" - database.updateFlowBandwidth(flow.flowId, flow.maximumBandwidth) + flow.updateFlowBandwidthInDB(flow.maximumBandwidth) then: "Misconfigured meters are moved into the 'proper' section" def srcSwitchValidateInfoRestored = switchHelper.validateV1(srcSwitch.dpId) @@ -286,13 +298,10 @@ misconfigured" dstSwitchValidateInfoRestored.verifyMeterSectionsAreEmpty(["missing", "misconfigured", "excess"]) and: "Flow validation shows no discrepancies" - northbound.validateFlow(flow.flowId).each { direction -> - assert direction.discrepancies.empty - assert direction.asExpected - } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -306,18 +315,18 @@ misconfigured" def "Able to validate and sync a switch with missing ingress rule + meter"() { when: "Create a flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches.findAll { it.ofVersion != "OF_12" } - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(srcSwitch, dstSwitch)) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch) def srcSwitchCreatedMeterIds = getCreatedMeterIds(srcSwitch.dpId) def dstSwitchCreatedMeterIds = getCreatedMeterIds(dstSwitch.dpId) and: "Remove created meter on the srcSwitch" def forwardCookies = getCookiesWithMeter(srcSwitch.dpId) def reverseCookies = getCookiesWithMeter(dstSwitch.dpId) - def sharedCookieOnSrcSw = northbound.getSwitchRules(srcSwitch.dpId).flowEntries.findAll { + def sharedCookieOnSrcSw = switchRulesFactory.get(srcSwitch.dpId).getRules().findAll { new Cookie(it.cookie).getType() in [CookieType.SHARED_OF_FLOW, CookieType.SERVER_42_FLOW_RTT_INGRESS] }?.cookie def untouchedCookiesOnSrcSw = (reverseCookies + sharedCookieOnSrcSw).sort() - def cookiesOnDstSw = northbound.getSwitchRules(dstSwitch.dpId).flowEntries*.cookie + def cookiesOnDstSw = switchRulesFactory.get(dstSwitch.dpId).getRules().cookie cleanupManager.addAction(SYNCHRONIZE_SWITCH, 
{switchHelper.synchronize(srcSwitch.dpId)}) northbound.deleteMeter(srcSwitch.dpId, srcSwitchCreatedMeterIds[0]) @@ -374,7 +383,7 @@ misconfigured" } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -388,18 +397,19 @@ misconfigured" def "Able to validate and sync a switch with missing ingress rule (unmetered)"() { when: "Create a flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches.findAll { it.ofVersion != "OF_12" } - def flowRequest = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flowRequest.maximumBandwidth = 0 - flowRequest.ignoreBandwidth = true - def flow = flowHelperV2.addFlow(flowRequest) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withBandwidth(0) + .withIgnoreBandwidth(true).build() + .create() and: "Remove ingress rule on the srcSwitch" - def ingressCookie = database.getFlow(flow.flowId).forwardPath.cookie.value - def egressCookie = database.getFlow(flow.flowId).reversePath.cookie.value + def flowDBInfo = flow.retrieveDetailsFromDB() + def ingressCookie = flowDBInfo.forwardPath.cookie.value + def egressCookie = flowDBInfo.reversePath.cookie.value switchHelper.deleteSwitchRules(srcSwitch.dpId, ingressCookie) then: "Ingress rule is moved into the 'missing' section on the srcSwitch" - def sharedCookieOnSrcSw = northbound.getSwitchRules(srcSwitch.dpId).flowEntries.findAll { + def sharedCookieOnSrcSw = switchRulesFactory.get(srcSwitch.dpId).getRules().findAll { new Cookie(it.cookie).getType() in [CookieType.SHARED_OF_FLOW, CookieType.SERVER_42_FLOW_RTT_INGRESS] }?.cookie def untouchedCookies = ([egressCookie] + sharedCookieOnSrcSw).sort() @@ -426,7 +436,7 @@ misconfigured" } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -442,8 +452,7 @@ misconfigured" def switchPair = switchPairs.all().nonNeighbouring().random() and: "Create an intermediate-switch flow" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) when: "Delete created rules on the transit" def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId) @@ -471,7 +480,7 @@ misconfigured" } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections on all involved switches" Wrappers.wait(WAIT_OFFSET) { @@ -490,13 +499,12 @@ misconfigured" def switchPair = switchPairs.all().nonNeighbouring().random() and: "Create an intermediate-switch flow" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) - def rulesOnSrc = northbound.getSwitchRules(switchPair.src.dpId).flowEntries - def rulesOnDst = northbound.getSwitchRules(switchPair.dst.dpId).flowEntries + def flow = flowFactory.getRandom(switchPair) + def rulesOnSrc = switchRulesFactory.get(switchPair.src.dpId).getRules() + def rulesOnDst = switchRulesFactory.get(switchPair.dst.dpId).getRules() when: "Delete created rules on the srcSwitch" - def egressCookie = database.getFlow(flow.flowId).reversePath.cookie.value + def egressCookie = flow.retrieveDetailsFromDB().reversePath.cookie.value switchHelper.deleteSwitchRules(switchPair.src.dpId, egressCookie) then: "Rule info is moved into the 'missing' section on the srcSwitch" @@ -510,7 +518,7 @@ misconfigured" def dstSwitchValidateInfo = 
switchHelper.validateV1(switchPair.dst.dpId) dstSwitchValidateInfo.rules.proper.sort() == rulesOnDst*.cookie.sort() dstSwitchValidateInfo.verifyRuleSectionsAreEmpty(["missing", "excess"]) - def involvedSwitchIds = pathHelper.getInvolvedSwitches(flow.flowId)*.dpId + def involvedSwitchIds = flow.retrieveAllEntityPaths().getInvolvedSwitches() def transitSwitches = involvedSwitchIds[1..-2].findAll { !it.description.contains("OF_12") } transitSwitches.each { switchId -> @@ -531,7 +539,7 @@ misconfigured" } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections on all involved switches" Wrappers.wait(WAIT_OFFSET) { @@ -548,15 +556,13 @@ misconfigured" def switchPair = switchPairs.all().nonNeighbouring().random() and: "Create an intermediate-switch flow" - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(switchPair)) - def createdCookiesSrcSw = northbound.getSwitchRules(switchPair.src.dpId).flowEntries*.cookie - def createdCookiesDstSw = northbound.getSwitchRules(switchPair.dst.dpId).flowEntries*.cookie - def createdCookiesTransitSwitch = northbound.getSwitchRules(pathHelper.getInvolvedSwitches(flow.flowId)[1].dpId) - .flowEntries*.cookie + def flow = flowFactory.getRandom(switchPair) + def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() + def createdCookiesSrcSw = switchRulesFactory.get(switchPair.src.dpId).getRules().cookie + def createdCookiesDstSw = switchRulesFactory.get(switchPair.dst.dpId).getRules().cookie + def createdCookiesTransitSwitch = switchRulesFactory.get(involvedSwitches[1]).getRules().cookie when: "Create excess rules on switches" - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId)*.dpId - def producer = new KafkaProducer(producerProps) //pick a meter id which is not yet used on src switch def excessMeterId = ((MIN_FLOW_METER_ID..100) - northbound.getAllMeters(switchPair.src.dpId) @@ -604,7 +610,7 @@ misconfigured" then: "Switch validation shows excess rules and store them in the 'excess' section" Wrappers.wait(WAIT_OFFSET) { - assert northbound.getSwitchRules(switchPair.src.dpId).flowEntries.size() == createdCookiesSrcSw.size() + 1 + assert switchRulesFactory.get(switchPair.src.dpId).getRules().size() == createdCookiesSrcSw.size() + 1 involvedSwitches.findAll { !it.description.contains("OF_12") }.each { switchId -> def involvedSwitchValidateInfo = switchHelper.validateV1(switchId) @@ -654,7 +660,7 @@ misconfigured" assert syncResultsMap[switchPair.src.dpId].meters.removed.meterId[0] == excessMeterId when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections on all involved switches" Wrappers.wait(WAIT_OFFSET) { @@ -675,14 +681,15 @@ misconfigured" def switchPair = switchPairs.all().nonNeighbouring().withBothSwitchesVxLanEnabled().random() and: "Create a flow with vxlan encapsulation" - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(switchPair).tap {it.encapsulationType = FlowEncapsulationType.VXLAN}) + def flow = flowFactory.getBuilder(switchPair).withEncapsulationType(VXLAN).build().create() and: "Remove required rules and meters from switches" - def flowInfoFromDb = database.getFlow(flow.flowId) - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId) - def transitSwitchIds = involvedSwitches[1..-2]*.dpId + def flowInfoFromDb = flow.retrieveDetailsFromDB() + List involvedSwitches = 
flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() + .collect { [it.srcSwitch, it.dstSwitch] }.flatten().unique() as List + def transitSwitchIds = involvedSwitches[1..-2].dpId def cookiesMap = involvedSwitches.collectEntries { sw -> - [sw.dpId, northbound.getSwitchRules(sw.dpId).flowEntries.findAll { + [sw.dpId, switchRulesFactory.get(sw.dpId).getRules().findAll { !(it.cookie in sw.defaultCookies) && !new Cookie(it.cookie).serviceFlag }*.cookie] } @@ -701,11 +708,11 @@ misconfigured" def swProps = northbound.getSwitchProperties(it.dpId) def switchIdInSrcOrDst = (it.dpId in [switchPair.src.dpId, switchPair.dst.dpId]) def defaultAmountOfFlowRules = 2 // ingress + egress - def amountOfServer42Rules = (switchIdInSrcOrDst && swProps.server42FlowRtt ? 1 : 0) - if (swProps.server42FlowRtt) { - if ((flow.destination.getSwitchId() == it.dpId && flow.destination.vlanId) || ( - flow.source.getSwitchId() == it.dpId && flow.source.vlanId)) - amountOfServer42Rules += 1 + def amountOfServer42Rules = 0 + if(swProps.server42FlowRtt && switchIdInSrcOrDst) { + amountOfServer42Rules +=1 + it.dpId == switchPair.src.dpId && flow.source.vlanId && ++amountOfServer42Rules + it.dpId == switchPair.dst.dpId && flow.destination.vlanId && ++amountOfServer42Rules } def rulesCount = defaultAmountOfFlowRules + amountOfServer42Rules + (switchIdInSrcOrDst ? 1 : 0) @@ -747,7 +754,7 @@ misconfigured" and: "Rules are synced correctly" // ingressRule should contain "pushVxlan" // egressRule should contain "tunnel-id" - with(northbound.getSwitchRules(switchPair.src.dpId).flowEntries) { rules -> + with(switchRulesFactory.get(switchPair.src.dpId).getRules()) { rules -> assert rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.instructions.applyActions.pushVxlan @@ -756,7 +763,7 @@ misconfigured" }.match.tunnelId } - with(northbound.getSwitchRules(switchPair.dst.dpId).flowEntries) { rules -> + with(switchRulesFactory.get(switchPair.dst.dpId).getRules()) { rules -> assert rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.match.tunnelId @@ -766,7 +773,7 @@ misconfigured" } transitSwitchIds.each { swId -> - with(northbound.getSwitchRules(swId).flowEntries) { rules -> + with(switchRulesFactory.get(swId).getRules()) { rules -> assert rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.match.tunnelId @@ -780,31 +787,32 @@ misconfigured" def "Able to validate and sync a missing 'protected path' egress rule"() { given: "A flow with protected path" def swPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(2).random() - def flow = flowHelperV2.randomFlow(swPair).tap { allocateProtectedPath = true } - flowHelperV2.addFlow(flow) - def flowInfo = northbound.getFlowPath(flow.flowId) - def allSwitches = (pathHelper.getInvolvedSwitches(pathHelper.convert(flowInfo.protectedPath)) + - pathHelper.getInvolvedSwitches(pathHelper.convert(flowInfo))).unique { it.dpId } - def rulesPerSwitch = allSwitches.collectEntries { - [it.dpId, northbound.getSwitchRules(it.dpId).flowEntries*.cookie.sort()] + def flow = flowFactory.getBuilder(swPair).withProtectedPath(true).build().create() + + def flowPathInfo = flow.retrieveAllEntityPaths() + def allSwitches = flowPathInfo.getInvolvedSwitches() + def rulesPerSwitch = allSwitches.collectEntries { swId -> + [swId, switchRulesFactory.get(swId).getRules().cookie.sort()] } expect: "Upon validation all rules are stored in the 'proper' section" - allSwitches*.dpId.each { switchId -> - def rules = northbound.validateSwitchRules(switchId) - 
assert rules.properRules.sort() == rulesPerSwitch[switchId] + allSwitches.each { swId -> + def rules = northbound.validateSwitchRules(swId) + assert rules.properRules.sort() == rulesPerSwitch[swId] assert rules.missingRules.empty assert rules.excessRules.empty } when: "Delete rule of protected path on the srcSwitch (egress)" - def protectedPath = northbound.getFlowPath(flow.flowId).protectedPath.forwardPath - def srcSwitchRules = northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { + def protectedPath = flowPathInfo.flowPath.protectedPath.forward.nodes.nodes + + def srcSwitchRules = switchRulesFactory.get(swPair.src.dpId).getRules().findAll { !new Cookie(it.cookie).serviceFlag } def ruleToDelete = srcSwitchRules.find { - it.instructions?.applyActions?.flowOutput == protectedPath[0].inputPort.toString() && - it.match.inPort == protectedPath[0].outputPort.toString() + //specifying protectedPath[0](src.inputPort) and protectedPath[1](src.outputPort) as protected path for FORWARD direction is used + it.instructions?.applyActions?.flowOutput == protectedPath[0].portNo.toString() && + it.match.inPort == protectedPath[1].portNo.toString() }.cookie switchHelper.deleteSwitchRules(swPair.src.dpId, ruleToDelete) @@ -816,9 +824,9 @@ misconfigured" } and: "Rest switches are not affected by deleting the rule on the srcSwitch" - allSwitches.findAll { it.dpId != swPair.src.dpId }.each { sw -> - def validation = switchHelper.validateV1(sw.dpId) - assert validation.rules.proper.sort() == rulesPerSwitch[sw.dpId] + allSwitches.findAll { it != swPair.src.dpId }.each { swId -> + def validation = switchHelper.validateV1(swId) + assert validation.rules.proper.sort() == rulesPerSwitch[swId] assert validation.rules.missing.empty assert validation.rules.excess.empty } @@ -840,34 +848,34 @@ misconfigured" def swPair = switchPairs.all().random() Map initialProps = [swPair.src, swPair.dst] .collectEntries { [(it): switchHelper.getCachedSwProps(it.getDpId())] } - def flow = flowHelper.randomFlow(swPair) - flow.destination.detectConnectedDevices = new DetectConnectedDevicesPayload(true, true) - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withDetectedDevicesOnDst(true, true).build() + .create() expect: "Switch validation puts connected device lldp rule into 'proper' section" - def deviceCookie = northbound.getSwitchRules(swPair.dst.dpId).flowEntries + def deviceCookie = switchRulesFactory.get(flow.destination.switchId).getRules() .find(data.cookieSearchClosure).cookie - with(switchHelper.validateV1(flow.destination.datapath)) { + with(switchHelper.validateV1(flow.destination.switchId)) { it.rules.proper.contains(deviceCookie) } when: "Remove the connected device rule" - switchHelper.deleteSwitchRules(flow.destination.datapath, deviceCookie) + switchHelper.deleteSwitchRules(flow.destination.switchId, deviceCookie) then: "Switch validation puts connected device rule into 'missing' section" - verifyAll(switchHelper.validateV1(flow.destination.datapath)) { + verifyAll(switchHelper.validateV1(flow.destination.switchId)) { !it.rules.proper.contains(deviceCookie) it.rules.missing.contains(deviceCookie) it.rules.missingHex.contains(Long.toHexString(deviceCookie)) } when: "Synchronize the switch" - with(switchHelper.synchronize(flow.destination.datapath, false)) { + with(switchHelper.synchronize(flow.destination.switchId, false)) { it.rules.installed == [deviceCookie] } then: "Switch validation no longer shows any discrepancies in rules nor meters" - 
verifyAll(switchHelper.validateV1(flow.destination.datapath)) { + verifyAll(switchHelper.validateV1(flow.destination.switchId)) { it.rules.proper.contains(deviceCookie) it.rules.missing.empty it.rules.missingHex.empty @@ -878,10 +886,10 @@ misconfigured" } when: "Delete the flow" - flowHelper.deleteFlow(flow.id) + flow.delete() then: "Switch validation is empty" - verifyAll(switchHelper.validateV1(flow.destination.datapath)) { + verifyAll(switchHelper.validateV1(flow.destination.switchId)) { it.verifyRuleSectionsAreEmpty() it.verifyMeterSectionsAreEmpty() } @@ -909,7 +917,7 @@ misconfigured" } List getCookiesWithMeter(SwitchId switchId) { - return northbound.getSwitchRules(switchId).flowEntries.findAll { + return switchRulesFactory.get(switchId).getRules().findAll { !new Cookie(it.cookie).serviceFlag && it.instructions.goToMeter }*.cookie.sort() } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationV2Spec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationV2Spec.groovy index 16b402af484..09160bb5fc6 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationV2Spec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchValidationV2Spec.groovy @@ -6,6 +6,7 @@ import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES import static org.openkilda.functionaltests.extension.tags.Tag.TOPOLOGY_DEPENDENT import static org.openkilda.functionaltests.extension.tags.Tag.VIRTUAL import static org.openkilda.functionaltests.helpers.SwitchHelper.isDefaultMeter +import static org.openkilda.functionaltests.helpers.SwitchHelper.isServer42Supported import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.SYNCHRONIZE_SWITCH import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID import static org.openkilda.model.MeterId.MIN_FLOW_METER_ID @@ -14,27 +15,25 @@ import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME import static org.openkilda.testing.Constants.WAIT_OFFSET import static org.openkilda.testing.tools.KafkaUtils.buildMessage -import com.google.common.collect.Sets -import org.apache.kafka.clients.producer.KafkaProducer -import org.apache.kafka.clients.producer.ProducerRecord import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.DockerHelper -import org.openkilda.functionaltests.helpers.PathHelper import org.openkilda.functionaltests.helpers.SwitchHelper import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.helpers.model.ContainerName +import org.openkilda.functionaltests.helpers.model.FlowDirection +import org.openkilda.functionaltests.helpers.model.FlowEncapsulationType +import org.openkilda.functionaltests.helpers.model.SwitchRulesFactory import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.messaging.command.switches.DeleteRulesAction import org.openkilda.messaging.model.FlowDirectionType import org.openkilda.messaging.payload.flow.DetectConnectedDevicesPayload -import org.openkilda.model.FlowEncapsulationType import org.openkilda.model.MeterId import org.openkilda.model.SwitchFeature import org.openkilda.model.SwitchId import 
org.openkilda.model.cookie.Cookie import org.openkilda.model.cookie.CookieBase.CookieType -import org.openkilda.northbound.dto.v1.switches.SwitchPropertiesDto import org.openkilda.rulemanager.FlowSpeakerData import org.openkilda.rulemanager.Instructions import org.openkilda.rulemanager.MeterFlag @@ -42,6 +41,10 @@ import org.openkilda.rulemanager.MeterSpeakerData import org.openkilda.rulemanager.OfTable import org.openkilda.rulemanager.OfVersion import org.openkilda.testing.model.topology.TopologyDefinition.Switch + +import com.google.common.collect.Sets +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.clients.producer.ProducerRecord import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Qualifier import org.springframework.beans.factory.annotation.Value @@ -72,8 +75,15 @@ class SwitchValidationV2Spec extends HealthCheckSpecification { @Value('${docker.host}') @Shared String dockerHost - @Autowired @Shared + @Autowired + @Shared CleanupManager cleanupManager + @Autowired + @Shared + FlowFactory flowFactory + @Autowired + @Shared + SwitchRulesFactory switchRulesFactory def setupSpec() { deleteAnyFlowsLeftoversIssue5480() @@ -82,7 +92,7 @@ class SwitchValidationV2Spec extends HealthCheckSpecification { def "Able to validate and sync a terminating switch with proper rules and meters"() { given: "A flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches.findAll { it.ofVersion != "OF_12" } - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(srcSwitch, dstSwitch)) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch) expect: "Validate switch for src and dst contains expected meters data in 'proper' section" def srcSwitchValidateInfo = switchHelper.validate(srcSwitch.dpId) @@ -138,7 +148,7 @@ class SwitchValidationV2Spec extends HealthCheckSpecification { } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Switch validate request returns only default rules information" Wrappers.wait(WAIT_OFFSET) { @@ -155,13 +165,14 @@ class SwitchValidationV2Spec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().random() when: "Create an intermediate-switch flow" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) - def flowPath = PathHelper.convert(northbound.getFlowPath(flow.flowId)) + def flow = flowFactory.getRandom(switchPair) + def flowPathInfo = flow.retrieveAllEntityPaths() + def involvedSwitches = flowPathInfo.flowPath.getInvolvedIsls() + .collect { [it.srcSwitch, it.dstSwitch] }.flatten().unique() as List then: "The intermediate switch does not contain any information about meter" - def switchToValidate = flowPath[1..-2].find { !it.switchId.description.contains("OF_12") } - def intermediateSwitchValidateInfo = switchHelper.validate(switchToValidate.switchId) + def switchToValidate = involvedSwitches[1..-2].find { !it.dpId.description.contains("OF_12") } + def intermediateSwitchValidateInfo = switchHelper.validate(switchToValidate.dpId) intermediateSwitchValidateInfo.verifyMeterSectionsAreEmpty() and: "Rules are stored in the 'proper' section on the transit switch" @@ -169,7 +180,7 @@ class SwitchValidationV2Spec extends HealthCheckSpecification { intermediateSwitchValidateInfo.verifyRuleSectionsAreEmpty(["missing", "excess"]) and: "Able to perform switch sync which does nothing" - verifyAll(switchHelper.synchronize(switchToValidate.switchId, true)) { + 
verifyAll(switchHelper.synchronize(switchToValidate.dpId, true)) { it.rules.removed.empty it.rules.installed.empty it.meters.removed.empty @@ -177,10 +188,9 @@ class SwitchValidationV2Spec extends HealthCheckSpecification { } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" - def involvedSwitches = pathHelper.getInvolvedSwitches(flowPath) involvedSwitches.each { sw -> def switchValidateInfo = switchHelper.validate(sw.dpId) switchValidateInfo.verifyRuleSectionsAreEmpty() @@ -193,7 +203,7 @@ class SwitchValidationV2Spec extends HealthCheckSpecification { def "Able to validate switch with 'misconfigured' meters"() { when: "Create a flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches.findAll { it.ofVersion != "OF_12" } - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(srcSwitch, dstSwitch)) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch) def srcSwitchCreatedMeterIds = getCreatedMeterIds(srcSwitch.dpId) def dstSwitchCreatedMeterIds = getCreatedMeterIds(dstSwitch.dpId) @@ -204,7 +214,7 @@ misconfigured" cleanupManager.addAction(SYNCHRONIZE_SWITCH, {switchHelper.synchronize(dstSwitch.dpId)}) /** at this point meter is set for given flow. Now update flow bandwidth directly via DB, it is done just for moving meter from the 'proper' section into the 'misconfigured'*/ - database.updateFlowBandwidth(flow.flowId, newBandwidth) + flow.updateFlowBandwidthInDB(newBandwidth) //at this point existing meters do not correspond with the flow and: "Validate src and dst switches" @@ -258,7 +268,7 @@ misconfigured" } and: "Flow validation shows discrepancies" - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId)*.dpId + def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() def totalSwitchRules = 0 def totalSwitchMeters = 0 involvedSwitches.each { swId -> @@ -266,9 +276,10 @@ misconfigured" totalSwitchMeters += northbound.getAllMeters(swId).meterEntries.size() } def expectedRulesCount = [ - flowHelperV2.getFlowRulesCountBySwitch(flow, true, involvedSwitches.size()), - flowHelperV2.getFlowRulesCountBySwitch(flow, false, involvedSwitches.size())] - def flowValidateResponse = northbound.validateFlow(flow.flowId) + flow.getFlowRulesCountBySwitch(FlowDirection.FORWARD, involvedSwitches.size(), isServer42Supported(srcSwitch.dpId)), + flow.getFlowRulesCountBySwitch(FlowDirection.REVERSE, involvedSwitches.size(), isServer42Supported(dstSwitch.dpId))] + + def flowValidateResponse = flow.validate() flowValidateResponse.eachWithIndex { direction, i -> assert direction.discrepancies.size() == 2 @@ -296,7 +307,7 @@ misconfigured" } when: "Restore correct bandwidth via DB" - database.updateFlowBandwidth(flow.flowId, flow.maximumBandwidth) + flow.updateFlowBandwidthInDB(flow.maximumBandwidth) then: "Misconfigured meters are moved into the 'proper' section" def srcSwitchValidateInfoRestored = switchHelper.validate(srcSwitch.dpId) @@ -308,13 +319,10 @@ misconfigured" dstSwitchValidateInfoRestored.verifyMeterSectionsAreEmpty(["missing", "misconfigured", "excess"]) and: "Flow validation shows no discrepancies" - northbound.validateFlow(flow.flowId).each { direction -> - assert direction.discrepancies.empty - assert direction.asExpected - } + flow.validateAndCollectDiscrepancies().isEmpty() when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) 
{ @@ -328,18 +336,18 @@ misconfigured" def "Able to validate and sync a switch with missing ingress rule + meter"() { when: "Create a flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches.findAll { it.ofVersion != "OF_12" } - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(srcSwitch, dstSwitch)) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch) def srcSwitchCreatedMeterIds = getCreatedMeterIds(srcSwitch.dpId) def dstSwitchCreatedMeterIds = getCreatedMeterIds(dstSwitch.dpId) and: "Remove created meter on the srcSwitch" def forwardCookies = getCookiesWithMeter(srcSwitch.dpId) def reverseCookies = getCookiesWithMeter(dstSwitch.dpId) - def sharedCookieOnSrcSw = northbound.getSwitchRules(srcSwitch.dpId).flowEntries.findAll { + def sharedCookieOnSrcSw = switchRulesFactory.get(srcSwitch.dpId).getRules().findAll { new Cookie(it.cookie).getType() in [CookieType.SHARED_OF_FLOW, CookieType.SERVER_42_FLOW_RTT_INGRESS] }?.cookie def untouchedCookiesOnSrcSw = (reverseCookies + sharedCookieOnSrcSw).sort() - def cookiesOnDstSw = northbound.getSwitchRules(dstSwitch.dpId).flowEntries*.cookie + def cookiesOnDstSw = switchRulesFactory.get(dstSwitch.dpId).getRules()*.cookie cleanupManager.addAction(SYNCHRONIZE_SWITCH, {switchHelper.synchronize(srcSwitch.dpId)}) cleanupManager.addAction(SYNCHRONIZE_SWITCH, {switchHelper.synchronize(dstSwitch.dpId)}) northbound.deleteMeter(srcSwitch.dpId, srcSwitchCreatedMeterIds[0]) @@ -397,7 +405,7 @@ misconfigured" } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -411,19 +419,19 @@ misconfigured" def "Able to validate and sync a switch with missing ingress rule (unmetered)"() { when: "Create a flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches.findAll { it.ofVersion != "OF_12" } - def flow = flowHelperV2.randomFlow(srcSwitch, dstSwitch) - flow.maximumBandwidth = 0 - flow.ignoreBandwidth = true - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(srcSwitch, dstSwitch) + .withBandwidth(0) + .withIgnoreBandwidth(true).build() + .create() and: "Remove ingress rule on the srcSwitch" - def ingressCookie = database.getFlow(flow.flowId).forwardPath.cookie.value - def egressCookie = database.getFlow(flow.flowId).reversePath.cookie.value - cleanupManager.addAction(SYNCHRONIZE_SWITCH, {switchHelper.synchronize(srcSwitch.dpId)}) + def flowDBInfo = flow.retrieveDetailsFromDB() + def ingressCookie = flowDBInfo.forwardPath.cookie.value + def egressCookie = flowDBInfo.reversePath.cookie.value switchHelper.deleteSwitchRules(srcSwitch.dpId, ingressCookie) then: "Ingress rule is moved into the 'missing' section on the srcSwitch" - def sharedCookieOnSrcSw = northbound.getSwitchRules(srcSwitch.dpId).flowEntries.findAll { + def sharedCookieOnSrcSw = switchRulesFactory.get(srcSwitch.dpId).getRules().findAll { new Cookie(it.cookie).getType() in [CookieType.SHARED_OF_FLOW, CookieType.SERVER_42_FLOW_RTT_INGRESS] }?.cookie def untouchedCookies = ([egressCookie] + sharedCookieOnSrcSw).sort() @@ -450,7 +458,7 @@ misconfigured" } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -466,11 +474,11 @@ misconfigured" def switchPair = switchPairs.all().nonNeighbouring().random() and: "Create an intermediate-switch flow" - def flow = flowHelperV2.randomFlow(switchPair) - 
flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) when: "Delete created rules on the transit" - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId) + List involvedSwitches = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() + .collect { [it.srcSwitch, it.dstSwitch] }.flatten().unique() as List def transitSw = involvedSwitches[1] switchHelper.deleteSwitchRules(transitSw.dpId, DeleteRulesAction.IGNORE_DEFAULTS) @@ -492,7 +500,8 @@ misconfigured" it.verifyRuleSectionsAreEmpty(["missing", "excess"]) } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() + then: "Check that the switch validate request returns empty sections on all involved switches" Wrappers.wait(WAIT_OFFSET) { involvedSwitches.each { sw -> @@ -510,13 +519,13 @@ misconfigured" def switchPair = switchPairs.all().nonNeighbouring().random() and: "Create an intermediate-switch flow" - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) + def rulesOnSrc = northbound.getSwitchRules(switchPair.src.dpId).flowEntries def rulesOnDst = northbound.getSwitchRules(switchPair.dst.dpId).flowEntries when: "Delete created rules on the srcSwitch" - def egressCookie = database.getFlow(flow.flowId).reversePath.cookie.value + def egressCookie = flow.retrieveDetailsFromDB().reversePath.cookie.value switchHelper.deleteSwitchRules(switchPair.src.dpId, egressCookie) then: "Rule info is moved into the 'missing' section on the srcSwitch" @@ -530,7 +539,7 @@ misconfigured" def dstSwitchValidateInfo = switchHelper.validate(switchPair.dst.dpId) dstSwitchValidateInfo.rules.proper*.cookie.sort() == rulesOnDst*.cookie.sort() dstSwitchValidateInfo.verifyRuleSectionsAreEmpty(["missing", "excess"]) - def involvedSwitchIds = pathHelper.getInvolvedSwitches(flow.flowId)*.dpId + def involvedSwitchIds = flow.retrieveAllEntityPaths().getInvolvedSwitches() def transitSwitches = involvedSwitchIds[1..-2].findAll { !it.description.contains("OF_12") } transitSwitches.each { switchId -> def transitSwitchValidateInfo = switchHelper.validate(switchId) @@ -550,7 +559,7 @@ misconfigured" } when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections on all involved switches" Wrappers.wait(WAIT_OFFSET) { @@ -567,14 +576,13 @@ misconfigured" def switchPair = switchPairs.all().nonNeighbouring().random() and: "Create an intermediate-switch flow" - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(switchPair)) - def createdCookiesSrcSw = northbound.getSwitchRules(switchPair.src.dpId).flowEntries*.cookie - def createdCookiesDstSw = northbound.getSwitchRules(switchPair.dst.dpId).flowEntries*.cookie - def createdCookiesTransitSwitch = northbound.getSwitchRules(pathHelper.getInvolvedSwitches(flow.flowId)[1].dpId) - .flowEntries*.cookie + def flow = flowFactory.getRandom(switchPair) + def involvedSwitches = flow.retrieveAllEntityPaths().getInvolvedSwitches() + def createdCookiesSrcSw = switchRulesFactory.get(switchPair.src.dpId).getRules().cookie + def createdCookiesDstSw =switchRulesFactory.get(switchPair.dst.dpId).getRules().cookie + def createdCookiesTransitSwitch = switchRulesFactory.get(involvedSwitches[1]).getRules().cookie when: "Create excess rules on switches" - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId)*.dpId def producer = new KafkaProducer(producerProps) //pick a meter id which is not yet used on src switch def 
excessMeterId = ((MIN_FLOW_METER_ID..100) - northbound.getAllMeters(switchPair.src.dpId) @@ -621,7 +629,7 @@ misconfigured" then: "Switch validation shows excess rules and store them in the 'excess' section" Wrappers.wait(WAIT_OFFSET) { - assert northbound.getSwitchRules(switchPair.src.dpId).flowEntries.size() == createdCookiesSrcSw.size() + 1 + assert switchRulesFactory.get(switchPair.src.dpId).getRules().size() == createdCookiesSrcSw.size() + 1 involvedSwitches.findAll { !it.description.contains("OF_12") }.each { switchId -> def involvedSwitchValidateInfo = switchHelper.validate(switchId) if (switchId == switchPair.src.dpId) { @@ -666,7 +674,7 @@ misconfigured" assert syncResultsMap[switchPair.src.dpId].meters.removed.meterId[0] == excessMeterId when: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() then: "Check that the switch validate request returns empty sections on all involved switches" Wrappers.wait(WAIT_OFFSET) { @@ -687,14 +695,17 @@ misconfigured" def switchPair = switchPairs.all().nonNeighbouring().withBothSwitchesVxLanEnabled().random() and: "Create a flow with vxlan encapsulation" - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(switchPair).tap { it.encapsulationType = FlowEncapsulationType.VXLAN}) + def flow = flowFactory.getBuilder(switchPair) + .withEncapsulationType(FlowEncapsulationType.VXLAN).build() + .create() and: "Remove required rules and meters from switches" - def flowInfoFromDb = database.getFlow(flow.flowId) - def involvedSwitches = pathHelper.getInvolvedSwitches(flow.flowId) + def flowInfoFromDb = flow.retrieveDetailsFromDB() + List involvedSwitches = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls() + .collect { [it.srcSwitch, it.dstSwitch] }.flatten().unique() as List def transitSwitchIds = involvedSwitches[1..-2]*.dpId def cookiesMap = involvedSwitches.collectEntries { sw -> - [sw.dpId, northbound.getSwitchRules(sw.dpId).flowEntries.findAll { + [sw.dpId, switchRulesFactory.get(sw.dpId).getRules().findAll { !(it.cookie in sw.defaultCookies) && !new Cookie(it.cookie).serviceFlag }*.cookie] } @@ -754,7 +765,7 @@ misconfigured" and: "Rules are synced correctly" // ingressRule should contain "pushVxlan" // egressRule should contain "tunnel-id" - with(northbound.getSwitchRules(switchPair.src.dpId).flowEntries) { rules -> + with(switchRulesFactory.get(switchPair.src.dpId).getRules()) { rules -> assert rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.instructions.applyActions.pushVxlan @@ -762,7 +773,7 @@ misconfigured" it.cookie == flowInfoFromDb.reversePath.cookie.value }.match.tunnelId } - with(northbound.getSwitchRules(switchPair.dst.dpId).flowEntries) { rules -> + with(switchRulesFactory.get(switchPair.dst.dpId).getRules()) { rules -> assert rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.match.tunnelId @@ -771,7 +782,7 @@ misconfigured" }.instructions.applyActions.pushVxlan } transitSwitchIds.each { swId -> - with(northbound.getSwitchRules(swId).flowEntries) { rules -> + with(switchRulesFactory.get(swId).getRules()) { rules -> assert rules.find { it.cookie == flowInfoFromDb.forwardPath.cookie.value }.match.tunnelId @@ -785,29 +796,31 @@ misconfigured" def "Able to validate and sync a missing 'protected path' egress rule"() { given: "A flow with protected path" def swPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(2).random() - def flow = flowHelperV2.randomFlow(swPair).tap { allocateProtectedPath = true } - flowHelperV2.addFlow(flow) - def flowInfo = 
northbound.getFlowPath(flow.flowId) - def allSwitches = (pathHelper.getInvolvedSwitches(pathHelper.convert(flowInfo.protectedPath)) + - pathHelper.getInvolvedSwitches(pathHelper.convert(flowInfo))).unique { it.dpId } + def flow = flowFactory.getBuilder(swPair) + .withProtectedPath(true).build() + .create() + + def flowPathInfo = flow.retrieveAllEntityPaths() + def allSwitches = flowPathInfo.getInvolvedSwitches() def rulesPerSwitch = allSwitches.collectEntries { - [it.dpId, northbound.getSwitchRules(it.dpId).flowEntries*.cookie.sort()] + [it, switchRulesFactory.get(it).getRules().cookie.sort()] } expect: "Upon validation all rules are stored in the 'proper' section" - allSwitches*.dpId.each { switchId -> + allSwitches.each { switchId -> def rules = northbound.validateSwitchRules(switchId) assert rules.properRules.sort() == rulesPerSwitch[switchId] assert rules.missingRules.empty assert rules.excessRules.empty } when: "Delete rule of protected path on the srcSwitch (egress)" - def protectedPath = northbound.getFlowPath(flow.flowId).protectedPath.forwardPath + def protectedPath = flowPathInfo.flowPath.protectedPath.forward.nodes.nodes def srcSwitchRules = northbound.getSwitchRules(swPair.src.dpId).flowEntries.findAll { !new Cookie(it.cookie).serviceFlag } def ruleToDelete = srcSwitchRules.find { - it.instructions?.applyActions?.flowOutput == protectedPath[0].inputPort.toString() && - it.match.inPort == protectedPath[0].outputPort.toString() + //protectedPath[0] (src.inputPort) and protectedPath[1] (src.outputPort) are used because the protected path is retrieved for the FORWARD direction + it.instructions?.applyActions?.flowOutput == protectedPath[0].portNo.toString() && + it.match.inPort == protectedPath[1].portNo.toString() }.cookie switchHelper.deleteSwitchRules(swPair.src.dpId, ruleToDelete) then: "Deleted rule is moved to the 'missing' section on the srcSwitch" @@ -817,9 +830,9 @@ misconfigured" it.rules.excess.empty } and: "Rest switches are not affected by deleting the rule on the srcSwitch" - allSwitches.findAll { it.dpId != swPair.src.dpId }.each { sw -> - def validation = switchHelper.validate(sw.dpId) - assert validation.rules.proper*.cookie.sort() == rulesPerSwitch[sw.dpId] + allSwitches.findAll { it != swPair.src.dpId }.each { switchId -> + def validation = switchHelper.validate(switchId) + assert validation.rules.proper*.cookie.sort() == rulesPerSwitch[switchId] assert validation.rules.missing.empty assert validation.rules.excess.empty } @@ -837,34 +850,33 @@ misconfigured" def "Able to validate and sync a missing 'connected device' #data.descr rule"() { given: "A flow with enabled connected devices" def swPair = switchPairs.all().random() - def flow = flowHelper.randomFlow(swPair) - flow.destination.detectConnectedDevices = new DetectConnectedDevicesPayload(true, true) - flowHelper.addFlow(flow) + def flow = flowFactory.getBuilder(swPair) + .withDetectedDevicesOnDst(true, true).build() + .create() expect: "Switch validation puts connected device lldp rule into 'proper' section" - def deviceCookie = northbound.getSwitchRules(swPair.dst.dpId).flowEntries - .find(data.cookieSearchClosure).cookie - with(switchHelper.validate(flow.destination.datapath)) { + def deviceCookie = switchRulesFactory.get(swPair.dst.dpId).getRules().find(data.cookieSearchClosure).cookie + with(switchHelper.validate(flow.destination.switchId)) { it.rules.proper*.cookie.contains(deviceCookie) } when: "Remove the connected device rule" - switchHelper.deleteSwitchRules(flow.destination.datapath, deviceCookie) +
switchHelper.deleteSwitchRules(flow.destination.switchId, deviceCookie) then: "Switch validation puts connected device rule into 'missing' section" - verifyAll(switchHelper.validate(flow.destination.datapath)) { + verifyAll(switchHelper.validate(flow.destination.switchId)) { !it.rules.proper*.cookie.contains(deviceCookie) it.rules.missing*.cookie.contains(deviceCookie) it.rules.missing*.cookieHex.contains(Long.toHexString(deviceCookie).toUpperCase()) } when: "Synchronize the switch" - with(switchHelper.synchronize(flow.destination.datapath, false)) { + with(switchHelper.synchronize(flow.destination.switchId, false)) { it.rules.installed == [deviceCookie] } then: "Switch validation no longer shows any discrepancies in rules nor meters" - verifyAll(switchHelper.validate(flow.destination.datapath)) { + verifyAll(switchHelper.validate(flow.destination.switchId)) { it.rules.proper*.cookie.contains(deviceCookie) it.rules.missing.empty it.rules.excess.empty @@ -873,10 +885,10 @@ misconfigured" } when: "Delete the flow" - flowHelper.deleteFlow(flow.id) + flow.delete() then: "Switch validation is empty" - verifyAll(switchHelper.validate(flow.destination.datapath)) { + verifyAll(switchHelper.validate(flow.destination.switchId)) { it.verifyRuleSectionsAreEmpty() it.verifyMeterSectionsAreEmpty() } @@ -901,7 +913,7 @@ misconfigured" def "Able to filter results using request query, and asExpected field verification"() { given: "Create a flow" def (Switch srcSwitch, Switch dstSwitch) = topology.activeSwitches.findAll { it.ofVersion != "OF_12" } - def flow = flowHelperV2.addFlow(flowHelperV2.randomFlow(srcSwitch, dstSwitch)) + def flow = flowFactory.getRandom(srcSwitch, dstSwitch) when: "Perform result filtering" def srcSwitchValidateInfo = switchHelper.validate(srcSwitch.dpId, include, null) @@ -914,7 +926,7 @@ misconfigured" dstSwitchValidateInfo.verifySectionsAsExpectedFields(sectionsToVerifyPresence) then: "Delete the flow" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() and: "Check that the switch validate request returns empty sections" Wrappers.wait(WAIT_OFFSET) { @@ -968,7 +980,7 @@ misconfigured" } List getCookiesWithMeter(SwitchId switchId) { - return northbound.getSwitchRules(switchId).flowEntries.findAll { + return switchRulesFactory.get(switchId).getRules().findAll { !new Cookie(it.cookie).serviceFlag && it.instructions.goToMeter }*.cookie.sort() } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesFlowsV2Spec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesFlowsV2Spec.groovy index e0356c9464f..5d578ba88c8 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesFlowsV2Spec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesFlowsV2Spec.groovy @@ -3,21 +3,20 @@ package org.openkilda.functionaltests.spec.switches import static org.junit.jupiter.api.Assumptions.assumeTrue import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.helpers.FlowHelperV2.randomVlan import static org.openkilda.functionaltests.model.cleanup.CleanupAfter.CLASS import static org.openkilda.messaging.payload.flow.FlowState.UP -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME import 
org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags -import org.openkilda.functionaltests.helpers.Wrappers import org.openkilda.functionaltests.helpers.model.SwitchPair import org.openkilda.functionaltests.helpers.model.SwitchTriplet import org.openkilda.functionaltests.helpers.model.YFlowExtended import org.openkilda.functionaltests.helpers.model.YFlowFactory import org.openkilda.model.FlowPathDirection -import org.openkilda.northbound.dto.v2.flows.FlowEndpointV2 -import org.openkilda.northbound.dto.v2.flows.FlowMirrorPointPayload import org.openkilda.testing.model.topology.TopologyDefinition.Switch +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended import org.springframework.beans.factory.annotation.Autowired import spock.lang.Narrative @@ -33,6 +32,8 @@ class SwitchesFlowsV2Spec extends HealthCheckSpecification { @Shared String yFlowSubFlow2Id @Shared + FlowExtended flow + @Shared String flowId @Shared SwitchTriplet switchTriplet @@ -42,6 +43,9 @@ class SwitchesFlowsV2Spec extends HealthCheckSpecification { @Autowired YFlowFactory yFlowFactory @Shared + @Autowired + FlowFactory flowFactory + @Shared Switch switchFlowGoesThrough @Shared Switch switchProtectedPathGoesThrough @@ -63,14 +67,13 @@ class SwitchesFlowsV2Spec extends HealthCheckSpecification { switchPair = switchPairs.all() .includeSwitch(switchTriplet.getShared()) .includeSwitch(switchTriplet.getEp1()).random() - def flowDefinition = flowHelperV2.randomFlow(switchPair, false).tap { allocateProtectedPath = true } - flowId = flowHelperV2.addFlow(flowDefinition, UP, CLASS).getFlowId() - switchFlowGoesThrough = pathHelper.getInvolvedSwitches(flowId).find { - ![switchPair.getSrc(), switchPair.getDst()].contains(it) - } - switchProtectedPathGoesThrough = pathHelper.getInvolvedSwitchesForProtectedPath(flowId).find { - ![switchPair.getSrc(), switchPair.getDst()].contains(it) - } + flow = flowFactory.getBuilder(switchPair, false) + .withProtectedPath(true) + .build().create(UP, CLASS) + flowId = flow.flowId + def flowPathInfo = flow.retrieveAllEntityPaths() + switchFlowGoesThrough = topology.activeSwitches.find { it.dpId == flowPathInfo.flowPath.path.forward.transitInvolvedSwitches.first() } + switchProtectedPathGoesThrough = topology.activeSwitches.find { it.dpId == flowPathInfo.flowPath.protectedPath.forward.transitInvolvedSwitches.first() } yFlow = yFlowFactory.getRandom(switchTriplet, true, [], CLASS) yFlowSubFlow1Id = yFlow.subFlows.first().flowId @@ -105,8 +108,8 @@ class SwitchesFlowsV2Spec extends HealthCheckSpecification { where: switchRole | switchUnderTest "flows through" | switchFlowGoesThrough - "starts from" | switchPair.getSrc() - "ends on" | switchPair.getDst() + "starts from" | switchPair.src + "ends on" | switchPair.dst } def "System allows to get a flow which protected path that goes through switch"() { @@ -129,15 +132,10 @@ class SwitchesFlowsV2Spec extends HealthCheckSpecification { - topology.getBusyPortsForSwitch(switchUnderTest)).first() when: "Create mirror point on switch with sink pointing to free port" - def mirrorEndpoint = FlowMirrorPointPayload.builder() - .mirrorPointId(flowHelperV2.generateFlowId()) - .mirrorPointDirection(FlowPathDirection.REVERSE.toString().toLowerCase()) - .mirrorPointSwitchId(switchUnderTest.getDpId()) - .sinkEndpoint(FlowEndpointV2.builder().switchId(switchUnderTest.getDpId()).portNumber(freePort) - .vlanId(flowHelperV2.randomVlan()) - 
.build()) - .build() - switchHelper.addMirrorPoint(flowId, mirrorEndpoint) + def mirrorEndpoint = flow.createMirrorPoint( + switchUnderTest.getDpId(), freePort, randomVlan(), + FlowPathDirection.REVERSE + ) then: "Mirror sink endpoint port is not listed in the ports list" switchHelper.getFlowsV2(switchUnderTest, [freePort]).getFlowsByPort().isEmpty() @@ -159,8 +157,7 @@ class SwitchesFlowsV2Spec extends HealthCheckSpecification { @Tags([LOW_PRIORITY]) def "One-switch Y-Flow subflows are listed in flows list"() { given: "One switch Y-Flow" - def swT = topologyHelper.getSwitchTriplet(switchProtectedPathGoesThrough.dpId, - switchProtectedPathGoesThrough.dpId, switchProtectedPathGoesThrough.dpId) + def swT = topologyHelper.getSingleSwitchTriplet(switchProtectedPathGoesThrough.dpId) def yFlow = yFlowFactory.getRandom(swT, false) when: "Request flows on switch" diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesSpec.groovy index 5c4cb07e8c9..e0ccb6eebe8 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/switches/SwitchesSpec.groovy @@ -1,37 +1,44 @@ package org.openkilda.functionaltests.spec.switches +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES +import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_SWITCH_PROPERTIES +import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID +import static org.openkilda.testing.Constants.WAIT_OFFSET +import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.SwitchNotFoundExpectedError import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowActionType import org.openkilda.functionaltests.model.cleanup.CleanupManager +import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.command.switches.DeleteRulesAction import org.openkilda.messaging.command.switches.InstallRulesAction import org.openkilda.messaging.info.event.SwitchChangeType import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.northbound.dto.v2.switches.SwitchPatchDto + import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Shared -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES -import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL -import static 
org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_ACTION -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_FAIL -import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_SWITCH_PROPERTIES -import static org.openkilda.testing.Constants.NON_EXISTENT_SWITCH_ID -import static org.openkilda.testing.Constants.WAIT_OFFSET -import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW class SwitchesSpec extends HealthCheckSpecification { @Shared SwitchNotFoundExpectedError switchNotFoundExpectedError = new SwitchNotFoundExpectedError( "Switch $NON_EXISTENT_SWITCH_ID not found", ~/Switch $NON_EXISTENT_SWITCH_ID not found/) - @Autowired @Shared + @Autowired + @Shared CleanupManager cleanupManager + @Autowired + @Shared + FlowFactory flowFactory def "System is able to return a list of all switches"() { expect: "System can return list of all switches" @@ -72,28 +79,25 @@ class SwitchesSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().withAtLeastNNonOverlappingPaths(2).random() and: "A protected flow" - def protectedFlow = flowHelperV2.randomFlow(switchPair) - protectedFlow.allocateProtectedPath = true - flowHelperV2.addFlow(protectedFlow) + def protectedFlow = flowFactory.getBuilder(switchPair) + .withProtectedPath(true) + .build().create() and: "A single switch flow" def allowedPorts = topology.getAllowedPortsForSwitch(switchPair.src).findAll { it != protectedFlow.source.portNumber } def r = new Random() - def singleFlow = flowHelperV2.singleSwitchFlow(switchPair.src) - singleFlow.source.portNumber = allowedPorts[r.nextInt(allowedPorts.size())] - singleFlow.destination.portNumber = allowedPorts[r.nextInt(allowedPorts.size())] - flowHelperV2.addFlow(singleFlow) + def singleFlow = flowFactory.getBuilder(switchPair.src, switchPair.src) + .withSourcePort(allowedPorts[r.nextInt(allowedPorts.size())]) + .withDestinationPort(allowedPorts[r.nextInt(allowedPorts.size())]) + .build().create() when: "Get all flows going through the involved switches" - def flowPathInfo = northbound.getFlowPath(protectedFlow.flowId) - def mainPath = pathHelper.convert(flowPathInfo) - def protectedPath = pathHelper.convert(flowPathInfo.protectedPath) - - def mainSwitches = pathHelper.getInvolvedSwitches(mainPath)*.dpId - def protectedSwitches = pathHelper.getInvolvedSwitches(protectedPath)*.dpId - def involvedSwitchIds = (mainSwitches + protectedSwitches).unique() + def flowPathInfo = protectedFlow.retrieveAllEntityPaths() + def mainPath = flowPathInfo.getPathNodes(Direction.FORWARD, false) + def protectedPath = flowPathInfo.getPathNodes(Direction.FORWARD, true) + def involvedSwitchIds = flowPathInfo.getInvolvedSwitches() then: "The created flows are in the response list from the src switch" def switchFlowsResponseSrcSwitch = northbound.getSwitchFlows(switchPair.src.dpId) @@ -136,10 +140,10 @@ class SwitchesSpec extends HealthCheckSpecification { getSwitchFlowsResponse4[0].id == protectedFlow.flowId when: "Create default flow on the same switches" - def defaultFlow = flowHelperV2.randomFlow(switchPair) - defaultFlow.source.vlanId = 0 - defaultFlow.destination.vlanId = 0 - flowHelperV2.addFlow(defaultFlow) + def defaultFlow = flowFactory.getBuilder(switchPair) + .withSourceVlan(0) + .withDestinationVlan(0) + .build().create() and: "Get all flows going through the src switch" def getSwitchFlowsResponse5 = northbound.getSwitchFlows(switchPair.src.dpId) @@ -154,11 +158,12 @@ class SwitchesSpec 
extends HealthCheckSpecification { and: "Get all flows going through the src switch" Wrappers.wait(WAIT_OFFSET * 2) { - assert northboundV2.getFlowStatus(protectedFlow.flowId).status == FlowState.DOWN - assert flowHelper.getLatestHistoryEntry(protectedFlow.flowId).payload.find { it.action == REROUTE_FAIL } - assert northboundV2.getFlowStatus(defaultFlow.flowId).status == FlowState.DOWN - def defaultFlowHistory = flowHelper.getHistoryEntriesByAction(defaultFlow.flowId, REROUTE_ACTION) - assert defaultFlowHistory.last().payload.find { it.action == REROUTE_FAIL } + assert protectedFlow.retrieveFlowStatus().status == FlowState.DOWN + assert protectedFlow.retrieveFlowHistory().getEntriesByType(FlowActionType.REROUTE_FAILED).last() + .payload.find { it.action == FlowActionType.REROUTE_FAILED.payloadLastAction } + assert defaultFlow.retrieveFlowStatus().status == FlowState.DOWN + assert defaultFlow.retrieveFlowHistory().getEntriesByType(FlowActionType.REROUTE_FAILED).last() + .payload.find { it.action == FlowActionType.REROUTE_FAILED.payloadLastAction } } def getSwitchFlowsResponse6 = northbound.getSwitchFlows(switchPair.src.dpId) @@ -180,12 +185,10 @@ class SwitchesSpec extends HealthCheckSpecification { def switchPair = switchPairs.all().nonNeighbouring().random() and: "A simple flow" - def simpleFlow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(simpleFlow) + def simpleFlow = flowFactory.getRandom(switchPair) and: "A single switch flow" - def singleFlow = flowHelperV2.singleSwitchFlow(switchPair.src) - flowHelperV2.addFlow(singleFlow) + def singleFlow = flowFactory.getRandom(switchPair.src, switchPair.src) when: "Deactivate the src switch" def switchToDisconnect = topology.switches.find { it.dpId == switchPair.src.dpId } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/toggles/FeatureTogglesSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/toggles/FeatureTogglesSpec.groovy index 84d1d788e30..14950129c3f 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/toggles/FeatureTogglesSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/toggles/FeatureTogglesSpec.groovy @@ -5,6 +5,7 @@ import org.openkilda.functionaltests.error.flow.FlowForbiddenToCreateExpectedErr import org.openkilda.functionaltests.error.flow.FlowForbiddenToDeleteExpectedError import org.openkilda.functionaltests.error.flow.FlowForbiddenToUpdateExpectedError import org.openkilda.functionaltests.extension.tags.Tags +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.functionaltests.model.cleanup.CleanupManager import org.springframework.beans.factory.annotation.Autowired @@ -27,73 +28,75 @@ BFD toggle is tested in BfdSpec*/ @Tags([SMOKE, LOW_PRIORITY]) @Isolated class FeatureTogglesSpec extends HealthCheckSpecification { + + @Autowired + @Shared + FlowFactory flowFactory + def "System forbids creating new flows when 'create_flow' toggle is set to false"() { given: "Existing flow" - def flowRequest = flowHelper.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1]) - def flow = flowHelper.addFlow(flowRequest) + def flow = flowFactory.getRandomV1(topology.activeSwitches[0], topology.activeSwitches[1]) when: "Set create_flow toggle to false" featureToggles.createFlowEnabled(false) and: "Try to create a new flow" - flowHelper.addFlow(flowHelper.randomFlow(topology.activeSwitches[0], 
topology.activeSwitches[1])) + flowFactory.getBuilder(topology.activeSwitches[0], topology.activeSwitches[1]).build().sendCreateRequestV1() then: "Error response is returned, explaining that feature toggle doesn't allow such operation" def e = thrown(HttpClientErrorException) new FlowForbiddenToCreateExpectedError(~/Flow create feature is disabled/).matches(e) and: "Update of previously existing flow is still possible" - flowHelper.updateFlow(flow.id, flowRequest.tap { it.description = it.description + "updated" }) + flow.updateV1(flow.tap { it.description = it.description + "updated" }) and: "Delete of previously existing flow is still possible" - flowHelper.deleteFlow(flow.id) + flow.deleteV1() } def "System forbids updating flows when 'update_flow' toggle is set to false"() { given: "Existing flow" - def flowRequest = flowHelper.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1]) - flowHelper.addFlow(flowRequest) + def flow = flowFactory.getRandomV1(topology.activeSwitches[0], topology.activeSwitches[1]) when: "Set update_flow toggle to false" featureToggles.updateFlowEnabled(false) and: "Try to update the flow" - northbound.updateFlow(flowRequest.id, flowRequest.tap { it.description = it.description + "updated" }) + flow.updateV1(flow.tap { it.description = it.description + "updated" }) then: "Error response is returned, explaining that feature toggle doesn't allow such operation" def e = thrown(HttpClientErrorException) new FlowForbiddenToUpdateExpectedError(~/Flow update feature is disabled/).matches(e) and: "Creating new flow is still possible" - flowHelper.addFlow(flowHelper.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1])) + flowFactory.getRandomV1(topology.activeSwitches[0], topology.activeSwitches[1]) } def "System forbids deleting flows when 'delete_flow' toggle is set to false"() { given: "Existing flow" - def flowRequest = flowHelper.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1]) - def flow = flowHelper.addFlow(flowRequest) + def flow = flowFactory.getRandomV1(topology.activeSwitches[0], topology.activeSwitches[1]) when: "Set delete_flow toggle to false" featureToggles.deleteFlowEnabled(false) and: "Try to delete the flow" - northbound.deleteFlow(flowRequest.id) + flow.sendDeleteRequestV1() then: "Error response is returned, explaining that feature toggle doesn't allow such operation" def e = thrown(HttpClientErrorException) new FlowForbiddenToDeleteExpectedError(~/Flow delete feature is disabled/).matches(e) and: "Creating new flow is still possible" - def newFlow = flowHelper.addFlow(flowHelper.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1])) + def newFlow = flowFactory.getRandomV1(topology.activeSwitches[0], topology.activeSwitches[1]) and: "Updating of flow is still possible" - flowHelper.updateFlow(flowRequest.id, flowRequest.tap { it.description = it.description + "updated" }) + flow.updateV1(flow.tap { it.description = it.description + "updated" }) when: "Set delete_flow toggle back to true" featureToggles.deleteFlowEnabled(true) then: "Able to delete flows" - flowHelper.deleteFlow(flow.id) - flowHelper.deleteFlow(newFlow.id) + flow.deleteV1() + newFlow.deleteV1() } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/toggles/FeatureTogglesV2Spec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/toggles/FeatureTogglesV2Spec.groovy index 3e8d9c7f542..dc2159116f6 100644 --- 
a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/toggles/FeatureTogglesV2Spec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/toggles/FeatureTogglesV2Spec.groovy @@ -1,29 +1,35 @@ package org.openkilda.functionaltests.spec.toggles +import static org.openkilda.functionaltests.ResourceLockConstants.DEFAULT_FLOW_ENCAP +import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE +import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY +import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE +import static org.openkilda.functionaltests.helpers.Wrappers.wait +import static org.openkilda.functionaltests.helpers.model.FlowActionType.* +import static org.openkilda.functionaltests.helpers.model.FlowEncapsulationType.TRANSIT_VLAN +import static org.openkilda.functionaltests.helpers.model.FlowStatusHistoryEvent.* +import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME +import static org.openkilda.testing.Constants.WAIT_OFFSET + import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.error.flow.FlowForbiddenToCreateExpectedError import org.openkilda.functionaltests.error.flow.FlowForbiddenToDeleteExpectedError import org.openkilda.functionaltests.error.flow.FlowForbiddenToUpdateExpectedError import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowActionType +import org.openkilda.functionaltests.helpers.model.FlowStatusHistoryEvent import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.model.FlowEncapsulationType +import org.springframework.beans.factory.annotation.Autowired import org.springframework.web.client.HttpClientErrorException import spock.lang.Isolated import spock.lang.Narrative import spock.lang.ResourceLock - -import static org.openkilda.functionaltests.ResourceLockConstants.DEFAULT_FLOW_ENCAP -import static org.openkilda.functionaltests.extension.tags.Tag.HARDWARE -import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY -import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_ACTION -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_FAIL -import static org.openkilda.functionaltests.helpers.Wrappers.wait -import static org.openkilda.testing.Constants.RULES_INSTALLATION_TIME -import static org.openkilda.testing.Constants.WAIT_OFFSET +import spock.lang.Shared @Narrative(""" Feature Toggles is a special lever that allows to turn on/off certain Kilda features. 
For example, we can disable @@ -39,74 +45,76 @@ flow_latency_monitoring_reactions toggle is tested in FlowMonitoringSpec @Tags(SMOKE) @Isolated class FeatureTogglesV2Spec extends HealthCheckSpecification { + + @Autowired + @Shared + FlowFactory flowFactory + def "System forbids creating new flows when 'create_flow' toggle is set to false"() { given: "Existing flow" - def flowRequest = flowHelperV2.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1]) - def flow = flowHelperV2.addFlow(flowRequest) + def flow = flowFactory.getRandom(topology.activeSwitches[0], topology.activeSwitches[1]) when: "Set create_flow toggle to false" featureToggles.createFlowEnabled(false) and: "Try to create a new flow" - flowHelperV2.addFlow(flowHelperV2.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1])) + flowFactory.getBuilder(topology.activeSwitches[0], topology.activeSwitches[1]).build().sendCreateRequest() then: "Error response is returned, explaining that feature toggle doesn't allow such operation" def e = thrown(HttpClientErrorException) - new FlowForbiddenToCreateExpectedError(~/Flow create feature is disabled/).matches(e) + and: "Update of previously existing flow is still possible" - flowHelperV2.updateFlow(flow.flowId, flowRequest.tap { it.description = it.description + "updated" }) + flow.update(flow.tap { it.description = it.description + "updated" }) and: "Delete of previously existing flow is still possible" - flowHelperV2.deleteFlow(flow.flowId) + flow.delete() } def "System forbids updating flows when 'update_flow' toggle is set to false"() { given: "Existing flow" - def flowRequest = flowHelperV2.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1]) - flowHelperV2.addFlow(flowRequest) + def flow = flowFactory.getRandom(topology.activeSwitches[0], topology.activeSwitches[1]) when: "Set update_flow toggle to false" featureToggles.updateFlowEnabled(false) and: "Try to update the flow" - northboundV2.updateFlow(flowRequest.flowId, flowRequest.tap { it.description = it.description + "updated" }) + flow.update(flow.tap { it.description = it.description + "updated" }) then: "Error response is returned, explaining that feature toggle doesn't allow such operation" def e = thrown(HttpClientErrorException) new FlowForbiddenToUpdateExpectedError(~/Flow update feature is disabled/).matches(e) and: "Creating new flow is still possible" - flowHelperV2.addFlow(flowHelperV2.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1])) + flowFactory.getRandom(topology.activeSwitches[0], topology.activeSwitches[1]) } def "System forbids deleting flows when 'delete_flow' toggle is set to false"() { given: "Existing flow" - def flowRequest = flowHelperV2.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1]) - def flow = flowHelperV2.addFlow(flowRequest) + def flow = flowFactory.getRandom(topology.activeSwitches[0], topology.activeSwitches[1]) when: "Set delete_flow toggle to false" featureToggles.deleteFlowEnabled(false) and: "Try to delete the flow" - northboundV2.deleteFlow(flowRequest.flowId) + flow.delete() then: "Error response is returned, explaining that feature toggle doesn't allow such operation" def e = thrown(HttpClientErrorException) new FlowForbiddenToDeleteExpectedError(~/Flow delete feature is disabled/).matches(e) and: "Creating new flow is still possible" - def newFlow = flowHelperV2.addFlow(flowHelperV2.randomFlow(topology.activeSwitches[0], topology.activeSwitches[1])) + def newFlow = flowFactory.getRandom(topology.activeSwitches[0], 
topology.activeSwitches[1]) and: "Updating of flow is still possible" - flowHelperV2.updateFlow(flowRequest.flowId, flowRequest.tap { it.description = it.description + "updated" }) + flow.update(flow.tap { it.description = it.description + "updated" }) when: "Set delete_flow toggle back to true" featureToggles.deleteFlowEnabled(true) then: "Able to delete flows" - flowHelper.deleteFlow(flow.flowId) - flowHelper.deleteFlow(newFlow.flowId) + flow.delete() + newFlow.delete() } @Tags([HARDWARE, ISL_RECOVER_ON_FAIL]) @@ -121,9 +129,8 @@ feature toggle"() { and: "A flow with default encapsulation" def initKildaConfig = kildaConfiguration.getKildaConfiguration() - def flow = flowHelperV2.randomFlow(swPair).tap { encapsulationType = null } - flowHelperV2.addFlow(flow) - assert northboundV2.getFlow(flow.flowId).encapsulationType == initKildaConfig.flowEncapsulationType + def flow = flowFactory.getBuilder(swPair).withEncapsulationType(null).build().create() + assert flow.retrieveDetails().encapsulationType.toString() == initKildaConfig.flowEncapsulationType when: "Update default flow encapsulation type in kilda configuration" def newFlowEncapsulationType = initKildaConfig.flowEncapsulationType == "transit_vlan" ? @@ -131,17 +138,17 @@ feature toggle"() { kildaConfiguration.updateFlowEncapsulationType(newFlowEncapsulationType) and: "Init a flow reroute by breaking current path" - def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) - def islToBreak = pathHelper.getInvolvedIsls(currentPath).first() + def flowPathInfo = flow.retrieveAllEntityPaths() + def islToBreak = flowPathInfo.flowPath.getInvolvedIsls().first() islHelper.breakIsl(islToBreak) then: "Flow is rerouted" wait(WAIT_OFFSET + rerouteDelay) { - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) != currentPath + assert flow.retrieveAllEntityPaths().getPathNodes() != flowPathInfo.getPathNodes() } and: "Encapsulation type is changed according to kilda configuration" - northboundV2.getFlow(flow.flowId).encapsulationType == newFlowEncapsulationType.toString().toLowerCase() + flow.retrieveDetails().encapsulationType.toString() == newFlowEncapsulationType.toString().toLowerCase() when: "Update default flow encapsulation type in kilda configuration" kildaConfiguration.updateFlowEncapsulationType(initKildaConfig.flowEncapsulationType) @@ -153,17 +160,17 @@ feature toggle"() { islHelper.restoreIsl(islToBreak) and: "Init a flow reroute by breaking a new current path" - def newCurrentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) - def newIslToBreak = pathHelper.getInvolvedIsls(newCurrentPath).first() + def newFlowPathInfo = flow.retrieveAllEntityPaths() + def newIslToBreak = newFlowPathInfo.flowPath.getInvolvedIsls().first() islHelper.breakIsl(newIslToBreak) then: "Flow is rerouted" wait(WAIT_OFFSET + rerouteDelay) { - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) != newCurrentPath + assert flow.retrieveAllEntityPaths().getPathNodes() != newFlowPathInfo.getPathNodes() } and: "Encapsulation type is not changed according to kilda configuration" - northboundV2.getFlow(flow.flowId).encapsulationType != initKildaConfig.flowEncapsulationType + flow.retrieveDetails().encapsulationType.toString() != initKildaConfig.flowEncapsulationType } @Tags([ISL_RECOVER_ON_FAIL, LOW_PRIORITY]) @@ -182,8 +189,7 @@ feature toggle"() { featureToggles.flowsRerouteUsingDefaultEncapType(true) and: "A flow with transit_vlan encapsulation" - def flow = flowHelperV2.randomFlow(swPair).tap { encapsulationType = 
FlowEncapsulationType.TRANSIT_VLAN } - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(swPair).withEncapsulationType(TRANSIT_VLAN).build().create() when: "Set vxlan as default flow encapsulation type in kilda configuration if it is not set" def initGlobalConfig = kildaConfiguration.getKildaConfiguration() @@ -192,44 +198,43 @@ feature toggle"() { kildaConfiguration.updateFlowEncapsulationType(vxlanEncapsulationType) and: "Init a flow reroute by breaking current path" - def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) - def islToBreak = pathHelper.getInvolvedIsls(currentPath).first() + def flowPathInfo = flow.retrieveAllEntityPaths() + def islToBreak = flowPathInfo.flowPath.getInvolvedIsls().first() islHelper.breakIsl(islToBreak) then: "Flow is not rerouted" sleep(rerouteDelay * 1000) - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == currentPath + flow.retrieveAllEntityPaths().getPathNodes() == flowPathInfo.getPathNodes() and: "Encapsulation type is NOT changed according to kilda configuration" - northboundV2.getFlow(flow.flowId).encapsulationType != vxlanEncapsulationType.toString().toLowerCase() + flow.retrieveDetails().encapsulationType.toString() != vxlanEncapsulationType.toString().toLowerCase() and: "Flow is in DOWN state" wait(WAIT_OFFSET) { - northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN + flow.retrieveFlowStatus().status == FlowState.DOWN } when: "Update the flow" - northboundV2.updateFlow(flow.flowId, flow.tap { it.description = description + " updated" }) - wait(WAIT_OFFSET / 2) { - assert northboundV2.getFlow(flow.flowId).description == flow.description - } + String newDescription = flow.description + "updated" + flow = flow.update( flow.tap { it.description = newDescription }) + assert flow.retrieveDetails().description == newDescription then: "Encapsulation type is NOT changed according to kilda configuration" - northboundV2.getFlow(flow.flowId).encapsulationType != vxlanEncapsulationType.toString().toLowerCase() + flow.encapsulationType.toString() != vxlanEncapsulationType.toString().toLowerCase() and: "Flow is rerouted and in UP state" wait(RULES_INSTALLATION_TIME) { - northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + flow.retrieveFlowStatus().status == FlowState.UP } when: "Synchronize the flow" - with(northbound.synchronizeFlow(flow.flowId)) { !it.rerouted } + with(flow.sync()) { !it.rerouted } then: "Encapsulation type is NOT changed according to kilda configuration" - northboundV2.getFlow(flow.flowId).encapsulationType != vxlanEncapsulationType.toString().toLowerCase() + flow.retrieveDetails().encapsulationType.toString() != vxlanEncapsulationType.toString().toLowerCase() and: "Flow is still in UP state" - northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + flow.retrieveFlowStatus().status == FlowState.UP cleanup: initGlobalConfig && initGlobalConfig.flowEncapsulationType != vxlanEncapsulationType.toString().toLowerCase() && @@ -240,12 +245,11 @@ feature toggle"() { def "System doesn't reroute flow when flows_reroute_on_isl_discovery: false"() { given: "A flow with alternative paths" def switchPair = switchPairs.all().neighbouring().withAtLeastNPaths(2).random() - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPair) //you have to break all altPaths to avoid rerouting when flowPath is broken - def flowPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) - def flowInvolvedIsls = 
pathHelper.getInvolvedIsls(flowPath) + def flowPathInfo = flow.retrieveAllEntityPaths() + def flowInvolvedIsls = flowPathInfo.flowPath.getInvolvedIsls() def altIsls = topology.getRelatedIsls(switchPair.src) - flowInvolvedIsls.first() islHelper.breakIsls(altIsls) @@ -258,19 +262,19 @@ feature toggle"() { then: "The flow becomes 'Down'" wait(discoveryTimeout + rerouteDelay + WAIT_OFFSET * 2) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN - assert flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION).find { + assert flow.retrieveFlowStatus().status == FlowState.DOWN + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).find { it.taskId =~ (/.+ : retry #1 ignore_bw true/) - }?.payload?.last()?.action == REROUTE_FAIL - assert northboundV2.getFlowHistoryStatuses(flow.flowId, 1).historyStatuses*.statusBecome == ["DOWN"] + }?.payload?.last()?.action == REROUTE_FAILED.payloadLastAction + assert flow.retrieveFlowHistoryStatus(1).statusBecome == [DOWN] } wait(WAIT_OFFSET) { - def prevHistorySize = flowHelper.getHistorySize(flow.flowId) + def prevHistorySize = flow.retrieveFlowHistory().entries.size() Wrappers.timedLoop(4) { //history size should no longer change for the flow, all retries should give up - def newHistorySize = flowHelper.getHistorySize(flow.flowId) + def newHistorySize = flow.retrieveFlowHistory().entries.size() assert newHistorySize == prevHistorySize - assert northbound.getFlowStatus(flow.flowId).status == FlowState.DOWN + assert flow.retrieveFlowStatus().status == FlowState.DOWN sleep(500) } } @@ -278,9 +282,9 @@ feature toggle"() { islHelper.restoreIsls(altIsls + islToBreak) then: "The flow is still in 'Down' status, because flows_reroute_on_isl_discovery: false" - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN + assert flow.retrieveFlowStatus().status == FlowState.DOWN and: "Flow is not rerouted" - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == flowPath + flow.retrieveAllEntityPaths().getPathNodes() == flowPathInfo.getPathNodes() } } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/ChaosSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/ChaosSpec.groovy index 8a683dde969..4a4e0078e9f 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/ChaosSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/ChaosSpec.groovy @@ -1,24 +1,24 @@ package org.openkilda.functionaltests.spec.xresilience -import spock.lang.Ignore - -import static org.openkilda.model.MeterId.MAX_SYSTEM_RULE_METER_ID import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME -import static org.openkilda.testing.Constants.RULES_DELETION_TIME import static org.openkilda.testing.Constants.WAIT_OFFSET import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan +import org.openkilda.functionaltests.model.stats.Direction import org.openkilda.messaging.info.event.IslChangeType -import org.openkilda.messaging.payload.flow.FlowPathPayload import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.messaging.payload.flow.PathNodePayload 
import org.openkilda.model.SwitchId -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import groovy.util.logging.Slf4j +import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Value +import spock.lang.Ignore import spock.lang.Narrative +import spock.lang.Shared import java.util.concurrent.TimeUnit @@ -26,6 +26,10 @@ import java.util.concurrent.TimeUnit @Narrative("Test system behavior under different factors and events that randomly appear across the topology") class ChaosSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + @Value('${antiflap.cooldown}') int antiflapCooldown @@ -37,10 +41,11 @@ class ChaosSpec extends HealthCheckSpecification { def "Nothing breaks when multiple flows get rerouted due to randomly failing ISLs"() { setup: "Create multiple random flows" def flowsAmount = topology.activeSwitches.size() * 10 - List flows = [] + List flows = [] + List busyEndpoints = [] flowsAmount.times { - def flow = flowHelperV2.randomFlow(*topologyHelper.randomSwitchPair, false, flows) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(switchPairs.all().random(), false, FlowState.UP, busyEndpoints) + busyEndpoints.addAll(flow.occupiedEndpoints()) flows << flow } @@ -64,24 +69,15 @@ class ChaosSpec extends HealthCheckSpecification { Wrappers.wait(PATH_INSTALLATION_TIME * 3 + flowsAmount) { flows.each { flow -> - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } - bothDirectionsHaveSamePath(northbound.getFlowPath(flow.flowId)) + assert flow.retrieveFlowStatus().status == FlowState.UP + assert flow.validateAndCollectDiscrepancies().isEmpty() + def flowPathInfo = flow.retrieveAllEntityPaths() + assert flowPathInfo.getPathNodes(Direction.FORWARD).reverse() == flowPathInfo.getPathNodes(Direction.REVERSE) } } and: "All switches are valid" - switchHelper.validate(topology.activeSwitches*.dpId).isEmpty() - } - - def bothDirectionsHaveSamePath(FlowPathPayload path) { - [path.forwardPath, path.reversePath.reverse()].transpose().each { PathNodePayload forwardNode, - PathNodePayload reverseNode -> - def failureMessage = "Failed nodes: $forwardNode $reverseNode" - assert forwardNode.switchId == reverseNode.switchId, failureMessage - assert forwardNode.outputPort == reverseNode.inputPort, failureMessage - assert forwardNode.inputPort == reverseNode.outputPort, failureMessage - } + switchHelper.synchronizeAndCollectFixedDiscrepancies(topology.activeSwitches*.dpId).isEmpty() } def blinkPort(SwitchId swId, int port) { diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/ContentionSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/ContentionSpec.groovy index 2417f176983..cf9b251c111 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/ContentionSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/ContentionSpec.groovy @@ -5,26 +5,35 @@ import static org.openkilda.testing.Constants.WAIT_OFFSET import org.openkilda.functionaltests.BaseSpecification import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended +import 
org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.messaging.payload.flow.FlowState -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import groovyx.gpars.group.DefaultPGroup +import org.springframework.beans.factory.annotation.Autowired import org.springframework.http.HttpStatus import org.springframework.web.client.HttpClientErrorException import spock.lang.Ignore import spock.lang.Narrative +import spock.lang.Shared @Narrative("""This spec is aimed to test different race conditions and system behavior in a concurrent environment (using v2 APIs)""") class ContentionSpec extends BaseSpecification { + @Autowired + @Shared + FlowFactory flowFactory + def "Parallel flow creation requests with the same name creates only 1 flow"() { when: "Create the same flow in parallel multiple times" def flowsAmount = 20 def group = new DefaultPGroup(flowsAmount) - def flow = flowHelperV2.randomFlow(switchPairs.all().nonNeighbouring().random()) + + def flow = flowFactory.getBuilder(switchPairs.all().nonNeighbouring().random()).build() def tasks = (1..flowsAmount).collect { - group.task { flowHelperV2.addFlow(flow) } + group.task { flow.create() } } tasks*.join() @@ -46,16 +55,22 @@ class ContentionSpec extends BaseSpecification { when: "Create multiple flows on the same ISLs concurrently" def flowsAmount = 20 def group = new DefaultPGroup(flowsAmount) - List flows = [] - flowsAmount.times { flows << flowHelperV2.randomFlow(topologyHelper.notNeighboringSwitchPair, false, flows) } + List flows = [] + List busyEndpoints = [] + flowsAmount.times { + def flowEntity = flowFactory.getBuilder(switchPairs.all().nonNeighbouring().random(), false, busyEndpoints).build() + busyEndpoints.addAll(flowEntity.occupiedEndpoints()) + flows << flowEntity + } + def createTasks = flows.collect { flow -> - group.task { flowHelperV2.addFlow(flow) } + group.task { flow.create()} } createTasks*.join() assert createTasks.findAll { it.isError() }.empty - def relatedIsls = pathHelper.getInvolvedIsls(northbound.getFlowPath(flows[0].flowId)) + def relatedIsls = flows[0].retrieveAllEntityPaths().flowPath.getInvolvedIsls() //all flows use same isls - flows[1..-1].each { assert pathHelper.getInvolvedIsls(northbound.getFlowPath(it.flowId)) == relatedIsls } + flows[1..-1].each { assert it.retrieveAllEntityPaths().flowPath.getInvolvedIsls() == relatedIsls } then: "Available bandwidth on related isls is reduced based on bandwidth of created flows" relatedIsls.each { isl -> @@ -68,7 +83,7 @@ class ContentionSpec extends BaseSpecification { when: "Simultaneously remove all the flows" def deleteTasks = flows.collect { flow -> - group.task { flowHelperV2.deleteFlow(flow.flowId) } + group.task { flow.delete() } } deleteTasks*.get() @@ -86,39 +101,39 @@ class ContentionSpec extends BaseSpecification { def "Reroute can be simultaneously performed with sync rules requests, removeExcess=#removeExcess"() { given: "A flow with reroute potential" def switches = switchPairs.all().nonNeighbouring().random() - def flow = flowHelperV2.randomFlow(switches) - flowHelperV2.addFlow(flow) - def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) - def newPath = switches.paths.find { it != currentPath } + def flow = flowFactory.getRandom(switches) + + def flowPathInfo = flow.retrieveAllEntityPaths() + def mainPath = flowPathInfo.getPathNodes() + def newPath = switches.paths.find { it != mainPath } switches.paths.findAll { it != newPath }.each { pathHelper.makePathMorePreferable(newPath, it) } - def 
relatedSwitches = (pathHelper.getInvolvedSwitches(currentPath) + - pathHelper.getInvolvedSwitches(newPath)).unique() + def relatedSwitches = (flowPathInfo.getInvolvedSwitches() + + pathHelper.getInvolvedSwitches(newPath).dpId).unique() when: "Flow reroute is simultaneously requested together with sync rules requests for all related switches" withPool { - def rerouteTask = { northboundV2.rerouteFlow(flow.flowId) } + def rerouteTask = { flow.reroute() } rerouteTask.callAsync() - 3.times { relatedSwitches.eachParallel { switchHelper.synchronize(it.dpId, removeExcess) } } + 3.times { relatedSwitches.eachParallel { switchHelper.synchronize(it, removeExcess) } } } then: "Flow is Up and path has changed" Wrappers.wait(WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == newPath + assert flow.retrieveFlowStatus().status == FlowState.UP + assert flow.retrieveAllEntityPaths().getPathNodes() == newPath } and: "Related switches have no rule discrepancies" Wrappers.wait(WAIT_OFFSET) { - assert switchHelper.validateAndCollectFoundDiscrepancies(relatedSwitches*.getDpId()).isEmpty() + assert switchHelper.validateAndCollectFoundDiscrepancies(relatedSwitches).isEmpty() } and: "Flow is healthy" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } + flow.validateAndCollectDiscrepancies().isEmpty() where: removeExcess << [ false, // true https://github.com/telstra/open-kilda/issues/4214 ] } - } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/FloodlightKafkaConnectionSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/FloodlightKafkaConnectionSpec.groovy index aa9af0a913e..22abd267e75 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/FloodlightKafkaConnectionSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/FloodlightKafkaConnectionSpec.groovy @@ -8,13 +8,16 @@ import static org.openkilda.testing.Constants.WAIT_OFFSET import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW import org.openkilda.functionaltests.HealthCheckSpecification +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.messaging.info.event.IslChangeType import org.openkilda.messaging.info.event.SwitchChangeType import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.model.SwitchFeature +import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Value import spock.lang.Isolated +import spock.lang.Shared import java.util.concurrent.TimeUnit @@ -22,6 +25,10 @@ import java.util.concurrent.TimeUnit class FloodlightKafkaConnectionSpec extends HealthCheckSpecification { static final int PERIODIC_SYNC_TIME = 60 + @Autowired + @Shared + FlowFactory flowFactory + @Value('${floodlight.alive.timeout}') int floodlightAliveTimeout @Value('${antiflap.cooldown}') @@ -36,7 +43,7 @@ class FloodlightKafkaConnectionSpec extends HealthCheckSpecification { def otherRegions = sw.regions - rwRegions def regionToStay = rwRegions[i % rwRegions.size()] def regionsToDc = rwRegions - regionToStay - knockoutData << [(sw): lockKeeper.knockoutSwitch(sw, regionsToDc)] + knockoutData << [(sw): switchHelper.knockoutSwitch(sw, regionsToDc)] updatedRegions[sw.dpId] = 
[regionToStay] + otherRegions } assumeTrue(updatedRegions.values().flatten().unique().size() > 1, @@ -104,11 +111,11 @@ class FloodlightKafkaConnectionSpec extends HealthCheckSpecification { [pair.src, pair.dst].any { updatedRegions[it.dpId].contains(regionToBreak) } && updatedRegions[pair.src.dpId] != updatedRegions[pair.dst.dpId] } - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.attemptToAddFlow(flow) - wait(WAIT_OFFSET * 2) { //FL may be a bit laggy right after coming up, so this may take a bit longer than usual - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP } - northbound.validateFlow(flow.flowId).each { assert it.asExpected } + def flow = flowFactory.getBuilder(swPair).build().sendCreateRequest() + wait(WAIT_OFFSET * 2) { + //FL may be a bit laggy right after coming up, so this may take a bit longer than usual + assert flow.retrieveFlowStatus().status == FlowState.UP } + flow.validateAndCollectDiscrepancies().isEmpty() cleanup: nonRtlShouldFail?.join() diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/RetriesSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/RetriesSpec.groovy index f34d3b8d68e..86387dbaba2 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/RetriesSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/RetriesSpec.groovy @@ -3,15 +3,16 @@ package org.openkilda.functionaltests.spec.xresilience import static org.openkilda.functionaltests.extension.tags.Tag.ISL_PROPS_DB_RESET import static org.openkilda.functionaltests.extension.tags.Tag.ISL_RECOVER_ON_FAIL import static org.openkilda.functionaltests.extension.tags.Tag.LOCKKEEPER +import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY import static org.openkilda.functionaltests.extension.tags.Tag.SMOKE_SWITCHES import static org.openkilda.functionaltests.extension.tags.Tag.SWITCH_RECOVER_ON_FAIL -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.DELETE_SUCCESS -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.PATH_SWAP_ACTION -import static org.openkilda.functionaltests.helpers.FlowHistoryConstants.REROUTE_ACTION import static org.openkilda.functionaltests.helpers.Wrappers.timedLoop import static org.openkilda.functionaltests.helpers.Wrappers.wait +import static org.openkilda.functionaltests.helpers.model.FlowActionType.* import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESET_ISLS_COST import static org.openkilda.functionaltests.model.cleanup.CleanupActionType.RESTORE_ISL +import static org.openkilda.functionaltests.model.stats.Direction.* +import static org.openkilda.messaging.payload.flow.FlowState.* import static org.openkilda.testing.Constants.PATH_INSTALLATION_TIME import static org.openkilda.testing.Constants.WAIT_OFFSET import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMode.RW @@ -19,13 +20,10 @@ import static org.openkilda.testing.service.floodlight.model.FloodlightConnectMo import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.factory.FlowFactory -import org.openkilda.functionaltests.helpers.model.FlowActionType +import org.openkilda.functionaltests.helpers.model.FlowExtended import 
org.openkilda.functionaltests.model.cleanup.CleanupManager import org.openkilda.messaging.info.event.PathNode -import org.openkilda.messaging.payload.flow.FlowState import org.openkilda.model.SwitchStatus -import org.openkilda.northbound.dto.v1.flows.PingInput -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 import org.openkilda.testing.model.topology.TopologyDefinition.Isl import org.openkilda.testing.model.topology.TopologyDefinition.Switch import org.openkilda.testing.service.lockkeeper.model.TrafficControlData @@ -40,6 +38,10 @@ import java.util.concurrent.TimeUnit @Slf4j class RetriesSpec extends HealthCheckSpecification { + @Autowired + @Shared + FlowFactory flowFactory + @Tags([ISL_RECOVER_ON_FAIL, ISL_PROPS_DB_RESET, SWITCH_RECOVER_ON_FAIL]) def "System retries the reroute (global retry) if it fails to install rules on one of the current target path's switches"() { given: "Switch pair with at least 3 available paths, one path should have a transit switch that we will break \ @@ -73,9 +75,8 @@ and at least 1 path must remain safe" and: "A flow using given switch pair" switchPair.paths.findAll { it != mainPath }.each { pathHelper.makePathMorePreferable(mainPath, it) } - def flow = flowHelperV2.randomFlow(switchPair) - flowHelperV2.addFlow(flow) - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == mainPath + def flow = flowFactory.getRandom(switchPair) + assert flow.retrieveAllEntityPaths().getPathNodes() == mainPath and: "Switch on the preferred failover path will suddenly be unavailable for rules installation when the reroute starts" //select a required failover path beforehand @@ -90,7 +91,7 @@ and at least 1 path must remain safe" then: "System fails to install rules on desired path and tries to retry reroute and find new path (global retry)" wait(WAIT_OFFSET * 3, 0.1) { - assert flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION).find { + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE).find { it.taskId =~ (/.+ : retry #1/) } } @@ -100,17 +101,18 @@ and at least 1 path must remain safe" then: "System finds another working path and successfully reroutes the flow (one of the retries succeeds)" wait(PATH_INSTALLATION_TIME) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + assert flow.retrieveFlowStatus().status == UP } - def currentPath = pathHelper.convert(northbound.getFlowPath(flow.flowId)) - currentPath != mainPath - currentPath != failoverPath - !pathHelper.getInvolvedSwitches(currentPath).contains(switchToBreak) - !pathHelper.getInvolvedIsls(currentPath).contains(islToBreak) - !pathHelper.getInvolvedIsls(currentPath).contains(islToBreak.reversed) + def flowPathInfo = flow.retrieveAllEntityPaths() + def mainFlowPath = flowPathInfo.getPathNodes() + mainFlowPath != mainPath + mainFlowPath != failoverPath + !flow.retrieveAllEntityPaths().getInvolvedSwitches().contains(switchToBreak) + !flowPathInfo.flowPath.getInvolvedIsls().contains(islToBreak) + !flowPathInfo.flowPath.getInvolvedIsls().contains(islToBreak.reversed) and: "All related switches have no rule anomalies" - def switchesToVerify = [mainPath, failoverPath, currentPath].collectMany { pathHelper.getInvolvedSwitches(it) }.unique() + def switchesToVerify = [mainPath, failoverPath, mainFlowPath].collectMany { pathHelper.getInvolvedSwitches(it) }.unique() .findAll { it != switchToBreak } switchHelper.validateAndCollectFoundDiscrepancies(switchesToVerify*.getDpId()).isEmpty() } @@ -147,12 +149,10 @@ and at least 1 path must remain safe" * transitSwitch 
* **/ - def flow = flowHelperV2.randomFlow(swPair) - flow.allocateProtectedPath = true - flowHelperV2.addFlow(flow) - def flowPathInfo = northbound.getFlowPath(flow.flowId) - assert pathHelper.convert(flowPathInfo) == mainPath - assert pathHelper.convert(flowPathInfo.protectedPath) == protectedPath + def flow = flowFactory.getBuilder(swPair).withProtectedPath(true).build().create() + def flowPathInfo = flow.retrieveAllEntityPaths() + assert flowPathInfo.getPathNodes(FORWARD, false) == mainPath + assert flowPathInfo.getPathNodes(FORWARD, true) == protectedPath when: "Disconnect dst switch on protected path" def swToManipulate = swPair.dst @@ -167,19 +167,19 @@ and at least 1 path must remain safe" then: "System retried to #data.description" wait(WAIT_OFFSET) { - assert flowHelper.getHistoryEntriesByAction(flow.flowId, data.historyAction) + assert flow.retrieveFlowHistory().getEntriesByType(data.historyAction) .last().payload*.details.findAll{ it =~ /.+ Retrying/}.size() == data.retriesAmount } then: "Flow is DOWN" wait(WAIT_OFFSET) { - assert northboundV2.getFlowStatus(flow.flowId).status == FlowState.DOWN + assert flow.retrieveFlowStatus().status == DOWN } and: "Flow is not rerouted" - def flowPathInfoAfterSwap = northbound.getFlowPath(flow.flowId) - pathHelper.convert(flowPathInfoAfterSwap) == mainPath - pathHelper.convert(flowPathInfoAfterSwap.protectedPath) == protectedPath + def flowPathInfoAfterSwap = flow.retrieveAllEntityPaths() + flowPathInfoAfterSwap.getPathNodes(FORWARD, false) == mainPath + flowPathInfoAfterSwap.getPathNodes(FORWARD, true) == protectedPath and: "All involved switches pass switch validation(except dst switch)" @@ -195,12 +195,12 @@ and at least 1 path must remain safe" then: "Flow is UP" wait(discoveryInterval + rerouteDelay + WAIT_OFFSET) { - northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + flow.retrieveFlowStatus().status == UP } and: "Flow is valid and pingable" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } - with(northbound.pingFlow(flow.flowId, new PingInput())) { + flow.validateAndCollectDiscrepancies().isEmpty() + with(flow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -215,23 +215,21 @@ and at least 1 path must remain safe" where: data << [ - //issue #3237 - //[ - // description: "update", - // historyAction: UPDATE_ACTION, - // retriesAmount: 15, // - // //install: ingress 2 * 3(attempts) + remove: ingress 2 * 3(attempts) + 1 egress * 3(attempts) - // action: { FlowRequestV2 f -> - // getNorthboundV2().updateFlow(f.flowId, f.tap { it.description = "updated" }) } - //], +// issue #3237 +// [ +// description: "update", +// historyAction: FlowActionType.UPDATE, +// retriesAmount: 15, // +// //install: ingress 2 * 3(attempts) + remove: ingress 2 * 3(attempts) + 1 egress * 3(attempts) +// action: { FlowExtended f -> f.update(f.tap { it.description = "updated" }) } +// ], [ description: "swap paths", - historyAction: PATH_SWAP_ACTION, + historyAction: PATH_SWAP, retriesAmount: 15, // swap: install: 3 attempts, revert: delete 9 attempts + install 3 attempts // delete: 3 attempts * (1 flow rule + 1 ingress mirror rule + 1 egress mirror rule) = 9 attempts - action: { FlowRequestV2 f -> - getNorthbound().swapFlowPath(f.flowId) } + action: { FlowExtended f -> f.swapFlowPath() } ] ] } @@ -240,24 +238,23 @@ and at least 1 path must remain safe" def "Flow is successfully deleted from the system even if some rule delete commands fail (no rollback for delete)"() { given: "A flow" def swPair = 
switchPairs.all().first() - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) + def flow = flowFactory.getRandom(swPair) when: "Send delete request for the flow" switchHelper.shapeSwitchesTraffic([swPair.src], new TrafficControlData(1000)) - northboundV2.deleteFlow(flow.flowId) + flow.sendDeleteRequest() and: "One of the related switches does not respond" switchHelper.knockoutSwitch(swPair.src, RW) then: "Flow history shows failed delete rule retry attempts but flow deletion is successful at the end" wait(WAIT_OFFSET) { - def history = flowHelper.getLatestHistoryEntry(flow.flowId).payload + def history = flow.retrieveFlowHistory().getEntriesByType(DELETE).last().payload //egress and ingress rule and egress and ingress mirror rule on a broken switch, 3 retries each = total 12 assert history.count { it.details ==~ /Failed to remove the rule.*Retrying \(attempt \d+\)/ } == 12 - assert history.last().action == DELETE_SUCCESS + assert history.last().action == DELETE.payloadLastAction } - !northboundV2.getFlowStatus(flow.flowId) + !flow.retrieveFlowStatus() } @Tags([ISL_RECOVER_ON_FAIL, ISL_PROPS_DB_RESET, SWITCH_RECOVER_ON_FAIL]) @@ -275,9 +272,8 @@ and at least 1 path must remain safe" islHelper.breakIsls(altIsls) and: "A flow on the main path" - def flow = flowHelperV2.randomFlow(swPair) - flowHelperV2.addFlow(flow) - assert pathHelper.convert(northbound.getFlowPath(flow.flowId)) == mainPath + def flow = flowFactory.getRandom(swPair) + assert flow.retrieveAllEntityPaths().getPathNodes() == mainPath when: "Make backupPath more preferable than mainPath" pathHelper.makePathMorePreferable(backupPath, mainPath) @@ -291,18 +287,18 @@ and at least 1 path must remain safe" database.setSwitchStatus(swToManipulate.dpId, SwitchStatus.ACTIVE) and: "Init intentional flow reroute(APIv1)" - northbound.rerouteFlow(flow.flowId) + flow.rerouteV1() then: "System retries to install/delete rules on the dst switch" wait(WAIT_OFFSET) { - assert flowHelper.getHistoryEntriesByAction(flow.flowId, REROUTE_ACTION) + assert flow.retrieveFlowHistory().getEntriesByType(REROUTE) .last().payload*.details.findAll{ it =~ /.+ Retrying/}.size() == 15 //install: 3 attempts, revert: delete 9 attempts + install 3 attempts // delete: 3 attempts * (1 flow rule + 1 ingress mirror rule + 1 egress mirror rule) = 9 attempts } then: "Flow is not rerouted" - pathHelper.convert(northbound.getFlowPath(flow.flowId)) == mainPath + flow.retrieveAllEntityPaths().getPathNodes() == mainPath and: "All involved switches pass switch validation(except dst switch)" def involvedSwitchIds = pathHelper.getInvolvedSwitches(backupPath)[0..-2]*.dpId @@ -317,12 +313,12 @@ and at least 1 path must remain safe" then: "Flow is UP" wait(discoveryInterval + rerouteDelay + WAIT_OFFSET) { - northboundV2.getFlowStatus(flow.flowId).status == FlowState.UP + flow.retrieveFlowStatus().status == UP } and: "Flow is valid and pingable" - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } - with(northbound.pingFlow(flow.flowId, new PingInput())) { + flow.validateAndCollectDiscrepancies().isEmpty() + with(flow.ping()) { it.forward.pingSuccess it.reverse.pingSuccess } @@ -351,10 +347,11 @@ class RetriesIsolatedSpec extends HealthCheckSpecification { FlowFactory flowFactory //isolation: requires no 'up' events in the system while flow is Down - @Tags([ISL_RECOVER_ON_FAIL]) + @Tags([ISL_RECOVER_ON_FAIL, LOW_PRIORITY]) def "System does not retry after global timeout for reroute operation"() { given: "A flow with ability to 
reroute" - def swPair = switchPairs.all().nonNeighbouring().random() + def swPair = switchPairs.all().nonNeighbouring().switchPairs + .find { it.src.dpId.toString().contains("03") && it.dst.dpId.toString().contains("07")} def allFlowPaths = swPair.paths def preferableIsls = pathHelper.getInvolvedIsls(allFlowPaths.find{ it.size() >= 10 }) pathHelper.updateIslsCost(preferableIsls, 1) @@ -362,29 +359,29 @@ class RetriesIsolatedSpec extends HealthCheckSpecification { def flow = flowFactory.getRandom(swPair) when: "Break current path to trigger a reroute" - def islToBreak = pathHelper.getInvolvedIsls(flow.flowId).first() + def islToBreak = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls().first() cleanupManager.addAction(RESTORE_ISL, {islHelper.restoreIsl(islToBreak)}) cleanupManager.addAction(RESET_ISLS_COST,{database.resetCosts(topology.isls)}) northbound.portDown(islToBreak.srcSwitch.dpId, islToBreak.srcPort) and: "Connection to src switch is slow in order to simulate a global timeout on reroute operation" - switchHelper.shapeSwitchesTraffic([swPair.src], new TrafficControlData(9000)) + switchHelper.shapeSwitchesTraffic([swPair.src], new TrafficControlData(9200)) then: "After global timeout expect flow reroute to fail and flow to become DOWN" TimeUnit.SECONDS.sleep(globalTimeout) int eventsAmount wait(globalTimeout + WAIT_OFFSET, 1) { //long wait, may be doing some revert actions after global t/o def history = flow.retrieveFlowHistory() - def rerouteEvent = history.getEntriesByType(FlowActionType.REROUTE).first() + def rerouteEvent = history.getEntriesByType(REROUTE).first() assert rerouteEvent.payload.find { it.action == sprintf('Global timeout reached for reroute operation on flow "%s"', flow.flowId) } - assert rerouteEvent.payload.last().action == FlowActionType.REROUTE_FAILED.payloadLastAction - assert flow.retrieveFlowStatus().status == FlowState.DOWN + assert rerouteEvent.payload.last().action == REROUTE_FAILED.payloadLastAction + assert flow.retrieveFlowStatus().status == DOWN eventsAmount = history.entries.size() } and: "Flow remains down and no new history events appear for the next 3 seconds (no retry happens)" timedLoop(3) { - assert flow.retrieveFlowStatus().status == FlowState.DOWN + assert flow.retrieveFlowStatus().status == DOWN assert flow.retrieveFlowHistory().entries.size() == eventsAmount } diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/StormHeavyLoadSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/StormHeavyLoadSpec.groovy index 7a468c4edfe..6cf126ba395 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/StormHeavyLoadSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/StormHeavyLoadSpec.groovy @@ -7,6 +7,7 @@ import static org.openkilda.testing.Constants.WAIT_OFFSET import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory import org.openkilda.messaging.Message import org.openkilda.messaging.info.InfoData import org.openkilda.messaging.info.InfoMessage @@ -20,6 +21,7 @@ import org.apache.kafka.clients.producer.ProducerRecord import org.springframework.beans.factory.annotation.Autowired import 
org.springframework.beans.factory.annotation.Qualifier import org.springframework.beans.factory.annotation.Value +import spock.lang.Shared @Slf4j class StormHeavyLoadSpec extends HealthCheckSpecification { @@ -31,6 +33,10 @@ class StormHeavyLoadSpec extends HealthCheckSpecification { @Qualifier("kafkaProducerProperties") Properties producerProps + @Autowired + @Shared + FlowFactory flowFactory + def r = new Random() /** @@ -62,10 +68,10 @@ class StormHeavyLoadSpec extends HealthCheckSpecification { then: "Still able to create and delete flows while Storm is swallowing the messages" def checkFlowCreation = { - def flow = flowHelper.randomFlow(topology.islsForActiveSwitches[1].srcSwitch, - topology.islsForActiveSwitches[1].dstSwitch) - flowHelper.addFlow(flow) - flowHelper.deleteFlow(flow.id) + def flow = flowFactory.getBuilder(topology.islsForActiveSwitches[1].srcSwitch, + topology.islsForActiveSwitches[1].dstSwitch).build() + flow.create() + flow.delete() sleep(500) } def endProducing = new Thread({ producers.each({ it.close() }) }) diff --git a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/StormLcmSpec.groovy b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/StormLcmSpec.groovy index 2f40d98337a..8a7aebd0c62 100644 --- a/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/StormLcmSpec.groovy +++ b/src-java/testing/functional-tests/src/test/groovy/org/openkilda/functionaltests/spec/xresilience/StormLcmSpec.groovy @@ -1,7 +1,5 @@ package org.openkilda.functionaltests.spec.xresilience -import org.openkilda.model.IslStatus - import static com.shazam.shazamcrest.matcher.Matchers.sameBeanAs import static org.openkilda.functionaltests.extension.tags.Tag.LOW_PRIORITY import static org.openkilda.functionaltests.extension.tags.Tag.VIRTUAL @@ -15,11 +13,15 @@ import org.openkilda.functionaltests.HealthCheckSpecification import org.openkilda.functionaltests.extension.tags.Tags import org.openkilda.functionaltests.helpers.WfmManipulator import org.openkilda.functionaltests.helpers.Wrappers +import org.openkilda.functionaltests.helpers.factory.FlowFactory +import org.openkilda.functionaltests.helpers.model.FlowExtended +import org.openkilda.functionaltests.helpers.model.SwitchPortVlan import org.openkilda.messaging.info.event.IslChangeType import org.openkilda.messaging.info.event.SwitchChangeType -import org.openkilda.northbound.dto.v2.flows.FlowRequestV2 +import org.openkilda.model.IslStatus import org.openkilda.testing.Constants +import org.springframework.beans.factory.annotation.Autowired import org.springframework.beans.factory.annotation.Value import spock.lang.Ignore import spock.lang.Isolated @@ -41,11 +43,17 @@ verify their consistency after restart. 
@Tags(VIRTUAL) @Isolated class StormLcmSpec extends HealthCheckSpecification { + + static final IntRange KILDA_ALLOWED_VLANS = 1..4095 + @Shared WfmManipulator wfmManipulator @Value('${docker.host}') @Shared String dockerHost + @Autowired + @Shared + FlowFactory flowFactory def setupSpec() { //since we simulate storm restart by restarting the docker container, for now this is only possible on virtual @@ -58,19 +66,20 @@ class StormLcmSpec extends HealthCheckSpecification { // note: it takes ~15 minutes to run this test def "System survives Storm topologies restart"() { given: "Non-empty system with some flows created" - List flows = [] + List flows = [] def flowsAmount = topology.activeSwitches.size() * 3 + List busyEndpoints = [] flowsAmount.times { - def flow = flowHelperV2.randomFlow(switchPairs.all().random(), false, flows) - flow.maximumBandwidth = 500000 - flowHelperV2.addFlow(flow) + def flow = flowFactory.getBuilder(switchPairs.all().random(), false, busyEndpoints) + .withBandwidth(500000).build() + .create() + + busyEndpoints.addAll(flow.occupiedEndpoints()) flows << flow } and: "All created flows are valid" - flows.each { flow -> - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } - } + flows.each { flow -> flow.validateAndCollectDiscrepancies().isEmpty() } and: "Database dump" //unstable for parallel runs even when isolated. why? @@ -109,17 +118,16 @@ class StormLcmSpec extends HealthCheckSpecification { } and: "Flows remain valid in terms of installed rules and meters" - flows.each { flow -> - northbound.validateFlow(flow.flowId).each { direction -> assert direction.asExpected } - } + flows.each { flow -> flow.validateAndCollectDiscrepancies().isEmpty() } and: "Flow can be updated" - def flowToUpdate = flows[0] + FlowExtended flowToUpdate = flows[0] //expect enough free vlans here, ignore used switch-ports for simplicity of search - def unusedVlan = (flowHelper.KILDA_ALLOWED_VLANS - flows + def unusedVlan = (KILDA_ALLOWED_VLANS - flows .collectMany { [it.source.vlanId, it.destination.vlanId] })[0] - flowHelperV2.updateFlow(flowToUpdate.flowId, flowToUpdate.tap { it.source.vlanId = unusedVlan }) - northbound.validateFlow(flowToUpdate.flowId).each { direction -> assert direction.asExpected } + + flowToUpdate.update(flowToUpdate.tap { it.source.vlanId = unusedVlan }) + flowToUpdate.validateAndCollectDiscrepancies().isEmpty() } @Ignore
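The spec changes above all follow the same refactoring pattern: direct `northbound`/`northboundV2` and `pathHelper` calls are replaced by interactions with the `FlowFactory`/`FlowExtended` helpers. As a rough, illustrative sketch only (not part of the patch; the helper calls are copied from the refactored specs above, and exact signatures may differ), the new interaction style looks like this:

```groovy
// Illustrative sketch of the FlowExtended-based test style (helper names as used in the specs above).
def swPair = switchPairs.all().nonNeighbouring().random()

// Build and create a flow via the factory instead of flowHelperV2.randomFlow()/addFlow().
def flow = flowFactory.getBuilder(swPair)
        .withBandwidth(500000)
        .build()
        .create()

// Path/ISL details, status, validation and cleanup are all driven through the flow object itself.
def involvedIsls = flow.retrieveAllEntityPaths().flowPath.getInvolvedIsls()
assert flow.retrieveFlowStatus().status == FlowState.UP
assert flow.validateAndCollectDiscrepancies().isEmpty()
flow.delete()
```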