diff --git a/.github/workflows/actions-test.yml b/.github/workflows/actions-test.yml deleted file mode 100644 index e54264279..000000000 --- a/.github/workflows/actions-test.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: actions-test - -on: [repository_dispatch] - - -jobs: - build: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v1 - - name: Run a one-line script - run: echo '${{ toJson(github.event) }}' - - name: Run a multi-line script - run: | - echo Add other actions to build, - echo test, and deploy your project. diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 000000000..a878566cf --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,119 @@ +name: "Yorc GH Actions Build" + +on: [push, pull_request] + + +defaults: + run: + shell: bash + +jobs: + + security: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Run Snyk to check for vulnerabilities + uses: snyk/actions/golang@master + continue-on-error: true # To make sure that SARIF upload gets called + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + with: + args: --sarif-file-output=snyk.sarif + - name: Upload result to GitHub Code Scanning + uses: github/codeql-action/upload-sarif@v1 + with: + sarif_file: snyk.sarif + - name: Run Snyk to check for vulnerabilities and send it to Snyk.io + uses: snyk/actions/golang@master + env: + SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} + with: + command: monitor + + tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + # Disabling shallow clone is recommended for improving relevancy of reporting (for sonar) + fetch-depth: 0 + - name: Setup go + uses: actions/setup-go@v1 + with: + go-version: "1" + - name: Test + run: | + go version + go env + echo "YORC_VERSION=$(grep "yorc_version" versions.yaml | awk '{print $2}')" >> $GITHUB_ENV + make tools + TESTARGS='-coverprofile coverage-sonar.out -coverpkg=./...' 
make json-test + - name: SonarCloud Scan + uses: sonarsource/sonarcloud-github-action@master + # Do this only on push commit do not need to be re-analyzed on PR + if: github.event_name == 'push' + with: + args: > + -Dsonar.projectVersion=${{ env.YORC_VERSION }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }} + + + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + + - name: Setup jfrog CLI + uses: jfrog/setup-jfrog-cli@v1 + env: + JF_ARTIFACTORY_1: ${{ secrets.JF_ARTIFACTORY_SERVER_1 }} + + - name: Ping Artifactory with jfrog CLI + run: | + # Ping the server + jfrog rt ping + + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Install sphinx dependencies + run: | + pip install --user --upgrade sphinx==1.8.1 semantic-version requests urllib3[secure]==1.23 Pygments>=2.7.1 + pip install -r doc/requirements.txt + sudo apt-get install -y jq \ + latexmk \ + texlive-binaries \ + texlive-fonts-recommended \ + texlive-latex-base \ + texlive-latex-extra \ + texlive-latex-recommended + + - name: Setup go + uses: actions/setup-go@v1 + with: + go-version: "1" + + - name: Make distribution + run: | + set -euo pipefail + make tools + SKIP_TESTS=1 make dist + + - name: Login to Docker Hub + uses: docker/login-action@v1 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Make Docker container + run: | + ./docker_build.sh + + - name: Deploy artifacts to Artifactory + run: | + ./build/deploy_artifactory.sh diff --git a/.github/workflows/cleanup_artifactory.yml b/.github/workflows/cleanup_artifactory.yml new file mode 100644 index 000000000..c5faa98f2 --- /dev/null +++ b/.github/workflows/cleanup_artifactory.yml @@ -0,0 +1,39 @@ +name: Artifactory Cleanup +on: + workflow_dispatch: + inputs: + from_date: + description: '' + required: false + default: '30 days ago' + schedule: + - cron: '0 12 7,14,21,28 * *' + 
+defaults: + run: + shell: bash + +jobs: + cleanup: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Setup jfrog CLI + uses: jfrog/setup-jfrog-cli@v1 + env: + JF_ARTIFACTORY_1: ${{ secrets.JF_ARTIFACTORY_SERVER_1 }} + + - name: Ping Artifactory with jfrog CLI + run: | + # Ping the server + jfrog rt ping + + - name: Run Cleanup + run: | + ./build/gh-action-cleanup-artifactory.sh + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + FROM_DATE: ${{ github.event.inputs.from_date || '30 days ago' }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..910b2ac47 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,110 @@ +name: Release +on: + workflow_dispatch: + inputs: + release_version: + description: 'version to be released' + required: true + default: '' + + +defaults: + run: + shell: bash + +jobs: + release: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + with: + # Disabling shallow clone to access git history (specially tags for comparing) + fetch-depth: 0 + token: ${{ secrets.YSTIA_BOT_TOKEN }} + - name: Configure Git user + run: | + git config user.email "ystiabot@users.noreply.github.com" + git config user.name "@YstiaBot" + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Install dependencies + run: pip install semantic_version + + - name: Tag and push a release + id: release + run: | + ./build/release.sh -v "${{ github.event.inputs.release_version }}" + read -r major minor patch prerelease build <<< $(python -c "import semantic_version; v = semantic_version.Version('${{ github.event.inputs.release_version }}'); print(v.major, v.minor, v.patch, '.'.join(v.prerelease), '.'.join(v.build));") + if [[ -z "${prerelease}" ]] ; then + echo "PRERELEASE=false" >> $GITHUB_ENV + else + echo "PRERELEASE=true" >> $GITHUB_ENV + fi + tagName="v${{ github.event.inputs.release_version }}" + echo 
"TAG_NAME=${tagName}" >> $GITHUB_ENV + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Checkout tag + uses: actions/checkout@v2 + with: + ref: ${{ env.TAG_NAME }} + token: ${{ secrets.YSTIA_BOT_TOKEN }} + + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + + - name: Install sphinx dependencies + run: | + pip install --user --upgrade sphinx==1.8.1 semantic-version requests urllib3[secure]==1.23 Pygments>=2.7.1 + pip install -r doc/requirements.txt + sudo apt-get install -y jq \ + latexmk \ + texlive-binaries \ + texlive-fonts-recommended \ + texlive-latex-base \ + texlive-latex-extra \ + texlive-latex-recommended + + - name: Setup go + uses: actions/setup-go@v1 + with: + go-version: "1" + + - name: Generate distribution and changelog + run: | + set -euo pipefail + make tools + SKIP_TESTS=1 make dist + # Generate changelog + awk '{f=1} f{ if (/^## / && i++>=1) exit; else print $0}' CHANGELOG.md | tee CHANGELOG-for-version.md + + + - name: Create or Update Github Release draft + id: update_release + uses: loicalbertin/action-gh-release@080e2e752ac77817dcfd2e8809873bdc24817584 + with: + tag_name: ${{ env.TAG_NAME }} + body_path: CHANGELOG-for-version.md + name: ${{ env.TAG_NAME }} + prerelease: ${{ env.PRERELEASE }} + draft: true + files: | + dist/yorc-*.tgz + dist/yorc-server*-distrib.zip + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Publish Github Release + uses: eregon/publish-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + release_id: ${{ steps.update_release.outputs.id }} diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 6b1e3fd36..000000000 --- a/.travis.yml +++ /dev/null @@ -1,65 +0,0 @@ -language: go -go: - - stable - -dist: xenial -sudo: required - -services: - - docker - -env: - global: - # Make GO111MODULE=on globally for codecov - - GO111MODULE=on - # Docker hub key (DOCKER_HUB_USER) - - secure: 
j6S26JA04B+6bwr6detZauFu/UVPwmPhWvEcbVfNhvnNX3YKma8D2X9hvE6jDUOKeapiE/UsCHVgg8GFyPGu4M0ixUXUmu7HPNHFQ2x4a0tDgTyu0p9im5fPeJv4qbV+ORuE4Kvg54ZaZec3iBN94MegPVISpme86pMKdJui0cMEy/YPMUd1sh13h95WkESAshJd0n5AHO4xwD0NJjCK2waoA6ygvcwXhIpfTc9HsNgs6S8WVpRAfjkTj7+VGjoXTqov5g9d6SxBYvcJI/iBa0KgY7LhBRiC6AsE2WowVZeGDpnvL1nFsA2DQKcCC6Tv4VlP7jcyZMWcnELn8n6ucsHzT52bEhGu60KtZc5eohqW/1Ejb0riHvEJQMy875keBwIjuzERgxRaKEVGPQ3nZFu/rjEjHZSm0qi9+usb/vBcdKeu9fgjBeEDIkL5bE632P379VLI1bzzQS+dt+sPw1gqqDP+FJ1nED5r3g4zSPh4WqXAz4ohrX8CCqbxpy+Wjirer/yc7S+Bqx/iI2gKjdb6kwy1xePobFJyAMlOyHrwh5Tb6K23wJqVMoHL5mqG0Ent/3iXmvRS6PxsxwNPscyATUbazyGwXMj4oMWzcfs7yf0ADy/nbZLYUPVfHuysRZ6LAjptn+V8bdJ9A0rQmFUFDcP6VmMidxRlTcHR8yA= - # Docker hub key (DOCKER_HUB_PASS) - - secure: "ZNF+PdDD1D3RJN6/ENFt9jZL+QQp1s9sdOMyQBRPPmc/oBOh0j6ELBpvQTgR7JnhzlUyYvfAk1IXPTJPEwoQ++WmAumdGUu6mEqemN//CggpYNtDRc3vnQfRrfWK5M9pVtODSwtMLT0WENkMURCUPBmlGcISjdXrHAAsLiiJxDalM6rBiPDnDX/yawvfJh2mNVkqi+zwJWGdEZibcNdU8Bdz5vU5eJJFxlR+ZYRm2ccLE/sgvArofovcv32MSy9DnvI15Q8kolTS8+3k6eOTvckijgJizYOhRyAZQRrLeF3taYw5GtTRwjs2UFAkxgbx/NrgSHEe/e+31k5Q4sUFJJr3iIilMC+VCVpIDnFbDXJJR8VTXLG3Xjb4NZycGU5wo2PUGXedR+vR+NUVw2E2LH6L1aJBse4lHba1yRh+255jaiebg5iocOnsiMfRtboLKDy0uNoXaAsI/wGNSlxGncXS9euHYPTXp595TKUPyfEUwF5NVvihHAUJOagr4c/bbLe6EHbmY7XFJHdqX/jxul13RQjpEHmVC6ZlpPlRqWIZVrfG1B4SF/m6NVVLc+8dz1xPb2FWmkjiijtElQhAqbzImW1GPf1QiX3jlHWDxE3DdUdy4kbh10zyh66gqWRnfkS3pVJaB2wLHlfZc7GU+eFgs78b3yk9KZivPOaTAtU=" - # ARTIFACTORY_API_KEY - - secure: 
"Zt5jCb10K4mnjqL0Bx5pR+qvexrLDz3nIk/IaOjL+QfHQI5w3uXv9qc+ultgsYpdT4ORmN235kJia294UTa+N/F/aJwCS4y+gBlssDkABz0D2/dSSK9iMjdUxyv2lg4NT6yUzefYI+nWziJqhVDg3tgcxy61ENn2c+AWHeEYrRyfgbuvehuyl56ZKggJe1WJkaHSRd1UVc/7i/pPo2nomaaWF8jjDxcqlIiV1onqVzb5p/79psuFZyHKH9Xr59lNsLDPEunyXQu0U8Y3ivV4gCL2+GyiZz7INpPemu5IQtA2luViepJcyRWAZOitL+vTScJgeV4k/OKZWHtNAspH4aiRFc3xVV0xe8itoHvnRN4IMJtEAqD4ODZ1XEyGovUkozoLLTfM20fXfLujLT+g9EjtmSWFap5CdvELs2foPYyiKjx2tB35Km/Pg9DgTMn5lsjMNPOm9N6QhHYCrMm095CceYHFZsfqIfONLJSd5RssnNrScsqGbS5BqKuqza8ffH6BvdoBwJauJwGvSBakaOpEtkCs+Sl4RVCtajTPMG/KgatCUk0HJELIUJyj83LpTUA8K85WcTh8dYb/0K2Lo5TU4Hd9pa8u3KbHtf3CVEmxIaece7rDiwuzQK0DTB4E599u5FJiG7tPaMG+Hvll98cn1czcqlYSU6jgn/nTeC8=" - -install: - - make tools - - pip install --user --upgrade sphinx==1.8.1 semantic-version requests urllib3[secure]==1.23 - -script: - # Test and compute coverage - - TESTARGS="-coverprofile coverage-sonar.out -coverpkg=./..." make json-test - - ./build/travis-sonar.sh - # Generate distribution - - SKIP_TESTS=1 make dist - - "./docker_build.sh" - - echo "Deploying artifacts" - - bash "./build/deploy_artifactory.sh" - -before_deploy: - - "bash ./build/pre_bintray_release.sh" - -deploy: - - provider: bintray - file: "build/bintray_release.json" - user: "loicalbertin" - key: - secure: "qC48VD6cU2jxx1Px+jsu3s3D7Qz9dAMSrNwLEoV4h+qG0TYR/2qLuHQIWj4amO4aLXjDQKxg8YcUb5Z63a8AKDoaXH5kiWoYt3+zNj0Sv5C9kv+/DqA4T/G63mOZXxbDkD/3WYxslZhnB4R4/qMhK+yyZNRp7BmuO1IDj320fTyZqBd4ZoHM29ihOHIr/+GRENXY+VSHFvyiZ7JMOiUwWVyR/8miBaNLblQqU5vTy0HdJmuJD4jlNaS68pnvuhSnIGuVHuYbdo9BOHemw1XYCt7T3te8C1CkMk9eGhuBlhxlFDZeKInqioaquoD7dcz7kw1tvfD5kM/XrZ4fw+E2yOP3ZY9bIkHzh9kFh+mknT3VHQ7K8BWT5OPHLoFmTtdld9q96PRVvBQMiBssckBqnxD/MFiym/498L4nN7R6E4yydkHeH9RWkPn7LMjfGJl/GbkThGXg4aViNbs0a9XpVGl+TcKKY7zZdh+Wj/OvEHZZbpmm44EcnMcyE04AMyhgVqEipB61FhIMDXWwlQRJX0wF+YKMJo0BfDjU2YEeNYL87bhslQQf4z46ZHL9EAAaqq74r5KI6ivvLK8hYpYRSkS0l3DOmNunbnfw38MxHNTZUMer7pD8quRhdHCBiSwPbj/FIKeY4/Ujt66evkASqEkKR20y1MYmE3N1VI0DusE=" - skip_cleanup: true - on: - tags: 
true - -addons: - apt: - packages: - - jq - - latexmk - - texlive-binaries - - texlive-fonts-recommended - - texlive-latex-base - - texlive-latex-extra - - texlive-latex-recommended - - sonarcloud: - organization: "ystia" - -cache: - directories: - - '$HOME/.sonar/cache' diff --git a/CHANGELOG.md b/CHANGELOG.md index b18520a44..ad89d3a71 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,26 @@ ## UNRELEASED +### ENHANCEMENTS + +* Add the ability to define OpenStack Compute Instance user_data ([GH-735](https://github.com/ystia/yorc/issues/735)) + +### BUG FIXES + +* Workflow with asynchronous action never stops after another step failure ([GH-733](https://github.com/ystia/yorc/issues/733)) + +## 4.2.0-milestone.1 (May 06, 2021) + +### ENHANCEMENTS + +* Support Alien4Cloud 3.2.0 ([GH-723](https://github.com/ystia/yorc/issues/723)) + +### BUG FIXES + +* Can't bootstrap Yorc as BinTray is now unavailable ([GH-727](https://github.com/ystia/yorc/issues/727)) + +## 4.1.0 (April 11, 2021) + ### DEPENDENCIES * The orchestrator requires now at least Ansible 2.10.0 (upgrade from 2.7.9 introduced in [GH-648](https://github.com/ystia/yorc/issues/648)) @@ -20,6 +40,7 @@ ### ENHANCEMENTS +* Alllow shards and replicas configuration for Elastic storage ([GH-722](https://github.com/ystia/yorc/issues/722)) * Add a new synchronous purge API endpoint ([GH-707](https://github.com/ystia/yorc/issues/707)) * Should be able to specify the type of volume when creating an openstack instance ([GH-703](https://github.com/ystia/yorc/issues/703)) * Support ssh connection retries ([GH-688](https://github.com/ystia/yorc/issues/688)) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6e008829c..a4da98ca7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -96,3 +96,17 @@ If you are having trouble getting into the mood of idiomatic Go, we recommend reading through [Effective Go](https://golang.org/doc/effective_go.html). The [Go Blog](https://blog.golang.org) is also a great resource. 
Drinking the kool-aid is a lot easier than going thirsty. + +## Release Yorc + +Releases are now handled by a [GitHub Action Workflow](https://github.com/ystia/yorc/actions/workflows/release.yml). +Contributors with `members` role on this project can trigger this workflow. It requires as an input the release version +in semver format (leading 'v' should be omitted). + +This workflow will: + +1. call the `build/release.sh` script +2. checkout the generated tag +3. generate the distribution and a changelog for this version +4. create a GH Release and upload assets +5. publish the GH Release diff --git a/README.md b/README.md index 517e359cb..1d5386548 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Ystia Orchestrator -[![Download](https://api.bintray.com/packages/ystia/yorc-engine/distributions/images/download.svg?version=4.0.0)](https://bintray.com/ystia/yorc-engine/distributions/4.0.0/link) [![Build Status](https://travis-ci.org/ystia/yorc.svg?branch=release/4.0)](https://travis-ci.org/ystia/yorc) [![Documentation Status](https://readthedocs.org/projects/yorc/badge/?version=latest)](http://yorc.readthedocs.io/en/latest/?badge=latest) [![Go Report Card](https://goreportcard.com/badge/github.com/ystia/yorc)](https://goreportcard.com/report/github.com/ystia/yorc) [![license](https://img.shields.io/github/license/ystia/yorc.svg)](https://github.com/ystia/yorc/blob/develop/LICENSE) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com) [![Docker Pulls](https://img.shields.io/docker/pulls/ystia/yorc.svg?style=flat)](https://hub.docker.com/r/ystia/yorc) [![Join the chat at https://gitter.im/ystia/yorc](https://badges.gitter.im/ystia/yorc.svg)](https://gitter.im/ystia/yorc?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Download](https://img.shields.io/badge/download-v4.2.0--milestone.1-blue)](https://github.com/ystia/yorc/releases/tag/v4.2.0-milestone.1) [![Build 
Status](https://github.com/ystia/yorc/actions/workflows/build.yml/badge.svg?branch=develop)](https://github.com/ystia/yorc/actions) [![Documentation Status](https://readthedocs.org/projects/yorc/badge/?version=latest)](http://yorc.readthedocs.io/en/latest/?badge=latest) [![Go Report Card](https://goreportcard.com/badge/github.com/ystia/yorc)](https://goreportcard.com/report/github.com/ystia/yorc) [![license](https://img.shields.io/github/license/ystia/yorc.svg)](https://github.com/ystia/yorc/blob/develop/LICENSE) [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat-square)](http://makeapullrequest.com) [![Docker Pulls](https://img.shields.io/docker/pulls/ystia/yorc.svg?style=flat)](https://hub.docker.com/r/ystia/yorc) [![Join the chat at https://gitter.im/ystia/yorc](https://badges.gitter.im/ystia/yorc.svg)](https://gitter.im/ystia/yorc?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

@@ -24,9 +24,9 @@ Yorc is now the official orchestrator for Alien4Cloud and Alien4Cloud distributi ## How to download the Ystia Orchestrator -Yorc releases can be downloaded from our [BinTray account](https://bintray.com/ystia/yorc-engine/distributions). +Yorc releases can be downloaded from our [GitHub Release](https://github.com/ystia/yorc/releases). -Grab the [latest release here](https://bintray.com/ystia/yorc-engine/distributions/_latestVersion). +Grab the [latest release here](https://github.com/ystia/yorc/releases/latest). Docker images could be found on [Docker Hub](https://hub.docker.com/r/ystia/yorc). diff --git a/SECURITY.md b/SECURITY.md index 03bf4c004..1184de535 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -6,8 +6,9 @@ This section describes which versions of Yorc are currently being supported with | Version | Supported | | ------- | ------------------ | +| 4.1.x | :white_check_mark: | | 4.0.x | :white_check_mark: | -| 3.2.x | :white_check_mark: | +| 3.2.x | :x: | | < 3.2 | :x: | ## Vulnerabilities in Yorc diff --git a/build/bintray_release.json.tpl b/build/bintray_release.json.tpl deleted file mode 100644 index 6f8db016a..000000000 --- a/build/bintray_release.json.tpl +++ /dev/null @@ -1,36 +0,0 @@ -{ - "package": { - "name": "distributions", - "repo": "yorc-engine", - "subject": "ystia", - "desc": "Ystia Orchestrator distributions and documentations", - "website_url": "https://ystia.github.io/", - "issue_tracker_url": "https://github.com/ystia/yorc/issues", - "vcs_url": "https://github.com/ystia/yorc", - "github_use_tag_release_notes": false, - "github_release_notes_file": "CHANGELOG.md", - "licenses": ["Apache-2.0"], - "labels": [], - "public_download_numbers": true, - "public_stats": false, - "attributes": [] - }, - - "version": { - "name": "${VERSION_NAME}", - "desc": "Ystia Orchestrator distributions and documentations ${VERSION_NAME}", - "released": "${RELEASE_DATE}", - "vcs_tag": "${TAG_NAME}", - "attributes": [], - "gpgSign": false - }, - - 
"files": - [ - {"includePattern": "dist/(yorc-.*\\.tgz)", "uploadPattern": "${VERSION_NAME}/$1"}, - {"includePattern": "dist/(yorc-server-.*-distrib\\.zip)", "uploadPattern": "${VERSION_NAME}/$1"}, - {"includePattern": "pkg/(docker-ystia-yorc-.*\\.tgz)", "uploadPattern": "${VERSION_NAME}/$1"} - ], - "publish": true -} - diff --git a/build/deploy_artifactory.sh b/build/deploy_artifactory.sh index d368d40ab..7826dc157 100755 --- a/build/deploy_artifactory.sh +++ b/build/deploy_artifactory.sh @@ -13,43 +13,36 @@ # See the License for the specific language governing permissions and # limitations under the License. +set -euo pipefail scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" rootDir=$(readlink -f "${scriptDir}/..") -if [[ "${TRAVIS}" != "true" ]] ; then +if [[ "${GITHUB_ACTIONS}" != "true" ]] ; then echo "This script is designed to publish CI build artifacts" exit 0 fi -if [[ "${DISABLE_ARTIFACTORY}" == "true" ]] ; then +if [[ "${DISABLE_ARTIFACTORY:=false}" == "true" ]] ; then echo "Skipping Artifactory publication" exit 0 fi -if [[ "${TRAVIS_PULL_REQUEST}" != "false" ]] && [[ -z "${ARTIFACTORY_API_KEY}" ]] ; then - echo "Building an external pull request, artifactory publication is disabled" - exit 0 -fi - -if [[ -n "${TRAVIS_TAG}" ]] ; then - deploy_path="yorc-engine-product-ystia-dist/ystia/yorc/dist/${TRAVIS_TAG}/{1}" -elif [[ "${TRAVIS_PULL_REQUEST}" != "false" ]]; then - deploy_path="yorc-bin-dev-local/ystia/yorc/dist/PR-${TRAVIS_PULL_REQUEST}/{1}" +ref="${GITHUB_REF#refs/*/}" +if [[ "${GITHUB_REF}" == refs/tags/* ]] ; then + deploy_path="yorc-engine-product-ystia-dist/ystia/yorc/dist/${ref}/{1}" +elif [[ "${GITHUB_REF}" == refs/pull/* ]] ; then + # For PRs ref is different + ref=$(echo "${GITHUB_REF}" | awk -F / '{print $3;}') + deploy_path="yorc-bin-dev-local/ystia/yorc/dist/PR-${ref}/{1}" else - deploy_path="yorc-bin-dev-local/ystia/yorc/dist/${TRAVIS_BRANCH}/{1}" + deploy_path="yorc-bin-dev-local/ystia/yorc/dist/${ref}/{1}" fi -curl -fL 
https://getcli.jfrog.io | sh - -build_name="yorc-travis-ci" - -# Disabling interactive mode as config ask for a question about client certificates we do not use -./jfrog rt c --interactive=false --apikey="${ARTIFACTORY_API_KEY}" --user=travis --url=https://ystia.jfrog.io/ystia ystia +cd "${rootDir}" -./jfrog rt u --build-name="${build_name}" --build-number="${TRAVIS_BUILD_NUMBER}" --props="artifactory.licenses=Apache-2.0" --regexp "dist/(yorc-.*.tgz)" "${deploy_path}" -./jfrog rt u --build-name="${build_name}" --build-number="${TRAVIS_BUILD_NUMBER}" --props="artifactory.licenses=Apache-2.0" --regexp "dist/(yorc-server.*-distrib.zip)" "${deploy_path}" -# Do not publish environment variables as it may expose some secrets -#./jfrog rt bce "${build_name}" "${TRAVIS_BUILD_NUMBER}" -./jfrog rt bag "${build_name}" "${TRAVIS_BUILD_NUMBER}" "${rootDir}" -./jfrog rt bp "${build_name}" "${TRAVIS_BUILD_NUMBER}" +jfrog rt u --target-props="artifactory.licenses=Apache-2.0" --regexp "dist/(yorc-.*.tgz)" "${deploy_path}" +jfrog rt u --target-props="artifactory.licenses=Apache-2.0" --regexp "dist/(yorc-server.*-distrib.zip)" "${deploy_path}" +jfrog rt bce +jfrog rt bag +jfrog rt bp diff --git a/build/gh-action-cleanup-artifactory.sh b/build/gh-action-cleanup-artifactory.sh new file mode 100755 index 000000000..ca59e9411 --- /dev/null +++ b/build/gh-action-cleanup-artifactory.sh @@ -0,0 +1,58 @@ +#!/usr/bin/env bash +# Copyright 2019 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +set -euo pipefail + +comp_date=$(date --date="${FROM_DATE:=14 days ago}" +"%Y-%m-%dT%H:%M:%S.000Z") + +local_bin_dist_path="yorc-bin-dev-local/ystia/yorc/dist" +local_docker_path="yorc-docker-dev-local/ystia/yorc" + +bin_paths=$(jfrog rt s "${local_bin_dist_path}/*/yorc-*.tgz" --limit 0 | jq -r ".[]| [.modified, .path] | @tsv" | sed -e "s@\(${local_bin_dist_path}/\(.*\)\)/yorc-.*\.tgz@\2\t\1@g") +docker_paths=$(jfrog rt s "${local_docker_path}/PR-*/manifest.json" --limit 0 | jq -r ".[]| [.modified, .path] | @tsv" | sed -e "s@\(${local_docker_path}/\(PR-.*\)\)/manifest.json@\2\t\1@g") + +all_paths=$(echo -e "${bin_paths}\n${docker_paths}" | sort) + +function get_pr_state() { + gh pr view "${1}" --json state | jq -r ".state" +} + +function does_branch_exit() { + gh api --silent "/repos/:owner/:repo/branches/${1}" 2> /dev/null + return $? +} + +function delete_artifactory_path() { + jfrog rt del --quiet "${1}" || echo "failed to delete ${1}" +} + +echo "${all_paths}" | while read line ; do + item_date=$(echo "$line" | awk '{print $1}') + if [[ "${item_date}" > "${comp_date}" ]] ; then + continue + fi + ref=$(echo "${line}" | awk -F '\t' '{print $2}') + artifact_path=$(echo "${line}" | awk -F '\t' '{print $3}') + if [[ "${ref}" == PR-* ]] ; then + if [[ "$(get_pr_state "${ref##*PR-}")" != "OPEN" ]] ; then + delete_artifactory_path "${artifact_path}" + fi + else + if ! does_branch_exit "${ref}" ; then + delete_artifactory_path "${artifact_path}" + fi + fi +done diff --git a/build/pre_bintray_release.sh b/build/pre_bintray_release.sh deleted file mode 100755 index f6ce4ba04..000000000 --- a/build/pre_bintray_release.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -e - -scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -if [[ -z "${TRAVIS_TAG}" ]]; then - echo "not a travis release build, no need to publish it on bintray. Exiting." - exit 0 -fi - -TAG_NAME="${TRAVIS_TAG}" -VERSION_NAME="${TAG_NAME#v*}" -RELEASE_DATE="$(git tag -l --format='%(creatordate:short)' "${TAG_NAME}")" - -export TAG_NAME VERSION_NAME RELEASE_DATE -envsubst < "${scriptDir}/bintray_release.json.tpl" > "${scriptDir}/bintray_release.json" - -echo "Resulting bintray release spec" -cat "${scriptDir}/bintray_release.json" diff --git a/build/release.sh b/build/release.sh index 3d7fd8b02..e88a4d2af 100755 --- a/build/release.sh +++ b/build/release.sh @@ -14,7 +14,7 @@ # limitations under the License. 
#set -x -set -e +set -eo pipefail scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" componentVersionName="yorc_version" @@ -56,13 +56,13 @@ if [[ -z "${version}" ]]; then exit 1 fi -if [[ "$(python -c "import semantic_version; print semantic_version.validate('${version}')" )" != "True" ]]; then +if [[ "$(python -c "import semantic_version; print(semantic_version.validate('${version}'))" )" != "True" ]]; then echo "Parameter -v should be a semver 2.0 compatible version (http://semver.org/)" >&2 exit 1 fi # read version -read -r major minor patch prerelease build <<< $(python -c "import semantic_version; v = semantic_version.Version('${version}'); print v.major, v.minor, v.patch, '.'.join(v.prerelease), '.'.join(v.build);") +read -r major minor patch prerelease build <<< $(python -c "import semantic_version; v = semantic_version.Version('${version}'); print(v.major, v.minor, v.patch, '.'.join(v.prerelease), '.'.join(v.build));") # Detect correct supporting branch branch=$(git branch --list -r "*/release/${major}.${minor}") @@ -79,7 +79,7 @@ if [[ -e versions.yaml ]]; then currentVersion=$(grep "${componentVersionName}:" versions.yaml | head -1 | sed -e 's/^[^:]\+:\s*\(.*\)\s*$/\1/') # Change -SNAPSHOT into -0 for comparaison as a snapshot is never revelant checkVers=$(echo ${currentVersion} | sed -e "s/-SNAPSHOT/-0/") - if [[ "True" != "$(python -c "import semantic_version; print semantic_version.Version('${version}') >= semantic_version.Version('${checkVers}')" )" ]]; then + if [[ "True" != "$(python -c "import semantic_version; print(semantic_version.Version('${version}') >= semantic_version.Version('${checkVers}'))" )" ]]; then echo "Warning: releasing version ${version} on top of branch ${branch} while its current version is ${currentVersion}" >&2 read -p "Are you sure? 
[y/N]" CONFIRM if [[ "${CONFIRM}" != "y" && "${CONFIRM}" != "Y" ]] ; then @@ -94,7 +94,7 @@ branchTag=$(git describe --abbrev=0 --tags ${branch}) || { } branchTag=$(echo $branchTag | sed -e 's/^v\(.*\)$/\1/') -if [[ "True" != "$(python -c "import semantic_version; print semantic_version.Version('${version}') > semantic_version.Version('${branchTag}')" )" ]]; then +if [[ "True" != "$(python -c "import semantic_version; print(semantic_version.Version('${version}') > semantic_version.Version('${branchTag}'))" )" ]]; then echo "Warning: releasing version ${version} on top of branch ${branch} while it contains a newer tag: ${branchTag}" >&2 read -p "Are you sure? [y/N]" CONFIRM if [[ "${CONFIRM}" != "y" && "${CONFIRM}" != "Y" ]] ; then @@ -107,7 +107,7 @@ if [[ "develop" == "${branch}" ]] && [[ -z "${prerelease}" ]]; then releaseBranch="release/${major}.${minor}" git checkout -b "${releaseBranch}" sed -i -e "s@svg?branch=[^)]*@svg?branch=${releaseBranch}@g" README.md - git commit -m "Update travis links in readme for release ${version}" README.md + git commit -m "Update CI links in readme for release ${version}" README.md fi # Now checks are passed then tag, build, release and cleanup :) @@ -115,7 +115,9 @@ cherries=() # Update changelog Release date sed -i -e "s/^## UNRELEASED.*$/## ${version} ($(LC_ALL=C date +'%B %d, %Y'))/g" CHANGELOG.md # Update readme for Release number -sed -i -e "s@download.svg?version=[^)]*@download.svg?version=${version}@g" -e "s@distributions/[^/]*/link@distributions/${version}/link@g" README.md +versionShield=$(echo "${version}" | sed -e 's/-/--/g') +sed -i -e "s@https://img.shields.io/badge/download-[^-]*-blue@https://img.shields.io/badge/download-v${versionShield}-blue@g" \ + -e "s@releases/tag/[^)]*@releases/tag/v${version}@g" README.md git commit -m "Update changelog and readme for release ${version}" CHANGELOG.md README.md cherries+=("$(git log -1 --pretty=format:"%h")") @@ -137,7 +139,7 @@ if [[ -e versions.yaml ]]; then 
nextDevelopmentVersion="" if [[ -z "${prerelease}" ]]; then # We are releasing a final version - nextDevelopmentVersion=$(python -c "import semantic_version; v=semantic_version.Version('${version}'); print v.next_patch()" ) + nextDevelopmentVersion=$(python -c "import semantic_version; v=semantic_version.Version('${version}'); print(v.next_patch())" ) nextDevelopmentVersion="${nextDevelopmentVersion}-SNAPSHOT" else # in prerelease revert to version minus prerelease plus -SNAPSHOT @@ -159,7 +161,7 @@ if [[ "develop" == "${branch}" ]] && [[ -z "${prerelease}" ]]; then if [[ -e versions.yaml ]]; then # Update version - nextDevelopmentVersion=$(python -c "import semantic_version; v=semantic_version.Version('${version}'); print v.next_minor()" ) + nextDevelopmentVersion=$(python -c "import semantic_version; v=semantic_version.Version('${version}'); print(v.next_minor())" ) nextDevelopmentVersion="${nextDevelopmentVersion}-SNAPSHOT" sed -i -e "/${componentVersionName}: /c${componentVersionName}: ${nextDevelopmentVersion}" versions.yaml git commit -m "Prepare for next development cycle ${nextDevelopmentVersion}" versions.yaml @@ -168,12 +170,12 @@ fi if [[ -z "${prerelease}" ]]; then # Merge on master only final version - masterTag=$(git describe --abbrev=0 --tags master) || { + masterTag=$(git describe --abbrev=0 --tags origin/master) || { masterTag="v0.0.0" } masterTag=$(echo ${masterTag} | sed -e 's/^v\(.*\)$/\1/') - if [[ "True" == "$(python -c "import semantic_version; print semantic_version.Version('${version}') > semantic_version.Version('${masterTag}')" )" ]]; then + if [[ "True" == "$(python -c "import semantic_version; print(semantic_version.Version('${version}') > semantic_version.Version('${masterTag}'))" )" ]]; then # We should merge the tag to master as it is our highest release git checkout master git merge --no-ff "v${version}" -X theirs -m "merging latest tag v${version} into master" || { diff --git a/build/travis-sonar.sh b/build/travis-sonar.sh deleted 
file mode 100755 index 766d14e72..000000000 --- a/build/travis-sonar.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -# Copyright 2019 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -eo pipefail - -scriptDir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - -if [[ -z "${SONAR_TOKEN}" ]] ; then - echo "No sonar token detected, we are probably building an external PR, lets skip sonar publication..." - exit 0 -fi - -cd "${scriptDir}/.." 
|| { echo "failed to move to yorc directory ${scriptDir}/.."; exit 1; } -sed -i -e "s@$(go list)@github.com/ystia/yorc@g" coverage-sonar.out -git fetch --no-tags origin "+refs/heads/develop:refs/remotes/origin/develop" "+refs/heads/release/*:refs/remotes/origin/release/*" -git fetch --unshallow --quiet -sonar-scanner --define "sonar.projectVersion=$(grep "yorc_version" versions.yaml | awk '{print $2}')" diff --git a/commands/bootstrap/inputs.go b/commands/bootstrap/inputs.go index 96c2b8112..afb51238b 100644 --- a/commands/bootstrap/inputs.go +++ b/commands/bootstrap/inputs.go @@ -178,11 +178,11 @@ var ( jdkDefaultInputs = map[string]defaultInputType{ "jdk.download_url": defaultInputType{ description: "Java Development Kit download URL", - value: "https://api.adoptopenjdk.net/v2/binary/releases/openjdk8?openjdk_impl=hotspot&os=linux&arch=x64&release=jdk8u212-b03&type=jdk", + value: "https://github.com/AdoptOpenJDK/openjdk15-binaries/releases/download/jdk-15.0.2%2B7/OpenJDK15U-jdk_x64_linux_hotspot_15.0.2_7.tar.gz", }, "jdk.version": defaultInputType{ - description: "Java Development Kit version", - value: "1.8.0-212-b03", + description: "OpenJDK version", + value: "15.0.2", }, } @@ -1552,7 +1552,7 @@ func getYorcDownloadURL() string { yorcVersion) } else { downloadURL = fmt.Sprintf( - "https://dl.bintray.com/ystia/yorc-engine/%s/yorc-%s.tgz", + "https://github.com/ystia/yorc/releases/download/v%s/yorc-%s.tgz", yorcVersion, yorcVersion) } return downloadURL diff --git a/commands/bootstrap/resources/topology/tosca_types.zip b/commands/bootstrap/resources/topology/tosca_types.zip index e2089ab18..71620a04c 100644 Binary files a/commands/bootstrap/resources/topology/tosca_types.zip and b/commands/bootstrap/resources/topology/tosca_types.zip differ diff --git a/data/tosca/yorc-openstack-types.yml b/data/tosca/yorc-openstack-types.yml index 0c0f0ef13..eda6cd3f7 100644 --- a/data/tosca/yorc-openstack-types.yml +++ b/data/tosca/yorc-openstack-types.yml @@ -3,7 +3,7 @@ 
tosca_definitions_version: yorc_tosca_simple_yaml_1_0 metadata: template_name: yorc-openstack-types template_author: yorc - template_version: 1.2.1 + template_version: 1.3.0 imports: - yorc: @@ -144,6 +144,10 @@ node_types: entry_schema: type: string required: false + user_data: + type: string + description: User data to provide when launching the instance + required: false requirements: - group: capability: yorc.capabilities.Group diff --git a/doc/conf.py b/doc/conf.py index a81914d26..441814b37 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -53,19 +53,19 @@ master_doc = 'index' # General information about the project. -project = u'Yorc' +project = 'Yorc' copyright=str(date.today().year) -copyright += u', Atos BDS R&D' -author = u'Atos BDS R&D' +copyright += ', Atos BDS R&D' +author = 'Atos BDS R&D' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'T.B.D' +version = 'T.B.D' # The full version, including alpha/beta/rc tags. -release = u'T.B.D' +release = 'T.B.D' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -242,8 +242,8 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'Yorc.tex', u'Yorc Documentation', - u'Atos BDS R\\&D', 'manual'), + (master_doc, 'Yorc.tex', 'Yorc Documentation', + 'Atos BDS R\\&D', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of @@ -272,7 +272,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
man_pages = [ - (master_doc, 'yorc', u'Yorc Documentation', + (master_doc, 'yorc', 'Yorc Documentation', [author], 1) ] @@ -286,7 +286,7 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'Yorc', u'Yorc Documentation', + (master_doc, 'Yorc', 'Yorc Documentation', author, 'Yorc', 'One line description of project.', 'Miscellaneous'), ] @@ -309,14 +309,15 @@ if os.path.exists('../versions.yaml'): - versions_file = yaml.load(file('../versions.yaml', 'r')) - rst_epilog = '' - for product in versions_file: - rst_epilog += ".. |" + product + "| replace:: " + versions_file[product] + "\n" - m = re.search(r"^(([0-9.]+)[^.]+)$", versions_file['yorc_version']) - if m: - release = m.group(1) - version = m.group(2) + with open('../versions.yaml', 'r') as file: + versions_file = yaml.load(file, Loader=yaml.SafeLoader) + rst_epilog = '' + for product in versions_file: + rst_epilog += ".. |" + product + "| replace:: " + versions_file[product] + "\n" + m = re.search(r"^(([0-9.]+)[^.]+)$", versions_file['yorc_version']) + if m: + release = m.group(1) + version = m.group(2) # print rst_epilog # print release diff --git a/doc/configuration.rst b/doc/configuration.rst index 774e5b8b9..71ba55982 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1459,7 +1459,7 @@ This store ables you to store ``Log`` s and ``Event`` s in elasticsearch. 
| ``key_path`` | path to a PEM encoded private key file when TLS | string | no | | | | is activated for ES | | | | +-----------------------------+----------------------------------------------------+-----------+------------------+-----------------+ -| ``index_prefix`` | indexes used by yorc can be prefixed | string | no | yorc_ | +| ``index_prefix`` | indexes used by yorc can be prefixed | string | no | yorc\_ | +-----------------------------+----------------------------------------------------+-----------+------------------+-----------------+ | ``es_query_period`` | when querying logs and event, we wait this timeout | duration | no | 4s | | | before each request when it returns nothing (until | | | | @@ -1488,6 +1488,10 @@ This store ables you to store ``Log`` s and ``Event`` s in elasticsearch. +-----------------------------+----------------------------------------------------+-----------+------------------+-----------------+ | ``trace_events`` | to trace events & logs when sent (for debug only) | bool | no | false | +-----------------------------+----------------------------------------------------+-----------+------------------+-----------------+ +| ``initial_shards`` | number of shards used to initialize indices | int64 | no | | ++-----------------------------+----------------------------------------------------+-----------+------------------+-----------------+ +| ``initial_replicas`` | number of replicas used to initialize indices | int64 | no | | ++-----------------------------+----------------------------------------------------+-----------+------------------+-----------------+ Vault configuration diff --git a/doc/requirements.txt b/doc/requirements.txt index 0a7abe077..c0822dc38 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1 +1 @@ -PyYAML>=4.2b1,<5.0 +PyYAML>=5.4 diff --git a/docker_build.sh b/docker_build.sh index f767a15ee..3fb8b749b 100755 --- a/docker_build.sh +++ b/docker_build.sh @@ -1,12 +1,12 @@ #!/usr/bin/env bash # Copyright 
2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France. -# +# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -38,22 +38,28 @@ tf_aws_plugin_version=$(grep tf_aws_plugin_version ${script_dir}/versions.yaml | tf_openstack_plugin_version=$(grep tf_openstack_plugin_version ${script_dir}/versions.yaml | awk '{print $2}') tf_google_plugin_version=$(grep tf_google_plugin_version ${script_dir}/versions.yaml | awk '{print $2}') -if [[ "${TRAVIS}" == "true" ]]; then - if [[ "${TRAVIS_PULL_REQUEST}" == "false" ]] ; then - if [[ -n "${TRAVIS_TAG}" ]] ; then - DOCKER_TAG="$(echo "${TRAVIS_TAG}" | sed -e 's/^v\(.*\)$/\1/')" - else - case ${TRAVIS_BRANCH} in - develop) - DOCKER_TAG="latest";; - *) - # Do not build a container for other branches - echo "No container is built for other branches than develop." - exit 0;; - esac - fi - else - DOCKER_TAG="PR-${TRAVIS_PULL_REQUEST}" +CI_TAG="" +CI_PULL_REQUEST="" +CI_BRANCH="" + +if [[ "${GITHUB_ACTIONS}" == "true" ]] ; then + ref="${GITHUB_REF#refs/*/}" + if [[ "${GITHUB_REF}" == refs/tags/* ]] ; then + CI_TAG="${ref}" + DOCKER_TAG="$(echo "${CI_TAG}" | sed -e 's/^v\(.*\)$/\1/')" + elif [[ "${GITHUB_REF}" == refs/pull/* ]] ; then + CI_PULL_REQUEST="$(echo "${GITHUB_REF}" | awk -F / '{print $3;}')" + DOCKER_TAG="PR-${CI_PULL_REQUEST}" + else + CI_BRANCH="${ref}" + case ${CI_BRANCH} in + develop) + DOCKER_TAG="latest";; + *) + # Do not build a container for other branches + echo "No container is built for other branches than develop." 
+ exit 0;; + esac fi fi @@ -71,18 +77,12 @@ docker build ${BUILD_ARGS} \ --build-arg "TF_GOOGLE_PLUGIN_VERSION=${tf_google_plugin_version}" \ -t "ystia/yorc:${DOCKER_TAG:-latest}" . -if [[ "${TRAVIS}" == "true" ]]; then +if [[ "${GITHUB_ACTIONS}" == "true" ]]; then docker save "ystia/yorc:${DOCKER_TAG:-latest}" | gzip > docker-ystia-yorc-${DOCKER_TAG:-latest}.tgz ls -lh docker-ystia-yorc-${DOCKER_TAG:-latest}.tgz - if [[ "${TRAVIS_PULL_REQUEST}" != "false" ]] && [[ -z "${ARTIFACTORY_API_KEY}" ]] ; then - echo "Building an external pull request, artifactory publication is disabled" - exit 0 - fi - - if [[ -n "${TRAVIS_TAG}" ]] && [[ "${DOCKER_TAG}" != *"-"* ]] ; then + if [[ -n "${CI_TAG}" ]] && [[ "${DOCKER_TAG}" != *"-"* ]] ; then ## Push Image to the Docker hub - docker login -u ${DOCKER_HUB_USER} -p ${DOCKER_HUB_PASS} docker push "ystia/yorc:${DOCKER_TAG:-latest}" else ## Push Image on Artifact Docker Registry @@ -91,11 +91,9 @@ if [[ "${TRAVIS}" == "true" ]]; then exit 0 fi docker tag "ystia/yorc:${DOCKER_TAG:-latest}" "${artifactory_docker_registry}/${artifactory_docker_repo}:${DOCKER_TAG:-latest}" - curl -fL https://getcli.jfrog.io | sh - build_name="yorc-travis-ci" - ./jfrog rt c --interactive=false --user=travis --apikey="${ARTIFACTORY_API_KEY}" --url=https://ystia.jfrog.io/ystia ystia - ./jfrog rt docker-push --build-name="${build_name}" --build-number="${TRAVIS_BUILD_NUMBER}" "${artifactory_docker_registry}/${artifactory_docker_repo}:${DOCKER_TAG:-latest}" yorc-docker-dev-local - ./jfrog rt bag "${build_name}" "${TRAVIS_BUILD_NUMBER}" "${script_dir}" - ./jfrog rt bp "${build_name}" "${TRAVIS_BUILD_NUMBER}" + jfrog rt docker-push "${artifactory_docker_registry}/${artifactory_docker_repo}:${DOCKER_TAG:-latest}" yorc-docker-dev-local + jfrog rt bce + jfrog rt bag + jfrog rt bp fi fi diff --git a/prov/scheduling/scheduler/consul_test.go b/prov/scheduling/scheduler/consul_test.go index 3c77cd919..81069d889 100644 --- a/prov/scheduling/scheduler/consul_test.go 
+++ b/prov/scheduling/scheduler/consul_test.go @@ -71,5 +71,8 @@ func TestRunConsulSchedulingPackageTests(t *testing.T) { t.Run("testUnregisterAction", func(t *testing.T) { testUnregisterAction(t, client) }) + t.Run("testUpdateActionData", func(t *testing.T) { + testUpdateActionData(t, client) + }) }) } diff --git a/prov/scheduling/scheduler/scheduler_test.go b/prov/scheduling/scheduler/scheduler_test.go index f7629db87..d52fce253 100644 --- a/prov/scheduling/scheduler/scheduler_test.go +++ b/prov/scheduling/scheduler/scheduler_test.go @@ -24,6 +24,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/stretchr/testify/require" + "gotest.tools/v3/assert" "github.com/ystia/yorc/v4/events" "github.com/ystia/yorc/v4/helper/consulutil" @@ -368,3 +369,30 @@ func testUnregisterAction(t *testing.T, client *api.Client) { require.NotNil(t, kvp, "kvp is nil") require.Equal(t, "true", string(kvp.Value), "unregisterFlag is not set to true") } + +func testUpdateActionData(t *testing.T, client *api.Client) { + t.Parallel() + deploymentID := "dep-" + t.Name() + ti := 1 * time.Second + actionType := "test-action" + action := &prov.Action{ActionType: actionType, Data: map[string]string{"key1": "val1", "key2": "val2", "key3": "val3"}} + id, err := scheduling.RegisterAction(client, deploymentID, ti, action) + assert.NilError(t, err, "Failed to register action") + + err = scheduling.UpdateActionData(client, id, "key2", "newVal") + assert.NilError(t, err, "Failed to update action data") + + testSched := scheduler{cc: client} + newAction, err := testSched.buildScheduledAction(id) + assert.NilError(t, err, "Failed to build action") + + val := newAction.Data["key2"] + assert.Equal(t, val, "newVal", "Unexpected value for action key updated") + + // Check the update of an unregistered action, should fail + err = testSched.unregisterAction(id) + assert.NilError(t, err, "Failed to unregister action") + + err = scheduling.UpdateActionData(client, id, "key3", "newVal") + 
assert.ErrorContains(t, err, "unregistered") +} diff --git a/prov/scheduling/scheduling.go b/prov/scheduling/scheduling.go index 770fbc37c..a5a3a26d5 100644 --- a/prov/scheduling/scheduling.go +++ b/prov/scheduling/scheduling.go @@ -22,7 +22,7 @@ import ( "github.com/hashicorp/consul/api" "github.com/pkg/errors" - "github.com/satori/go.uuid" + uuid "github.com/satori/go.uuid" "github.com/ystia/yorc/v4/helper/consulutil" "github.com/ystia/yorc/v4/log" @@ -104,7 +104,16 @@ func UnregisterAction(client *api.Client, id string) error { // UpdateActionData updates the value of a given data within an action func UpdateActionData(client *api.Client, id, key, value string) error { - //TODO check if action exists - scaKeyPath := path.Join(consulutil.SchedulingKVPrefix, "actions", id, "data", key) + // check if action still exists + actionIdPrefix := path.Join(consulutil.SchedulingKVPrefix, "actions", id) + kvp, _, err := client.KV().Get(path.Join(actionIdPrefix, "deploymentID"), nil) + if err != nil { + return err + } + if kvp == nil { + return errors.Errorf("Action with ID %s is unregistered", id) + } + + scaKeyPath := path.Join(actionIdPrefix, "data", key) return errors.Wrapf(consulutil.StoreConsulKeyAsString(scaKeyPath, value), "Failed to update data %q for action %q", key, id) } diff --git a/prov/terraform/openstack/osinstance.go b/prov/terraform/openstack/osinstance.go index e89e10139..592c01fe0 100644 --- a/prov/terraform/openstack/osinstance.go +++ b/prov/terraform/openstack/osinstance.go @@ -172,11 +172,14 @@ func generateComputeInstance(ctx context.Context, opts osInstanceOptions) (Compu if toscaVal != nil && toscaVal.RawString() != "" { err = json.Unmarshal([]byte(toscaVal.RawString()), &instance.Metadata) if err != nil { - err = errors.Wrapf(err, "Expected a map of strings for the metadata value of node %s instance %s, got: %s", + return instance, errors.Wrapf(err, "Expected a map of strings for the metadata value of node %s instance %s, got: %s", opts.nodeName, 
opts.instanceName, toscaVal.RawString()) } } + instance.UserData, err = deployments.GetStringNodeProperty(ctx, opts.deploymentID, + opts.nodeName, "user_data", false) + return instance, err } diff --git a/prov/terraform/openstack/osinstance_test.go b/prov/terraform/openstack/osinstance_test.go index ec2e668a1..276ce47c8 100644 --- a/prov/terraform/openstack/osinstance_test.go +++ b/prov/terraform/openstack/osinstance_test.go @@ -97,7 +97,7 @@ func testSimpleOSInstance(t *testing.T) { require.Len(t, compute.Metadata, 2) require.Equal(t, "firstValue", compute.Metadata["firstKey"]) require.Equal(t, "secondValue", compute.Metadata["secondKey"]) - + require.Contains(t, compute.UserData, "cloud-config") require.Len(t, compute.Provisioners, 0) require.Contains(t, infrastructure.Resource, "null_resource") require.Len(t, infrastructure.Resource["null_resource"], 1) diff --git a/prov/terraform/openstack/resources.go b/prov/terraform/openstack/resources.go index 09469c465..4e6a6371d 100644 --- a/prov/terraform/openstack/resources.go +++ b/prov/terraform/openstack/resources.go @@ -46,6 +46,7 @@ type ComputeInstance struct { KeyPair string `json:"key_pair,omitempty"` SchedulerHints SchedulerHints `json:"scheduler_hints,omitempty"` Metadata map[string]string `json:"metadata,omitempty"` + UserData string `json:"user_data,omitempty"` commons.Resource } diff --git a/prov/terraform/openstack/testdata/simpleOSInstance.yaml b/prov/terraform/openstack/testdata/simpleOSInstance.yaml index 115b361b5..b60a5cf96 100644 --- a/prov/terraform/openstack/testdata/simpleOSInstance.yaml +++ b/prov/terraform/openstack/testdata/simpleOSInstance.yaml @@ -26,6 +26,7 @@ topology_template: key_pair: yorc security_groups: openbar,default metadata: {get_input: vm_metadata} + user_data: "#cloud-config\nwrite_files:\n- content: |\n test\n owner: root:root\n path: /etc/test.txt\n permissions: '0644'" capabilities: endpoint: properties: diff --git a/sonar-project.properties b/sonar-project.properties index 
65b9b8b60..32826e779 100644 --- a/sonar-project.properties +++ b/sonar-project.properties @@ -1,3 +1,4 @@ +sonar.organization=ystia sonar.projectKey=ystia_yorc sonar.projectName=Ystia Orchestrator diff --git a/storage/internal/elastic/config.go b/storage/internal/elastic/config.go index a7c756cf1..f1bae6540 100755 --- a/storage/internal/elastic/config.go +++ b/storage/internal/elastic/config.go @@ -15,12 +15,13 @@ package elastic import ( + "reflect" + "time" + "github.com/pkg/errors" "github.com/spf13/cast" "github.com/ystia/yorc/v4/config" "github.com/ystia/yorc/v4/log" - "reflect" - "time" ) var elasticStoreConfType = reflect.TypeOf(elasticStoreConf{}) @@ -54,6 +55,10 @@ type elasticStoreConf struct { traceRequests bool `json:"trace_requests" default:"false"` // Set to true if you want to trace events & logs when sent (for debug only) traceEvents bool `json:"trace_events" default:"false"` + // Inital shards at index creation + InitialShards int `json:"initial_shards" default:"-1"` + // Initial replicas at index creation + InitialReplicas int `json:"initial_replicas" default:"-1"` } // Get the tag for this field (for internal usage only: fatal if not found !). 
@@ -108,31 +113,86 @@ func getElasticStoreConfig(yorcConfig config.Configuration, storeConfig config.S } // Define store optional / default configuration t, e = getElasticStorageConfigPropertyTag("caCertPath", "json") + if e != nil { + return + } + if storeProperties.IsSet(t) { cfg.caCertPath = storeProperties.GetString(t) } t, e = getElasticStorageConfigPropertyTag("certPath", "json") + if e != nil { + return + } + if storeProperties.IsSet(t) { cfg.certPath = storeProperties.GetString(t) } t, e = getElasticStorageConfigPropertyTag("keyPath", "json") + if e != nil { + return + } + if storeProperties.IsSet(t) { cfg.keyPath = storeProperties.GetString(t) } cfg.esForceRefresh, e = getBoolFromSettingsOrDefaults("esForceRefresh", storeProperties) + if e != nil { + return + } + t, e = getElasticStorageConfigPropertyTag("indicePrefix", "json") + if e != nil { + return + } + if storeProperties.IsSet(t) { cfg.indicePrefix = storeProperties.GetString(t) } else { cfg.indicePrefix, e = getElasticStorageConfigPropertyTag("indicePrefix", "default") + if e != nil { + return + } } cfg.esQueryPeriod, e = getDurationFromSettingsOrDefaults("esQueryPeriod", storeProperties) + if e != nil { + return + } + cfg.esRefreshWaitTimeout, e = getDurationFromSettingsOrDefaults("esRefreshWaitTimeout", storeProperties) + if e != nil { + return + } + cfg.maxBulkSize, e = getIntFromSettingsOrDefaults("maxBulkSize", storeProperties) + if e != nil { + return + } cfg.maxBulkCount, e = getIntFromSettingsOrDefaults("maxBulkCount", storeProperties) + if e != nil { + return + } + cfg.traceRequests, e = getBoolFromSettingsOrDefaults("traceRequests", storeProperties) + if e != nil { + return + } + cfg.traceEvents, e = getBoolFromSettingsOrDefaults("traceEvents", storeProperties) - // If any error have been encountered, it will be returned + if e != nil { + return + } + + cfg.InitialShards, e = getIntFromSettingsOrDefaults("InitialShards", storeProperties) + if e != nil { + return + } + + 
cfg.InitialReplicas, e = getIntFromSettingsOrDefaults("InitialReplicas", storeProperties) + if e != nil { + return + } + return } diff --git a/storage/internal/elastic/es_utils.go b/storage/internal/elastic/es_utils.go index eccea77de..c60be83a2 100755 --- a/storage/internal/elastic/es_utils.go +++ b/storage/internal/elastic/es_utils.go @@ -20,16 +20,17 @@ import ( "crypto/tls" "crypto/x509" "encoding/json" - elasticsearch6 "github.com/elastic/go-elasticsearch/v6" - "github.com/elastic/go-elasticsearch/v6/esapi" - "github.com/pkg/errors" - "github.com/ystia/yorc/v4/log" - "github.com/ystia/yorc/v4/storage/store" "io" "io/ioutil" "net/http" "strings" "time" + + elasticsearch6 "github.com/elastic/go-elasticsearch/v6" + "github.com/elastic/go-elasticsearch/v6/esapi" + "github.com/pkg/errors" + "github.com/ystia/yorc/v4/log" + "github.com/ystia/yorc/v4/storage/store" ) var pfalse = false @@ -37,8 +38,7 @@ var pfalse = false func prepareEsClient(elasticStoreConfig elasticStoreConf) (*elasticsearch6.Client, error) { log.Printf("Elastic storage will run using this configuration: %+v", elasticStoreConfig) - var esConfig elasticsearch6.Config - esConfig = elasticsearch6.Config{Addresses: elasticStoreConfig.esUrls} + esConfig := elasticsearch6.Config{Addresses: elasticStoreConfig.esUrls} if len(elasticStoreConfig.caCertPath) > 0 { log.Printf("Reading CACert file from %s", elasticStoreConfig.caCertPath) @@ -71,6 +71,9 @@ func prepareEsClient(elasticStoreConfig elasticStoreConf) (*elasticsearch6.Clien // In debug mode or when traceRequests option is activated, we add a custom logger that print requests & responses log.Printf("\t- Tracing ES requests & response can be expensive and verbose !") esConfig.Logger = &debugLogger{} + } else { + // otherwise log only failure are logger + esConfig.Logger = &defaultLogger{} } log.Printf("\t- Index prefix will be %s", elasticStoreConfig.indicePrefix+elasticStoreConfig.clusterID+"_") @@ -124,7 +127,7 @@ func initStorageIndex(c 
*elasticsearch6.Client, elasticStoreConfig elasticStoreC } else if res.StatusCode == 404 { log.Printf("Indice %s was not found, let's create it !", indexName) - requestBodyData := buildInitStorageIndexQuery() + requestBodyData := buildInitStorageIndexQuery(elasticStoreConfig) // indice doest not exist, let's create it req := esapi.IndicesCreateRequest{ @@ -158,7 +161,7 @@ func refreshIndex(c *elasticsearch6.Client, indexName string) { } // Query ES for events or logs specifying the expected results 'size' and the sort 'order'. -func doQueryEs(c *elasticsearch6.Client, conf elasticStoreConf, +func doQueryEs(ctx context.Context, c *elasticsearch6.Client, conf elasticStoreConf, index string, query string, waitIndex uint64, @@ -170,7 +173,7 @@ func doQueryEs(c *elasticsearch6.Client, conf elasticStoreConf, lastIndex = waitIndex res, e := c.Search( - c.Search.WithContext(context.Background()), + c.Search.WithContext(ctx), c.Search.WithIndex(index), c.Search.WithSize(size), c.Search.WithBody(strings.NewReader(query)), @@ -197,6 +200,8 @@ func doQueryEs(c *elasticsearch6.Client, conf elasticStoreConf, return } + logShardsInfos(r) + hits = int(r["hits"].(map[string]interface{})["total"].(float64)) duration := int(r["took"].(float64)) log.Debugf("Search ES request on index %s took %dms, hits=%d, response code was %d (%s)", index, duration, hits, res.StatusCode, res.Status()) @@ -307,6 +312,20 @@ func closeResponseBody(requestDescription string, res *esapi.Response) { } } +// Log shards stats +func logShardsInfos(r map[string]interface{}) { + si := r["_shards"].(map[string]interface{}) + + duration := int(r["took"].(float64)) + + tt := int(si["total"].(float64)) + ts := int(si["successful"].(float64)) + + if ts < tt { + log.Printf("[Warn] ES Uncomplete response: %d/%d shards (%dms)", ts, tt, duration) + } +} + type debugLogger struct{} // RequestBodyEnabled makes the client pass request body to logger @@ -354,3 +373,79 @@ func (l *debugLogger) LogRoundTrip( return nil } + 
+type defaultLogger struct{} + +// RequestBodyEnabled makes the client pass request body to logger +func (l *defaultLogger) RequestBodyEnabled() bool { return true } + +// ResponseBodyEnabled makes the client pass response body to logger +func (l *defaultLogger) ResponseBodyEnabled() bool { return true } + +// LogRoundTrip will use log to debug ES request and response (when debug is activated) +func (l *defaultLogger) LogRoundTrip( + req *http.Request, + res *http.Response, + err error, + start time.Time, + dur time.Duration, +) error { + + var level string + var errType string + var errReason string + + switch { + case err != nil: + level = "Exception" + case res != nil && res.StatusCode > 0 && res.StatusCode < 300: + return nil + case res != nil && res.StatusCode > 299 && res.StatusCode < 500: + level = "Warn" + case res != nil && res.StatusCode > 499: + errType, errReason = extractEsError(res) + level = "Error" + default: + level = "Unknown" + } + + if errType == "" && errReason == "" { + log.Printf("ES Request [%s][%v][%s][%s][%d][%v]", + level, start, req.Method, req.URL.String(), res.StatusCode, dur) + } else { + log.Printf("ES Request [%s][%v][%s][%s][%d][%v][%s][%s]", + level, start, req.Method, req.URL.String(), res.StatusCode, dur, errType, errReason) + } + return nil +} + +func extractEsError(response *http.Response) (errType, errReason string) { + var rb map[string]interface{} + var rv interface{} + var ok bool + + errType = "N/A" + errReason = "N/A" + + if err := json.NewDecoder(response.Body).Decode(&rb); err != nil { + return + } + + if rv, ok = rb["error"]; !ok { + return + } + + if rb, ok = rv.(map[string]interface{}); !ok { + return + } + + if rv, ok = rb["type"]; ok { + errType, _ = rv.(string) + } + + if rv, ok = rb["reason"]; ok { + errReason, _ = rv.(string) + } + + return +} diff --git a/storage/internal/elastic/queries.go b/storage/internal/elastic/queries.go index c818a041c..7a23f84ec 100755 --- a/storage/internal/elastic/queries.go +++ 
b/storage/internal/elastic/queries.go @@ -14,67 +14,48 @@ package elastic -import "strconv" +import ( + "bytes" + "strconv" + "text/template" +) -// Return the query that is used to create indexes for event and log storage. -// We only index the needed fields to optimize ES indexing performance (no dynamic mapping). -func buildInitStorageIndexQuery() (query string) { - query = ` +// Index creation request +const initStorageTemplateText = ` { "settings": { - "refresh_interval": "1s" + {{ if ne .InitialReplicas -1}}"number_of_replicas": {{ .InitialReplicas}},{{end}} + {{ if ne .InitialShards -1 }}"number_of_shards": {{ .InitialShards}},{{end}} + "refresh_interval": "1s" }, "mappings": { "_doc": { "_all": {"enabled": false}, "dynamic": "false", "properties": { - "deploymentId": { - "type": "keyword", - "index": true - }, - "iid": { - "type": "long", - "index": true - }, - "iidStr": { - "type": "keyword", - "index": false - } + "deploymentId": { "type": "keyword", "index": true }, + "iid": { "type": "long", "index": true }, + "iidStr": { "type": "keyword","index": false } } } } }` - return -} -// This ES aggregation query is built using clusterId and eventually deploymentId. -func buildLastModifiedIndexQuery(deploymentID string) (query string) { - if len(deploymentID) == 0 { - query = ` -{ - "aggs" : { - "max_iid" : { - "filter" : { - "match_all": {} - }, - "aggs" : { - "last_index" : { "max" : { "field" : "iid" } } - } - } - } -}` - } else { - query = ` +// Get last Modified index +const lastModifiedIndexTemplateText = ` { "aggs" : { "max_iid" : { "filter" : { +{{if .}} "bool": { "must": [ - { "term": { "deploymentId": "` + deploymentID + `" } } + { "term": { "deploymentId": "{{ . 
}}" } } ] } +{{else}} + "match_all": {} +{{end}} }, "aggs" : { "last_index" : { "max" : { "field" : "iid" } } @@ -82,57 +63,68 @@ func buildLastModifiedIndexQuery(deploymentID string) (query string) { } } }` - } - return + +// Range Query +const rangeQueryTemplateText = `{ "range":{ "iid":{ "gt": "{{ conv .WaitIndex }}"{{if gt .MaxIndex 0}},"lte": "{{ conv .MaxIndex }}"{{end}}}}}` + +const listQueryTemplateText = ` +{ + "query":{{if .DeploymentID}}{ + "bool":{ + "must": [ + { + "term":{ + "deploymentId": "{{ .DeploymentID }}" + } + }, + {{template "rangeQuery" .}} + ] + } + }{{else}}{{template "rangeQuery" .}}{{end}} } +` -func getRangeQuery(waitIndex uint64, maxIndex uint64) (rangeQuery string) { - if maxIndex > 0 { - rangeQuery = ` - { - "range":{ - "iid":{ - "gt": "` + strconv.FormatUint(waitIndex, 10) + `", - "lte": "` + strconv.FormatUint(maxIndex, 10) + `" - } - } - }` - } else { - rangeQuery = ` - { - "range":{ - "iid":{ - "gt": "` + strconv.FormatUint(waitIndex, 10) + `" - } - } - }` - } - return +var templates *template.Template + +func init() { + funcMap := template.FuncMap{"conv": func(value uint64) string { return strconv.FormatUint(value, 10) }} + + templates = template.Must(template.New("initStorage").Parse(initStorageTemplateText)) + templates = template.Must(templates.New("lastModifiedIndex").Parse(lastModifiedIndexTemplateText)) + + templates = template.Must(templates.New("rangeQuery").Funcs(funcMap).Parse(rangeQueryTemplateText)) + templates = template.Must(templates.New("listQuery").Parse(listQueryTemplateText)) +} + +// Return the query that is used to create indexes for event and log storage. +// We only index the needed fields to optimize ES indexing performance (no dynamic mapping). 
+func buildInitStorageIndexQuery(elasticStoreConfig elasticStoreConf) string { + var buffer bytes.Buffer + templates.ExecuteTemplate(&buffer, "initStorage", elasticStoreConfig) + return buffer.String() +} + +// This ES aggregation query is built using clusterId and eventually deploymentId. +func buildLastModifiedIndexQuery(deploymentID string) (query string) { + var buffer bytes.Buffer + templates.ExecuteTemplate(&buffer, "lastModifiedIndex", deploymentID) + return buffer.String() } // This ES range query is built using 'waitIndex' and eventually 'maxIndex' and filtered using 'clusterId' and eventually 'deploymentId'. func getListQuery(deploymentID string, waitIndex uint64, maxIndex uint64) (query string) { - rangeQuery := getRangeQuery(waitIndex, maxIndex) - if len(deploymentID) == 0 { - query = ` -{ - "query":` + rangeQuery + ` -}` - } else { - query = ` -{ - "query":{ - "bool":{ - "must":[ - { - "term":{ - "deploymentId":"` + deploymentID + `" - } - },` + rangeQuery + ` - ] - } - } -}` + var buffer bytes.Buffer + + data := struct { + WaitIndex uint64 + MaxIndex uint64 + DeploymentID string + }{ + WaitIndex: waitIndex, + MaxIndex: maxIndex, + DeploymentID: deploymentID, } - return + + templates.ExecuteTemplate(&buffer, "listQuery", data) + return buffer.String() } diff --git a/storage/internal/elastic/store.go b/storage/internal/elastic/store.go index 08dc0365e..20b5f5a80 100755 --- a/storage/internal/elastic/store.go +++ b/storage/internal/elastic/store.go @@ -255,7 +255,7 @@ func (s *elasticStore) GetLastModifyIndex(k string) (lastIndex uint64, e error) func (s *elasticStore) verifyLastIndex(indexName string, deploymentID string, estimatedLastIndex uint64) uint64 { query := getListQuery(deploymentID, estimatedLastIndex, 0) // size = 1 no need for the documents - hits, _, lastIndex, err := doQueryEs(s.esClient, s.cfg, indexName, query, estimatedLastIndex, 1, "desc") + hits, _, lastIndex, err := doQueryEs(context.Background(), s.esClient, s.cfg, indexName, query, 
estimatedLastIndex, 1, "desc") if err != nil { log.Printf("An error occurred while verifying lastIndex, returning the initial value %d, error was : %+v", estimatedLastIndex, err) @@ -293,7 +293,7 @@ func (s *elasticStore) List(ctx context.Context, k string, waitIndex uint64, tim var err error for { // first just query to know if they is something to fetch, we just want the max iid (so order desc, size 1) - hits, values, lastIndex, err = doQueryEs(s.esClient, s.cfg, indexName, query, waitIndex, 1, "desc") + hits, values, lastIndex, err = doQueryEs(ctx, s.esClient, s.cfg, indexName, query, waitIndex, 1, "desc") if err != nil { return values, waitIndex, errors.Wrapf(err, "Failed to request ES logs or events, error was: %+v", err) } @@ -301,8 +301,14 @@ func (s *elasticStore) List(ctx context.Context, k string, waitIndex uint64, tim if hits > 0 || now.After(end) { break } + log.Debugf("hits is %d and timeout not reached, sleeping %v ...", hits, s.cfg.esQueryPeriod) - time.Sleep(s.cfg.esQueryPeriod) + select { + case <-time.After(s.cfg.esQueryPeriod): + continue + case <-ctx.Done(): + return values, lastIndex, nil + } } if hits > 0 { // we do have something to retrieve, we will just wait esRefreshWaitTimeout to let any document that has just been stored to be indexed @@ -314,7 +320,7 @@ func (s *elasticStore) List(ctx context.Context, k string, waitIndex uint64, tim } time.Sleep(s.cfg.esRefreshWaitTimeout) oldHits := hits - hits, values, lastIndex, err = doQueryEs(s.esClient, s.cfg, indexName, query, waitIndex, 10000, "asc") + hits, values, lastIndex, err = doQueryEs(ctx, s.esClient, s.cfg, indexName, query, waitIndex, 10000, "asc") if err != nil { return values, waitIndex, errors.Wrapf(err, "Failed to request ES logs or events (after waiting for refresh)") } diff --git a/tasks/workflow/worker.go b/tasks/workflow/worker.go index 078d0a00d..542c42a63 100644 --- a/tasks/workflow/worker.go +++ b/tasks/workflow/worker.go @@ -447,6 +447,7 @@ func (w *worker) runAction(ctx 
context.Context, t *taskExecution) error { } } wasCancelled := new(bool) + taskFailure := new(bool) if action.AsyncOperation.TaskID != "" { ctx = operations.SetOperationLogFields(ctx, action.AsyncOperation.Operation) ctx = events.AddLogOptionalFields(ctx, events.LogOptionalFields{ @@ -467,6 +468,7 @@ func (w *worker) runAction(ctx context.Context, t *taskExecution) error { tasks.UpdateTaskStepWithStatus(action.AsyncOperation.TaskID, action.AsyncOperation.StepName, tasks.TaskStepStatusCANCELED) }) tasks.MonitorTaskFailure(ctx, action.AsyncOperation.TaskID, func() { + *taskFailure = true // Unregister this action asap to prevent new schedulings scheduling.UnregisterAction(w.consulClient, action.ID) @@ -495,6 +497,9 @@ func (w *worker) runAction(ctx context.Context, t *taskExecution) error { if deregister || *wasCancelled { scheduling.UnregisterAction(w.consulClient, action.ID) w.endAction(ctx, t, action, *wasCancelled, err) + } else if *taskFailure { + err = errors.Errorf("Stopped on task failure") + w.endAction(ctx, t, action, *wasCancelled, err) } if err != nil { return err diff --git a/versions.yaml b/versions.yaml index 81da7cc9a..782b341f1 100644 --- a/versions.yaml +++ b/versions.yaml @@ -1,6 +1,6 @@ -yorc_version: 4.1.0-SNAPSHOT +yorc_version: 4.2.0-SNAPSHOT # Alien4Cloud version used by the bootstrap as the default version to download/install -alien4cloud_version: 3.0.0-M8 +alien4cloud_version: 3.2.0 consul_version: 1.2.3 terraform_version: 0.11.8 ansible_version: 2.10.0