From dd91f6e9160fa1825db1242b2559aaeef4f637c7 Mon Sep 17 00:00:00 2001
From: Rakshitha Kamath
Date: Tue, 5 Dec 2023 11:12:14 +0530
Subject: [PATCH] Add the storage scale server script

---
 build_scripts/common/basic-server-client.sh |   7 +-
 build_scripts/common/basic-storage-scale.sh | 311 ++++++++++++++++++++
 build_scripts/storage-scale/client.sh       | 108 +++++++
 jobs/macros.yml                             |  11 +
 jobs/scripts/common.sh                      |   5 +
 jobs/storage-scale.yml                      |  38 +++
 6 files changed, 479 insertions(+), 1 deletion(-)
 create mode 100644 build_scripts/common/basic-storage-scale.sh
 create mode 100644 build_scripts/storage-scale/client.sh
 create mode 100644 jobs/storage-scale.yml

diff --git a/build_scripts/common/basic-server-client.sh b/build_scripts/common/basic-server-client.sh
index c77e9063..fbb0c3b1 100644
--- a/build_scripts/common/basic-server-client.sh
+++ b/build_scripts/common/basic-server-client.sh
@@ -14,7 +14,12 @@ function server_run()
 
     scp ${SSH_OPTIONS} ${2} root@${1}:./$(basename ${2})
 
-    ssh -t ${SSH_OPTIONS} root@${1} "GERRIT_HOST='${GERRIT_HOST}' GERRIT_PROJECT='${GERRIT_PROJECT}' GERRIT_REFSPEC='${GERRIT_REFSPEC}' CENTOS_VERSION='${CENTOS_VERSION}' CENTOS_ARCH='${CENTOS_ARCH}' ${VOLUME_TYPE}_VOLUME='${EXPORT}' YUM_REPO='${YUM_REPO}' ${INCLUDE_TEMPLATE_URL} ${INCLUDE_ACL_PARAM} bash ./$(basename ${2})"
+    if [ "$JOB_NAME" == "storage-scale" ]; then
+        ssh -t ${SSH_OPTIONS} root@${1} "AWS_ACCESS_KEY='${AWS_ACCESS_KEY}' AWS_SECRET_KEY='${AWS_SECRET_KEY}' GERRIT_HOST='${GERRIT_HOST}' GERRIT_PROJECT='${GERRIT_PROJECT}' GERRIT_REFSPEC='${GERRIT_REFSPEC}' CENTOS_VERSION='${CENTOS_VERSION}' CENTOS_ARCH='${CENTOS_ARCH}' ${VOLUME_TYPE}_VOLUME='${EXPORT}' YUM_REPO='${YUM_REPO}' ${INCLUDE_TEMPLATE_URL} ${INCLUDE_ACL_PARAM} bash ./$(basename ${2})"
+    else
+        ssh -t ${SSH_OPTIONS} root@${1} "GERRIT_HOST='${GERRIT_HOST}' GERRIT_PROJECT='${GERRIT_PROJECT}' GERRIT_REFSPEC='${GERRIT_REFSPEC}' CENTOS_VERSION='${CENTOS_VERSION}' CENTOS_ARCH='${CENTOS_ARCH}' ${VOLUME_TYPE}_VOLUME='${EXPORT}' YUM_REPO='${YUM_REPO}' ${INCLUDE_TEMPLATE_URL} ${INCLUDE_ACL_PARAM} bash ./$(basename ${2})"
+    fi
+
 
     #RETURN_CODE=$?
     #return $RETURN_CODE
diff --git a/build_scripts/common/basic-storage-scale.sh b/build_scripts/common/basic-storage-scale.sh
new file mode 100644
index 00000000..9e32e2de
--- /dev/null
+++ b/build_scripts/common/basic-storage-scale.sh
@@ -0,0 +1,311 @@
+#!/bin/sh
+#
+# Set up a simple IBM Storage Scale (GPFS) environment and export a filesystem
+# through NFS-Ganesha.
+#
+# This script uses the following environment variables:
+# - GLUSTER_VOLUME: name of the volume to create; this name is also used as
+#                   the name of the export. (The variable name is inherited
+#                   from the shared Gluster job scripts.)
+#
+# The YUM_REPO and GERRIT_* variables are mutually exclusive.
+#
+# - YUM_REPO: URL to the yum repository (.repo file) for the NFS-Ganesha
+#             packages. When this option is used, libntirpc-latest is enabled
+#             as well. Leave empty in case patches from Gerrit need testing.
+#
+# - GERRIT_HOST: when triggered from a new patch submission, this is set to the
+#                git server that contains the repository to use.
+#
+# - GERRIT_PROJECT: project that triggered the build (like ffilz/nfs-ganesha).
+#
+# - GERRIT_REFSPEC: git tree-ish that can be fetched and checked-out for testing.
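+#
+# Example invocation (illustrative only; the values below are assumptions,
+# not taken from any real job configuration):
+#
+#   GLUSTER_VOLUME=scale_vol \
+#   GERRIT_HOST=review.gerrithub.io \
+#   GERRIT_PROJECT=ffilz/nfs-ganesha \
+#   GERRIT_REFSPEC=refs/changes/12/345612/1 \
+#   bash ./basic-storage-scale.sh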
+
+# abort if anything fails
+set -e
+
+[ -n "${GLUSTER_VOLUME}" ]
+
+# be a little bit more verbose
+set -x
+
+# enable repositories
+yum -y install centos-release-gluster yum-utils centos-release-ceph epel-release unzip
+
+
+#-----------------------------------------------------------------------------------------#
+#This script has the following blocks
+#1. Download and install Storage Scale
+#2. Fetch the code changes, compile, rpmbuild and install the rpms
+#3. Export the volume
+#4. Verify that the export is available (helper scripts carried over from the Gluster job)
+#-----------------------------------------------------------------------------------------#
+
+
+#-----------------------------------------------------------------------------------------#
+#BLOCK 1: STARTS HERE
+#-----------------------------------------------------------------------------------------#
+
+#Download Storage Scale and install it
+WORKING_DIR="DOWNLOAD_STORAGE_SCALE"
+mkdir -p $WORKING_DIR
+cd $WORKING_DIR
+echo $PWD
+curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
+ls -ltr
+unzip -qq awscliv2.zip
+chmod +x ./aws/*
+./aws/install
+aws --version
+aws configure set aws_access_key_id ${AWS_ACCESS_KEY}
+aws configure set aws_secret_access_key ${AWS_SECRET_KEY}
+aws s3api get-object --bucket nfsganesha-ci --key "version_to_use.txt" "version_to_use.txt"
+VERSION_TO_USE=$(cat version_to_use.txt)
+echo ${VERSION_TO_USE}
+# fetch the installer named in version_to_use.txt (e.g.
+# Storage_Scale_Developer-5.1.9.0-x86_64-Linux-install.zip) under its own name
+aws s3api get-object --bucket nfsganesha-ci --key "${VERSION_TO_USE}" "${VERSION_TO_USE}"
+unzip "${VERSION_TO_USE}"
+
+ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ""
+cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
+chmod og-wx ~/.ssh/authorized_keys
+
+yum -y install kernel-devel cpp gcc gcc-c++ binutils numactl jre make elfutils elfutils-devel rpcbind sssd-tools openldap-clients bind-utils net-tools krb5-workstation python3
+python3 -m pip install --user ansible
+yum install -y kernel-devel-4.18.0-499.el8.x86_64 kernel-headers-4.18.0-499.el8.x86_64
+
+#Add a CES IP to /etc/hosts: walk up from the host's own address and pick the
+#first address that does not answer ping, assuming it is free for CES to use.
+ip_address=$(/sbin/ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1)
+
+for new_ip in $(echo $ip_address | awk -F '.' '{for(i=$4+1;i<=255;i++){print $1"."$2"."$3"."i}}'); do
+    # the negation keeps "set -e" from aborting on the (expected) failed ping
+    if ! ping -c 2 $new_ip; then
+        USABLE_IP=$new_ip
+        break
+    fi
+done
+
+echo "$USABLE_IP cesip1" >> /etc/hosts
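+
+# Sanity check: the CES alias written above should now resolve locally
+# (getent is assumed to be available from glibc-common on these images)
+getent hosts cesip1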
== "1" ]; then USABLE_IP=$new_ip; break; fi; done + +echo "$USABLE_IP cesip1" >> /etc/hosts + +INSTALLER_VERSION=$(echo ${VERSION_TO_USE/.zip/}) +chmod +x $INSTALLER_VERSION +./$INSTALLER_VERSION --silent + +mkdir -p /bricks + +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale setup -s 127.0.0.1 --storesecret; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale node add $(hostname) -n; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale node add $(hostname) -p; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale config protocols -e $USABLE_IP; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale node add -a $(hostname); +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale config gpfs -c $(hostname)_cluster; +dd if=/dev/zero of=/bricks/${GLUSTER_VOLUME} bs=1M count=8192; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale nsd add -p $(hostname) -u dataAndMetadata -fs fs1 -fg 1 /bricks/${GLUSTER_VOLUME}; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale config protocols -f fs1 -m /ibm/fs1; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale enable nfs; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale enable smb; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale callhome disable; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale config perfmon -r off; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale node list; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale install --precheck; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale install; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale deploy --precheck; +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale deploy; + +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale nsd list +/usr/lpp/mmfs/5.1.9.0/ansible-toolkit/spectrumscale filesystem list + +#-----------------------------------------------------------------------------------------# +#BLOCK 1: ENDS HERE +#-----------------------------------------------------------------------------------------# + +#-----------------------------------------------------------------------------------------# +#BLOCK 2: STARTS HERE +#-----------------------------------------------------------------------------------------# + +# make sure rpcbind is running +yum -y install rpcbind +systemctl start rpcbind + +# CentOS 7.4.1708 has an SELinux issue that prevents NFS-Ganesha from creating +# the /var/log/ganesha/ganesha.log file. Starting ganesha.nfsd fails due to +# this. +echo 'TODO: this is BAD, needs a fix in the selinux-policy' +setenforce 0 + +systemctl stop firewalld || true + +if [ -n "${YUM_REPO}" ] +then + yum-config-manager --add-repo=http://artifacts.ci.centos.org/nfs-ganesha/nightly/libntirpc/libntirpc-latest.repo + yum-config-manager --add-repo=${YUM_REPO} + + # install the latest version of gluster + yum -y install nfs-ganesha nfs-ganesha-gluster glusterfs-ganesha + + # start nfs-ganesha service + if ! 
+
+#-----------------------------------------------------------------------------------------#
+#BLOCK 2: STARTS HERE
+#-----------------------------------------------------------------------------------------#
+
+# make sure rpcbind is running
+yum -y install rpcbind
+systemctl start rpcbind
+
+# CentOS 7.4.1708 has an SELinux issue that prevents NFS-Ganesha from creating
+# the /var/log/ganesha/ganesha.log file. Starting ganesha.nfsd fails due to
+# this.
+echo 'TODO: this is BAD, needs a fix in the selinux-policy'
+setenforce 0
+
+systemctl stop firewalld || true
+
+if [ -n "${YUM_REPO}" ]
+then
+    yum-config-manager --add-repo=http://artifacts.ci.centos.org/nfs-ganesha/nightly/libntirpc/libntirpc-latest.repo
+    yum-config-manager --add-repo=${YUM_REPO}
+
+    # install the latest NFS-Ganesha packages
+    yum -y install nfs-ganesha nfs-ganesha-gluster glusterfs-ganesha
+
+    # start nfs-ganesha service
+    if ! systemctl start nfs-ganesha
+    then
+        echo "+++ systemctl status nfs-ganesha.service +++"
+        systemctl status nfs-ganesha.service
+        echo "+++ journalctl -xe +++"
+        journalctl -xe
+        exit 1
+    fi
+else
+    [ -n "${GERRIT_HOST}" ]
+    [ -n "${GERRIT_PROJECT}" ]
+    [ -n "${GERRIT_REFSPEC}" ]
+
+    GIT_REPO=$(basename "${GERRIT_PROJECT}")
+    GIT_URL="https://${GERRIT_HOST}/${GERRIT_PROJECT}"
+
+    BASE_PACKAGES="git bison flex cmake gcc-c++ libacl-devel krb5-devel dbus-devel rpm-build redhat-rpm-config gdb"
+    BUILDREQUIRES_EXTRA="libnsl2-devel libnfsidmap-devel libwbclient-devel userspace-rcu-devel libcephfs-devel"
+
+    if [ "${CENTOS_VERSION}" = "7" ]; then
+        yum -y install libgfapi-devel
+        yum -y install ${BASE_PACKAGES} libnfsidmap-devel libwbclient-devel libcap-devel libblkid-devel userspace-rcu-devel userspace-rcu python2-devel
+    elif [ "${CENTOS_VERSION}" = "8s" ]; then
+        yum install -y ${BASE_PACKAGES} libacl-devel libblkid-devel libcap-devel redhat-rpm-config rpm-build libgfapi-devel xfsprogs-devel
+        yum install --enablerepo=powertools -y ${BUILDREQUIRES_EXTRA}
+        yum -y install selinux-policy-devel sqlite
+    elif [ "${CENTOS_VERSION}" = "9s" ]; then
+        yum install -y ${BASE_PACKAGES} libacl-devel libblkid-devel libcap-devel redhat-rpm-config rpm-build libgfapi-devel xfsprogs-devel
+        yum install --enablerepo=crb -y ${BUILDREQUIRES_EXTRA}
+        yum -y install selinux-policy-devel sqlite
+    fi
+
+    git init "${GIT_REPO}"
+    pushd "${GIT_REPO}"
+
+    # A shallow fetch has been seen to fail intermittently; fall back to a
+    # full fetch after a short pause.
+    if git fetch --depth=1 "${GIT_URL}" "${GERRIT_REFSPEC}" > /dev/null; then
+        echo "Fetch succeeded"
+    else
+        sleep 2
+        git fetch "${GIT_URL}" "${GERRIT_REFSPEC}"
+    fi
+
+    git checkout -b "${GERRIT_REFSPEC}" FETCH_HEAD
+
+    # update libntirpc
+    git submodule update --init || git submodule sync
+
+    mkdir build
+    pushd build
+
+    cmake -DCMAKE_BUILD_TYPE=Maintainer -DUSE_FSAL_GPFS=ON -DUSE_DBUS=ON -D_MSPAC_SUPPORT=OFF ../src
+    make dist
+    rpmbuild -ta --define "_srcrpmdir $PWD" --define "_rpmdir $PWD" *.tar.gz
+    rpm_arch=$(rpm -E '%{_arch}')
+    ganesha_version=$(rpm -q --qf '%{VERSION}-%{RELEASE}' -p *.src.rpm)
+    if [ -e ${rpm_arch}/libntirpc-devel*.rpm ]; then
+        ntirpc_version=$(rpm -q --qf '%{VERSION}-%{RELEASE}' -p ${rpm_arch}/libntirpc-devel*.rpm)
+        ntirpc_rpm=${rpm_arch}/libntirpc-${ntirpc_version}.${rpm_arch}.rpm
+    fi
+    yum -y install {x86_64,noarch}/*.rpm
+
+    # allow unlimited core dumps so a crashing ganesha.nfsd can be debugged
+    ulimit -a
+    ulimit -c unlimited
+    ulimit -a
+
+    # start nfs-ganesha service with an empty configuration
+    echo "NFSv4 { Graceless = true; }" > /etc/ganesha/ganesha.conf
+    if ! systemctl start nfs-ganesha
+    then
+        echo "+++ systemctl status nfs-ganesha.service +++"
+        systemctl status nfs-ganesha.service
+        echo "+++ journalctl -xe +++"
+        journalctl -xe
+        exit 1
+    fi
+fi
+
+#-----------------------------------------------------------------------------------------#
+#BLOCK 2: ENDS HERE
+#-----------------------------------------------------------------------------------------#
+
+#-----------------------------------------------------------------------------------------#
+#BLOCK 3: STARTS HERE
+#-----------------------------------------------------------------------------------------#
+
+#The EXPORT block from the original Gluster script has been updated here for
+#the GPFS FSAL.
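+
+# Notes on the export definition below: Export_id is derived from the epoch
+# (date +%s) so that repeated runs pick a unique id, and the MaxOffsetRead/
+# MaxOffsetWrite value 18446744073709551615 is 2^64-1, i.e. effectively
+# unlimited. Filesystem_id 666.666 appears to be an arbitrary placeholder.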
+
+systemctl stop firewalld || service iptables stop || true
+
+EXPORT_ID=$(date +"%s")
+
+# Append the export definition to the NFS-Ganesha configuration and restart
+# the service so that the export becomes active.
+cat >> /etc/ganesha/ganesha.conf <<EOF
+EXPORT
+{
+    Delegations = none;
+    Export_id = $EXPORT_ID;
+    Filesystem_id = 666.666;
+    MaxOffsetRead = 18446744073709551615;
+    MaxOffsetWrite = 18446744073709551615;
+    MaxRead = 1048576;
+    MaxWrite = 1048576;
+    Path = "/bricks/${GLUSTER_VOLUME}";
+    PrefRead = 1048576;
+    PrefReaddir = 1048576;
+    PrefWrite = 1048576;
+    Pseudo = "/bricks/${GLUSTER_VOLUME}";
+    Tag = "/bricks/${GLUSTER_VOLUME}";
+    UseCookieVerifier = false;
+    FSAL
+    {
+        Name = gpfs;
+    }
+    CLIENT
+    {
+        Access_Type = rw;
+        Clients = *;
+        Squash = none;
+    }
+}
+EOF
+
+systemctl restart nfs-ganesha
+
+#-----------------------------------------------------------------------------------------#
+#BLOCK 3: ENDS HERE
+#-----------------------------------------------------------------------------------------#
+
+#-----------------------------------------------------------------------------------------#
+#BLOCK 4: STARTS HERE
+#-----------------------------------------------------------------------------------------#
+
+# Export the volume with the Gluster helper scripts (carried over from the
+# Gluster job) and verify that the export is available
+mkdir -p /usr/libexec/ganesha
+cd /usr/libexec/ganesha
+yum -y install wget
+wget https://raw.githubusercontent.com/gluster/glusterfs/release-3.10/extras/ganesha/scripts/create-export-ganesha.sh
+wget https://raw.githubusercontent.com/gluster/glusterfs/release-3.10/extras/ganesha/scripts/dbus-send.sh
+chmod 755 create-export-ganesha.sh dbus-send.sh
+
+/usr/libexec/ganesha/create-export-ganesha.sh /etc/ganesha on ${GLUSTER_VOLUME}
+/usr/libexec/ganesha/dbus-send.sh /etc/ganesha on ${GLUSTER_VOLUME}
+
+# wait till the server comes out of its grace period
+sleep 5
+
+# basic check if the export is available, some debugging if not
+if ! showmount -e | grep -q -w -e "${GLUSTER_VOLUME}"
+then
+    echo "+++ /var/log/ganesha/ganesha.log +++"
+    cat /var/log/ganesha/ganesha.log
+    echo
+    echo "+++ /var/log/ganesha/ganesha-gfapi.log +++"
+    cat /var/log/ganesha/ganesha-gfapi.log
+    echo
+    echo "+++ /etc/ganesha/ganesha.conf +++"
+    grep --with-filename -e '' /etc/ganesha/ganesha.conf
+    echo
+    echo "+++ /etc/ganesha/exports/*.conf +++"
+    grep --with-filename -e '' /etc/ganesha/exports/*.conf
+    echo
+    echo "Export ${GLUSTER_VOLUME} is not available"
+    echo
+    echo "+++ /var/log/glusterfs/bricks/*.log +++"
+    cat /var/log/glusterfs/bricks/*.log
+    echo
+    echo "+++ /var/log/glusterfs/glusterd.log +++"
+    cat /var/log/glusterfs/glusterd.log
+    echo
+    exit 1
+fi
+
+#-----------------------------------------------------------------------------------------#
+#BLOCK 4: ENDS HERE
+#-----------------------------------------------------------------------------------------#
diff --git a/build_scripts/storage-scale/client.sh b/build_scripts/storage-scale/client.sh
new file mode 100644
index 00000000..a549195a
--- /dev/null
+++ b/build_scripts/storage-scale/client.sh
@@ -0,0 +1,108 @@
+#!/bin/sh
+#
+# Environment variables used:
+# - SERVER: hostname or IP-address of the NFS-server
+# - EXPORT: NFS-export to test (should start with "/")
+
+# enable some more output
+set -x
+
+[ -n "${SERVER}" ]
+[ -n "${EXPORT}" ]
+
+# install build and runtime dependencies
+yum -y install nfs-utils time centos-release-gluster
+
+mkdir -p /mnt/ganesha
+
+if [ "$CENTOS_VERSION" == "7" ]; then
+    yum --enablerepo=centos-gluster*test -y install iozone
+elif [ "$CENTOS_VERSION" == "8s" ]; then
+    curl -o /etc/yum.repos.d/iozone.repo https://copr.fedorainfracloud.org/coprs/aflyhorse/iozone/repo/centos-stream-8/aflyhorse-iozone-centos-stream-8.repo
+    yum install -y iozone
+elif [ "$CENTOS_VERSION" == "9s" ]; then
+    curl -o /etc/yum.repos.d/iozone.repo https://copr.fedorainfracloud.org/coprs/aflyhorse/iozone/repo/centos-stream-9/aflyhorse-iozone-centos-stream-9.repo
+    yum install -y iozone
+fi
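+
+# Give the server a moment to finish its grace period before mounting; this
+# retry loop (up to ~60 seconds) is a defensive sketch, not part of the
+# original test flow.
+for _ in $(seq 1 12); do
+    showmount -e "${SERVER}" | grep -q -w -e "${EXPORT}" && break
+    sleep 5
+done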
+
+# run "iozone -a" on the current mountpoint; ${1} is the NFS version under
+# test and is only used in the log messages
+run_iozone()
+{
+    echo "Running Iozone Test On NFSv${1}"
+    echo "++++++++++++++++++++++++++++++++"
+
+    #timeout -s SIGKILL 240s iozone -a > ../ioZoneLog.txt
+    #TIMED_OUT=$?
+    #The return code is 124 if the process was ended with SIGTERM after not
+    #responding, and 137 if SIGKILL was used to kill it.
+    #if [ $TIMED_OUT == 137 ]; then
+    #    echo -e "The process timed out after 4 minutes!\nCheck the server process to see if it has crashed!"
+    #    exit 1
+    #fi
+
+    iozone -a > ../ioZoneLog.txt
+    grep "iozone test complete" ../ioZoneLog.txt
+
+    ret=$?
+    if [ $ret -eq 0 ]
+    then
+        echo "IOZone Test Is Completed And Successful On v${1}"
+    else
+        echo "IOZone Test Failed On NFSv${1}..."
+        tail -3 ../ioZoneLog.txt
+        exit $ret
+    fi
+}
+
+mount -t nfs -o vers=3 ${SERVER}:${EXPORT} /mnt/ganesha
+cd /mnt/ganesha
+ls -ltr
+run_iozone 3
+cd / && umount /mnt/ganesha
+
+mount -t nfs -o vers=4.0 ${SERVER}:${EXPORT} /mnt/ganesha
+cd /mnt/ganesha
+run_iozone 4.0
+cd / && umount /mnt/ganesha
+
+mount -t nfs -o vers=4.1 ${SERVER}:${EXPORT} /mnt/ganesha
+cd /mnt/ganesha
+run_iozone 4.1
+cd / && umount /mnt/ganesha
diff --git a/jobs/macros.yml b/jobs/macros.yml
index d8f187e6..f618820b 100644
--- a/jobs/macros.yml
+++ b/jobs/macros.yml
@@ -129,6 +129,17 @@
       credential-id: GERRITHUB_PRIVATE_KEY
       variable: GERRITHUB_KEY
 
+- wrapper:
+    name: aws_credentials
+    wrappers:
+      - credentials-binding:
+          - text:
+              credential-id: AWS_ACCESS_KEY
+              variable: ACCESS_KEY
+          - text:
+              credential-id: AWS_SECRET_KEY
+              variable: SECRET_KEY
+
 #Enable this to over-ride the configuration under Manage Jenkins > Configure System > Global Build Time Out
 #This one is meant for pynfs jobs, setting the value to 4 hours as a part of my testing!
 - wrapper:
diff --git a/jobs/scripts/common.sh b/jobs/scripts/common.sh
index 93e10b57..b813c9b8 100644
--- a/jobs/scripts/common.sh
+++ b/jobs/scripts/common.sh
@@ -6,6 +6,11 @@ export GERRIT_REFSPEC=${GERRIT_REFSPEC}
 export LAST_TRIGGERED_JOB_NAME=$JOB_NAME
 export BUILD_NUMBER=${BUILD_NUMBER}
 
+if [ "$JOB_NAME" == "storage-scale" ]; then
+    export AWS_ACCESS_KEY=${ACCESS_KEY}
+    export AWS_SECRET_KEY=${SECRET_KEY}
+fi
+
 bash $WORKSPACE/ci-tests/build_scripts/common/basic-server-client.sh
 
 RET=$?
diff --git a/jobs/storage-scale.yml b/jobs/storage-scale.yml
new file mode 100644
index 00000000..4631a31e
--- /dev/null
+++ b/jobs/storage-scale.yml
@@ -0,0 +1,38 @@
+- job:
+    name: storage-scale
+    node: cico-workspace
+    description: 'Run Storage Scale CI test'
+    project-type: freestyle
+    concurrent: false
+    allow-manual-triggers: true
+
+    scm:
+      - ci-tests
+
+    properties:
+      - discarder
+
+    parameters:
+      - string:
+          default: "$WORKSPACE/ci-tests/build_scripts/common/basic-storage-scale.sh"
+          description: Test script to execute on the reserved machine acting as a server.
+          name: SERVER_TEST_SCRIPT
+      - string:
+          default: "$WORKSPACE/ci-tests/build_scripts/storage-scale/client.sh"
+          description: Test script to execute on the reserved machine acting as a client.
+          name: CLIENT_TEST_SCRIPT
+      - nfs_variables:
+          export_var: 'storage_scale'
+      - gerrit_variables
+      - centos_variables
+
+    builders:
+      - get-node
+      - shell: !include-raw: scripts/common.sh
+
+    wrappers:
+      - cleanup-ws
+      - gerrithub_key
+      # needed so common.sh can export ACCESS_KEY/SECRET_KEY to the server script
+      - aws_credentials
+
+    publishers:
+      - post_build_task_return-node