diff --git a/.gitignore b/.gitignore
index 63472c8..60e2a1a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,3 +10,4 @@
 */settings/local.py
 public/static/
 public/media/
+secrets.sls
\ No newline at end of file
diff --git a/README.rst b/README.rst
index ea7ff64..a7f8bfb 100644
--- a/README.rst
+++ b/README.rst
@@ -57,98 +57,10 @@ You should now be able to run the development server::
 
     python manage.py runserver
 
-Setup repository
-------------------------
-
-Before your project can be deployed to a server, the code needs to be
-accessible in a git repository.
-
-1. Add your project code to a git repo, hosted somewhere your server can clone it from.
-
-2. Edit ``fabfile.py`` near the top and insert your repo's URL. E.g., change this::
-
-      env.repo = u'' # FIXME: Add repo URL
-
-   to this::
-
-      env.repo = u'git@github.com:account/reponame.git'
-
-
-
-Server Provisioning
-------------------------
-
-The first step in creating a new server is to create users on the remote server. You
-will need root user access with passwordless sudo. How you specify this user will vary
-based on the hosting provider. EC2 and Vagrant use a private key file. Rackspace and
-Linode use a user/password combination.
-
-1. For each developer, put a file in the ``conf/users`` directory
-   containing their public ssh key, and named exactly the same as the
-   user to create on the server, which should be the same as the userid
-   on the local development system. (E.g. for user "dickens", the filename
-   must be "dickens", not "dickens.pub" or "user_dickens".)
-
-2. Run this command to create users on the server::
-
-      fab -H -u create_users
-
-   This will create a project user and users for all the developers.
-
-3. Lock down SSH connections: disable password login and move
-   the default port from 22 to ``env.ssh_port``::
-
-      fab -H configure_ssh
-
-4. Add the IP to the appropriate environment
-   function and provision it for its role. You can provision a new server with the
-   ``setup_server`` fab command. It takes a list of roles for this server
-   ('app', 'db', 'lb') or you can say 'all'::
-
-      fab staging setup_server:all
-
-
-Vagrant Testing
-------------------------
-
-You can test the provisioning/deployment using `Vagrant `_.
-Using the Vagrantfile you can start up the VM. This requires the ``lucid32`` box::
-
-    vagrant up
-
-With the VM up and running, you can create the necessary users.
-Put the developers' keys in ``conf/users`` as before, then
-use these commands to create the users. The location of the key file
-(/usr/lib/ruby/gems/1.8/gems/vagrant-1.0.2/keys/vagrant)
-may vary on your system. Running ``locate keys/vagrant`` might
-help find it::
-
-    fab -H 33.33.33.10 -u vagrant -i /usr/lib/ruby/gems/1.8/gems/vagrant-1.0.2/keys/vagrant create_users
-    fab vagrant setup_server:all
-    fab vagrant deploy
-
-It is not necessary to reconfigure the SSH settings on the vagrant box.
-
-The vagrant box forwards
-port 80 in the VM to port 8080 on the host box. You can view the site
-by visiting localhost:8080 in your browser.
-
-You may also want to add::
-
-    33.33.33.10 dev.example.com
-
-to your hosts (/etc/hosts) file.
-
-You can stop the VM with ``vagrant halt`` and
-destroy the box completely to retest the provisioning with ``vagrant destroy``.
-
-For more information please review the Vagrant documentation.
-
-
 Deployment
 ------------------------
 
-For future deployments, you can deploy changes to a particular environment with
+You can deploy changes to a particular environment with
 the ``deploy`` command.
This takes an optional branch name to deploy. If the branch is not given, it will use the default branch defined for this environment in ``env.branch``:: diff --git a/Vagrantfile b/Vagrantfile index 62217b7..8cc589b 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -1,6 +1,7 @@ -Vagrant::Config.run do |config| - config.vm.box = "lucid32" - config.vm.box_url = "http://files.vagrantup.com/lucid32.box" - config.vm.forward_port 80, 8080 - config.vm.network :hostonly, "33.33.33.10" +Vagrant.configure("2") do |config| + # Every Vagrant virtual environment requires a box to build off of. + config.vm.box = "precise32" + config.vm.box_url = "http://files.vagrantup.com/precise32.box" + config.vm.network :forwarded_port, guest: 80, host: 8089 + config.vm.network :private_network, ip: "33.33.33.10" end diff --git a/conf/bootstrap-salt.sh b/conf/bootstrap-salt.sh new file mode 100644 index 0000000..d34a242 --- /dev/null +++ b/conf/bootstrap-salt.sh @@ -0,0 +1,2762 @@ +#!/bin/sh - +#=============================================================================== +# vim: softtabstop=4 shiftwidth=4 expandtab fenc=utf-8 spell spelllang=en +#=============================================================================== +# +# FILE: bootstrap-salt.sh +# +# DESCRIPTION: Bootstrap salt installation for various systems/distributions +# +# BUGS: https://github.com/saltstack/salty-vagrant/issues +# AUTHOR: Pedro Algarvio (s0undt3ch), pedro@algarvio.me +# Alec Koumjian (akoumjian), akoumjian@gmail.com +# Geoff Garside (geoffgarside), geoff@geoffgarside.co.uk +# LICENSE: Apache 2.0 +# ORGANIZATION: Salt Stack (saltstack.org) +# CREATED: 10/15/2012 09:49:37 PM WEST +#=============================================================================== +set -o nounset # Treat unset variables as an error +ScriptVersion="1.5.4" +ScriptName="bootstrap-salt.sh" + +#=============================================================================== +# Environment variables taken into account. +#------------------------------------------------------------------------------- +# * BS_COLORS: If 0 disables colour support +# * BS_PIP_ALLOWED: If 1 enable pip based installations(if needed) +# * BS_ECHO_DEBUG: If 1 enable debug echo which can also be set by -D +# * BS_SALT_ETC_DIR: Defaults to /etc/salt +# * BS_FORCE_OVERWRITE: Force overriding copied files(config, init.d, etc) +#=============================================================================== + + +#=============================================================================== +# LET THE BLACK MAGIC BEGIN!!!! +#=============================================================================== + + +# Bootstrap script truth values +BS_TRUE=1 +BS_FALSE=0 + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __detect_color_support +# DESCRIPTION: Try to detect color support. +#------------------------------------------------------------------------------- +COLORS=${BS_COLORS:-$(tput colors 2>/dev/null || echo 0)} +__detect_color_support() { + if [ $? -eq 0 ] && [ "$COLORS" -gt 2 ]; then + RC="\033[1;31m" + GC="\033[1;32m" + BC="\033[1;34m" + YC="\033[1;33m" + EC="\033[0m" + else + RC="" + GC="" + BC="" + YC="" + EC="" + fi +} +__detect_color_support + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: echoerr +# DESCRIPTION: Echo errors to stderr. 
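#--- EXAMPLE ------------------------------------------------------------------
#  A minimal sketch of how the environment variables above combine with the
#  command line options parsed further down. The exact flags and values here
#  are illustrative only, not a recommended configuration:
#
#      BS_ECHO_DEBUG=1 BS_PIP_ALLOWED=1 sh bootstrap-salt.sh -M git develop
#
#  This would enable debug output, allow pip based fallbacks, install a
#  salt-master alongside the salt-minion, and check out salt's "develop"
#  branch.
#-------------------------------------------------------------------------------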
+#------------------------------------------------------------------------------- +echoerror() { + printf "${RC} * ERROR${EC}: $@\n" 1>&2; +} + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: echoinfo +# DESCRIPTION: Echo information to stdout. +#------------------------------------------------------------------------------- +echoinfo() { + printf "${GC} * INFO${EC}: %s\n" "$@"; +} + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: echowarn +# DESCRIPTION: Echo warning informations to stdout. +#------------------------------------------------------------------------------- +echowarn() { + printf "${YC} * WARN${EC}: %s\n" "$@"; +} + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: echodebug +# DESCRIPTION: Echo debug information to stdout. +#------------------------------------------------------------------------------- +echodebug() { + if [ $ECHO_DEBUG -eq $BS_TRUE ]; then + printf "${BC} * DEBUG${EC}: %s\n" "$@"; + fi +} + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: check_pip_allowed +# DESCRIPTION: Simple function to let the users know that -P needs to be +# used. +#------------------------------------------------------------------------------- +check_pip_allowed() { + if [ $PIP_ALLOWED -eq $BS_FALSE ]; then + echoerror "pip based installations were not allowed. Retry using '-P'" + usage + exit 1 + fi +} + +#=== FUNCTION ================================================================ +# NAME: usage +# DESCRIPTION: Display usage information. +#=============================================================================== +usage() { + cat << EOT + + Usage : ${ScriptName} [options] + + Installation types: + - stable (default) + - daily (ubuntu specific) + - git + + Examples: + $ ${ScriptName} + $ ${ScriptName} stable + $ ${ScriptName} daily + $ ${ScriptName} git + $ ${ScriptName} git develop + $ ${ScriptName} git 8c3fadf15ec183e5ce8c63739850d543617e4357 + + Options: + -h Display this message + -v Display script version + -n No colours. + -D Show debug output. + -c Temporary configuration directory + -k Temporary directory holding the minion keys which will pre-seed + the master. + -M Also install salt-master + -S Also install salt-syndic + -N Do not install salt-minion + -C Only run the configuration function. This option automaticaly + bypasses any installation. + -P Allow pip based installations. On some distributions the required salt + packages or it's dependencies are not available as a package for that + distribution. Using this flag allows the script to use pip as a last + resort method. NOTE: This works for functions which actually implement + pip based installations. 
+ -F Allow copied files to overwrite existing(config, init.d, etc) + +EOT +} # ---------- end of function usage ---------- + +#----------------------------------------------------------------------- +# Handle command line arguments +#----------------------------------------------------------------------- +TEMP_CONFIG_DIR="null" +TEMP_KEYS_DIR="null" +INSTALL_MASTER=$BS_FALSE +INSTALL_SYNDIC=$BS_FALSE +INSTALL_MINION=$BS_TRUE +ECHO_DEBUG=${BS_ECHO_DEBUG:-$BS_FALSE} +CONFIG_ONLY=$BS_FALSE +PIP_ALLOWED=${BS_PIP_ALLOWED:-$BS_FALSE} +SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/etc/salt} +FORCE_OVERWRITE=${BS_FORCE_OVERWRITE:-$BS_FALSE} + +while getopts ":hvnDc:k:MSNCP" opt +do + case "${opt}" in + + h ) usage; exit 0 ;; + + v ) echo "$0 -- Version $ScriptVersion"; exit 0 ;; + n ) COLORS=0; __detect_color_support ;; + D ) ECHO_DEBUG=$BS_TRUE ;; + c ) TEMP_CONFIG_DIR="$OPTARG" + # If the configuration directory does not exist, error out + if [ ! -d "$TEMP_CONFIG_DIR" ]; then + echoerror "The configuration directory ${TEMP_CONFIG_DIR} does not exist." + exit 1 + fi + ;; + k ) TEMP_KEYS_DIR="$OPTARG" + # If the configuration directory does not exist, error out + if [ ! -d "$TEMP_KEYS_DIR" ]; then + echoerror "The pre-seed keys directory ${TEMP_KEYS_DIR} does not exist." + exit 1 + fi + ;; + M ) INSTALL_MASTER=$BS_TRUE ;; + S ) INSTALL_SYNDIC=$BS_TRUE ;; + N ) INSTALL_MINION=$BS_FALSE ;; + C ) CONFIG_ONLY=$BS_TRUE ;; + P ) PIP_ALLOWED=$BS_TRUE ;; + F ) FORCE_OVERWRITE=$BS_TRUE ;; + + \?) echo + echoerror "Option does not exist : $OPTARG" + usage + exit 1 + ;; + + esac # --- end of case --- +done +shift $(($OPTIND-1)) + + +__check_unparsed_options() { + shellopts="$1" + unparsed_options=$( echo "$shellopts" | grep -E '[-]+[[:alnum:]]' ) + if [ "x$unparsed_options" != "x" ]; then + usage + echo + echoerror "options are only allowed before install arguments" + echo + exit 1 + fi +} + + +# Check that we're actually installing one of minion/master/syndic +if [ $INSTALL_MINION -eq $BS_FALSE ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && [ $CONFIG_ONLY -eq $BS_FALSE ]; then + echowarn "Nothing to install or configure" + exit 0 +fi + +if [ $CONFIG_ONLY -eq $BS_TRUE ] && [ "$TEMP_CONFIG_DIR" = "null" ]; then + echoerror "In order to run the script in configuration only mode you also need to provide the configuration directory." + exit 1 +fi + +# Define installation type +if [ "$#" -eq 0 ];then + ITYPE="stable" +else + __check_unparsed_options "$*" + ITYPE=$1 + shift +fi + +# Check installation type +if [ "$ITYPE" != "stable" ] && [ "$ITYPE" != "daily" ] && [ "$ITYPE" != "git" ]; then + echoerror "Installation type \"$ITYPE\" is not known..." + exit 1 +fi + +# If doing a git install, check what branch/tag/sha will be checked out +if [ $ITYPE = "git" ]; then + if [ "$#" -eq 0 ];then + GIT_REV="master" + else + __check_unparsed_options "$*" + GIT_REV="$1" + shift + fi +fi + +# Check for any unparsed arguments. Should be an error. +if [ "$#" -gt 0 ]; then + __check_unparsed_options "$*" + usage + echo + echoerror "Too many arguments." + exit 1 +fi + +# Root permissions are required to run this script +if [ $(whoami) != "root" ] ; then + echoerror "Salt requires root privileges to install. Please re-run this script as root." 
+ exit 1 +fi + +CALLER=$(echo `ps -a -o pid,args | grep $$ | grep -v grep | tr -s ' '` | cut -d ' ' -f 2) +if [ "${CALLER}x" = "${0}x" ]; then + CALLER="PIPED THROUGH" +fi +echoinfo "${CALLER} ${0} -- Version ${ScriptVersion}" +#echowarn "Running the unstable version of ${ScriptName}" + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __exit_cleanup +# DESCRIPTION: Cleanup any leftovers after script has ended +# +# +# http://www.unix.com/man-page/POSIX/1posix/trap/ +# +# Signal Number Signal Name +# 1 SIGHUP +# 2 SIGINT +# 3 SIGQUIT +# 6 SIGABRT +# 9 SIGKILL +# 14 SIGALRM +# 15 SIGTERM +#------------------------------------------------------------------------------- +__exit_cleanup() { + EXIT_CODE=$? + + # Remove the logging pipe when the script exits + echodebug "Removing the logging pipe $LOGPIPE" + rm -f $LOGPIPE + + # Kill tee when exiting, CentOS, at least requires this + TEE_PID=$(ps ax | grep tee | grep $LOGFILE | awk '{print $1}') + + [ "x$TEE_PID" = "x" ] && exit $EXIT_CODE + + echodebug "Killing logging pipe tee's with pid(s): $TEE_PID" + + # We need to trap errors since killing tee will cause a 127 errno + # We also do this as late as possible so we don't "mis-catch" other errors + __trap_errors() { + echoinfo "Errors Trapped: $EXIT_CODE" + # Exit with the "original" exit code, not the trapped code + exit $EXIT_CODE + } + trap "__trap_errors" INT QUIT ABRT KILL QUIT TERM + + # Now we're "good" to kill tee + kill -s TERM $TEE_PID + + # In case the 127 errno is not triggered, exit with the "original" exit code + exit $EXIT_CODE +} +trap "__exit_cleanup" EXIT INT + + +# Define our logging file and pipe paths +LOGFILE="/tmp/$( echo $ScriptName | sed s/.sh/.log/g )" +LOGPIPE="/tmp/$( echo $ScriptName | sed s/.sh/.logpipe/g )" + +# Create our logging pipe +# On FreeBSD we have to use mkfifo instead of mknod +mknod $LOGPIPE p >/dev/null 2>&1 || mkfifo $LOGPIPE >/dev/null 2>&1 +if [ $? -ne 0 ]; then + echoerror "Failed to create the named pipe required to log" + exit 1 +fi + +# What ever is written to the logpipe gets written to the logfile +tee < $LOGPIPE $LOGFILE & + +# Close STDOUT, reopen it directing it to the logpipe +exec 1>&- +exec 1>$LOGPIPE +# Close STDERR, reopen it directing it to the logpipe +exec 2>&- +exec 2>$LOGPIPE + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __gather_hardware_info +# DESCRIPTION: Discover hardware information +#------------------------------------------------------------------------------- +__gather_hardware_info() { + if [ -f /proc/cpuinfo ]; then + CPU_VENDOR_ID=$(awk '/vendor_id|Processor/ {sub(/-.*$/,"",$3); print $3; exit}' /proc/cpuinfo ) + elif [ -f /usr/bin/kstat ]; then + # SmartOS. + # Solaris!? 
+ # This has only been tested for a GenuineIntel CPU + CPU_VENDOR_ID=$(/usr/bin/kstat -p cpu_info:0:cpu_info0:vendor_id | awk '{print $2}') + else + CPU_VENDOR_ID=$( sysctl -n hw.model ) + fi + CPU_VENDOR_ID_L=$( echo $CPU_VENDOR_ID | tr '[:upper:]' '[:lower:]' ) + CPU_ARCH=$(uname -m 2>/dev/null || uname -p 2>/dev/null || echo "unknown") + CPU_ARCH_L=$( echo $CPU_ARCH | tr '[:upper:]' '[:lower:]' ) + +} +__gather_hardware_info + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __gather_os_info +# DESCRIPTION: Discover operating system information +#------------------------------------------------------------------------------- +__gather_os_info() { + OS_NAME=$(uname -s 2>/dev/null) + OS_NAME_L=$( echo $OS_NAME | tr '[:upper:]' '[:lower:]' ) + OS_VERSION=$(uname -r) + OS_VERSION_L=$( echo $OS_VERSION | tr '[:upper:]' '[:lower:]' ) +} +__gather_os_info + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __parse_version_string +# DESCRIPTION: Parse version strings ignoring the revision. +# MAJOR.MINOR.REVISION becomes MAJOR.MINOR +#------------------------------------------------------------------------------- +__parse_version_string() { + VERSION_STRING="$1" + PARSED_VERSION=$( + echo $VERSION_STRING | + sed -e 's/^/#/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\)\(\.[0-9][0-9]*\).*$/\1/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\).*$/\1/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\).*$/\1/' \ + -e 's/^#.*$//' + ) + echo $PARSED_VERSION +} + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __unquote_string +# DESCRIPTION: Strip single or double quotes from the provided string. +#------------------------------------------------------------------------------- +__unquote_string() { + echo $@ | sed "s/^\([\"']\)\(.*\)\1\$/\2/g" +} + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __camelcase_split +# DESCRIPTION: Convert CamelCased strings to Camel_Cased +#------------------------------------------------------------------------------- +__camelcase_split() { + echo $@ | sed -r 's/([^A-Z-])([A-Z])/\1 \2/g' +} + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __strip_duplicates +# DESCRIPTION: Strip duplicate strings +#------------------------------------------------------------------------------- +__strip_duplicates() { + echo $@ | tr -s '[:space:]' '\n' | awk '!x[$0]++' +} + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __sort_release_files +# DESCRIPTION: Custom sort function. Alphabetical or numerical sort is not +# enough. 
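#                 For example (the file names are purely illustrative), a call
#                 such as:
#
#                     __sort_release_files "mydistro-release os-release lsb-release"
#
#                 emits the recognised files (lsb-release, os-release) before
#                 the unrecognised mydistro-release, newline separated.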
+#------------------------------------------------------------------------------- +__sort_release_files() { + KNOWN_RELEASE_FILES=$(echo "(arch|centos|debian|ubuntu|fedora|redhat|suse|\ + mandrake|mandriva|gentoo|slackware|turbolinux|unitedlinux|lsb|system|\ + os)(-|_)(release|version)" | sed -r 's:[[:space:]]::g') + primary_release_files="" + secondary_release_files="" + # Sort know VS un-known files first + for release_file in $(echo $@ | sed -r 's:[[:space:]]:\n:g' | sort --unique --ignore-case); do + match=$(echo $release_file | egrep -i ${KNOWN_RELEASE_FILES}) + if [ "x${match}" != "x" ]; then + primary_release_files="${primary_release_files} ${release_file}" + else + secondary_release_files="${secondary_release_files} ${release_file}" + fi + done + + # Now let's sort by know files importance, max important goes last in the max_prio list + max_prio="redhat-release centos-release" + for entry in $max_prio; do + if [ "x$(echo ${primary_release_files} | grep $entry)" != "x" ]; then + primary_release_files=$(echo ${primary_release_files} | sed -e "s:\(.*\)\($entry\)\(.*\):\2 \1 \3:g") + fi + done + # Now, least important goes last in the min_prio list + min_prio="lsb-release" + for entry in $max_prio; do + if [ "x$(echo ${primary_release_files} | grep $entry)" != "x" ]; then + primary_release_files=$(echo ${primary_release_files} | sed -e "s:\(.*\)\($entry\)\(.*\):\1 \3 \2:g") + fi + done + + # Echo the results collapsing multiple white-space into a single white-space + echo "${primary_release_files} ${secondary_release_files}" | sed -r 's:[[:space:]]:\n:g' +} + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __gather_linux_system_info +# DESCRIPTION: Discover Linux system information +#------------------------------------------------------------------------------- +__gather_linux_system_info() { + DISTRO_NAME="" + DISTRO_VERSION="" + + # Let's test if the lsb_release binary is available + rv=$(lsb_release >/dev/null 2>&1) + if [ $? -eq 0 ]; then + DISTRO_NAME=$(lsb_release -si) + if [ "x$(echo "$DISTRO_NAME" | grep RedHat)" != "x" ]; then + # Let's convert CamelCase to Camel Case + DISTRO_NAME=$(__camelcase_split "$DISTRO_NAME") + fi + if [ "${DISTRO_NAME}" = "openSUSE project" ]; then + # lsb_release -si returns "openSUSE project" on openSUSE 12.3 + DISTRO_NAME="opensuse" + fi + rv=$(lsb_release -sr) + [ "${rv}x" != "x" ] && DISTRO_VERSION=$(__parse_version_string "$rv") + elif [ -f /etc/lsb-release ]; then + # We don't have the lsb_release binary, though, we do have the file it parses + DISTRO_NAME=$(grep DISTRIB_ID /etc/lsb-release | sed -e 's/.*=//') + rv=$(grep DISTRIB_RELEASE /etc/lsb-release | sed -e 's/.*=//') + [ "${rv}x" != "x" ] && DISTRO_VERSION=$(__parse_version_string "$rv") + fi + + if [ "x$DISTRO_NAME" != "x" ] && [ "x$DISTRO_VERSION" != "x" ]; then + # We already have the distribution name and version + return + fi + + for rsource in $(__sort_release_files $( + cd /etc && /bin/ls *[_-]release *[_-]version 2>/dev/null | env -i sort | \ + sed -e '/^redhat-release$/d' -e '/^lsb-release$/d'; \ + echo redhat-release lsb-release + )); do + + [ -L "/etc/${rsource}" ] && continue # Don't follow symlinks + [ ! -f "/etc/${rsource}" ] && continue # Does not exist + + n=$(echo ${rsource} | sed -e 's/[_-]release$//' -e 's/[_-]version$//') + rv=$( (grep VERSION /etc/${rsource}; cat /etc/${rsource}) | grep '[0-9]' | sed -e 'q' ) + [ "${rv}x" = "x" ] && continue # There's no version information. 
Continue to next rsource + v=$(__parse_version_string "$rv") + case $(echo ${n} | tr '[:upper:]' '[:lower:]') in + redhat ) + if [ ".$(egrep 'CentOS' /etc/${rsource})" != . ]; then + n="CentOS" + elif [ ".$(egrep 'Red Hat Enterprise Linux' /etc/${rsource})" != . ]; then + n="ed at nterprise inux" + else + n="ed at inux" + fi + ;; + arch ) n="Arch Linux" ;; + centos ) n="CentOS" ;; + debian ) n="Debian" ;; + ubuntu ) n="Ubuntu" ;; + fedora ) n="Fedora" ;; + suse ) n="SUSE" ;; + mandrake*|mandriva ) n="Mandriva" ;; + gentoo ) n="Gentoo" ;; + slackware ) n="Slackware" ;; + turbolinux ) n="TurboLinux" ;; + unitedlinux ) n="UnitedLinux" ;; + system ) + while read -r line; do + [ "${n}x" != "systemx" ] && break + case "$line" in + *Amazon*Linux*AMI*) + n="Amazon Linux AMI" + break + esac + done < /etc/${rsource} + ;; + os ) + nn=$(__unquote_string $(grep '^ID=' /etc/os-release | sed -e 's/^ID=\(.*\)$/\1/g')) + rv=$(__unquote_string $(grep '^VERSION_ID=' /etc/os-release | sed -e 's/^VERSION_ID=\(.*\)$/\1/g')) + [ "${rv}x" != "x" ] && v=$(__parse_version_string "$rv") || v="" + case $(echo ${nn} | tr '[:upper:]' '[:lower:]') in + arch ) + n="Arch Linux" + v="" # Arch Linux does not provide a version. + ;; + debian ) + n="Debian" + if [ "${v}x" = "x" ]; then + if [ "$(cat /etc/debian_version)" = "wheezy/sid" ]; then + # I've found an EC2 wheezy image which did not tell its version + v=$(__parse_version_string "7.0") + fi + else + echowarn "Unable to parse the Debian Version" + fi + ;; + * ) + n=${nn} + ;; + esac + ;; + * ) n="${n}" ; + esac + DISTRO_NAME=$n + DISTRO_VERSION=$v + break + done +} + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __gather_sunos_system_info +# DESCRIPTION: Discover SunOS system info +#------------------------------------------------------------------------------- +__gather_sunos_system_info() { + if [ -f /sbin/uname ]; then + DISTRO_VERSION=$(/sbin/uname -X | awk '/[kK][eE][rR][nN][eE][lL][iI][dD]/ { print $3}') + fi + + DISTRO_NAME="" + if [ -f /etc/release ]; then + while read -r line; do + [ "${DISTRO_NAME}x" != "x" ] && break + case "$line" in + *OpenIndiana*oi_[0-9]*) + DISTRO_NAME="OpenIndiana" + DISTRO_VERSION=$(echo "$line" | sed -nr "s/OpenIndiana(.*)oi_([[:digit:]]+)(.*)/\2/p") + break + ;; + *OpenSolaris*snv_[0-9]*) + DISTRO_NAME="OpenSolaris" + DISTRO_VERSION=$(echo "$line" | sed -nr "s/OpenSolaris(.*)snv_([[:digit:]]+)(.*)/\2/p") + break + ;; + *Oracle*Solaris*[0-9]*) + DISTRO_NAME="Oracle Solaris" + DISTRO_VERSION=$(echo "$line" | sed -nr "s/(Oracle Solaris) ([[:digit:]]+)(.*)/\2/p") + break + ;; + *Solaris*) + DISTRO_NAME="Solaris" + break + ;; + *NexentaCore*) + DISTRO_NAME="Nexenta Core" + break + ;; + *SmartOS*) + DISTRO_NAME="SmartOS" + break + ;; + esac + done < /etc/release + fi + + if [ "${DISTRO_NAME}x" = "x" ]; then + DISTRO_NAME="Solaris" + DISTRO_VERSION=$( + echo "${OS_VERSION}" | + sed -e 's;^4\.;1.;' \ + -e 's;^5\.\([0-6]\)[^0-9]*$;2.\1;' \ + -e 's;^5\.\([0-9][0-9]*\).*;\1;' + ) + fi +} + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __gather_bsd_system_info +# DESCRIPTION: Discover OpenBSD, NetBSD and FreeBSD systems information +#------------------------------------------------------------------------------- +__gather_bsd_system_info() { + DISTRO_NAME=${OS_NAME} + DISTRO_VERSION=$(echo "${OS_VERSION}" | sed -e 's;[()];;' -e 's/-.*$//') +} + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __gather_system_info 
+# DESCRIPTION: Discover which system and distribution we are running. +#------------------------------------------------------------------------------- +__gather_system_info() { + case ${OS_NAME_L} in + linux ) + __gather_linux_system_info + ;; + sunos ) + __gather_sunos_system_info + ;; + openbsd|freebsd|netbsd ) + __gather_bsd_system_info + ;; + * ) + echoerror "${OS_NAME} not supported."; + exit 1 + ;; + esac + +} +__gather_system_info + + +echo +echoinfo "System Information:" +echoinfo " CPU: ${CPU_VENDOR_ID}" +echoinfo " CPU Arch: ${CPU_ARCH}" +echoinfo " OS Name: ${OS_NAME}" +echoinfo " OS Version: ${OS_VERSION}" +echoinfo " Distribution: ${DISTRO_NAME} ${DISTRO_VERSION}" +echo + +# Let users know what's going to be installed/configured +if [ $INSTALL_MINION -eq $BS_TRUE ]; then + if [ $CONFIG_ONLY -eq $BS_FALSE ]; then + echoinfo "Installing minion" + else + echoinfo "Configuring minion" + fi +fi + +if [ $INSTALL_MASTER -eq $BS_TRUE ]; then + if [ $CONFIG_ONLY -eq $BS_FALSE ]; then + echoinfo "Installing master" + else + echoinfo "Configuring master" + fi +fi + +if [ $INSTALL_SYNDIC -eq $BS_TRUE ]; then + if [ $CONFIG_ONLY -eq $BS_FALSE ]; then + echoinfo "Installing syndic" + else + echoinfo "Configuring syndic" + fi +fi + +# Simplify version naming on functions +if [ "x${DISTRO_VERSION}" = "x" ]; then + DISTRO_MAJOR_VERSION="" + DISTRO_MINOR_VERSION="" + PREFIXED_DISTRO_MAJOR_VERSION="" + PREFIXED_DISTRO_MINOR_VERSION="" +else + DISTRO_MAJOR_VERSION="$(echo $DISTRO_VERSION | sed 's/^\([0-9]*\).*/\1/g')" + DISTRO_MINOR_VERSION="$(echo $DISTRO_VERSION | sed 's/^\([0-9]*\).\([0-9]*\).*/\2/g')" + PREFIXED_DISTRO_MAJOR_VERSION="_${DISTRO_MAJOR_VERSION}" + if [ "${PREFIXED_DISTRO_MAJOR_VERSION}" = "_" ]; then + PREFIXED_DISTRO_MAJOR_VERSION="" + fi + PREFIXED_DISTRO_MINOR_VERSION="_${DISTRO_MINOR_VERSION}" + if [ "${PREFIXED_DISTRO_MINOR_VERSION}" = "_" ]; then + PREFIXED_DISTRO_MINOR_VERSION="" + fi +fi +# Simplify distro name naming on functions +DISTRO_NAME_L=$(echo $DISTRO_NAME | tr '[:upper:]' '[:lower:]' | sed 's/[^a-zA-Z0-9_ ]//g' | sed -re 's/([[:space:]])+/_/g') + + +# Only Ubuntu has daily packages, let's let users know about that +if ([ "${DISTRO_NAME_L}" != "ubuntu" ] && [ $ITYPE = "daily" ]) && \ + ([ "${DISTRO_NAME_L}" != "trisquel" ] && [ $ITYPE = "daily" ]); then + echoerror "${DISTRO_NAME} does not have daily packages support" + exit 1 +fi + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __function_defined +# DESCRIPTION: Checks if a function is defined within this scripts scope +# PARAMETERS: function name +# RETURNS: 0 or 1 as in defined or not defined +#------------------------------------------------------------------------------- +__function_defined() { + FUNC_NAME=$1 + if [ "$(command -v $FUNC_NAME)x" != "x" ]; then + echoinfo "Found function $FUNC_NAME" + return 0 + fi + echodebug "$FUNC_NAME not found...." + return 1 +} + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __git_clone_and_checkout +# DESCRIPTION: (DRY) Helper function to clone and checkout salt to a +# specific revision. 
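#                 GIT_REV comes from the optional argument to the "git"
#                 install type parsed above, so (as in the usage examples) it
#                 may be a branch name, a tag, or a commit sha, e.g.:
#
#                     sh bootstrap-salt.sh git develop
#                     sh bootstrap-salt.sh git 8c3fadf15ec183e5ce8c63739850d543617e4357
#
#                 Either form ends up checked out under /tmp/git/salt.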
+#------------------------------------------------------------------------------- +__git_clone_and_checkout() { + SALT_GIT_CHECKOUT_DIR=/tmp/git/salt + [ -d /tmp/git ] || mkdir /tmp/git + cd /tmp/git + if [ -d $SALT_GIT_CHECKOUT_DIR ]; then + cd $SALT_GIT_CHECKOUT_DIR + git fetch || return 1 + # Tags are needed because of salt's versioning, also fetch that + git fetch --tags || return 1 + git reset --hard $GIT_REV || return 1 + + # Just calling `git reset --hard $GIT_REV` on a branch name that has + # already been checked out will not update that branch to the upstream + # HEAD; instead it will simply reset to itself. Check the ref to see + # if it is a branch name, check out the branch, and pull in the + # changes. + git branch -a | grep -q ${GIT_REV} + if [ $? -eq 0 ]; then + git pull --rebase || return 1 + fi + else + git clone https://github.com/saltstack/salt.git salt || return 1 + cd $SALT_GIT_CHECKOUT_DIR + git checkout $GIT_REV || return 1 + fi + return 0 +} + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: __apt_get_noinput +# DESCRIPTION: (DRY) apt-get install with noinput options +#------------------------------------------------------------------------------- +__apt_get_noinput() { + apt-get install -y -o DPkg::Options::=--force-confold $@; return $? +} + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: copyfile +# DESCRIPTION: Simple function to copy files. Overrides if asked. +#------------------------------------------------------------------------------- +copyfile() { + overwrite=$FORCE_OVERWRITE + if [ $# -eq 2 ]; then + sfile=$1 + dfile=$2 + elif [ $# -eq 3 ]; then + sfile=$1 + dfile=$2 + overwrite=$3 + else + echoerror "Wrong number of arguments for copyfile()" + echoinfo "USAGE: copyfile OR copyfile " + exit 1 + fi + + # Does the source file exist? + if [ ! -f "$sfile" ]; then + echowarn "$sfile does not exist!" + return 1 + fi + + if [ ! -f "$dfile" ]; then + # The destination file does not exist, copy + echodebug "Copying $sfile to $dfile" + cp "$sfile" "$dfile" || return 1 + elif [ -f "$dfile" ] && [ $overwrite -eq $BS_TRUE ]; then + # The destination exist and we're overwriting + echodebug "Overriding $dfile with $sfile" + cp -f "$sfile" "$dfile" || return 2 + elif [ -f "$dfile" ] && [ $overwrite -ne $BS_TRUE ]; then + echodebug "Not overriding $dfile with $sfile" + fi + return 0 +} + + +#--- FUNCTION ---------------------------------------------------------------- +# NAME: movefile +# DESCRIPTION: Simple function to move files. Overrides if asked. +#------------------------------------------------------------------------------- +movefile() { + overwrite=$FORCE_OVERWRITE + if [ $# -eq 2 ]; then + sfile=$1 + dfile=$2 + elif [ $# -eq 3 ]; then + sfile=$1 + dfile=$2 + overwrite=$3 + else + echoerror "Wrong number of arguments for movefile()" + echoinfo "USAGE: movefile OR movefile " + exit 1 + fi + + # Does the source file exist? + if [ ! -f "$sfile" ]; then + echowarn "$sfile does not exist!" + return 1 + fi + + if [ ! 
-f "$dfile" ]; then + # The destination file does not exist, copy + echodebug "Moving $sfile to $dfile" + mv "$sfile" "$dfile" || return 1 + elif [ -f "$dfile" ] && [ $overwrite -eq $BS_TRUE ]; then + # The destination exist and we're overwriting + echodebug "Overriding $dfile with $sfile" + mv -f "$sfile" "$dfile" || return 1 + elif [ -f "$dfile" ] && [ $overwrite -ne $BS_TRUE ]; then + echodebug "Not overriding $dfile with $sfile" + fi + + return 0 +} + +############################################################################## +# +# Distribution install functions +# +# In order to install salt for a distribution you need to define: +# +# To Install Dependencies, which is required, one of: +# 1. install____deps +# 2. install_____deps +# 3. install___deps +# 4 install____deps +# 5. install___deps +# 6. install__deps +# +# Optionally, define a salt configuration function, which will be called if +# the -c (config-dir) option is passed. One of: +# 1. config____salt +# 2. config_____salt +# 3. config___salt +# 4 config____salt +# 5. config___salt +# 6. config__salt +# 7. config_salt [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# Optionally, define a salt master pre-seed function, which will be called if +# the -k (pre-seed master keys) option is passed. One of: +# 1. pressed____master +# 2. pressed_____master +# 3. pressed___master +# 4 pressed____master +# 5. pressed___master +# 6. pressed__master +# 7. pressed_master [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# To install salt, which, of course, is required, one of: +# 1. install___ +# 2. install____ +# 3. install__ +# +# Optionally, define a post install function, one of: +# 1. install____post +# 2. install_____post +# 3. install___post +# 4 install____post +# 5. install___post +# 6. install__post +# +# Optionally, define a start daemons function, one of: +# 1. install____restart_daemons +# 2. install_____restart_daemons +# 3. install___restart_daemons +# 4 install____restart_daemons +# 5. install___restart_daemons +# 6. install__restart_daemons +# +# NOTE: The start daemons function should be able to restart any daemons +# which are running, or start if they're not running. 
+# +############################################################################## + + +############################################################################## +# +# Ubuntu Install Functions +# +install_ubuntu_deps() { + apt-get update + if [ $DISTRO_MAJOR_VERSION -eq 12 ] && [ $DISTRO_MINOR_VERSION -gt 04 ] || [ $DISTRO_MAJOR_VERSION -gt 12 ]; then + # Above Ubuntu 12.04 add-apt-repository is in a different package + __apt_get_noinput software-properties-common || return 1 + else + __apt_get_noinput python-software-properties || return 1 + fi + if [ $DISTRO_MAJOR_VERSION -lt 11 ] && [ $DISTRO_MINOR_VERSION -lt 10 ]; then + add-apt-repository ppa:saltstack/salt || return 1 + else + add-apt-repository -y ppa:saltstack/salt || return 1 + fi + apt-get update + return 0 +} + +install_ubuntu_daily_deps() { + apt-get update + if [ $DISTRO_MAJOR_VERSION -eq 12 ] && [ $DISTRO_MINOR_VERSION -gt 04 ] || [ $DISTRO_MAJOR_VERSION -gt 12 ]; then + # Above Ubuntu 12.04 add-apt-repository is in a different package + __apt_get_noinput software-properties-common || return 1 + else + __apt_get_noinput python-software-properties || return 1 + fi + if [ $DISTRO_MAJOR_VERSION -lt 11 ] && [ $DISTRO_MINOR_VERSION -lt 10 ]; then + add-apt-repository ppa:saltstack/salt-daily || return 1 + else + add-apt-repository -y ppa:saltstack/salt-daily || return 1 + fi + apt-get update + return 0 +} + +install_ubuntu_11_10_deps() { + apt-get update + __apt_get_noinput python-software-properties || return 1 + add-apt-repository -y 'deb http://us.archive.ubuntu.com/ubuntu/ oneiric universe' || return 1 + add-apt-repository -y ppa:saltstack/salt || return 1 + apt-get update + return 0 +} + +install_ubuntu_git_deps() { + install_ubuntu_deps || return 1 + __apt_get_noinput git-core python-yaml python-m2crypto python-crypto \ + msgpack-python python-zmq python-jinja2 || return 1 + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_ubuntu_11_10_post() { + add-apt-repository -y --remove 'deb http://us.archive.ubuntu.com/ubuntu/ oneiric universe' || return 1 + return 0 +} + +install_ubuntu_stable() { + packages="" + if [ $INSTALL_MINION -eq $BS_TRUE ]; then + packages="${packages} salt-minion" + fi + if [ $INSTALL_MASTER -eq $BS_TRUE ]; then + packages="${packages} salt-master" + fi + if [ $INSTALL_SYNDIC -eq $BS_TRUE ]; then + packages="${packages} salt-syndic" + fi + __apt_get_noinput ${packages} || return 1 + return 0 +} + +install_ubuntu_daily() { + install_ubuntu_stable || return 1 + return 0 +} + +install_ubuntu_git() { + python setup.py install --install-layout=deb || return 1 + return 0 +} + +install_ubuntu_git_post() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ]; then + # We have upstart support + echodebug "There's upstart support" + /sbin/initctl status salt-$fname > /dev/null 2>&1 + + if [ $? 
-eq 1 ]; then + # upstart does not know about our service, let's copy the proper file + echowarn "Upstart does not apparently know anything about salt-$fname" + echodebug "Copying ${SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart to /etc/init/salt-$fname.conf" + copyfile ${SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart /etc/init/salt-$fname.conf + fi + # No upstart support in Ubuntu!? + elif [ -f ${SALT_GIT_CHECKOUT_DIR}/debian/salt-$fname.init ]; then + echodebug "There's NO upstart support!?" + echodebug "Copying ${SALT_GIT_CHECKOUT_DIR}/debian/salt-$fname.init to /etc/init.d/salt-$fname" + copyfile ${SALT_GIT_CHECKOUT_DIR}/debian/salt-$fname.init /etc/init.d/salt-$fname + chmod +x /etc/init.d/salt-$fname + update-rc.d salt-$fname defaults + else + echoerror "Neither upstart not init.d was setup for salt-$fname" + fi + done +} + +install_ubuntu_restart_daemons() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ]; then + echodebug "There's upstart support while checking salt-$fname" + status salt-$fname || echowarn "Upstart does not apparently know anything about salt-$fname" + sleep 1 + if [ $? -eq 0 ]; then + echodebug "Upstart apparently knows about salt-$fname" + # upstart knows about this service, let's stop and start it. + # We could restart but earlier versions of the upstart script + # did not support restart, so, it's safer this way + + # Is it running??? + status salt-$fname | grep -q running + # If it is, stop it + if [ $? -eq 0 ]; then + sleep 1 + stop salt-$fname || (echodebug "Failed to stop salt-$fname" && return 1) + fi + # Now start it + sleep 1 + start salt-$fname + [ $? -eq 0 ] && continue + # We failed to start the service, let's test the SysV code bellow + echodebug "Failed to start salt-$fname" + fi + fi + + if [ ! 
-f /etc/init.d/salt-$fname ]; then + echoerror "No init.d support for salt-$fname was found" + return 1 + fi + + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + done + return 0 +} +# +# End of Ubuntu Install Functions +# +############################################################################## + +############################################################################## +# +# Trisquel(Ubuntu) Install Functions +# +# Trisquel 6.0 is based on Ubuntu 12.04 +# +install_trisquel_6_stable_deps() { + apt-get update + __apt_get_noinput python-software-properties || return 1 + add-apt-repository -y ppa:saltstack/salt || return 1 + apt-get update + return 0 +} + +install_trisquel_6_daily_deps() { + apt-get update + __apt_get_noinput python-software-properties || return 1 + add-apt-repository -y ppa:saltstack/salt-daily || return 1 + apt-get update + return 0 +} + +install_trisquel_6_git_deps() { + install_trisquel_6_stable_deps || return 1 + __apt_get_noinput git-core python-yaml python-m2crypto python-crypto \ + msgpack-python python-zmq python-jinja2 || return 1 + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_trisquel_6_stable() { + install_ubuntu_stable || return 1 + return 0 +} + +install_trisquel_6_daily() { + install_ubuntu_daily || return 1 + return 0 +} + +install_trisquel_6_git() { + install_ubuntu_git || return 1 + return 0 +} + +install_trisquel_git_post() { + install_ubuntu_git_post || return 1 + return 0 +} + +install_trisquel_restart_daemons() { + install_ubuntu_restart_daemons || return 1 + return 0 +} +# +# End of Tristel(Ubuntu) Install Functions +# +############################################################################## + +############################################################################## +# +# Debian Install Functions +# +install_debian_deps() { + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + apt-get update +} + +install_debian_6_deps() { + check_pip_allowed + echowarn "PyZMQ will be installed from PyPi in order to compile it against ZMQ3" + echowarn "This is required for long term stable minion connections to the master." + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + if [ "x$(grep -R 'backports.debian.org' /etc/apt)" = "x" ]; then + echo "deb http://backports.debian.org/debian-backports squeeze-backports main" >> \ + /etc/apt/sources.list.d/backports.list + fi + + if [ ! -f /etc/apt/preferences.d/local-salt-backport.pref ]; then + # Add madduck's repo since squeeze packages have been deprecated + for fname in salt-common salt-master salt-minion salt-syndic salt-doc; do + echo "Package: $fname" + echo "Pin: release a=squeeze-backports" + echo "Pin-Priority: 600" + echo + done > /etc/apt/preferences.d/local-salt-backport.pref + + cat <<_eof > /etc/apt/sources.list.d/local-madduck-backports.list +deb http://debian.madduck.net/repo squeeze-backports main +deb-src http://debian.madduck.net/repo squeeze-backports main +_eof + + wget -q http://debian.madduck.net/repo/gpg/archive.key -O - | apt-key add - || return 1 + fi + + if [ ! 
-f /etc/apt/sources.list.d/debian-experimental.list ]; then + cat <<_eof > /etc/apt/sources.list.d/debian-experimental.list +deb http://ftp.debian.org/debian experimental main +deb-src http://ftp.debian.org/debian experimental main +_eof + + cat <<_eof > /etc/apt/preferences.d/libzmq3-debian-experimental.pref +Package: libzmq3 +Pin: release a=experimental +Pin-Priority: 800 + +Package: libzmq3-dev +Pin: release a=experimental +Pin-Priority: 800 +_eof + fi + + apt-get update + __apt_get_noinput -t experimental libzmq3 libzmq3-dev || return 1 + __apt_get_noinput build-essential python-dev python-pip || return 1 + return 0 +} + +install_debian_7_deps() { + check_pip_allowed + echowarn "PyZMQ will be installed from PyPi in order to compile it against ZMQ3" + echowarn "This is required for long term stable minion connections to the master." + + if [ ! -f /etc/apt/sources.list.d/debian-experimental.list ]; then + cat <<_eof > /etc/apt/sources.list.d/debian-experimental.list +deb http://ftp.debian.org/debian experimental main +deb-src http://ftp.debian.org/debian experimental main +_eof + + cat <<_eof > /etc/apt/preferences.d/libzmq3-debian-experimental.pref +Package: libzmq3 +Pin: release a=experimental +Pin-Priority: 800 + +Package: libzmq3-dev +Pin: release a=experimental +Pin-Priority: 800 +_eof + fi + + apt-get update + __apt_get_noinput -t experimental libzmq3 libzmq3-dev || return 1 + __apt_get_noinput build-essential python-dev python-pip || return 1 + return 0 +} + +install_debian_git_deps() { + check_pip_allowed + echowarn "PyZMQ will be installed from PyPi in order to compile it against ZMQ3" + echowarn "This is required for long term stable minion connections to the master." + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + apt-get update + __apt_get_noinput lsb-release python python-pkg-resources python-crypto \ + python-jinja2 python-m2crypto python-yaml msgpack-python python-pip \ + git || return 1 + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_debian_6_git_deps() { + install_debian_6_deps || return 1 # Add backports + install_debian_git_deps || return 1 # Grab the actual deps + return 0 +} + +install_debian_7_git_deps() { + install_debian_7_deps || return 1 # Add experimental repository for ZMQ3 + install_debian_git_deps || return 1 # Grab the actual deps + return 0 +} + +__install_debian_stable() { + check_pip_allowed + packages="" + if [ $INSTALL_MINION -eq $BS_TRUE ]; then + packages="${packages} salt-minion" + fi + if [ $INSTALL_MASTER -eq $BS_TRUE ]; then + packages="${packages} salt-master" + fi + if [ $INSTALL_SYNDIC -eq $BS_TRUE ]; then + packages="${packages} salt-syndic" + fi + __apt_get_noinput ${packages} || return 1 + + # Building pyzmq from source to build it against libzmq3. + # Should override current installation + pip install -U pyzmq || return 1 + + return 0 +} + + +install_debian_6_stable() { + __install_debian_stable || return 1 + return 0 +} + +install_debian_git() { + python setup.py install --install-layout=deb || return 1 + + # Building pyzmq from source to build it against libzmq3. 
+ # Should override current installation + pip install -U pyzmq || return 1 + return 0 +} + +install_debian_6_git() { + install_debian_git || return 1 + return 0 +} + +install_debian_7_git() { + install_debian_git || return 1 + return 0 +} + +install_debian_git_post() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f ${SALT_GIT_CHECKOUT_DIR}/debian/salt-$fname.init ]; then + copyfile ${SALT_GIT_CHECKOUT_DIR}/debian/salt-$fname.init /etc/init.d/salt-$fname + fi + chmod +x /etc/init.d/salt-$fname + update-rc.d salt-$fname defaults + done +} + +install_debian_restart_daemons() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + done +} +# +# Ended Debian Install Functions +# +############################################################################## + +############################################################################## +# +# Fedora Install Functions +# +install_fedora_deps() { + yum install -y PyYAML libyaml m2crypto python-crypto python-jinja2 \ + python-msgpack python-zmq || return 1 + return 0 +} + +install_fedora_stable() { + packages="" + if [ $INSTALL_MINION -eq $BS_TRUE ]; then + packages="${packages} salt-minion" + fi + if [ $INSTALL_MASTER -eq $BS_TRUE ] || [ $INSTALL_SYNDIC -eq $BS_TRUE ]; then + packages="${packages} salt-master" + fi + yum install -y ${packages} || return 1 + return 0 +} + +install_fedora_git_deps() { + install_fedora_deps || return 1 + yum install -y git || return 1 + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_fedora_git() { + python setup.py install || return 1 + return 0 +} + +install_fedora_git_post() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + copyfile ${SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname.service /lib/systemd/system/salt-$fname.service + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 0.1 + systemctl daemon-reload + done +} + +install_fedora_restart_daemons() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service + done +} +# +# Ended Fedora Install Functions +# +############################################################################## + 
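##############################################################################
#
# The Fedora post-install and restart functions above rely on systemd. A
# quick manual check of the result, shown here only as an example for a
# minion-only install, could look like:
#
#   systemctl is-enabled salt-minion.service
#   systemctl status salt-minion.service
#   salt-minion --version
#
##############################################################################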
+############################################################################## +# +# CentOS Install Functions +# +install_centos_stable_deps() { + if [ $CPU_ARCH_L = "i686" ]; then + EPEL_ARCH="i386" + else + EPEL_ARCH=$CPU_ARCH_L + fi + if [ $DISTRO_MAJOR_VERSION -eq 5 ]; then + rpm -Uvh --force http://mirrors.kernel.org/fedora-epel/5/${EPEL_ARCH}/epel-release-5-4.noarch.rpm || return 1 + elif [ $DISTRO_MAJOR_VERSION -eq 6 ]; then + rpm -Uvh --force http://mirrors.kernel.org/fedora-epel/6/${EPEL_ARCH}/epel-release-6-8.noarch.rpm || return 1 + else + echoerror "Failed add EPEL repository support." + return 1 + fi + + yum -y update || return 1 + + if [ $DISTRO_MAJOR_VERSION -eq 5 ]; then + yum -y install PyYAML python26-m2crypto m2crypto python26 \ + python26-crypto python26-msgpack python26-zmq \ + python26-jinja2 --enablerepo=epel || return 1 + else + yum -y install PyYAML m2crypto python-crypto python-msgpack \ + python-zmq python-jinja2 --enablerepo=epel || return 1 + fi + return 0 +} + +install_centos_stable() { + packages="" + if [ $INSTALL_MINION -eq $BS_TRUE ]; then + packages="${packages} salt-minion" + fi + if [ $INSTALL_MASTER -eq $BS_TRUE ] || [ $INSTALL_SYNDIC -eq $BS_TRUE ]; then + packages="${packages} salt-master" + fi + yum -y install ${packages} --enablerepo=epel || return 1 + return 0 +} + +install_centos_stable_post() { + for fname in minion master syndic; do + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ ! -f /sbin/initctl ] && [ -f /etc/init.d/salt-$fname ]; then + # Still in SysV init!? + /sbin/chkconfig salt-$fname on + fi + done +} + +install_centos_git_deps() { + install_centos_stable_deps || return 1 + yum -y install git --enablerepo=epel || return 1 + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_centos_git() { + if [ $DISTRO_MAJOR_VERSION -eq 5 ]; then + python2.6 setup.py install || return 1 + else + python2 setup.py install || return 1 + fi + return 0 +} + +install_centos_git_post() { + for fname in master minion syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ]; then + # We have upstart support + /sbin/initctl status salt-$fname > /dev/null 2>&1 + if [ $? -eq 1 ]; then + # upstart does not know about our service, let's copy the proper file + copyfile ${SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart /etc/init/salt-$fname.conf + fi + # Still in SysV init?! + elif [ ! 
-f /etc/init.d/salt-$fname ]; then + copyfile ${SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname} /etc/init.d/ + chmod +x /etc/init.d/salt-${fname} + /sbin/chkconfig salt-${fname} on + fi + done +} + +install_centos_restart_daemons() { + for fname in minion master syndic; do + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ]; then + # We have upstart support + /sbin/initctl status salt-$fname > /dev/null 2>&1 + if [ $? -eq 0 ]; then + # upstart knows about this service. + # Let's try to stop it, and then start it + /sbin/initctl stop salt-$fname > /dev/null 2>&1 + /sbin/initctl start salt-$fname > /dev/null 2>&1 + # Restart service + [ $? -eq 0 ] && continue + # We failed to start the service, let's test the SysV code bellow + fi + fi + + if [ -f /etc/init.d/salt-$fname ]; then + # Still in SysV init!? + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + fi + done +} +# +# Ended CentOS Install Functions +# +############################################################################## + +############################################################################## +# +# RedHat Install Functions +# +install_red_hat_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_red_hat_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_red_hat_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git() { + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_server_git() { + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_red_hat_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_red_hat_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_linux_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_server_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 
+} +# +# Ended RedHat Install Functions +# +############################################################################## + +############################################################################## +# +# Amazon Linux AMI Install Functions +# +install_amazon_linux_ami_deps() { + # Acording to http://aws.amazon.com/amazon-linux-ami/faqs/#epel we should + # enable the EPEL 6 repo + if [ $CPU_ARCH_L = "i686" ]; then + EPEL_ARCH="i386" + else + EPEL_ARCH=$CPU_ARCH_L + fi + rpm -Uvh --force http://mirrors.kernel.org/fedora-epel/6/${EPEL_ARCH}/epel-release-6-8.noarch.rpm || return 1 + yum -y update || return 1 + yum -y install PyYAML m2crypto python-crypto python-msgpack python-zmq \ + python-ordereddict python-jinja2 --enablerepo=epel || return 1 +} + +install_amazon_linux_ami_git_deps() { + install_amazon_linux_ami_deps || return 1 + yum -y install git --enablerepo=epel || return 1 + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_amazon_linux_ami_stable() { + install_centos_stable || return 1 + return 0 +} + +install_amazon_linux_ami_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_amazon_linux_ami_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_amazon_linux_ami_git() { + install_centos_git || return 1 + return 0 +} + +install_amazon_linux_ami_git_post() { + install_centos_git_post || return 1 + return 0 +} +# +# Ended Amazon Linux AMI Install Functions +# +############################################################################## + +############################################################################## +# +# Arch Install Functions +# +install_arch_linux_stable_deps() { + grep '\[salt\]' /etc/pacman.conf >/dev/null 2>&1 || echo '[salt] +Server = http://intothesaltmine.org/archlinux +' >> /etc/pacman.conf +} + +install_arch_linux_git_deps() { + grep '\[salt\]' /etc/pacman.conf >/dev/null 2>&1 || echo '[salt] +Server = http://intothesaltmine.org/archlinux +' >> /etc/pacman.conf + + pacman -Sy --noconfirm pacman || return 1 + pacman -Sy --noconfirm git python2-crypto python2-distribute \ + python2-jinja python2-m2crypto python2-markupsafe python2-msgpack \ + python2-psutil python2-yaml python2-pyzmq zeromq || return 1 + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_arch_linux_stable() { + pacman -Sy --noconfirm pacman || return 1 + pacman -Syu --noconfirm salt || return 1 + return 0 +} + +install_arch_linux_git() { + python2 setup.py install || return 1 + return 0 +} + +install_arch_linux_post() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /usr/bin/systemctl ]; then + # Using systemd + /usr/bin/systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( + /usr/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && + /usr/bin/systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + sleep 0.1 + /usr/bin/systemctl daemon-reload + continue + fi + + # XXX: How do we 
enable old Arch init.d scripts? + done +} + +install_arch_linux_git_post() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /usr/bin/systemctl ]; then + copyfile ${SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname.service /lib/systemd/system/salt-$fname.service + + /usr/bin/systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( + /usr/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && + /usr/bin/systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + sleep 0.1 + /usr/bin/systemctl daemon-reload + continue + fi + + # SysV init!? + copyfile ${SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname /etc/rc.d/init.d/salt-$fname + chmod +x /etc/rc.d/init.d/salt-$fname + done +} + +install_arch_linux_restart_daemons() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /usr/bin/systemctl ]; then + /usr/bin/systemctl stop salt-$fname.service > /dev/null 2>&1 + /usr/bin/systemctl start salt-$fname.service + continue + fi + /etc/rc.d/salt-$fname stop > /dev/null 2>&1 + /etc/rc.d/salt-$fname start + done +} +# +# Ended Arch Install Functions +# +############################################################################## + +############################################################################## +# +# FreeBSD Install Functions +# +__freebsd_get_packagesite() { + if [ $CPU_ARCH_L = "amd64" ]; then + BSD_ARCH="x86:64" + elif [ $CPU_ARCH_L = "x86_64" ]; then + BSD_ARCH="x86:64" + elif [ $CPU_ARCH_L = "i386" ]; then + BSD_ARCH="x86:32" + elif [ $CPU_ARCH_L = "i686" ]; then + BSD_ARCH="x86:32" + fi + + # Since the variable might not be set, don't, momentarily treat it as a failure + set +o nounset + + if [ "x${PACKAGESITE}" = "x" ]; then + echowarn "The environment variable PACKAGESITE is not set." 
+ echowarn "The installation will, most likely fail since pkgbeta.freebsd.org does not yet contain any packages" + fi + BS_PACKAGESITE=${PACKAGESITE:-"http://pkgbeta.freebsd.org/freebsd:${DISTRO_MAJOR_VERSION}:${BSD_ARCH}/latest"} + + # Treat unset variables as errors once more + set -o nounset +} + +install_freebsd_9_stable_deps() { + __freebsd_get_packagesite + + fetch "${BS_PACKAGESITE}/Latest/pkg.txz" || return 1 + tar xf ./pkg.txz -s ",/.*/,,g" "*/pkg-static" || return 1 + ./pkg-static add ./pkg.txz || return 1 + /usr/local/sbin/pkg2ng || return 1 + echo "PACKAGESITE: ${BS_PACKAGESITE}" > /usr/local/etc/pkg.conf + + /usr/local/sbin/pkg install -y swig || return 1 + return 0 +} + +install_freebsd_git_deps() { + __freebsd_get_packagesite + + fetch "${BS_PACKAGESITE}/Latest/pkg.txz" || return 1 + tar xf ./pkg.txz -s ",/.*/,,g" "*/pkg-static" || return 1 + ./pkg-static add ./pkg.txz || return 1 + /usr/local/sbin/pkg2ng || return 1 + echo "PACKAGESITE: ${BS_PACKAGESITE}" > /usr/local/etc/pkg.conf + + /usr/local/sbin/pkg install -y swig || return 1 + + __git_clone_and_checkout || return 1 + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_freebsd_9_stable() { + /usr/local/sbin/pkg install -y sysutils/py-salt || return 1 + return 0 +} + +install_freebsd_git() { + /usr/local/sbin/pkg install -y git sysutils/py-salt || return 1 + /usr/local/sbin/pkg delete -y sysutils/py-salt || return 1 + + /usr/local/bin/python setup.py install || return 1 + return 0 +} + +install_freebsd_9_stable_post() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + enable_string="salt_${fname}_enable=\"YES\"" + grep "$enable_string" /etc/rc.conf >/dev/null 2>&1 + [ $? -eq 1 ] && echo "$enable_string" >> /etc/rc.conf + + [ -f /usr/local/etc/salt/${fname}.sample ] && copyfile /usr/local/etc/salt/${fname}.sample /usr/local/etc/salt/${fname} + + if [ $fname = "minion" ] ; then + grep "salt_minion_paths" /etc/rc.conf >/dev/null 2>&1 + [ $? 
-eq 1 ] && echo "salt_minion_paths=\"/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin\"" >> /etc/rc.conf + fi + + done +} + +install_freebsd_git_post() { + install_freebsd_9_stable_post || return 1 + return 0 +} + +install_freebsd_restart_daemons() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + service salt_$fname stop > /dev/null 2>&1 + service salt_$fname start + done +} +# +# Ended FreeBSD Install Functions +# +############################################################################## + +############################################################################## +# +# SmartOS Install Functions +# +install_smartos_deps() { + check_pip_allowed + echowarn "PyZMQ will be installed using pip" + + ZEROMQ_VERSION='3.2.2' + pkgin -y in libtool-base autoconf automake libuuid gcc-compiler gmake \ + python27 py27-pip py27-setuptools py27-yaml py27-crypto swig || return 1 + [ -d zeromq-${ZEROMQ_VERSION} ] || ( + wget http://download.zeromq.org/zeromq-${ZEROMQ_VERSION}.tar.gz && + tar -xvf zeromq-${ZEROMQ_VERSION}.tar.gz + ) + cd zeromq-${ZEROMQ_VERSION} + ./configure || return 1 + make || return 1 + make install || return 1 + + pip-2.7 install pyzmq || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + # Let's set the configuration directory to /tmp + TEMP_CONFIG_DIR="/tmp" + CONFIG_SALT_FUNC="config_salt" + + # Let's download, since they were not provided, the default configuration files + if [ ! -f /etc/salt/minion ] && [ ! -f $TEMP_CONFIG_DIR/minion ]; then + curl -sk -o $TEMP_CONFIG_DIR/minion -L \ + https://raw.github.com/saltstack/salt/develop/conf/minion || return 1 + fi + if [ ! -f /etc/salt/master ] && [ ! -f $TEMP_CONFIG_DIR/master ]; then + curl -sk -o $TEMP_CONFIG_DIR/master -L \ + https://raw.github.com/saltstack/salt/develop/conf/master || return 1 + fi + fi + + return 0 + +} + +install_smartos_git_deps() { + install_smartos_deps || return 1 + pkgin -y in scmgit || return 1 + + __git_clone_and_checkout || return 1 + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_smartos_stable() { + USE_SETUPTOOLS=1 pip-2.7 install salt || return 1 + return 0 +} + +install_smartos_git() { + # Use setuptools in order to also install dependencies + USE_SETUPTOOLS=1 /opt/local/bin/python setup.py install || return 1 + return 0 +} + +install_smartos_post() { + # Install manifest files if needed. + for fname in minion master syndic; do + svcs network/salt-$fname > /dev/null 2>&1 + if [ $? -eq 1 ]; then + if [ ! -f $TEMP_CONFIG_DIR/salt-$fname.xml ]; then + curl -sk -o $TEMP_CONFIG_DIR/salt-$fname.xml -L https://raw.github.com/saltstack/salt/develop/pkg/solaris/salt-$fname.xml + fi + svccfg import $TEMP_CONFIG_DIR/salt-$fname.xml + fi + done +} + +install_smartos_git_post() { + # Install manifest files if needed. + for fname in minion master syndic; do + svcs network/salt-$fname > /dev/null 2>&1 + if [ $? 
-eq 1 ]; then + svccfg import ${SALT_GIT_CHECKOUT_DIR}/pkg/solaris/salt-$fname.xml + fi + done +} + +install_smartos_restart_daemons() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + # Stop if running && Start service + svcadm disable salt-$fname > /dev/null 2>&1 + svcadm enable salt-$fname + done +} +# +# Ended SmartOS Install Functions +# +############################################################################## + +############################################################################## +# +# openSUSE Install Functions. +# +install_opensuse_stable_deps() { + DISTRO_REPO="openSUSE_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" + + # Is the repository already known + $(zypper repos | grep devel_languages_python >/dev/null 2>&1) + if [ $? -eq 1 ]; then + # zypper does not yet know nothing about devel_languages_python + zypper --non-interactive addrepo --refresh \ + http://download.opensuse.org/repositories/devel:/languages:/python/${DISTRO_REPO}/devel:languages:python.repo || return 1 + fi + + zypper --gpg-auto-import-keys --non-interactive refresh || return 1 + zypper --non-interactive install --auto-agree-with-licenses libzmq3 python \ + python-Jinja2 python-M2Crypto python-PyYAML python-msgpack-python \ + python-pycrypto python-pyzmq || return 1 + return 0 +} + +install_opensuse_git_deps() { + install_opensuse_stable_deps || return 1 + zypper --non-interactive install --auto-agree-with-licenses git || return 1 + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_opensuse_stable() { + packages="" + if [ $INSTALL_MINION -eq $BS_TRUE ]; then + packages="${packages} salt-minion" + fi + if [ $INSTALL_MASTER -eq $BS_TRUE ]; then + packages="${packages} salt-master" + fi + if [ $INSTALL_SYNDIC -eq $BS_TRUE ]; then + packages="${packages} salt-syndic" + fi + zypper --non-interactive install --auto-agree-with-licenses $packages || return 1 + return 0 +} + +install_opensuse_git() { + python setup.py install --prefix=/usr || return 1 + return 0 +} + +install_opensuse_stable_post() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 0.1 + systemctl daemon-reload + continue + fi + + /sbin/chkconfig --add salt-$fname + /sbin/chkconfig salt-$fname on + + done +} + +install_opensuse_git_post() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + copyfile ${SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.service /lib/systemd/system/salt-$fname.service + continue + fi + + copyfile 
${SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname /etc/init.d/salt-$fname + chmod +x /etc/init.d/salt-$fname + + done + + install_opensuse_stable_post +} + +install_opensuse_restart_daemons() { + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service + continue + fi + + service salt-$fname stop > /dev/null 2>&1 + service salt-$fname start + + done +} +# +# End of openSUSE Install Functions. +# +############################################################################## + +############################################################################## +# +# SuSE Install Functions. +# +install_suse_11_stable_deps() { + SUSE_PATCHLEVEL=$(awk '/PATCHLEVEL/ {print $3}' /etc/SuSE-release ) + if [ "x${SUSE_PATCHLEVEL}" != "x" ]; then + DISTRO_PATCHLEVEL="_SP${SUSE_PATCHLEVEL}" + fi + DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}${DISTRO_PATCHLEVEL}" + + # Is the repository already known + $(zypper repos | grep devel_languages_python >/dev/null 2>&1) + if [ $? -eq 1 ]; then + # zypper does not yet know nothing about devel_languages_python + zypper --non-interactive addrepo --refresh \ + http://download.opensuse.org/repositories/devel:/languages:/python/${DISTRO_REPO}/devel:languages:python.repo || return 1 + fi + + zypper --gpg-auto-import-keys --non-interactive refresh || return 1 + if [ $SUSE_PATCHLEVEL -eq 1 ]; then + check_pip_allowed + echowarn "PyYaml will be installed using pip" + zypper --non-interactive install --auto-agree-with-licenses libzmq3 python \ + python-Jinja2 'python-M2Crypto>=0.21' python-msgpack-python \ + python-pycrypto python-pyzmq python-pip || return 1 + # There's no python-PyYaml in SP1, let's install it using pip + pip install PyYaml || return 1 + else + zypper --non-interactive install --auto-agree-with-licenses libzmq3 python \ + python-Jinja2 'python-M2Crypto>=0.21' python-PyYAML python-msgpack-python \ + python-pycrypto python-pyzmq || return 1 + fi + + # PIP based installs need to copy configuration files "by hand". + if [ $SUSE_PATCHLEVEL -eq 1 ]; then + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + # Let's set the configuration directory to /tmp + TEMP_CONFIG_DIR="/tmp" + CONFIG_SALT_FUNC="config_salt" + + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + # Syndic uses the same configuration file as the master + [ $fname = "syndic" ] && fname=master + + # Let's download, since they were not provided, the default configuration files + if [ ! -f /etc/salt/$fname ] && [ ! 
-f $TEMP_CONFIG_DIR/$fname ]; then + curl -sk -o $TEMP_CONFIG_DIR/$fname -L \ + https://raw.github.com/saltstack/salt/develop/conf/$fname || return 1 + fi + done + fi + fi + return 0 +} + +install_suse_11_git_deps() { + install_suse_11_stable_deps || return 1 + zypper --non-interactive install --auto-agree-with-licenses git || return 1 + + __git_clone_and_checkout || return 1 + + # Let's trigger config_salt() + if [ "$TEMP_CONFIG_DIR" = "null" ]; then + TEMP_CONFIG_DIR="${SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_suse_11_stable() { + if [ $SUSE_PATCHLEVEL -gt 1 ]; then + install_opensuse_stable || return 1 + else + # USE_SETUPTOOLS=1 To work around + # error: option --single-version-externally-managed not recognized + USE_SETUPTOOLS=1 pip install salt || return 1 + fi + return 0 +} + +install_suse_11_git() { + install_opensuse_git || return 1 + return 0 +} + +install_suse_11_stable_post() { + if [ $SUSE_PATCHLEVEL -gt 1 ]; then + install_opensuse_stable_post || return 1 + else + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + curl -k -L https://github.com/saltstack/salt/raw/develop/pkg/salt-$fname.service \ + -o /lib/systemd/system/salt-$fname.service || return 1 + continue + fi + + curl -k -L https://github.com/saltstack/salt/raw/develop/pkg/rpm/salt-$fname \ + -o /etc/init.d/salt-$fname || return 1 + chmod +x /etc/init.d/salt-$fname + + done + fi + return 0 +} + +install_suse_11_git_post() { + install_opensuse_git_post || return 1 + return 0 +} + +install_suse_11_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} +# +# End of SuSE Install Functions. +# +############################################################################## + +############################################################################## +# +# Default minion configuration function. Matches ANY distribution as long as +# the -c options is passed. 
+# +config_salt() { + # If the configuration directory is not passed, return + [ "$TEMP_CONFIG_DIR" = "null" ] && return + + CONFIGURED_ANYTHING=$BS_FALSE + + PKI_DIR=$SALT_ETC_DIR/pki + + # Let's create the necessary directories + [ -d $SALT_ETC_DIR ] || mkdir $SALT_ETC_DIR || return 1 + [ -d $PKI_DIR ] || mkdir -p $PKI_DIR && chmod 700 $PKI_DIR || return 1 + + if [ $INSTALL_MINION -eq $BS_TRUE ]; then + # Create the PKI directory + [ -d $PKI_DIR/minion ] || mkdir -p $PKI_DIR/minion && chmod 700 $PKI_DIR/minion || return 1 + + # Copy the minions configuration if found + if [ -f "$TEMP_CONFIG_DIR/minion" ]; then + mv "$TEMP_CONFIG_DIR/minion" /etc/salt || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Copy the minion's keys if found + if [ -f "$TEMP_CONFIG_DIR/minion.pem" ]; then + mv "$TEMP_CONFIG_DIR/minion.pem" $PKI_DIR/minion/ || return 1 + chmod 400 $PKI_DIR/minion/minion.pem || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + if [ -f "$TEMP_CONFIG_DIR/minion.pub" ]; then + mv "$TEMP_CONFIG_DIR/minion.pub" $PKI_DIR/minion/ || return 1 + chmod 664 $PKI_DIR/minion/minion.pub || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + fi + + + if [ $INSTALL_MASTER -eq $BS_TRUE ] || [ $INSTALL_SYNDIC -eq $BS_TRUE ]; then + # Create the PKI directory + [ -d $PKI_DIR/master ] || mkdir -p $PKI_DIR/master && chmod 700 $PKI_DIR/master || return 1 + + # Copy the masters configuration if found + if [ -f "$TEMP_CONFIG_DIR/master" ]; then + mv "$TEMP_CONFIG_DIR/master" /etc/salt || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Copy the master's keys if found + if [ -f "$TEMP_CONFIG_DIR/master.pem" ]; then + mv "$TEMP_CONFIG_DIR/master.pem" $PKI_DIR/master/ || return 1 + chmod 400 $PKI_DIR/master/master.pem || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + if [ -f "$TEMP_CONFIG_DIR/master.pub" ]; then + mv "$TEMP_CONFIG_DIR/master.pub" $PKI_DIR/master/ || return 1 + chmod 664 $PKI_DIR/master/master.pub || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + fi + + if [ $CONFIG_ONLY -eq $BS_TRUE ] && [ $CONFIGURED_ANYTHING -eq $BS_FALSE ]; then + echowarn "No configuration or keys were copied over. No configuration was done!" + exit 0 + fi + return 0 +} +# +# Ended Default Configuration function +# +############################################################################## + + +############################################################################## +# +# Default salt master minion keys pre-seed function. Matches ANY distribution +# as long as the -k option is passed. +# +preseed_master() { + # Create the PKI directory + [ -d $PKI_DIR/minions ] || mkdir -p $PKI_DIR/minions && chmod 700 $PKI_DIR/minions || return 1 + + for keyfile in $(ls $TEMP_KEYS_DIR); do + src_keyfile="${TEMP_KEYS_DIR}/${keyfile}" + dst_keyfile="${PKI_DIR}/minions/${keyfile}" + + # If it's not a file, skip to the next + [ ! -f $keyfile_path ] && continue + + movefile "$src_keyfile" "$dst_keyfile" || return 1 + chmod 664 $dst_keyfile || return 1 + done + + return 0 +} +# +# Ended Default Salt Master Pre-Seed minion keys function +# +############################################################################## + + +############################################################################## +# +# This function checks if all of the installed daemons are running or not. 
+# +daemons_running() { + FAILED_DAEMONS=0 + for fname in minion master syndic; do + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ "x$(ps aux | grep -v grep | grep salt-$fname)" = "x" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$(expr $FAILED_DAEMONS + 1) + fi + done + return $FAILED_DAEMONS +} +# +# Ended daemons running check function +# +############################################################################## + + +#============================================================================= +# LET'S PROCEED WITH OUR INSTALLATION +#============================================================================= +# Let's get the dependencies install function +DEP_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_deps" +DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_deps" +DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_deps" +DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_deps" +DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_deps" +DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}_deps" + +DEPS_INSTALL_FUNC="null" +for DEP_FUNC_NAME in $(__strip_duplicates $DEP_FUNC_NAMES); do + if __function_defined $DEP_FUNC_NAME; then + DEPS_INSTALL_FUNC=$DEP_FUNC_NAME + break + fi +done + + +# Let's get the minion config function +CONFIG_SALT_FUNC="null" +if [ "$TEMP_CONFIG_DIR" != "null" ]; then + + CONFIG_FUNC_NAMES="config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_salt" + CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_salt" + CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_salt" + CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_salt" + CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_${ITYPE}_salt" + CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_salt" + CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_salt" + + for FUNC_NAME in $(__strip_duplicates $CONFIG_FUNC_NAMES); do + if __function_defined $FUNC_NAME; then + CONFIG_SALT_FUNC=$FUNC_NAME + break + fi + done +fi + + +# Let's get the pre-seed master function +PRESEED_MASTER_FUNC="null" +if [ "$TEMP_CONFIG_DIR" != "null" ]; then + + PRESEED_FUNC_NAMES="preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_master" + PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_master" + PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_master" + PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_master" + PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_${ITYPE}_master" + PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_master" + PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_master" + + for FUNC_NAME in $(__strip_duplicates $PRESEED_FUNC_NAMES); do + if 
__function_defined $FUNC_NAME; then + PRESEED_MASTER_FUNC=$FUNC_NAME + break + fi + done +fi + + +# Let's get the install function +INSTALL_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" +INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" +INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}" + +INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates $INSTALL_FUNC_NAMES); do + if __function_defined $FUNC_NAME; then + INSTALL_FUNC=$FUNC_NAME + break + fi +done + + +# Let's get the post install function +POST_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_post" + + +POST_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates $POST_FUNC_NAMES); do + if __function_defined $FUNC_NAME; then + POST_INSTALL_FUNC=$FUNC_NAME + break + fi +done + + +# Let's get the start daemons install function +STARTDAEMONS_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_restart_daemons" + +STARTDAEMONS_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates $STARTDAEMONS_FUNC_NAMES); do + if __function_defined $FUNC_NAME; then + STARTDAEMONS_INSTALL_FUNC=$FUNC_NAME + break + fi +done + + +# Let's get the daemons running check function. 
+DAEMONS_RUNNING_FUNC="null" +DAEMONS_RUNNING_FUNC_NAMES="daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}_${ITYPE}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running" + +for FUNC_NAME in $(__strip_duplicates $DAEMONS_RUNNING_FUNC_NAMES); do + if __function_defined $FUNC_NAME; then + DAEMONS_RUNNING_FUNC=$FUNC_NAME + break + fi +done + + + +if [ $DEPS_INSTALL_FUNC = "null" ]; then + echoerror "No dependencies installation function found. Exiting..." + exit 1 +fi + +if [ $INSTALL_FUNC = "null" ]; then + echoerror "No installation function found. Exiting..." + exit 1 +fi + + +# Install dependencies +if [ $CONFIG_ONLY -eq $BS_FALSE ]; then + # Only execute function is not in config mode only + echoinfo "Running ${DEPS_INSTALL_FUNC}()" + $DEPS_INSTALL_FUNC + if [ $? -ne 0 ]; then + echoerror "Failed to run ${DEPS_INSTALL_FUNC}()!!!" + exit 1 + fi +fi + + +# Configure Salt +if [ "$TEMP_CONFIG_DIR" != "null" ] && [ "$CONFIG_SALT_FUNC" != "null" ]; then + echoinfo "Running ${CONFIG_SALT_FUNC}()" + $CONFIG_SALT_FUNC + if [ $? -ne 0 ]; then + echoerror "Failed to run ${CONFIG_SALT_FUNC}()!!!" + exit 1 + fi +fi + + +# Pre-Seed master keys +if [ "$TEMP_KEYS_DIR" != "null" ] && [ "$PRESEED_MASTER_FUNC" != "null" ]; then + echoinfo "Running ${PRESEED_MASTER_FUNC}()" + $PRESEED_MASTER_FUNC + if [ $? -ne 0 ]; then + echoerror "Failed to run ${PRESEED_MASTER_FUNC}()!!!" + exit 1 + fi +fi + + +# Install Salt +if [ $CONFIG_ONLY -eq $BS_FALSE ]; then + # Only execute function is not in config mode only + echoinfo "Running ${INSTALL_FUNC}()" + $INSTALL_FUNC + if [ $? -ne 0 ]; then + echoerror "Failed to run ${INSTALL_FUNC}()!!!" + exit 1 + fi +fi + + +# Run any post install function, Only execute function is not in config mode only +if [ $CONFIG_ONLY -eq $BS_FALSE ] && [ "$POST_INSTALL_FUNC" != "null" ]; then + echoinfo "Running ${POST_INSTALL_FUNC}()" + $POST_INSTALL_FUNC + if [ $? -ne 0 ]; then + echoerror "Failed to run ${POST_INSTALL_FUNC}()!!!" + exit 1 + fi +fi + + +# Run any start daemons function +if [ "$STARTDAEMONS_INSTALL_FUNC" != "null" ]; then + echoinfo "Running ${STARTDAEMONS_INSTALL_FUNC}()" + $STARTDAEMONS_INSTALL_FUNC + if [ $? -ne 0 ]; then + echoerror "Failed to run ${STARTDAEMONS_INSTALL_FUNC}()!!!" + exit 1 + fi +fi + +# Check if the installed daemons are running or not +if [ "$DAEMONS_RUNNING_FUNC" != "null" ]; then + sleep 3 # Sleep a little bit to let daemons start + echoinfo "Running ${DAEMONS_RUNNING_FUNC}()" + $DAEMONS_RUNNING_FUNC + if [ $? -ne 0 ]; then + echoerror "Failed to run ${DAEMONS_RUNNING_FUNC}()!!!" 
+ + for fname in minion master syndic; do + # Skip if not meant to be installed + [ $fname = "minion" ] && [ $INSTALL_MINION -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ $INSTALL_MASTER -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ $INSTALL_SYNDIC -eq $BS_FALSE ] && continue + + if [ $ECHO_DEBUG -eq $BS_FALSE ]; then + echoerror "salt-$fname was not found running. Pass '-D' for additional debugging information..." + continue + fi + + + [ ! $SALT_ETC_DIR/$fname ] && [ $fname != "syndic" ] && echodebug "$SALT_ETC_DIR/$fname does not exist" + + echodebug "Running salt-$fname by hand outputs: $(nohup salt-$fname -l debug)" + + [ ! -f /var/log/salt/$fname ] && echodebug "/var/log/salt/$fname does not exist. Can't cat its contents!" && continue + + echodebug "DEAMON LOGS for $fname:" + echodebug "$(cat /var/log/salt/$fname)" + echo + done + + echodebug "Running Processes:" + echodebug "$(ps auxwww)" + + exit 1 + fi +fi + + +# Done! +if [ $CONFIG_ONLY -eq $BS_FALSE ]; then + echoinfo "Salt installed!" +else + echoinfo "Salt configured" +fi +exit 0 \ No newline at end of file diff --git a/conf/local/project/db.sls b/conf/local/project/db.sls new file mode 100644 index 0000000..f202dd3 --- /dev/null +++ b/conf/local/project/db.sls @@ -0,0 +1,2 @@ +include: + - postgresql.project \ No newline at end of file diff --git a/conf/local/project/env_secrets.jinja2 b/conf/local/project/env_secrets.jinja2 new file mode 100644 index 0000000..cb2d060 --- /dev/null +++ b/conf/local/project/env_secrets.jinja2 @@ -0,0 +1,3 @@ +{% for key, val in pillar['secrets'].iteritems() %} +export {{ key }}={{ val }} +{% endfor %} diff --git a/conf/templates/nginx/site.conf b/conf/local/project/nginx.conf similarity index 67% rename from conf/templates/nginx/site.conf rename to conf/local/project/nginx.conf index 160a02c..71acbf3 100644 --- a/conf/templates/nginx/site.conf +++ b/conf/local/project/nginx.conf @@ -1,39 +1,38 @@ -upstream django_server { +upstream {{ pillar['project_name'] }} { server 127.0.0.1:{{ server_port|default('8000') }} fail_timeout=0; } - server { listen 80; - server_name {{ server_name }}; - root {{ code_root }}/public; + server_name {{ pillar['domain'] }}; + root {{ public_root }}; keepalive_timeout 5; access_log {{ log_dir }}/access.log; error_log {{ log_dir }}/error.log; - if ($host !~* ^({{ server_name }}|www.{{ server_name }})$) { + if ($host !~* ^({{ pillar['domain'] }}|www.{{ pillar['domain'] }})$) { # Deny non-matching Host headers return 444; } location /robots.txt { - alias {{ code_root }}/public/robots.txt; + alias {{ public_root }}/static/robots.txt; } location /media { - alias {{ code_root }}/public/media; + alias {{ public_root }}/media; } location /static { - alias {{ code_root }}/public/static; + alias {{ public_root }}/static; expires max; } error_page 502 503 504 /502.html; location /502.html { - alias {{ code_root }}/public/502.html; + alias {{ public_root }}/static/502.html; } location / { @@ -44,7 +43,7 @@ server { proxy_redirect off; proxy_buffering on; proxy_intercept_errors on; - proxy_pass http://django_server; + proxy_pass http://{{ pillar['project_name'] }}; } } @@ -52,5 +51,5 @@ server { server { listen 80 default_server; server_name _; - return 301 http://{{ server_name }}$request_uri; + return 301 http://{{ pillar['domain'] }}$request_uri; } diff --git a/conf/local/project/supervisor/group.conf b/conf/local/project/supervisor/group.conf new file mode 100644 index 0000000..6cc0cf2 --- /dev/null +++ b/conf/local/project/supervisor/group.conf @@ -0,0 
+1,2 @@ +[group:{{ pillar['project_name'] }}] +programs={{ pillar['project_name'] }}-server diff --git a/conf/local/project/supervisor/gunicorn.conf b/conf/local/project/supervisor/gunicorn.conf new file mode 100644 index 0000000..82c0bcb --- /dev/null +++ b/conf/local/project/supervisor/gunicorn.conf @@ -0,0 +1,13 @@ +[program:{{ pillar['project_name'] }}-server] +process_name=%(program_name)s +command={{ virtualenv_root }}/bin/django-admin.py run_gunicorn --bind=127.0.0.1:{{ server_port|default('8000') }} --workers=3 --worker-class=gevent --preload +user={{ pillar['project_name'] }} +autostart=true +autorestart=true +stdout_logfile={{ log_dir }}/gunicorn.log +redirect_stderr=true +stderr_logfile={{ log_dir }}/gunicorn.error.log +environment=DJANGO_SETTINGS_MODULE="{{ settings }}", + {%- for key, value in pillar['secrets'].iteritems() -%} + {{ key }}="{{ value }}"{%- if not loop.last -%},{%- endif -%} + {%- endfor -%} \ No newline at end of file diff --git a/conf/local/project/web.sls b/conf/local/project/web.sls new file mode 100644 index 0000000..6466b72 --- /dev/null +++ b/conf/local/project/web.sls @@ -0,0 +1,160 @@ +include: + - memcached + - postfix + - version-control + - nginx + - python + - supervisor + - ufw + +http_firewall: + ufw.allow: + - names: + - '80' + - '443' + - enabled: true + +project_user: + user.present: + - name: {{ pillar['project_name'] }} + - remove_groups: False + - groups: [www-data] + +root_dir: + file.directory: + - name: /var/www/{{ pillar['project_name'] }}/ + - user: {{ pillar['project_name'] }} + - group: admin + - mode: 775 + - makedirs: True + - require: + - user: project_user + +log_dir: + file.directory: + - name: /var/www/{{ pillar['project_name'] }}/log/ + - user: {{ pillar['project_name'] }} + - group: www-data + - mode: 775 + - makedirs: True + - require: + - file: root_dir + +public_dir: + file.directory: + - name: /var/www/{{ pillar['project_name'] }}/public/ + - user: {{ pillar['project_name'] }} + - group: www-data + - mode: 775 + - makedirs: True + - require: + - file: root_dir + +venv: + virtualenv.managed: + - name: /var/www/{{ pillar['project_name'] }}/env/ + - no_site_packages: True + - distribute: True + - require: + - pip: virtualenv + - file: root_dir + +venv_dir: + file.directory: + - name: /var/www/{{ pillar['project_name'] }}/env/ + - user: {{ pillar['project_name'] }} + - group: {{ pillar['project_name'] }} + - recurse: + - user + - group + - require: + - virtualenv: venv + +activate: + file.append: + - name: /var/www/{{ pillar['project_name'] }}/env/bin/activate + - text: source /var/www/{{ pillar['project_name'] }}/env/bin/secrets + - require: + - virtualenv: venv + +secrets: + file.managed: + - name: /var/www/{{ pillar['project_name'] }}/env/bin/secrets + - source: salt://project/env_secrets.jinja2 + - user: {{ pillar['project_name'] }} + - group: {{ pillar['project_name'] }} + - template: jinja + - require: + - file: activate + +nginx_conf: + file.managed: + - name: /etc/nginx/sites-enabled/{{ pillar['project_name'] }}.conf + - source: salt://project/nginx.conf + - user: root + - group: root + - mode: 644 + - template: jinja + - context: + public_root: "/var/www/{{ pillar['project_name']}}/public" + log_dir: "/var/www/{{ pillar['project_name']}}/log" + - require: + - pkg: nginx + - file: log_dir + +group_conf: + file.managed: + - name: /etc/supervisor/conf.d/{{ pillar['project_name'] }}-group.conf + - source: salt://project/supervisor/group.conf + - user: root + - group: root + - mode: 644 + - template: jinja + - require: + - 
pkg: supervisor + - file: log_dir + +gunicorn_conf: + file.managed: + - name: /etc/supervisor/conf.d/{{ pillar['project_name'] }}-gunicorn.conf + - source: salt://project/supervisor/gunicorn.conf + - user: root + - group: root + - mode: 644 + - template: jinja + - context: + log_dir: "/var/www/{{ pillar['project_name']}}/log" + virtualenv_root: "/var/www/{{ pillar['project_name']}}/env" + settings: "{{ pillar['project_name']}}.settings.{{ pillar['environment'] }}" + - require: + - pkg: supervisor + - file: log_dir + +extend: + nginx: + service: + - running + - watch: + - file: nginx_conf + + supervisor: + service: + - running + - watch: + - file: group_conf + - file: gunicorn_conf + +npm: + pkg: + - installed + +less: + cmd.run: + - name: npm install less -g + - user: root + - unless: which lessc + - require: + - pkg: npm + file.symlink: + - name: /usr/bin/lessc + - target: /usr/local/bin/lessc \ No newline at end of file diff --git a/conf/local/top.sls b/conf/local/top.sls new file mode 100644 index 0000000..dbc8776 --- /dev/null +++ b/conf/local/top.sls @@ -0,0 +1,10 @@ +base: + '*': + - base + - vagrant.user + - users.devs + - sshd + - sshd.github + - locale.utf8 + - project.web + - project.db diff --git a/conf/minion.conf b/conf/minion.conf new file mode 100644 index 0000000..43d3abc --- /dev/null +++ b/conf/minion.conf @@ -0,0 +1,17 @@ +## Minimal masterless minion configuration ## +## All other settings take the defaults ## + +id: %(project)s-%(environment)s + +master: localhost + +file_client: local + +file_roots: + base: + - /srv/local + - /srv/common + +pillar_roots: + base: + - /srv/pillar \ No newline at end of file diff --git a/conf/packages.conf b/conf/packages.conf deleted file mode 100644 index 0752523..0000000 --- a/conf/packages.conf +++ /dev/null @@ -1,14 +0,0 @@ -[base] -packages = python-software-properties dpkg-dev wget build-essential git-core subversion mercurial vim ntp - -[app] -packages = python2.6 python-all-dev python-setuptools libpq-dev libevent-1.4-2 libevent-core-1.4-2 libevent-extra-1.4-2 libevent-dev memcached libjpeg62 libjpeg62-dev zlib1g-dev supervisor postfix nodejs npm -ppas = ppa:pitti/postgresql ppa:chris-lea/node.js - -[lb] -packages = nginx -ppas = ppa:nginx/stable - -[db] -packages = postgresql-9.1 postgresql-contrib-9.1 postgresql-server-dev-9.1 postgresql-client-9.1 libpq-dev -ppas = ppa:pitti/postgresql diff --git a/conf/pillar/devs.sls b/conf/pillar/devs.sls new file mode 100644 index 0000000..4512b1b --- /dev/null +++ b/conf/pillar/devs.sls @@ -0,0 +1,6 @@ +users: + # This should be a list of developer users and thier public SSH keys + # example-user: + # groups: [admin, login] + # public_key: + # - ssh-dss AAAAB3NzaC1kc3MAAACBAP/dCNcAJED+pBsEwH01E4NU2xrvoB6H5gXkvQHWIKUuMF3MWXgSGhKpgVqLJKh+d0gwuKyI9344HM5dFs4z3E0JhI7Fg4uXIYu1SwuqnxO+D18WLVGt4gCn57JCjLy/c8LJWAHJWFb2v9t4fayC+oBiyEvpjI6VYIJnSvO3D4tjAAAAFQCNzcKi0sehN1Jw+zB6ccMlHt5E6wAAAIEAnW18UHG/O+RIkJazTJ7qFlOb79RS1nnvnHAvtfuiAPIBmeJcKoZkiQzeBYtFereSRHmSug9DsqHK6C5PrP36UMZYhDkqqp5gpJexmokI2kt3AVxJwro7cjy6Tq+0yt+lwqH4JEblybk7yPeRNC1ihnp2CSipC5LP1PydIcgN9/UAAACAeH1OxUzgCfpM06cfKL57gtjIS34ryCdkT2oYfYOANa8vahN2JqxB004o+z2CnQ9DkTqzzf9jUYI/qal19+zYhn8Bd/FdPVp+VTfRpR17fQKuTWrnF7g6jNVN2ltwHo6o99vrCzjHhJHZ2EXOODzAUrACptyfQv/ZCutkjAg44YE= copelco@montgomery.local diff --git a/conf/pillar/production/env.sls b/conf/pillar/production/env.sls new file mode 100644 index 0000000..211e069 --- /dev/null +++ b/conf/pillar/production/env.sls @@ -0,0 +1,4 @@ +environment: production + +# FIXME: Change to match 
production domain name +domain: example.com \ No newline at end of file diff --git a/conf/pillar/production/secrets.ex b/conf/pillar/production/secrets.ex new file mode 100644 index 0000000..5bd472d --- /dev/null +++ b/conf/pillar/production/secrets.ex @@ -0,0 +1,3 @@ +# This file should be renamed to secrets.sls +secrets: + DB_PASSWORD: 'XXXXXX' \ No newline at end of file diff --git a/conf/pillar/project.sls b/conf/pillar/project.sls new file mode 100644 index 0000000..4a921bb --- /dev/null +++ b/conf/pillar/project.sls @@ -0,0 +1,2 @@ +# FIXME: Change to match project name +project_name: example \ No newline at end of file diff --git a/conf/pillar/staging/env.sls b/conf/pillar/staging/env.sls new file mode 100644 index 0000000..c4849c3 --- /dev/null +++ b/conf/pillar/staging/env.sls @@ -0,0 +1,4 @@ +environment: staging + +# FIXME: Change to match staging domain name +domain: staging.example.com \ No newline at end of file diff --git a/conf/pillar/staging/secrets.ex b/conf/pillar/staging/secrets.ex new file mode 100644 index 0000000..5bd472d --- /dev/null +++ b/conf/pillar/staging/secrets.ex @@ -0,0 +1,3 @@ +# This file should be renamed to secrets.sls +secrets: + DB_PASSWORD: 'XXXXXX' \ No newline at end of file diff --git a/conf/pillar/top.sls b/conf/pillar/top.sls new file mode 100644 index 0000000..f614725 --- /dev/null +++ b/conf/pillar/top.sls @@ -0,0 +1,10 @@ +base: + "*": + - project + - devs + "*-staging": + - staging.env + - staging.secrets + "*-production": + - production.env + - production.secrets diff --git a/conf/templates/supervisor/group.conf b/conf/templates/supervisor/group.conf deleted file mode 100644 index 4d37708..0000000 --- a/conf/templates/supervisor/group.conf +++ /dev/null @@ -1,2 +0,0 @@ -[group:{{ environment }}] -programs=server diff --git a/conf/templates/supervisor/gunicorn.conf b/conf/templates/supervisor/gunicorn.conf deleted file mode 100644 index 8088f9b..0000000 --- a/conf/templates/supervisor/gunicorn.conf +++ /dev/null @@ -1,10 +0,0 @@ -[program:server] -process_name=%(program_name)s -command={{ virtualenv_root }}/bin/django-admin.py run_gunicorn --bind=127.0.0.1:{{ server_port|default('8000') }} --workers=3 --worker-class=gevent --preload --settings={{ settings }} -directory={{ project_root }} -user={{ project_user }} -autostart=true -autorestart=true -stdout_logfile={{ log_dir }}/gunicorn.log -redirect_stderr=true -stderr_logfile={{ log_dir }}/gunicorn.error.log diff --git a/conf/users/REMOVE b/conf/users/REMOVE deleted file mode 100644 index 99b01d2..0000000 --- a/conf/users/REMOVE +++ /dev/null @@ -1,3 +0,0 @@ -This directory should contain the public ssh keys for the developers working on -the project. The name of the file should be the name of the user which will be -created by ``fab create_users``. diff --git a/docs/provisioning.rst b/docs/provisioning.rst new file mode 100644 index 0000000..56871c1 --- /dev/null +++ b/docs/provisioning.rst @@ -0,0 +1,143 @@ +Server Provisioning +======================== + + +Overview +------------------------ + +{{ project_name|title }} is deployed on the following stack. + +- OS: Ubuntu 12.04 LTS +- Python: 2.7 +- Database: Postgres 9.1 +- Application Server: Gunicorn +- Frontend Server: Nginx +- Cache: Memcached + +These services are configured to run together on a single machine. Each environment +(``staging`` or ``production``) should run on a separate machine. `Supervisord `_ +manages the application server process. 
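Which environment's configuration a machine receives is driven by its Salt minion id: the
masterless minion is given an id of the form ``<project>-<environment>`` (see ``conf/minion.conf``),
and ``conf/pillar/top.sls`` targets pillar data at those ids with shell-style globs. As a quick,
illustrative sketch (the minion ids below are made up; Salt's default glob matcher behaves like
Python's ``fnmatch``), you can check which pillar files a given id would pick up::

    from fnmatch import fnmatch

    # Patterns and includes mirror conf/pillar/top.sls.
    TOP_SLS = [
        ('*', ['project', 'devs']),
        ('*-staging', ['staging.env', 'staging.secrets']),
        ('*-production', ['production.env', 'production.secrets']),
    ]

    def pillar_includes(minion_id):
        """Return the pillar .sls files a given minion id would be served."""
        matched = []
        for pattern, includes in TOP_SLS:
            if fnmatch(minion_id, pattern):
                matched.extend(includes)
        return matched

    print(pillar_includes('example-staging'))
    # ['project', 'devs', 'staging.env', 'staging.secrets']

This is also why a staging box never sees the production secrets: its id only matches the
``*-staging`` pattern.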
+
+
+Initial Setup
+------------------------
+
+Before your project can be deployed to a server, the code needs to be
+accessible in a git repository. Once that is done, you should update ``env.repo`` in
+``fabfile.py``. E.g., change this::
+
+    env.repo = u'' # FIXME: Add repo URL
+
+to this::
+
+    env.repo = u'git@github.com:account/reponame.git'
+
+You also need to set the project name in ``conf/pillar/project.sls``. This should
+match ``env.project`` in ``fabfile.py``. For the environment you want to set up,
+you will need to set the ``domain`` in ``conf/pillar/<environment>/env.sls``.
+
+You will also need to add the developers' user names and SSH keys to ``conf/pillar/devs.sls``. Each
+user record should match the format::
+
+    example-user:
+      groups: [admin, login]
+      public_key:
+        - ssh-rsa <full public key>
+
+Additional developers can be added later, but you will need to create at least one user for
+yourself.
+
+
+Managing Secrets
+------------------------
+
+Secret information such as passwords and API keys should never be committed to the
+source repository. Instead, each environment manages its secrets in ``conf/pillar/<environment>/secrets.sls``.
+These ``secrets.sls`` files are excluded from source control and need to be passed
+to the developers out of band. There are example files given in ``conf/pillar/<environment>/secrets.ex``.
+They have the format::
+
+    secrets:
+      DB_PASSWORD: 'XXXXXX'
+
+Each key/value pair given in the ``secrets`` dictionary will be added to the OS environment
+and can be retrieved in Python code via::
+
+    import os
+
+    password = os.environ['DB_PASSWORD']
+
+Secrets for other environments will not be available. That is, the staging server
+will not have access to the production secrets. As such there is no need to namespace the
+secrets by their environment.
+
+
+Setup Checklist
+------------------------
+
+To summarize the steps above, you can use the following checklist:
+
+- ``env.repo`` is set in ``fabfile.py``
+- Developer user names and SSH keys have been added to ``conf/pillar/devs.sls``
+- Project name has been set in ``conf/pillar/project.sls``
+- Environment domain name has been set in ``conf/pillar/<environment>/env.sls``
+- Environment secrets have been set in ``conf/pillar/<environment>/secrets.sls``
+
+
+Provision
+------------------------
+
+Once you have completed the above steps, you are ready to provision a new server
+for a given environment. You will need to be able to connect to the server
+as a root user. How this is done will depend on where the server is hosted.
+VPS providers such as Linode will give you a username/password combination. Amazon's
+EC2 uses a private key. These credentials will be passed as command line arguments::
+
+    # Template of the command
+    fab -H <server-ip> -u <root-user> <environment> provision
+    # Example of provisioning 33.33.33.10 as a staging machine
+    fab -H 33.33.33.10 -u root staging provision
+
+Behind the scenes this will rsync the states/pillars in ``conf`` over to the
+server as well as check out the base states from the `margarita `_
+repo. It will then use the `masterless salt-minion `_
+to ensure the states are up to date.
+
+Note that because of the use of rsync it is possible to execute configuration changes which
+have not yet been committed to the repo. This can be handy for testing configuration
+changes, and it allows the secrets to be excluded from the repo, but it's a double-edged sword:
+be sure to commit any configuration changes to the repo when they are ready.
+
+Once a server has been provisioned for its environment, its address should be added to ``env.hosts``
+for that environment.
+In our example we would add::
+
+    def staging():
+        env.environment = 'staging'
+        env.hosts = ['33.33.33.10', ]
+
+At this point we can run the first deploy::
+
+    fab staging deploy
+
+This will do the initial checkout of the repo source, install the Python requirements,
+run syncdb/migrate, and collect the static resources.
+
+
+Updates
+------------------------
+
+During the life of the project you will likely need to make updates to the server
+configuration. This might include new secrets added to the pillar, new developers
+added to the project, or new services which need to be installed. Configuration updates
+can be made by calling the ``provision`` command again::
+
+    # Template of the command
+    fab <environment> provision
+    # Reprovision the staging server
+    fab staging provision
+
+In this case we do not need to connect as the root user. We connect as our developer
+user. We also do not need to specify the host. It will use the ``env.hosts`` previously
+set for this environment.
+
+For more information on testing the provisioning, see the :doc:`vagrant guide <vagrant>`.
diff --git a/docs/server-setup.rst b/docs/server-setup.rst
new file mode 100644
index 0000000..4bbcbac
--- /dev/null
+++ b/docs/server-setup.rst
@@ -0,0 +1,67 @@
+Server Setup
+========================
+
+
+Provisioning
+------------------------
+
+The server provisioning is managed using `Salt Stack `_. The base
+states are managed in a `common repo `_ and additional
+states specific to this project are contained within the ``conf`` directory at the root
+of the repository.
+
+For more information see the :doc:`provisioning guide <provisioning>`.
+
+
+Layout
+------------------------
+
+Below is the server layout created by this provisioning process::
+
+    /var/www/{{ project_name }}/
+        source/
+        env/
+        log/
+        public/
+            static/
+            media/
+
+``source`` contains the source code of the project. ``env``
+is the `virtualenv `_ for Python requirements. ``log``
+stores the Nginx, Gunicorn and other logs used by the project. ``public``
+holds the static resources (css/js) for the project and the uploaded user media.
+``public/static/`` and ``public/media/`` map to the ``STATIC_ROOT`` and
+``MEDIA_ROOT`` settings.
+
+
+Deployment
+------------------------
+
+For deployment, each developer connects to the server as their own user. Each developer
+has SSH access via their public key. These users are created/managed by the Salt
+provisioning. The deployment itself is automated with `Fabric `_.
+To deploy, a developer simply runs::
+
+    # Deploy updates to staging
+    fab staging deploy
+    # Deploy updates to production
+    fab production deploy
+
+Each environment (``staging`` or ``production``) is tied to a particular Git branch managed
+by ``env.branch`` in the ``fabfile.py``. Deploying a different branch can be done by
+passing the branch name to the deploy command::
+
+    # Deploy new-feature to staging
+    fab staging deploy:new-feature
+
+Developers should coordinate to ensure that they do not deploy different branches on
+top of one another.
+
+New Python requirements added to the ``requirements/`` files and new South migrations
+are detected by grepping the Git diff on deploy.
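Under the hood that detection is just a pattern match against the ``git diff --stat`` output
fetched during deploy. A simplified sketch of the idea (the file paths below are illustrative,
and the real ``match_changes`` helper in ``fabfile.py`` may differ in detail)::

    import re

    def match_changes(changes, match):
        """Return True if any path in the git diff --stat output matches."""
        return re.search(match, changes) is not None

    diff_stat = """
     requirements/production.txt           |  2 +-
     example/polls/migrations/0002_auto.py | 18 ++++++++++
    """

    print(match_changes(diff_stat, r"requirements/"))  # True -> reinstall requirements
    print(match_changes(diff_stat, r"/migrations/"))   # True -> run syncdb/migrate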
+This detection works fairly well, but if the requirements or migrations need to be updated
+manually, that can be done via Fabric::
+
+    # Installs new requirements from requirements/production.txt
+    fab staging update_requirements
+    # Runs syncdb/migrate
+    fab staging syncdb
diff --git a/docs/vagrant.rst b/docs/vagrant.rst
new file mode 100644
index 0000000..d541751
--- /dev/null
+++ b/docs/vagrant.rst
@@ -0,0 +1,62 @@
+Vagrant Testing
+========================
+
+
+Starting the VM
+------------------------
+
+You can test the provisioning/deployment using `Vagrant `_.
+Using the included Vagrantfile you can start up the VM. This requires Vagrant 1.2+ and
+the ``precise32`` box. The box will be installed if you don't have it already::
+
+    vagrant up
+
+The general provision workflow is the same as in the previous :doc:`provisioning guide <provisioning>`,
+so here are notes on the Vagrant specifics.
+
+
+Provisioning the VM
+------------------------
+
+The ``fabfile.py`` contains a ``vagrant`` environment with the VM's IP already added.
+The rest of the environment is made to match the ``staging`` environment. If you
+have already configured ``conf/pillar/staging/env.sls`` and ``conf/pillar/staging/secrets.sls``,
+then you can continue provisioning the VM.
+
+To connect to the VM for the first time, you need to use the private key which ships
+with the Vagrant install. The location of the file may vary on your platform depending
+on which version you installed and how it was installed. You can use ``locate`` to find it::
+
+    # Example locate with output
+    $ locate keys/vagrant
+    /opt/vagrant/embedded/gems/gems/vagrant-1.2.2/keys/vagrant
+    /opt/vagrant/embedded/gems/gems/vagrant-1.2.2/keys/vagrant.pub
+
+You can then call the initial provision using this key location for the ``-i`` option::
+
+    fab -u vagrant -i /opt/vagrant/embedded/gems/gems/vagrant-1.2.2/keys/vagrant vagrant provision
+
+After that has finished you can run the initial deploy::
+
+    fab vagrant deploy
+
+
+Testing on the VM
+------------------------
+
+With the VM fully provisioned and deployed, you can access the VM on localhost port 8089. Since
+the Nginx configuration will only listen for the domain name in ``conf/pillar/staging/env.sls``,
+you will need to modify your ``/etc/hosts`` configuration to view it. You will need to add::
+
+    127.0.0.1 <domain>
+
+where ``<domain>`` matches the domain in ``conf/pillar/staging/env.sls``. For example, let's use
+staging.example.com::
+
+    127.0.0.1 staging.example.com
+
+In your browser you can now view staging.example.com:8089 and see the VM running the full
+web stack.
+
+Note that this ``/etc/hosts`` entry will prevent you from accessing the true staging.example.com.
+When your testing is complete, you should remove or comment out this entry.
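One note that applies to the Vagrant VM and to real servers alike: the application reads the
provisioned secrets from ordinary environment variables at runtime, both in the shell (via the
generated ``env/bin/secrets`` file) and under Supervisor (via the ``environment=`` line in the
Gunicorn config). A minimal sketch of an environment settings module follows; the module path,
base import, and database values are assumptions for illustration, not part of the template::

    # example/settings/staging.py -- hypothetical environment settings module
    import os

    from example.settings.base import *  # noqa: assumed shared base settings

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'example_staging',
            'USER': 'example',
            # DB_PASSWORD comes from conf/pillar/<environment>/secrets.sls via the
            # secrets file and the Supervisor environment= entry.
            'PASSWORD': os.environ['DB_PASSWORD'],
            'HOST': '',
            'PORT': '',
        }
    }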
diff --git a/fabfile.py b/fabfile.py index fd5bf25..6a4e0af 100644 --- a/fabfile.py +++ b/fabfile.py @@ -1,40 +1,28 @@ -import ConfigParser +import json import os import re -from argyle import rabbitmq, postgres, nginx, system -from argyle.base import upload_template -from argyle.postgres import create_db_user, create_db -from argyle.supervisor import supervisor_command, upload_supervisor_app_conf -from argyle.system import service_command, start_service, stop_service, restart_service - from fabric.api import cd, env, get, hide, local, put, require, run, settings, sudo, task -from fabric.contrib import files, console +from fabric.colors import red +from fabric.contrib import files, project +from fabric.utils import abort, error # Directory structure PROJECT_ROOT = os.path.dirname(__file__) CONF_ROOT = os.path.join(PROJECT_ROOT, 'conf') -SERVER_ROLES = ['app', 'lb', 'db'] env.project = '{{ project_name }}' env.project_user = '{{ project_name }}' env.repo = u'' # FIXME: Add repo URL env.shell = '/bin/bash -c' env.disable_known_hosts = True -env.ssh_port = 2222 env.forward_agent = True -# Additional settings for argyle -env.ARGYLE_TEMPLATE_DIRS = ( - os.path.join(CONF_ROOT, 'templates') -) - @task def vagrant(): env.environment = 'staging' env.hosts = ['33.33.33.10', ] env.branch = 'master' - env.server_name = 'dev.example.com' setup_path() @@ -43,7 +30,6 @@ def staging(): env.environment = 'staging' env.hosts = [] # FIXME: Add staging server hosts env.branch = 'master' - env.server_name = '' # FIXME: Add staging server name setup_path() @@ -52,138 +38,73 @@ def production(): env.environment = 'production' env.hosts = [] # FIXME: Add production hosts env.branch = 'master' - env.server_name = '' # FIXME: Add production server name setup_path() def setup_path(): env.home = '/home/%(project_user)s/' % env - env.root = os.path.join(env.home, 'www', env.environment) - env.code_root = os.path.join(env.root, env.project) - env.project_root = os.path.join(env.code_root, env.project) + env.root = os.path.join('/var/www/', env.project) + env.code_root = os.path.join(env.root, 'source') env.virtualenv_root = os.path.join(env.root, 'env') - env.log_dir = os.path.join(env.root, 'log') env.db = '%s_%s' % (env.project, env.environment) - env.vhost = '%s_%s' % (env.project, env.environment) env.settings = '%(project)s.settings.%(environment)s' % env @task -def create_users(): - """Create project user and developer users.""" - ssh_dir = u"/home/%s/.ssh" % env.project_user - system.create_user(env.project_user, groups=['www-data', 'login', ]) - sudo('mkdir -p %s' % ssh_dir) - user_dir = os.path.join(CONF_ROOT, "users") - for username in os.listdir(user_dir): - key_file = os.path.normpath(os.path.join(user_dir, username)) - system.create_user(username, groups=['dev', 'login', ], key_file=key_file) - with open(key_file, 'rt') as f: - ssh_key = f.read() - # Add ssh key for project user - files.append('%s/authorized_keys' % ssh_dir, ssh_key, use_sudo=True) - files.append(u'/etc/sudoers', r'%dev ALL=(ALL) NOPASSWD:ALL', use_sudo=True) - sudo('chown -R %s:%s %s' % (env.project_user, env.project_user, ssh_dir)) - - -@task -def configure_ssh(): - """ - Change sshd_config defaults: - Change default port - Disable root login - Disable password login - Restrict to only login group - """ - ssh_config = u'/etc/ssh/sshd_config' - files.sed(ssh_config, u"Port 22$", u"Port %s" % env.ssh_port, use_sudo=True) - files.sed(ssh_config, u"PermitRootLogin yes", u"PermitRootLogin no", use_sudo=True) - files.append(ssh_config, u"AllowGroups login",
use_sudo=True) - files.append(ssh_config, u"PasswordAuthentication no", use_sudo=True) - service_command(u'ssh', u'reload') - - -@task -def install_packages(*roles): - """Install packages for the given roles.""" - config_file = os.path.join(CONF_ROOT, u'packages.conf') - config = ConfigParser.SafeConfigParser() - config.read(config_file) - for role in roles: - if config.has_section(role): - # Get ppas - if config.has_option(role, 'ppas'): - for ppa in config.get(role, 'ppas').split(' '): - system.add_ppa(ppa, update=False) - # Get sources - if config.has_option(role, 'sources'): - for section in config.get(role, 'sources').split(' '): - source = config.get(section, 'source') - key = config.get(section, 'key') - system.add_apt_source(source=source, key=key, update=False) - sudo(u"apt-get update") - sudo(u"apt-get install -y %s" % config.get(role, 'packages')) - sudo(u"apt-get upgrade -y") +def provision(common='master'): + """Provision server with masterless Salt minion.""" + require('environment') + # Install salt minion + with settings(warn_only=True): + with hide('running', 'stdout', 'stderr'): + installed = run('which salt-call') + if not installed: + bootstrap_file = os.path.join(CONF_ROOT, 'bootstrap-salt.sh') + put(bootstrap_file, '/tmp/bootstrap-salt.sh') + sudo('sh /tmp/bootstrap-salt.sh daily') + # Rsync local states and pillars + minion_file = os.path.join(CONF_ROOT, 'minion.conf') + files.upload_template(minion_file, '/etc/salt/minion', use_sudo=True, context=env) + salt_root = CONF_ROOT if CONF_ROOT.endswith('/') else CONF_ROOT + '/' + environments = ['staging', 'production'] + # Only include current environment's pillar tree + exclude = [os.path.join('pillar', e) for e in environments if e != env.environment] + project.rsync_project(local_dir=salt_root, remote_dir='/tmp/salt', delete=True, exclude=exclude) + sudo('rm -rf /srv/*') + sudo('mv /tmp/salt/* /srv/') + sudo('rm -rf /tmp/salt/') + # Pull common states + sudo('rm -rf /tmp/common/') + with settings(warn_only=True): + with hide('running', 'stdout', 'stderr'): + installed = run('which git') + if not installed: + sudo('apt-get install git-core -q -y') + run('git clone git://github.com/caktus/margarita.git /tmp/common/') + with cd('/tmp/common/'): + run('git checkout %s' % common) + sudo('mv /tmp/common/ /srv/common/') + sudo('rm -rf /tmp/common/') + sudo('chown root:root -R /srv/') + # Update to highstate + with settings(warn_only=True): + sudo('salt-call --local state.highstate -l info --out json > /tmp/output.json') + get('/tmp/output.json', 'output.json') + with open('output.json', 'r') as f: + try: + results = json.load(f) + except (TypeError, ValueError) as e: + error(u'Non-JSON output from salt-call', exception=e) + else: + for state, result in results['local'].items(): + if not result["result"]: + print red(u'Error with %(name)s state: %(comment)s' % result) @task -def setup_server(*roles): - """Install packages and add configurations for server given roles.""" - require('environment') - # Set server locale - sudo('/usr/sbin/update-locale LANG=en_US.UTF-8') - roles = list(roles) - if roles == ['all', ]: - roles = SERVER_ROLES - if 'base' not in roles: - roles.insert(0, 'base') - install_packages(*roles) - if 'db' in roles: - if console.confirm(u"Do you want to reset the Postgres cluster?.", default=False): - # Ensure the cluster is using UTF-8 - pg_version = postgres.detect_version() - sudo('pg_dropcluster --stop %s main' % pg_version, user='postgres') - sudo('pg_createcluster --start -e UTF-8 --locale en_US.UTF-8 
%s main' % pg_version, - user='postgres') - postgres.create_db_user(username=env.project_user) - postgres.create_db(name=env.db, owner=env.project_user) - if 'app' in roles: - # Create project directories and install Python requirements - project_run('mkdir -p %(root)s' % env) - project_run('mkdir -p %(log_dir)s' % env) - # FIXME: update to SSH as normal user and use sudo - # we ssh as the project_user here to maintain ssh agent - # forwarding, because it doesn't work with sudo. read: - # http://serverfault.com/questions/107187/sudo-su-username-while-keeping-ssh-key-forwarding - with settings(user=env.project_user): - # TODO: Add known hosts prior to clone. - # i.e. ssh -o StrictHostKeyChecking=no git@github.com - run('git clone %(repo)s %(code_root)s' % env) - with cd(env.code_root): - run('git checkout %(branch)s' % env) - # Install and create virtualenv - with settings(hide('everything'), warn_only=True): - test_for_pip = run('which pip') - if not test_for_pip: - sudo("easy_install -U pip") - with settings(hide('everything'), warn_only=True): - test_for_virtualenv = run('which virtualenv') - if not test_for_virtualenv: - sudo("pip install -U virtualenv") - project_run('virtualenv -p python2.6 --clear --distribute %s' % env.virtualenv_root) - path_file = os.path.join(env.virtualenv_root, 'lib', 'python2.6', 'site-packages', 'project.pth') - files.append(path_file, env.code_root, use_sudo=True) - sudo('chown %s:%s %s' % (env.project_user, env.project_user, path_file)) - sudo('npm install less -g') - update_requirements() - upload_supervisor_app_conf(app_name=u'gunicorn') - upload_supervisor_app_conf(app_name=u'group') - # Restart services to pickup changes - supervisor_command('reload') - supervisor_command('restart %(environment)s:*' % env) - if 'lb' in roles: - nginx.remove_default_site() - nginx.upload_nginx_site_conf(site_name=u'%(project)s-%(environment)s.conf' % env) +def supervisor_command(command): + """Run a supervisorctl command.""" + sudo(u'supervisorctl %s' % command) def project_run(cmd): @@ -240,22 +161,35 @@ def match_changes(changes, match): def deploy(branch=None): """Deploy to a given environment.""" require('environment') + if not env.repo: + abort('env.repo is not set.') if branch is not None: env.branch = branch requirements = False migrations = False - # Fetch latest changes - with cd(env.code_root): - with settings(user=env.project_user): + if files.exists(env.code_root): + # Fetch latest changes + with cd(env.code_root): run('git fetch origin') - # Look for new requirements or migrations - changes = run("git diff origin/%(branch)s --stat-name-width=9999" % env) - requirements = match_changes(changes, r"requirements/") - migrations = match_changes(changes, r"/migrations/") - if requirements or migrations: - supervisor_command('stop %(environment)s:*' % env) - with settings(user=env.project_user): + # Look for new requirements or migrations + changes = run("git diff origin/%(branch)s --stat-name-width=9999" % env) + requirements = match_changes(changes, r"requirements/") + migrations = match_changes(changes, r"/migrations/") + if requirements or migrations: + supervisor_command('stop %(project)s:*' % env) run("git reset --hard origin/%(branch)s" % env) + else: + # Initial clone + run('git clone %(repo)s %(code_root)s' % env) + with cd(env.code_root): + run('git checkout %(branch)s' % env) + requirements = True + migrations = True + # Add code root to the Python path + path_file = os.path.join(env.virtualenv_root, 'lib', 'python2.7', 'site-packages', 'project.pth') + 
files.append(path_file, env.code_root, use_sudo=True) + sudo('chown %s:%s %s' % (env.project_user, env.project_user, path_file)) + sudo('chown %(project_user)s:%(project_user)s -R %(code_root)s' % env) if requirements: update_requirements() # New requirements might need new tables/migrations @@ -263,7 +197,7 @@ def deploy(branch=None): elif migrations: syncdb() collectstatic() - supervisor_command('restart %(environment)s:*' % env) + supervisor_command('restart %(project)s:*' % env) @task diff --git a/project_name/settings/staging.py b/project_name/settings/staging.py index f9796fd..97b8970 100644 --- a/project_name/settings/staging.py +++ b/project_name/settings/staging.py @@ -5,6 +5,13 @@ DATABASES['default']['NAME'] = '{{ project_name }}_staging' + +PUBLIC_ROOT = '/var/www/{{ project_name }}/public/' + +STATIC_ROOT = os.path.join(PUBLIC_ROOT, 'static') + +MEDIA_ROOT = os.path.join(PUBLIC_ROOT, 'media') + INSTALLED_APPS += ( 'gunicorn', ) diff --git a/public/502.html b/project_name/static/502.html similarity index 100% rename from public/502.html rename to project_name/static/502.html diff --git a/public/robots.txt b/project_name/static/robots.txt similarity index 100% rename from public/robots.txt rename to project_name/static/robots.txt diff --git a/requirements/dev.txt b/requirements/dev.txt index ef218f7..2d25beb 100644 --- a/requirements/dev.txt +++ b/requirements/dev.txt @@ -2,8 +2,6 @@ django-debug-toolbar==0.9.4 coverage==3.5.1 pylint==0.25.1 -fabric==1.4.3 -ssh==1.7.14 -pycrypto==2.6.0 -Jinja2==2.6 -argyle==0.2.0 +fabric==1.6.0 +paramiko==1.10.1 +pycrypto==2.6.0 \ No newline at end of file