diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..a0efcf7e --- /dev/null +++ b/.gitignore @@ -0,0 +1,25 @@ +node_modules +*.pem +pass +.pass +hellorunnable +dump.rdb +erl_crash.dump +npm-debug.log +ca.srl +.DS_Store +ansible/roles/hipache/templates/runnable* +enviroments/**/k8/**/configMaps/*cert* +dock.sh +dock-pool.sh +environments/**/secrets +# Leaving for now while PR is merged +environments/*/k8 +# Meant to not break anything. Might remove later +environments/runnable-on-prem-test +*.retry +*.tfstate* +terraform/credentials.tfvars +terraform/.build +ansible/secrets/* +.idea diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..3b7ed08f --- /dev/null +++ b/Dockerfile @@ -0,0 +1,38 @@ +FROM ubuntu:14.04 + +RUN apt-get update -y +RUN DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y -q unzip build-essential python-pip python-dev python-yaml libxml2-dev libxslt1-dev zlib1g-dev git curl sshpass openssh-client +RUN pip install --upgrade pyyaml jinja2 pycrypto + +RUN curl -sL https://deb.nodesource.com/setup_7.x | sudo -E bash - && \ + apt-get install -y nodejs + +RUN curl -O https://releases.hashicorp.com/vault/0.6.3/vault_0.6.3_linux_amd64.zip && \ + unzip ./vault_0.6.3_linux_amd64.zip -d /bin && \ + chmod +x /bin/vault + +RUN git clone git://github.com/ansible/ansible.git --recursive /opt/ansible + +RUN cd /opt/ansible && \ + git checkout v2.1.3.0-1 && \ + git submodule update --init --recursive && \ + bash -c 'source ./hacking/env-setup' + +ENV PATH /opt/ansible/bin:$PATH +ENV PYTHONPATH /opt/ansible/lib:$PYTHONPATH +ENV MANPATH /opt/ansible/docs/man:$MANPATH + +ADD ./ssh /root/.ssh +RUN echo 'eval `ssh-agent`' >> /root/start.sh +RUN echo 'ssh-add /root/.ssh/id_rsa' >> /root/start.sh +RUN echo 'npm start' >> /root/start.sh +RUN chmod +x /root/start.sh + +ADD ./ansible/ /ansible +RUN cd /ansible && npm install + +ADD ./deployer/ /deployer +RUN cd /deployer && npm install + +WORKDIR /deployer +CMD 
/root/start.sh diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..56fa638e --- /dev/null +++ b/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,23 @@ +We should treat our k8 files as compiled files, since we don't actually (http://blog.andrewray.me/dealing-with-compiled-files-in-git/). We should probably wait some time until we actually have more confidence in our compilation. + +[//]: # (Let's get your best description here about what's happend! Here's a list as well, if you like:) + +* I removed this function +* I fixed all these things, etc. + +#### Dependencies + +- [ ] list dependencies (eg, PR from another branch or repo; tags or versions required prior to deployment) + +#### Tests + +> Test any modifications on one of our environments. + +- [ ] tested on _environment_ by _someone_ + +#### Deployment (post-merge) + +> Ensure that all environments have the given changes. + +- [ ] deployed to gamma +- [ ] deployed to delta diff --git a/README.md b/README.md index 9404c0e5..cc3ff42d 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,155 @@ devops-scripts ============== -devops-scripts +Scripts for managing our deployments. + +# How to Deploy at Runnable +## Setup + +Before you can deploy you'll need to install the appropriate tools, scripts, and keys on your local machine. +To do so, execute the following steps: + +1. Install Ansible v2.2.1.0 (the deploy automation tool we use to deploy projects to production) +Installation: http://docs.ansible.com/intro_installation.html +Upgrading: `sudo pip install ansible==2.2.1.0` or http://docs.ansible.com/ansible/intro_installation.html#latest-releases-via-pip + +2. Install JMESPath: +`pip install jmespath-terminal` + +3. Get the latest devops-scripts (the recipes that we use to deploy various projects) +https://github.com/CodeNow/devops-scripts + +4. Change to the devops scripts repo directory and run the following command: +`ln -s //ssh/config ~/.ssh/config` + +5. 
Obtain the "Ansible Secrets" zip for the environment you want to deploy (or create the new environment following [./environments/README.md](./environments/README.md)) + +6. Unzip file obtained above into `devops-scripts/environments/${YOUR_ENV}/secrets` + +7. Copy the `*.pem` files from `devops-scripts/ansible/secrets` to your `~/.ssh` directory + +8. Install two required tools onto your machine: +```bash +brew update && brew install vault daemon +``` + +At this point you should be capable of deploying; keep reading to find out how to actually perform a deploy! + +## Deploying Services +- **IMPORTANT:** always pull latest devopts-scripts (`git pull origin master`) +- **IMPORTANT:** Before you deploy a new version of any project make sure to determine which version of the project is currently deployed. This way you can quickly revert to the last stable release if something goes wrong after pushing a new version. + +### Step 1: Determine the Current Deploy Version +To determine the latest deploy tag for a project please check the project's repository on +github and look for the latest release tag (should be in the form `vX.Y.Z`). Once you've located the tag, +copy it down somewhere that is easily and quickly accessible (you may need to use it quickly if something goes wrong). + +### Step 2: Deploy the Project via `ansible-playbook` + +- **WARNING:** If you were unable to determine the last deploy tag for a project and cannot revert **STOP**. + Ask someone on the team for help before continuing. +- **IMPORTANT:** All commands should be run from the `devops-script/ansible` directory. + +#### Ansible Vault + +Please note that there are playbook that require encrypted [ansible vault](http://docs.ansible.com/ansible/playbooks_vault.html) files. 
If you see the following error: + +```bash +ERROR: A vault password must be specified to decrypt # snip +``` + +you will need to re-run the playbook with: + +```bash +--ask-vault-pass +``` + +#### Latest Tag +Build and deploy a service to the latest tag of its repository. This will build +the docker image needed to run the container on our infrastructure. + +#### Branch or Tag +Build and deploy a service to a specific branch or tag on its repository. This performs a build +of the docker image needed to run the service on our architecture. + +##### Command +``` +ansible-playbook -i ../[inventory_dir] [service-playbook] -e @../environments/[gamma-or-delta]/main.yml -e git_branch=[branch-or-tag] -t deploy +``` + +##### Arguments +- `[inventory_dir]` - The environment inventory files (servers and variables). Should be one of the following: + - `/enviroments/stage` - Runnable sandbox staging environment services + - `/environments/gamma` - Gamma services (internal use only; production mirror) + - `/environments/delta` - Delta services (real production) +- `[main-var-file]` - The file with the main variables for the environment +- `[service-playbook]` - The playbook for the service you wish to deploy, ex: + - `api.yml` - Deploys both the api and the api-workers services + - `shiva.yml` - Deploys the shiva micro-service + - `charon.yml` - Deploys a specific version of charon DNS to all docks +- `[branch-or-tag]` - The branch or tag you wish to deploy, ex: + - `-e git_branch=v1.9.9` (version tag) + - `-e git_branch=my-feature-branch` (branch) + - `-e git_branch=3928745892364578623` (specific commit) + +##### Rebuild and Deploy Tag or Branch (No Cache) +Forces a rebuild of a docker image for the given service at the given branch or tag and then deploys the +newly created image. This is useful when a previously deployed branch has new changes that need to +be deployed to an environment. 
+ +Generally this command is only used with `gamma-hosts/` as it is often used to update code +being tested in the production mirror. + +##### Command +``` +ansible-playbook -i ../[inventory_dir] [service-playbook] -e @../environments/[gamma-or-delta]/main.yml] -e git_branch=[branch-or-tag] -t deploy -e build_args=--no-cache +``` + +##### Arguments +- `[inventory_dir]` - The environment inventory files (servers and variables). +- `[main-var-file]` - The file with the main variables for the environment +- `[service-playbook]` - The playbook for the service you wish to deploy. +- `[branch-or-tag]` - The branch or tag you wish to deploy. + +## Reverting +If, for some reason, the new deploy is not operating as expected you can quickly revert by referencing the tag you collected in Step 1. +Simply run the appropriate deploy command in the previous section with the last release tag and the new deploy will be reverted. + +## Deploy Songs + +- **IMPORTANT:** Make sure to play the song loud and proud when deploying! + +It is the custom at Runnable to play a song to the entire team when deploying. For each of the repositories here are the respective songs: + +| Service | Deploy Song Link | +| ------- | ---------------- | +| api / api-workers | [Push it - Rick Ross](https://www.youtube.com/watch?v=qk2jeE1LOn8) | +| arithmancy | [onerepublic - Counting Stars](https://www.youtube.com/watch?v=hT_nvWreIhg) | +| big poppa | [Big Poppa - The Notorious B.I.G.](https://www.youtube.com/watch?v=phaJXp_zMYM) | +| charon | [Enter Sandman - Metallica](https://www.youtube.com/watch?v=CD-E-LDc384) | +| clio | [Billy Joel - We Didn't Start the Fire](https://www.youtube.com/watch?v=eFTLKWw542g) | +| cream | [C.R.E.A.M. 
- Wu-Tang Clan](https://www.youtube.com/watch?v=PBwAxmrE194) | +| customerbot | [Trailer Park Boys Theme](https://www.youtube.com/watch?v=dI6Drn3OA70) | +| deployer | [Rollout - Ludacris](https://www.youtube.com/watch?v=t21DFnu00Dc) | +| detention | [Unbreakable Kimmy Schmidt](https://youtu.be/CV9xF8CjhJk?t=21s) | +| docker-listener | [Call Me Maybe - Carly Rae Jepsen](https://www.youtube.com/watch?v=fWNaR-rxAic) | +| drake | [Drake - Hotline Bling](https://www.youtube.com/watch?v=uxpDa-c-4Mc) +| filibuster | [He's a Pirate - Pirates Of The Caribbean](https://www.youtube.com/watch?v=yRh-dzrI4Z4) | +| Full Stack Deploy (`all.yml`) | [The Cleveland Orchestra (George Szell conducting) Ludwig von Beethoven Symphony No. 9 "Chorale (Ode To Joy)" Opus 125 IV.](https://www.youtube.com/watch?v=4g5770gaais) | +| github-proxy | [Proxy - Martin Garrix](https://www.youtube.com/watch?v=NWB6-PJw4Mk) | +| khronos | [Time After Time - Cyndi Lauper](https://www.youtube.com/watch?v=VdQY7BusJNU) | +| krain | [Men at Work - Down Under](https://www.youtube.com/watch?v=XfR9iY5y94s) | +| link | [Zelda Main Theme Song](https://www.youtube.com/watch?v=cGufy1PAeTU) | +| mavis | [Fairy Tail Theme song](https://www.youtube.com/watch?v=R4UFCTMrV-o) | +| navi | [Ocarina of Time: Lost Woods The Legend of Zelda](https://www.youtube.com/watch?v=iOGpdGEEcJM) | +| optimus | [Original Transformers Opening Theme](https://www.youtube.com/watch?v=nLS2N9mHWaw) | +| pheidi | [Chariots of Fire Theme](https://www.youtube.com/watch?v=CSav51fVlKU) | +| runnable-angular | [Push It To The Limit - Scarface](https://www.youtube.com/watch?v=9D-QD_HIfjA) | +| sauron | [Sauron Theme Song from LOTR](https://www.youtube.com/watch?v=V_rk9VBrXMY) | +| Security Groups | [Out Of The Woods - Tayor Swift](https://www.youtube.com/watch?v=JLf9q36UsBk) +| shiva | [FFXIV Shiva Theme](https://www.youtube.com/watch?v=noJiH8HLZw4) | +| starlord | [Blue Swede - Hooked on a Feeling](https://www.youtube.com/watch?v=NrI-UBIB8Jk) | +| 
swarm-deamon | [Pink Floyd - Another Brick In The Wall](https://www.youtube.com/watch?v=5IpYOF4Hi6Q) | +| swarm-manager | [Eric Prydz VS Pink Floyd - 'Proper Education'](https://www.youtube.com/watch?v=IttkDYE33aU) | +| varnish | [Karate Kid Theme Song](https://www.youtube.com/watch?v=VIYqtkdMxQg) | +| vault / vault-values | [Seal - Kiss From A Rose](https://www.youtube.com/watch?v=zP3so2hY4CM) | diff --git a/ansible/.gitignore b/ansible/.gitignore new file mode 100644 index 00000000..61053df0 --- /dev/null +++ b/ansible/.gitignore @@ -0,0 +1,2 @@ +_cache +*.pyc diff --git a/ansible/README.md b/ansible/README.md new file mode 100644 index 00000000..78aec1d0 --- /dev/null +++ b/ansible/README.md @@ -0,0 +1,15 @@ + +Ansible provides a framework for our administration and deployment. It requires an organization for scripts and variables. By design it uses SSH to connect to all hosts before it executes the actions. As such it can be run from any machine. All Ansible provided functionality is idempotent and it strongly encourage custom scripts match that standard. + +Here is the organization of the files in `devops-scripts/ansible` + +* `*-hosts` - Files naming all the servers +* `*.yml` - The top level ansible actions. These files describe how a host has vars and roles executed on it. +* `/group_vars` - yml files that define variables and values for your ansible scripts. This mostly maps one to one with machine types in AWS. They’re a key value map. +* `/library` - Third party libraries and scripts. +* `/roles` - A set of folders containing the ansible roles. A role defines the executable actions by ansible. The center pieces is the `/tasks/main.yml`. It defines name actions and requirements. +The role can have several sub folders. + * `/handlers` - ??? + * `/defaults` - ??? + * `/meta` - contains dependencies + * `/template` - templates for any files that need to be generate and delivered. 
diff --git a/ansible/agreeable-egret.yml b/ansible/agreeable-egret.yml new file mode 100644 index 00000000..ca7f35c1 --- /dev/null +++ b/ansible/agreeable-egret.yml @@ -0,0 +1,11 @@ +--- +- hosts: agreeable-egret + vars_files: + - group_vars/alpha-agreeable-egret.yml + roles: + - role: notify + + - role: builder + + - role: k8-deployment + - role: k8-service diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 00000000..8083aaca --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,3 @@ +[defaults] +# Required so `sudo: yes` does not lose the environment variables, which hold the ssh-agent socket +sudo_flags = -HE diff --git a/ansible/api-core.yml b/ansible/api-core.yml new file mode 100644 index 00000000..766ec30f --- /dev/null +++ b/ansible/api-core.yml @@ -0,0 +1,14 @@ +--- +- hosts: api + vars_files: + - group_vars/alpha-api-base.yml + - group_vars/alpha-api.yml + roles: + - role: notify + rollbar_token: "{{ api_rollbar_token }}" + + - role: builder + + - role: docker_client + - role: k8-deployment + - role: k8-service diff --git a/ansible/api.yml b/ansible/api.yml new file mode 100644 index 00000000..d6520458 --- /dev/null +++ b/ansible/api.yml @@ -0,0 +1,3 @@ +- include: api-core.yml +- include: socket-server.yml +- include: workers.yml diff --git a/ansible/app-services.yml b/ansible/app-services.yml new file mode 100644 index 00000000..46333f10 --- /dev/null +++ b/ansible/app-services.yml @@ -0,0 +1,6 @@ +- include: detention.yml git_branch="{{ detention_branch }}" +- include: drake.yml git_branch="{{ drake_branch }}" +- include: eru.yml git_branch="{{ eru_branch }}" +- include: metis.yml git_branch="{{ astral_branch }}" +- include: shiva.yml git_branch="{{ astral_branch }}" +# run with: `--extra-vars "@current_versions.yml"` diff --git a/ansible/arithmancy.yml b/ansible/arithmancy.yml new file mode 100644 index 00000000..fd920d36 --- /dev/null +++ b/ansible/arithmancy.yml @@ -0,0 +1,11 @@ +--- +- hosts: arithmancy + vars_files: + - 
group_vars/alpha-arithmancy.yml + roles: + - role: notify + rollbar_token: "{{ arithmancy_rollbar_token }}" + + - role: builder + + - role: k8-deployment diff --git a/ansible/base.yml b/ansible/base.yml new file mode 100644 index 00000000..6ee8aead --- /dev/null +++ b/ansible/base.yml @@ -0,0 +1,23 @@ +--- +- hosts: localhost + connection: local + tasks: + - fail: msg="`host` (target host) needs to be defined to run this role" + when: host is not defined + + - add_host: + name={{ host }} + groups=dock + +- hosts: "{{ host }}" + roles: + - { role: apt_update } + - { role: package-dock, tags: [ dock, package ] } + - { role: package-aws, tags: [ dock, package ] } + - { role: package_ntp } + - { role: build_essential } + - { role: docker, tags: [ docker ] } + - { role: datadog, tags: [ datadog ] } + - { role: ulimits, tags: [ ulimits ] } + - { role: loggly, tags: [ loggly, clean ] } + - { role: node } diff --git a/ansible/bastion.yml b/ansible/bastion.yml new file mode 100644 index 00000000..f516d6a3 --- /dev/null +++ b/ansible/bastion.yml @@ -0,0 +1,9 @@ +--- +- hosts: bastion + roles: + - role: notify + tags: [ notify ], + app_name: bastion_sshd, + git_branch: latest, + name: bastion_sshd + - { role: bastion_sshd, tags: bastion-sshd } diff --git a/ansible/big-poppa-http.yml b/ansible/big-poppa-http.yml new file mode 100644 index 00000000..9ca96414 --- /dev/null +++ b/ansible/big-poppa-http.yml @@ -0,0 +1,13 @@ +--- +- hosts: big-poppa + vars_files: + - group_vars/alpha-big-poppa-base.yml + - group_vars/alpha-big-poppa-http.yml + roles: + - role: notify + rollbar_token: "{{ big_poppa_http_rollbar_token }}" + + - role: builder + + - role: k8-deployment + - role: k8-service diff --git a/ansible/big-poppa-worker.yml b/ansible/big-poppa-worker.yml new file mode 100644 index 00000000..da449c24 --- /dev/null +++ b/ansible/big-poppa-worker.yml @@ -0,0 +1,12 @@ +--- +- hosts: big-poppa + vars_files: + - group_vars/alpha-big-poppa-base.yml + - 
group_vars/alpha-big-poppa-worker.yml + roles: + - role: notify + rollbar_token: "{{ big_poppa_worker_rollbar_token }}" + + - role: builder + + - role: k8-deployment diff --git a/ansible/big-poppa.yml b/ansible/big-poppa.yml new file mode 100644 index 00000000..06e52daa --- /dev/null +++ b/ansible/big-poppa.yml @@ -0,0 +1,2 @@ +- include: big-poppa-http.yml +- include: big-poppa-worker.yml diff --git a/ansible/cadvisor.yml b/ansible/cadvisor.yml new file mode 100644 index 00000000..ad1d0629 --- /dev/null +++ b/ansible/cadvisor.yml @@ -0,0 +1,7 @@ +--- +- hosts: docks + vars_files: + - "group_vars/alpha-cadvisor.yml" + roles: + - { role: notify, tags: "notify" } + - { role: container_kill_start } diff --git a/ansible/charon.yml b/ansible/charon.yml new file mode 100644 index 00000000..e31d8b2a --- /dev/null +++ b/ansible/charon.yml @@ -0,0 +1,10 @@ +--- +- hosts: "{{ dock | default('docks') }}" + vars_files: + - group_vars/alpha-charon.yml + roles: + - { role: notify, tags: [notify] } + - { role: git_repo } + - { role: node_service } + - { role: loggly } + - { role: consul_value, tags: [consul_value] } diff --git a/ansible/clio.yml b/ansible/clio.yml new file mode 100644 index 00000000..972eab09 --- /dev/null +++ b/ansible/clio.yml @@ -0,0 +1,11 @@ +--- +- hosts: clio + vars_files: + - group_vars/alpha-clio.yml + roles: + - role: notify + + - role: builder + + - role: k8-deployment + - role: k8-service diff --git a/ansible/consul-services.yml b/ansible/consul-services.yml new file mode 100644 index 00000000..564bda73 --- /dev/null +++ b/ansible/consul-services.yml @@ -0,0 +1,4 @@ +--- +- hosts: consul + roles: + - { role: consul-services } diff --git a/ansible/consul-single.yml b/ansible/consul-single.yml new file mode 100644 index 00000000..564fe354 --- /dev/null +++ b/ansible/consul-single.yml @@ -0,0 +1,9 @@ +--- +- hosts: consul + vars_files: + - group_vars/alpha-consul-single.yml + roles: + - role: notify + + - role: k8-deployment + - role: k8-service diff 
--git a/ansible/consul-values-job.yml b/ansible/consul-values-job.yml new file mode 100644 index 00000000..2868d709 --- /dev/null +++ b/ansible/consul-values-job.yml @@ -0,0 +1,7 @@ +--- +- hosts: consul + vars_files: + - "group_vars/alpha-consul.yml" + - "group_vars/alpha-consul-values.yml" + roles: + - role: k8-job diff --git a/ansible/consul-values.yml b/ansible/consul-values.yml new file mode 100644 index 00000000..d4a666ec --- /dev/null +++ b/ansible/consul-values.yml @@ -0,0 +1,36 @@ +--- +- hosts: consul + vars_files: + - "group_vars/alpha-consul.yml" + tasks: + - name: make sure httplib2 is installed + become: true + pip: + name: httplib2 + executable: pip + + - name: put values into consul + run_once: true + when: write_values is defined + uri: + method=PUT + url={{ consul_url }}/v1/kv/{{ item.key }} + body="{{ item.value }}" + with_items: "{{ consul_seed }}" + + - name: get values from consul + tags: consul_values + run_once: true + when: read_values is defined + uri: + method=GET + url={{ consul_url }}/v1/kv/{{ item.key }} + with_items: "{{ consul_seed }}" + register: values + + - name: print values to screen + tags: consul_values + run_once: true + when: read_values is defined + debug: msg="{{ item.item.key }}" -> "{{ item.json[0].Value | b64decode }}" + with_items: "{{ values.results }}" diff --git a/ansible/consul.yml b/ansible/consul.yml new file mode 100644 index 00000000..a218a600 --- /dev/null +++ b/ansible/consul.yml @@ -0,0 +1,11 @@ +--- +- hosts: consul + serial: 1 + vars_files: + - group_vars/alpha-consul.yml + roles: + - { role: notify, tags: notify } + - { role: database } + - { role: datadog, tags: [ datadog ] } + - { role: consul } + - { role: container_kill_start } diff --git a/ansible/cream-http.yml b/ansible/cream-http.yml new file mode 100644 index 00000000..eb7a7cb8 --- /dev/null +++ b/ansible/cream-http.yml @@ -0,0 +1,13 @@ +--- +- hosts: cream + vars_files: + - group_vars/alpha-cream-base.yml + - group_vars/alpha-cream-http.yml + 
roles: + - role: notify + rollbar_token: "{{ cream_http_rollbar_token }}" + + - role: builder + + - role: k8-deployment + - role: k8-service diff --git a/ansible/cream-worker.yml b/ansible/cream-worker.yml new file mode 100644 index 00000000..d4d06a7b --- /dev/null +++ b/ansible/cream-worker.yml @@ -0,0 +1,12 @@ +--- +- hosts: cream + vars_files: + - group_vars/alpha-cream-base.yml + - group_vars/alpha-cream-worker.yml + roles: + - role: notify + rollbar_token: "{{ cream_http_rollbar_token }}" + + - role: builder + + - role: k8-deployment diff --git a/ansible/cream.yml b/ansible/cream.yml new file mode 100644 index 00000000..d9b74b1d --- /dev/null +++ b/ansible/cream.yml @@ -0,0 +1,2 @@ +- include: cream-worker.yml +- include: cream-http.yml diff --git a/ansible/customerbot.yml b/ansible/customerbot.yml new file mode 100644 index 00000000..ab40cf93 --- /dev/null +++ b/ansible/customerbot.yml @@ -0,0 +1,10 @@ +--- +- hosts: customerbot + vars_files: + - group_vars/alpha-customerbot.yml + roles: + - role: notify + + - role: builder + + - role: k8-deployment diff --git a/ansible/datadog-k8.yml b/ansible/datadog-k8.yml new file mode 100644 index 00000000..b7553370 --- /dev/null +++ b/ansible/datadog-k8.yml @@ -0,0 +1,41 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: kube-state-metrics +spec: + replicas: 1 + template: + metadata: + labels: + app: kube-state-metrics + spec: + containers: + - name: kube-state-metrics + image: gcr.io/google_containers/kube-state-metrics:v0.3.0 + ports: + - name: metrics + containerPort: 8080 + resources: + requests: + memory: 30Mi + cpu: 100m + limits: + memory: 50Mi + cpu: 200m +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: 'true' + labels: + app: kube-state-metrics + name: kube-state-metrics +spec: + ports: + - name: metrics + port: 8080 + targetPort: metrics + protocol: TCP + selector: + app: kube-state-metrics diff --git a/ansible/datadog.yml b/ansible/datadog.yml new file 
mode 100644 index 00000000..3557910e --- /dev/null +++ b/ansible/datadog.yml @@ -0,0 +1,8 @@ +--- +- hosts: datadog + vars_files: + - group_vars/alpha-datadog.yml + roles: + - role: notify + + - role: datadog-daemon diff --git a/ansible/deployer.yml b/ansible/deployer.yml new file mode 100644 index 00000000..38ca6e94 --- /dev/null +++ b/ansible/deployer.yml @@ -0,0 +1,16 @@ +--- +- hosts: rabbitmq + +- hosts: deployer + vars_files: + - group_vars/alpha-deployer.yml + roles: + - role: copy_secret_file + file_names: + - "{{ env }}.pem" + - id_rsa + - vault-pass + + + - { role: build_with_dockerfile } + - { role: container_kill_start } diff --git a/ansible/detention.yml b/ansible/detention.yml new file mode 100644 index 00000000..9691c65b --- /dev/null +++ b/ansible/detention.yml @@ -0,0 +1,11 @@ +--- +- hosts: detention + vars_files: + - group_vars/alpha-detention.yml + roles: + - role: notify + + - role: builder + + - role: k8-deployment + - role: k8-service diff --git a/ansible/dock-generate-launch-config.yml b/ansible/dock-generate-launch-config.yml new file mode 100644 index 00000000..89603148 --- /dev/null +++ b/ansible/dock-generate-launch-config.yml @@ -0,0 +1,8 @@ +--- +- hosts: redis +- hosts: consul + +- hosts: user-local + connection: local + roles: + - { role: dock_launch_config } diff --git a/ansible/dock-init.yml b/ansible/dock-init.yml new file mode 100644 index 00000000..bce9afc2 --- /dev/null +++ b/ansible/dock-init.yml @@ -0,0 +1,14 @@ +--- +- hosts: "{{ dock }}" + vars_files: + - group_vars/alpha-dock-init.yml + roles: + - { role: notify, tags: [notify] } + - { role: package-dock, tags: [dock, package] } + - { role: package-aws, tags: [dock, package] } + - { role: docker, tags: [docker] } + - { role: datadog, tags: [datadog] } + - { role: git_repo } + - { role: dock-init } + - { role: consul_value, tags: [consul_value] } + - { role: ulimits, tags: [ulimits] } diff --git a/ansible/dock-services.yml b/ansible/dock-services.yml new file mode 100644 
index 00000000..0e2ec173 --- /dev/null +++ b/ansible/dock-services.yml @@ -0,0 +1,6 @@ +- include: khronos.yml git_branch="{{ khronos_branch }}" +- include: optimus.yml git_branch="{{ optimus_branch }}" +- include: palantiri.yml git_branch="{{ palantiri_branch }}" +- include: sauron.yml git_branch="{{ sauron_branch }}" +- include: swarm-manager.yml +# run with: `--extra-vars "@current_versions.yml"` diff --git a/ansible/dock.yml b/ansible/dock.yml new file mode 100644 index 00000000..39baf0cb --- /dev/null +++ b/ansible/dock.yml @@ -0,0 +1,30 @@ +--- +- hosts: localhost + connection: local + tasks: + - fail: msg="`dock` (target dock) needs to be defined to run this role" + when: dock is not defined + + - add_host: + name={{ dock }} + groups=dock + +- include: image-builder.yml git_branch="v4.6.0" + +- hosts: "{{ dock }}" + tasks: + - name: remove datadog agent + become: true + apt: + name=datadog-agent + state=absent + purge=yes + force=yes + + roles: + - { role: install-ssm } + - { role: dock-images } + +- include: charon.yml git_branch="v5.0.3" +- include: dock-init.yml git_branch="v10.1.4" +- include: krain.yml git_branch="v0.3.1" diff --git a/ansible/docker-files/base/Dockerfile b/ansible/docker-files/base/Dockerfile new file mode 100644 index 00000000..ce30b6ad --- /dev/null +++ b/ansible/docker-files/base/Dockerfile @@ -0,0 +1,21 @@ +FROM dockerfile/ansible + +# Update aptitude with new repo +RUN apt-get update + +# Install software +RUN apt-get install -y build-essential wget make git + +# Make ssh dir +RUN mkdir /root/.ssh/ +ADD ./Test-runnable.pem /root/.ssh/Test-runnable.pem +ADD ./runnablevpc.pem /root/.ssh/runnablevpc.pem + +# add .ssh config file +ADD config /root/.ssh/config + +# Copy over private key, +ADD id_rsa /root/.ssh/id_rsa + +# set correct permissions +RUN chmod 600 /root/.ssh/id_rsa diff --git a/ansible/docker-files/base/README.md b/ansible/docker-files/base/README.md new file mode 100644 index 00000000..ba5dcbab --- /dev/null +++ 
b/ansible/docker-files/base/README.md @@ -0,0 +1 @@ +must add pem files before building diff --git a/ansible/docker-files/base/config b/ansible/docker-files/base/config new file mode 100644 index 00000000..32e89389 --- /dev/null +++ b/ansible/docker-files/base/config @@ -0,0 +1,2 @@ +Host github.com + StrictHostKeyChecking no \ No newline at end of file diff --git a/ansible/docker-files/base/id_rsa.pub b/ansible/docker-files/base/id_rsa.pub new file mode 100644 index 00000000..9b34dfd8 --- /dev/null +++ b/ansible/docker-files/base/id_rsa.pub @@ -0,0 +1,2 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9u533YdQnQwk97oMWgOohNnYfLOWbbu3HMM9cmIShQ8TGWpJLm4nnk0CcuwoZq3lfTEbfJcvFEGQtLfnw2UezB9JaoDlXYLGZYLeXYSSzN6xR5FSm2dENnYxyG9S9EgGhD/B12+RyaSEFQwQmerzlS04PGhkwkoFwFENC3fY2eme4fLQ9p6AWrdJ977kUWqGwAnpQNtgmIj+vUJJLwvHlfmCxMnCtru4rKyqSgmZBpaIxSwfHGQo+GgvE6e9LzF2bFHd/3895C0t2inxY7h7I6DaG5NTEKvoTPwJZXkZnQhx+e1RZtPoNJJ6iS7zqY7faXlFOQMqQnpwjQzgyd163 root@workstation.example.com +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9u533YdQnQwk97oMWgOohNnYfLOWbbu3HMM9cmIShQ8TGWpJLm4nnk0CcuwoZq3lfTEbfJcvFEGQtLfnw2UezB9JaoDlXYLGZYLeXYSSzN6xR5FSm2dENnYxyG9S9EgGhD/B12+RyaSEFQwQmerzlS04PGhkwkoFwFENC3fY2eme4fLQ9p6AWrdJ977kUWqGwAnpQNtgmIj+vUJJLwvHlfmCxMnCtru4rKyqSgmZBpaIxSwfHGQo+GgvE6e9LzF2bFHd/3895C0t2inxY7h7I6DaG5NTEKvoTPwJZXkZnQhx+e1RZtPoNJJ6iS7zqY7faXlFOQMqQnpwjQzgyd163 root@workstation.example.com diff --git a/ansible/docker-files/main/Dockerfile b/ansible/docker-files/main/Dockerfile new file mode 100644 index 00000000..6f384f67 --- /dev/null +++ b/ansible/docker-files/main/Dockerfile @@ -0,0 +1,19 @@ +FROM {{ registry_address }}/runnable/ansible_base + +ENV REPO_BASE /repos + +RUN mkdir $REPO_BASE +WORKDIR /repos + +# clone repos for dockerfiles +RUN git clone git@github.com:CodeNow/api.git +RUN git clone git@github.com:CodeNow/runnable-angular.git +RUN git clone git@github.com:CodeNow/devops-scripts.git +# copy things into places +RUN mv $REPO_BASE/devops-scripts/ssh/config /root/.ssh/config +ENV 
ANSIBLE_HOSTS /repos/devops-scripts/ansible/hosts + +WORKDIR /repos/devops-scripts/ansible + +CMD ansible-playbook + diff --git a/ansible/docker-listener.yml b/ansible/docker-listener.yml new file mode 100644 index 00000000..c0295782 --- /dev/null +++ b/ansible/docker-listener.yml @@ -0,0 +1,12 @@ +--- +- hosts: docker-listener + vars_files: + - group_vars/alpha-docker-listener.yml + roles: + - role: notify + rollbar_token: "{{ docker_listener_rollbar_key }}" + + - role: builder + + - role: docker_client + - role: k8-deployment diff --git a/ansible/drake-http.yml b/ansible/drake-http.yml new file mode 100644 index 00000000..cb02165f --- /dev/null +++ b/ansible/drake-http.yml @@ -0,0 +1,13 @@ +--- +- hosts: drake + vars_files: + - group_vars/alpha-drake-base.yml + - group_vars/alpha-drake-http.yml + roles: + - role: notify + rollbar_token: "{{ drake_http_rollbar_token }}" + + - role: builder + + - role: k8-deployment + - role: k8-service diff --git a/ansible/drake-worker.yml b/ansible/drake-worker.yml new file mode 100644 index 00000000..ae9ac7ce --- /dev/null +++ b/ansible/drake-worker.yml @@ -0,0 +1,12 @@ +--- +- hosts: drake + vars_files: + - group_vars/alpha-drake-base.yml + - group_vars/alpha-drake-worker.yml + roles: + - role: notify + rollbar_token: "{{ drake_worker_rollbar_token }}" + + - role: builder + + - role: k8-deployment diff --git a/ansible/drake.yml b/ansible/drake.yml new file mode 100644 index 00000000..f1b7e442 --- /dev/null +++ b/ansible/drake.yml @@ -0,0 +1,2 @@ +- include: drake-http.yml +- include: drake-worker.yml diff --git a/ansible/enterprise-sign-in.yml b/ansible/enterprise-sign-in.yml new file mode 100644 index 00000000..66e267cb --- /dev/null +++ b/ansible/enterprise-sign-in.yml @@ -0,0 +1,7 @@ +--- +- hosts: web + vars_files: + - group_vars/alpha-enterprise-sign-in.yml + roles: + - { role: builder, tags: "build" } + - { role: k8-job } diff --git a/ansible/eru.yml b/ansible/eru.yml new file mode 100644 index 00000000..3deab881 --- 
/dev/null +++ b/ansible/eru.yml @@ -0,0 +1,11 @@ +--- +- hosts: eru + vars_files: + - group_vars/alpha-eru.yml + roles: + - role: notify + + - role: builder + + - role: k8-deployment + - role: k8-service diff --git a/ansible/filter_plugins/split.py b/ansible/filter_plugins/split.py new file mode 100755 index 00000000..b4698d11 --- /dev/null +++ b/ansible/filter_plugins/split.py @@ -0,0 +1,29 @@ +from ansible import errors +import re + +def split_string(string, seperator=' '): + try: + return string.split(seperator) + except Exception, e: + raise errors.AnsibleFilterError('split plugin error: %s, string=%s' % str(e),str(string) ) + +def split_regex(string, seperator_pattern): + try: + return re.split(seperator_pattern, string) + except Exception, e: + raise errors.AnsibleFilterError('split plugin error: %s' % str(e)) + +def split_get_index(array, index): + try: + return array[index] + except Exception, e: + raise errors.AnsibleFilterError('split plugin error: %s, index=%s' % str(e),str(index)) + +class FilterModule(object): + ''' A filter to split a string into a list. 
''' + def filters(self): + return { + 'split' : split_string, + 'split_regex' : split_regex, + 'split_get_index': split_get_index + } diff --git a/ansible/generate-all-client-certs.yml b/ansible/generate-all-client-certs.yml new file mode 100644 index 00000000..a4143bad --- /dev/null +++ b/ansible/generate-all-client-certs.yml @@ -0,0 +1,21 @@ +--- +- hosts: localhost + connection: local + tasks: + - name: generate client certs + shell: + cmd: "./roles/docker_client/scripts/genClientCert.sh {{ item }} {{ certs_root }}" + chdir: ./ + with_items: + - "api" + - "api-core" + - "socket-server" + - "api-socket-server" + - "workers" + - "api-worker" + - "khronos" + - "palantiri" + - "docker-listener" + - "shiva" + - "sauron" + - "swarm-manager" diff --git a/ansible/getVersions.sh b/ansible/getVersions.sh new file mode 100755 index 00000000..d720c07c --- /dev/null +++ b/ansible/getVersions.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +consul=localhost:8500/v1 + +if [[ $1 != "" ]]; then + consul=$1:8500/v1 +fi + +kv=$consul/kv + +echo NODE_ENV: $(curl -s $kv/node/env | jq -r '.[0].Value' | base64 -d) + +echo image-builder: $(curl -s $kv/image-builder/version | jq -r '.[0].Value' | base64 -d) +echo docker-listener: $(curl -s $kv/docker-listener/version | jq -r '.[0].Value' | base64 -d) +echo krain: $(curl -s $kv/krain/version | jq -r '.[0].Value' | base64 -d) +echo sauron: $(curl -s $kv/sauron/version | jq -r '.[0].Value' | base64 -d) +echo charon: $(curl -s $kv/charon/version | jq -r '.[0].Value' | base64 -d) diff --git a/ansible/github-varnish.yml b/ansible/github-varnish.yml new file mode 100644 index 00000000..aa07e28b --- /dev/null +++ b/ansible/github-varnish.yml @@ -0,0 +1,11 @@ +--- +- hosts: github-varnish + vars_files: + - group_vars/alpha-github-varnish.yml + roles: + - role: notify + + - role: build_with_dockerfile + + - role: k8-deployment + - role: k8-service diff --git a/ansible/gke-dock.yml b/ansible/gke-dock.yml new file mode 100644 index 00000000..d85ce812 --- 
/dev/null +++ b/ansible/gke-dock.yml @@ -0,0 +1,8 @@ +--- +- hosts: localhost + vars_files: + - group_vars/alpha-gke-dock.yml + roles: + - role: notify + + - role: k8-deployment diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml new file mode 100644 index 00000000..0f88d171 --- /dev/null +++ b/ansible/group_vars/all.yml @@ -0,0 +1,273 @@ +--- +# Defaults +github_domain: api.github.com +github_protocol: https + +# ops +ops_slack_channel_url: https://hooks.slack.com/services/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 +environment_root: "../environments/{{ env }}/" +opts_root: "{{ environment_root }}/k8/{{ name }}" +secrets_root: "{{ environment_root }}/secrets/" +certs_root: "{{ secrets_root }}/certs/" +domains_root: "{{ secrets_root }}/domains/" +docker_client_root: "{{ secrets_root }}/docker-client/" +config_maps_path: "{{ opts_root }}/configMaps" +services_path: "{{ opts_root }}/services" +deployments_path: "{{ opts_root }}/deployments" +jobs_path: "{{ opts_root }}/jobs" +cron_jobs_path: "{{ opts_root }}/crons" +volumes_path: "{{ opts_root }}/volumes" +daemon_sets_path: "{{ opts_root }}/daemonSets" + +container_tag: "{{ git_branch }}" +# registry settings +image_pull_secret_name: runnable-kubernetes-pull-secret +registry_host: "quay.io" +registry_address: "{{ registry_host }}" +container_image: "{{ registry_address }}/runnable/{{ name }}" +inject_ca: true +quay_api_token: QB9UzzNhwClqMgRyMgNGrSGLdUYZPJLJALdcpKRa + +# For docker built containers +do_not_push: true + +# default for container args (which are optional) +container_run_args: "npm start" + +# REPO_BASE for getting dockerfiles +repo_base: "{{ lookup('env','REPO_BASE') }}" + +dockerfile: basic_node/Dockerfile +docker_network: 172.17.0.0/16 +base_dockerfile: node_base + +# slack rooms to send notifications +slack_token: T029DEC10/B1RSX8LNS/qLLSYEEqkGddohOdE44eDf3j +slack_channels: [ '#ops' ] + +loggly_token: f673760d-e0b3-4a93-a15e-2862ea074f91 +loggly_password: 
TebejAcutHeH_Ch0tR9ru4anaT6CRu*3 +loggly_username: ops + +## +## cores and logs +## + +app_log_dir: /docker/app-logs +core_file_dir: /docker/app-cores + +## +## shared application configs +## + +# agreeable-egret +agreeable_egret_hostname: egret.{{ domain }} +egret_pg_database: egret +egret_port: 65520 + +#angular +angular_url: https://app.{{ domain }} +mixpanel_proxy_url: https://mixpanel.{{ domain }} + +# api +api_github_hook_secret: 3V3RYTHINGisAW3S0ME! +api_hostname: api.{{ domain }} +api_port: 80 +api_socket_server_hostname: apisock.{{ domain }} +api_socket_server_rollbar_key: cad27f265e8e473c9b293615e7ed0f3a +api_url: https://{{ api_hostname }} +api_mongo_user: api +api_mongo_auth: "{{ api_mongo_user }}:{{ api_mongo_password }}" +sendgrid_key: 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 + +# arithmancy +arithmancy_rollbar_key: 1fe145fdeb1a4526b48f5dd82b0d6eb5 +arithmancy_pg_database: arithmancy + +# astral +pg_database: astral + +# big-poppa +big_poppa_pg_database: big_poppa +big_poppa_port: 7788 +big_poppa_host_address: big-poppa + +# cadvisor +cadvisor_port: 29007 + +# charon +charon_port: 53 + +# clio +clio_host_address: clio +clio_port: 8008 +clio_mongo_database: clio +clio_mongo_user: clio +clio_mongo_auth: "{{ clio_mongo_user }}:{{ clio_mongo_password }}" + +# consul +consul_api_port: 8500 +consul_https_port: 8501 +consul_storage_size_gb: 10 +# Host to be used from inside the kubernetes cluster +consul_internal_host_address: 'consul' + +# cream +cream_port: 8899 +cream_host_address: cream + +# datadog +datadog_api_key: d3ab5d85bca924f9d4e33d307beacb4a +datadog_host_address: "datadog" +datadog_port: 8125 + +# detention +detention_host_address: detention +detention_port: 9123 +detention_hostname: detention.{{ domain }} + +# docker log driver +log_driver: syslog +log_facility: local7 + +# docker +docker_port: 4242 + +# docker-listener +docker_listener_rollbar_key: 7b5924eccfff415d9fc00c051811e9d7 + +# drake +drake_hostname: 
drake.{{ domain }} +drake_port: 80 + +# ec2 +aws_access_key: "AKIAIWRXWZ4P3MIMY3LA" +aws_secret_key: "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" +aws_region: "us-west-2" + +# eru +eru_http_port: 5501 +eru_graphql_port: 5502 + +# fluffy +fluffy_port: 80 +fluffy_hostname: "fluffy.{{ domain }}" +fluffy_rollbar_token: 6eb108bab1f54687b9b023e5b485967a + +# github-varnish +github_varnish_host: github-varnish +github_varnish_port: 80 + +# kartographer +kartographer_rollbar_token: a7f85fda20bd4b9fb7b1197fc04d3c89 + +# keymaker +keymaker_pg_database: keymaker +keymaker_port: 3008 +keymaker_host_address: keymaker +keymaker_http_rollbar_token: 4cfd134be18b4f1ca8e05925088360ff +keymaker_worker_rollbar_token: b6685a5db05d4550afe1c8545c095b29 + +# khronos +rollbar_khronos_token: 5a140828cce14410812d34a3ef936f25 + +# krain +krain_port: 3100 + +# marketing +marketing_url: https://{{ domain }} + +# metabase +metabase_port: 3000 + +# mongo +mongo_storage_size_gb: 50 + +# navi +navi_extra_ports: [11211,15672,25672,27017,29015,3000,3001,3306,4000,4607,5000,5200,5400,5432,6379,6969,8000,8080,9000,9292,9300] +navi_host_address: navi +navi_http_port: 3567 +navi_mongo_hosts: "mongo" +navi_rollbar_token: 719269e87b9b42848472542a8b2059ae + +# node_exporter +node_exporter_port: 29006 + +# optimus +optimus_port: 80 +optimus_hostname: optimus.{{ domain }} +optimus_rollbar_token: a49f9cce09ee46f09df3f603178fba75 + +# palantiri +palantiri_rollbar_token: ed971bbca9ea44a29268afc606ab8c7d + +# pheidi +pheidi_email_github_token: 115b4d854e34e8a5ba99ab73eefe4bf7a8944d6d +pheidi_rollbar_token: 6fc422ac645441bea7f6f14853eb01ab + +# postgres +postgres_storage_size_gb: 50 + +# prometheus +prometheus_port: 9090 +prometheus_storage_size_gb: 100 +prometheus_aws_access_key: AKIAIFG37NSI6O2QMRRQ +prometheus_aws_secret_key: 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 + +# prometheus_alert +prometheus_alert_port: 9093 +prometheus_alert_url: http://prometheus-alerts:{{ prometheus_alert_port }} + +# rabbit 
+rabbit_host_address: rabbitmq +rabbit_port: 5672 +rabbit_storage_size_gb: 50 + +# redis +redis_host_address: redis +redis_port: 6379 +redis_storage_size_gb: 50 + +# sauron +sauron_rollbar_token: 83157ae2d50d4b6398e404c0b9978d26 + +# secrets +secret_root: "{{ opts_root }}/secrets" + +# shiva +pg_user: shiva +shiva_port: 3000 +shiva_rollbar_token: 0526a90faec845d796e1ef5361a00526 + +# swarm +swarm_master_port: 2375 +swarm_host_address: swarm +swarm_container_name: swarm + +# navi/link +link_hello_runnable_github_token: 5d8f7029d3d6941b0fc62a7eb8c605d8e0bc7c29 +navi_mongo_database: navi +navi_mongo_user: "navi" +navi_mongo_auth: "{{ navi_mongo_user }}:{{ navi_mongo_password }}" +navi_mongo_hosts: "{{ mongo_hosts }}" + +npm_token: c0c4b32a-3de5-4e27-9d32-56c1616746d8 + +# remote vault +vault_port: 8200 + +# user-vault +user_vault_port: 8200 +user_vault_host_address: user-vault + +# local-vault +vault_local_port: 31836 +vault_addr: http://127.0.0.1:{{ vault_local_port }} +vault_consul_address: "consul.{{ domain }}" + +# userland +userland_host_address: userland + +# vault +vault_api_port: 8200 +vault_url: http://vault.{{ domain }} diff --git a/ansible/group_vars/alpha-agreeable-egret.yml b/ansible/group_vars/alpha-agreeable-egret.yml new file mode 100644 index 00000000..500f147f --- /dev/null +++ b/ansible/group_vars/alpha-agreeable-egret.yml @@ -0,0 +1,22 @@ +name: agreeable-egret +service_name: egret + +inject_ca: false +hosted_ports: ["{{ egret_port }}"] +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.3.1" +npm_version: "3.7.5" + +container_envs: + - name: HELLO_RUNNABLE_GITHUB_TOKEN + value: "{{ api_hello_runnable_github_token }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: RUNNABLE_API_URL + value: "{{ api_url }}" + - name: PORT + value: "{{ hosted_ports[0] }}" + - name: RUNNABLE_USER_CONTENT_DOMAIN + value: "{{ user_content_domain }}" + - name: POSTGRES_CONNECT_STRING + value: postgres://{{ egret_pg_user }}:{{ egret_pg_pass }}@{{ 
egret_pg_host }}/{{ egret_pg_database }} diff --git a/ansible/group_vars/alpha-api-base.yml b/ansible/group_vars/alpha-api-base.yml new file mode 100644 index 00000000..1f60fffd --- /dev/null +++ b/ansible/group_vars/alpha-api-base.yml @@ -0,0 +1,142 @@ +node_version: 4.4.3 +npm_version: 4.0.3 + +repo: git@github.com:CodeNow/api.git +has_shrinkwrap: true +container_image: "{{ registry_address }}/runnable/api" + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] +dockerfile_pre_install_commands: [ + 'echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc' +] + +volume_mounts: + - name: "{{ name }}-docker-ssl-certs" + path: /etc/ssl/docker + kind: configMap + +tcp_readiness_probe_port: "{{ api_port }}" + +api_base_container_envs: + - name: DOCKER_IMAGE_BUILDER_CACHE + value: "/home/anandkumarpatel/cache" + - name: "DOCKER_IMAGE_BUILDER_HOST_CONFIG_PATH" + value: "/root/.docker/config.json" + - name: VALID_REDIR_TLDS + value: "{{ domain }},runnablecloud.com" + - name: FULL_API_DOMAIN + value: "{{ api_url }}" + - name: FULL_FRONTEND_DOMAIN + value: "https://{{ domain }}" + - name: AWS_ACCESS_KEY_ID + value: "{{ api_aws_access_key_id }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ api_aws_secret_access_key }}" + - name: BIG_POPPA_HOST + value: "http://{{ big_poppa_host_address }}:{{ big_poppa_port }}" + - name: CLIO_HTTP_URL + value: "http://{{ clio_host_address }}:{{ clio_port }}/" + - name: COOKIE_DOMAIN + value: "{{ domain }}" + - name: CREAM_HOST + value: "http://{{ cream_host_address }}:{{ cream_port }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: DOCKER_BUILD_LINE_TIMEOUT_MS + value: "3600000" + - name: DOMAIN + value: "{{ domain }}" + - name: KEYMAKER_HTTP_URL + value: "http://{{ keymaker_host_address }}:{{ keymaker_port }}/" + - name: GITHUB_CALLBACK_URL + value: "{{ api_url }}/auth/github/callback" + - name: GITHUB_CLIENT_ID + value: "{{ api_github_client_id }}" + - name: 
GITHUB_CLIENT_SECRET + value: "{{ api_github_client_secret }}" + - name: GITHUB_DEPLOY_KEYS_BUCKET + value: "{{ api_github_deploy_keys_bucket }}" + - name: GITHUB_DEPLOY_KEY_TITLE + value: "Runnable-{{ environment }}" + - name: GITHUB_HOOK_URL + value: "{{ api_url }}/actions/github" + - name: GITHUB_UPGRADE_REDIRECT_URL + value: "https://app.{{ domain }}/githubAuthUpgrade/" + - name: GITHUB_WEBHOOK_URL + value: "https://{{ drake_hostname }}/github" + - name: GITHUB_VARNISH_HOST + value: "{{ github_varnish_host }}" + - name: GITHUB_VARNISH_PORT + value: "{{ github_varnish_port }}" + - name: GITHUB_PROTOCOL + value: "http" + - name: HELLO_RUNNABLE_GITHUB_ID + value: "{{ hello_runnable_github_id }}" + - name: HELLO_RUNNABLE_GITHUB_TOKEN + value: "{{ api_hello_runnable_github_token }}" + - name: KRAIN_PORT + value: "{{ krain_port }}" + - name: MIXPANEL_APP_ID + value: "{{ api_mixpanel_app_id | default('ansible_undefined') }}" + - name: MONGO_AUTH + value: "{{ api_mongo_auth }}" + - name: MONGO_DB + value: "{{ api_mongo_database }}" + - name: MONGO_HOSTS + value: "{{ mongo_hosts }}" + - name: MONGO_REPLSET_NAME + value: "{{ api_mongo_replset_name | default('ansible_undefined') }}" + - name: MONGO + value: "mongodb://{{ api_mongo_auth }}@{{ mongo_hosts }}/{{ api_mongo_database }}" + - name: NAVI_HOST + value: "http://{{ navi_host_address }}:{{ navi_http_port }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: NUM_WORKERS + value: "1" + - name: OPTIMUS_HOST + value: "http://{{ optimus_hostname }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: REDIS_IPADDRESS + value: "{{ redis_host_address }}" + - name: REDIS_PORT + value: "{{ redis_port }}" + - name: S3_CONTEXT_RESOURCE_BUCKET + value: "{{ api_s3_context_bucket }}" + - name: S3_LOG_BUCKET + value: "{{ 
api_s3_log_bucket }}" + - name: SENDGRID_KEY + value: "{{ sendgrid_key }}" + - name: LOG_LEVEL + value: "trace" + - name: SWARM_HOST + value: "http://{{ swarm_host_address }}:{{ swarm_master_port }}" + - name: USER_CONTENT_DOMAIN + value: "{{ user_content_domain }}" + - name: AWS_ALIAS_HOST + value: "{{ api_aws_alias_host }}" + - name: NEW_RELIC_APP_NAME + value: "{{ api_new_relic_app_name | default('ansible_undefined') }}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ new_relic_license_key | default('ansible_undefined') }}" + - name: NEW_RELIC_LOG_LEVEL + value: "fatal" + - name: NEW_RELIC_NO_CONFIG_FILE + value: "true" + - name: INTERCOM_APP_ID + value: "{{ api_intercom_app_id | default('ansible_undefined') }}" + - name: INTERCOM_API_KEY + value: "{{ api_intercom_api_key | default('ansible_undefined') }}" + - name: USER_VAULT_ENDPOINT + value: "http://{{ user_vault_load_balancer }}:{{ user_vault_port }}" diff --git a/ansible/group_vars/alpha-api.yml b/ansible/group_vars/alpha-api.yml new file mode 100644 index 00000000..4daa55d5 --- /dev/null +++ b/ansible/group_vars/alpha-api.yml @@ -0,0 +1,19 @@ +name: api +service_name: api + +hosted_ports: [ "{{ api_port }}" ] + +memory_hard_limit: 5G +memory_request: 1G +num_replicas: 16 + +envs: + - name: ROLLBAR_KEY + value: "{{ api_rollbar_key | default('ansible_undefined') }}" + +volume_mounts: + - name: "{{ name }}-docker-ssl-certs" + path: /etc/ssl/docker + kind: configMap + +container_envs: "{{ api_base_container_envs + envs }}" diff --git a/ansible/group_vars/alpha-arithmancy.yml b/ansible/group_vars/alpha-arithmancy.yml new file mode 100644 index 00000000..9f219082 --- /dev/null +++ b/ansible/group_vars/alpha-arithmancy.yml @@ -0,0 +1,26 @@ +name: "arithmancy" + +inject_ca: false +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" +npm_version: "2.14.7" + +container_envs: + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: NODE_ENV + 
value: "{{ node_env }}" + - name: POSTGRES_CONNECT_STRING + value: "postgres://{{ arithmancy_pg_user }}:{{ arithmancy_pg_pass }}@{{ arithmancy_pg_host }}/{{ arithmancy_pg_database }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: ROLLBAR_KEY + value: "{{ arithmancy_rollbar_key | default('ansible_undefined') }}" diff --git a/ansible/group_vars/alpha-big-poppa-base.yml b/ansible/group_vars/alpha-big-poppa-base.yml new file mode 100644 index 00000000..8d19e066 --- /dev/null +++ b/ansible/group_vars/alpha-big-poppa-base.yml @@ -0,0 +1,65 @@ +node_version: 4.4.7 +npm_version: 2 + +inject_ca: false +repo: "git@github.com:CodeNow/big-poppa.git" + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "apt-get update", + "apt-get install postgresql-client=9.4+165+deb8u2 -y", + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +big_poppa_base_container_envs: + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: GITHUB_TOKEN + value: "{{ big_poppa_github_token }}" + - name: INTERCOM_API_KEY + value: "{{ big_poppa_intercom_key | default('undefined') }}" + - name: INTERCOM_APP_ID + value: "{{ big_poppa_intercom_id | default('undefined') }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: ON_PREM + value: "{{ is_on_prem | default('ansible_undefined') | lower }}" + - name: PGUSER + value: "{{ big_poppa_pg_user }}" + - name: PGPASSWORD + value: "{{ big_poppa_pg_pass }}" + - name: PGHOST + value: "{{ pg_host }}" + - name: PGPORT + value: "{{ pg_port }}" + - name: PGDATABASE + value: "{{ big_poppa_pg_database }}" + - name: POSTGRES_CONNECT_STRING + value: "postgres://{{ big_poppa_pg_user }}:{{ big_poppa_pg_pass }}@{{ big_poppa_pg_host }}/{{ 
big_poppa_pg_database }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: MONGO + value: "mongodb://{{ big_poppa_mongo_auth }}@{{ mongo_hosts }}/{{ api_mongo_database }}" + - name: MONGO_REPLSET_NAME + value: "{{ big_poppa_mongo_replset_name | default('ansible_undefined') }}" + - name: GITHUB_VARNISH_HOST + value: "{{ github_varnish_host }}" + - name: GITHUB_VARNISH_PORT + value: "{{ github_varnish_port }}" + - name: GITHUB_PROTOCOL + value: "http" + - name: STRIPE_API_KEY + value: "{{ cream_stripe_secret_key | default('ansible_undefined') }}" + - name: STRIPE_PUBLISHABLE_KEY + value: "{{ cream_stripe_publishable_key | default('ansible_undefined') }}" diff --git a/ansible/group_vars/alpha-big-poppa-http.yml b/ansible/group_vars/alpha-big-poppa-http.yml new file mode 100644 index 00000000..0bc78be5 --- /dev/null +++ b/ansible/group_vars/alpha-big-poppa-http.yml @@ -0,0 +1,23 @@ +name: big-poppa-http +service_name: big-poppa + +hosted_ports: ["{{ big_poppa_port }}"] +num_replicas: 4 + +envs: + - name: PORT + value: "{{ big_poppa_port }}" + - name: ROLLBAR_KEY + value: "{{ big_poppa_http_rollbar_token | default('ansible_undefined') }}" + - name: NEW_RELIC_APP_NAME + value: "{{ big_poppa_new_relic_app_name | default('ansible_undefined') }}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ new_relic_license_key | default('ansible_undefined') }}" + - name: NEW_RELIC_LOG_LEVEL + value: "fatal" + - name: NEW_RELIC_NO_CONFIG_FILE + value: "true" + +container_envs: "{{ big_poppa_base_container_envs + envs }}" + +container_run_args: npm run start-http diff --git a/ansible/group_vars/alpha-big-poppa-worker.yml b/ansible/group_vars/alpha-big-poppa-worker.yml new file mode 100644 index 00000000..c3d74d8e --- /dev/null +++ b/ansible/group_vars/alpha-big-poppa-worker.yml @@ -0,0 +1,16 
@@ +name: big-poppa-worker + +envs: + - name: ROLLBAR_KEY + value: "{{ big_poppa_worker_rollbar_token | default('ansible_undefined') }}" + +container_envs: "{{ big_poppa_base_container_envs + envs }}" + +dockerfile_pre_install_commands: [ + "apt-get update", + "apt-get install postgresql-client=9.4+165+deb8u2 -y", + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +container_run_args: npm run start-worker + diff --git a/ansible/group_vars/alpha-cadvisor.yml b/ansible/group_vars/alpha-cadvisor.yml new file mode 100644 index 00000000..fd3d38f2 --- /dev/null +++ b/ansible/group_vars/alpha-cadvisor.yml @@ -0,0 +1,15 @@ +name: cadvisor + +container_image: google/{{ name }} +container_tag: "v0.24.1" + +memory_hard_limit: 100M + +container_run_opts: > + --name={{ name }} + --volume=/:/rootfs:ro + --volume=/var/run:/var/run:rw + --volume=/sys:/sys:ro + --volume=/var/lib/docker/:/var/lib/docker:ro + --publish={{ cadvisor_port }}:8080 + --memory-reservation=50mb diff --git a/ansible/group_vars/alpha-charon.yml b/ansible/group_vars/alpha-charon.yml new file mode 100644 index 00000000..98fa7a5b --- /dev/null +++ b/ansible/group_vars/alpha-charon.yml @@ -0,0 +1,21 @@ +name: charon +app_name: "{{ name }}" +app_repo: git@github.com:CodeNow/{{ name }}.git +hosted_ports: ["{{ charon_port }}"] + +enviroment_vars: + API_URL: "{{ api_url }}" + API_TOKEN: "{{ hello_runnable_github_token }}" + DATADOG_HOST: "{{ datadog_host_address }}" + DATADOG_PORT: "{{ datadog_port }}" + DOMAIN_FILTER: "{{ user_content_domain }}" + PORT: "{{ hosted_ports[0] }}" + REDIS_HOST: "{{ redis_external_host_address }}" + REDIS_PORT: "{{ redis_port }}" + +# consul values +consul_values: + - key: "{{ name }}/version" + value: "{{ git_branch }}" + - key: api/url + value: "{{ api_url }}" diff --git a/ansible/group_vars/alpha-clio.yml b/ansible/group_vars/alpha-clio.yml new file mode 100644 index 00000000..d01a1049 --- /dev/null +++ b/ansible/group_vars/alpha-clio.yml @@ -0,0 +1,29 @@ +name: 
clio + +inject_ca: false +hosted_ports: ["{{ clio_port }}"] +repo: git@github.com:CodeNow/{{ name }}.git +node_version: "4.8.0" +npm_version: "2.15.11" + +container_envs: + - name: MONGO + value: "mongodb://{{ clio_mongo_auth }}@{{ mongo_hosts }}/{{ clio_mongo_database }}" + - name: MONGO_REPLSET_NAME + value: "{{ clio_mongo_replset_name | default('ansible_undefined') }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: PORT + value: "{{ clio_port }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" diff --git a/ansible/group_vars/alpha-consul-single.yml b/ansible/group_vars/alpha-consul-single.yml new file mode 100644 index 00000000..51c99ebd --- /dev/null +++ b/ansible/group_vars/alpha-consul-single.yml @@ -0,0 +1,11 @@ +name: consul + +hosted_ports: ["{{ consul_api_port }}"] + +container_image: consul +container_tag: v0.6.4 + +container_run_args: consul agent -server -client=0.0.0.0 -bootstrap-expect=1 -data-dir=/tmp/db -ui + +service_type: "LoadBalancer" +service_load_balancer_ranges: ["{{ docks_ip_range }}"] diff --git a/ansible/group_vars/alpha-consul-values.yml b/ansible/group_vars/alpha-consul-values.yml new file mode 100644 index 00000000..75e0b928 --- /dev/null +++ b/ansible/group_vars/alpha-consul-values.yml @@ -0,0 +1,9 @@ +name: consul-update + +container_image: tutum/curl +container_tag: trusty + +advance_arg: true + +container_run_args: > + {% for item in consul_seed %} curl -X PUT http://{{ consul_url }}:8500/v1/kv/{{ item.key }} --data {{ item.value }} && {% endfor %} echo Finished diff --git a/ansible/group_vars/alpha-consul.yml b/ansible/group_vars/alpha-consul.yml new file mode 100644 index 00000000..6285b108 --- /dev/null +++ 
b/ansible/group_vars/alpha-consul.yml @@ -0,0 +1,44 @@ +name: consul + +# for database role +db_path: /etc/consul.d + +# for container_kill_start +pause_length_minutes: 3 + +container_image: "{{ registry_host }}/runnable/consul" +container_tag: v0.6.3 + +container_run_opts: > + -d + -h {{ inventory_hostname }} + -v /consul:/data + -v /etc/consul.d:/etc/consul.d:ro + -v /opt/runnable/consul/consul.json:/consul.json:ro + -v /opt/consul/server:/opt/consul/server:ro + -p {{ ansible_default_ipv4.address }}:8300:8300 + -p {{ ansible_default_ipv4.address }}:8301:8301 + -p {{ ansible_default_ipv4.address }}:8301:8301/udp + -p {{ ansible_default_ipv4.address }}:8302:8302 + -p {{ ansible_default_ipv4.address }}:8302:8302/udp + -p {{ ansible_default_ipv4.address }}:8400:8400 + -p {{ ansible_default_ipv4.address }}:{{ consul_api_port }}:8500 + -p {{ ansible_default_ipv4.address }}:{{ consul_https_port }}:{{ consul_https_port }} + --restart=always + +storage_size_gb: "{{ consul_storage_size_gb }}" + +container_run_args: > + consul agent + --config-file /consul.json + --config-dir /etc/consul.d + +# some seed values +# pulled 2015/16/11 - Bryan +consul_seed: + - key: node/env + value: "{{ node_env }}" + - key: api/hostname + value: "{{ api_hostname }}" + - key: api/url + value: "{{ api_url }}" diff --git a/ansible/group_vars/alpha-cream-base.yml b/ansible/group_vars/alpha-cream-base.yml new file mode 100644 index 00000000..1ddc7da8 --- /dev/null +++ b/ansible/group_vars/alpha-cream-base.yml @@ -0,0 +1,47 @@ +node_version: 4.5.0 +npm_version: 2 + +inject_ca: false +repo: "git@github.com:CodeNow/cream.git" + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +cream_base_container_envs: + - name: BIG_POPPA_HOST + value: "http://{{ big_poppa_host_address }}:{{ big_poppa_port }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ 
datadog_port }}" + - name: GIT_BRANCH + value: "{{ git_branch }}" + - name: HELLO_RUNNABLE_GITHUB_TOKEN + value: "{{ cream_hello_runnable_github_token }}" + - name: INTERCOM_API_KEY + value: "{{ cream_intercom_key | default('ansible_undefined') }}" + - name: INTERCOM_APP_ID + value: "{{ cream_intercom_id | default('ansible_undefined') }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: RUNNABLE_API_URL + value: "{{ api_url }}" + - name: RUNNABLE_USER_CONTENT_DOMAIN + value: "{{ user_content_domain }}" + - name: STRIPE_API_KEY + value: "{{ cream_stripe_secret_key }}" + - name: STRIPE_PUBLISHABLE_KEY + value: "{{ cream_stripe_publishable_key }}" diff --git a/ansible/group_vars/alpha-cream-http.yml b/ansible/group_vars/alpha-cream-http.yml new file mode 100644 index 00000000..e74e4194 --- /dev/null +++ b/ansible/group_vars/alpha-cream-http.yml @@ -0,0 +1,14 @@ +name: cream-http +service_name: cream + +hosted_ports: ["{{ cream_port }}"] + +envs: + - name: ROLLBAR_KEY + value: "{{ cream_http_rollbar_token }}" + - name: PORT + value: "{{ hosted_ports[0] }}" + +container_envs: "{{ cream_base_container_envs + envs }}" + +container_run_args: npm run start-http diff --git a/ansible/group_vars/alpha-cream-worker.yml b/ansible/group_vars/alpha-cream-worker.yml new file mode 100644 index 00000000..0f1b0452 --- /dev/null +++ b/ansible/group_vars/alpha-cream-worker.yml @@ -0,0 +1,9 @@ +name: cream-worker + +envs: + - name: ROLLBAR_KEY + value: "{{ cream_worker_rollbar_token }}" + +container_envs: "{{ cream_base_container_envs + envs }}" + +container_run_args: npm run start-worker diff --git a/ansible/group_vars/alpha-customerbot.yml b/ansible/group_vars/alpha-customerbot.yml new file mode 100644 index 00000000..2c2638e4 --- /dev/null +++ 
b/ansible/group_vars/alpha-customerbot.yml @@ -0,0 +1,51 @@ +name: customerbot + +inject_ca: false +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "6.11.0" + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +container_envs: + - name: APP_NAME + value: "{{ customerbot_app_name }}" + - name: BIG_POPPA_HOST + value: "http://{{ big_poppa_host_address }}:{{ big_poppa_port }}" + - name: BOT_API_KEY + value: "{{ customerbot_bot_api_key }}" + - name: DATADOG_API_KEY + value: "{{ customerbot_datadog_api_key }}" + - name: DATADOG_APP_KEY + value: "{{ customerbot_datadog_app_key }}" + - name: GITHUB_ACCESS_TOKEN + value: "{{ customerbot_github_access_token }}" + - name: INTERCOM_APP_API_KEY + value: "{{ customerbot_intercom_api_key }}" + - name: INTERCOM_APP_ID + value: "{{ customerbot_intercom_app_id }}" + - name: JIRA_PASSWORD + value: "{{ customerbot_jira_password }}" + - name: JIRA_USERNAME + value: "{{ customerbot_jira_username }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: RUNNABLE_API_URL + value: "{{ customerbot_runnable_api_url }}" + - name: RUNNABLE_USER_CONTENT_DOMAIN + value: "{{ customerbot_runnable_user_content_domain }}" + - name: SLACK_WHITELIST + value: "{{ customerbot_slack_whitelist }}" + - name: STRIPE_SECRET_KEY + value: "{{ customerbot_stripe_secret_key }}" diff --git a/ansible/group_vars/alpha-datadog.yml b/ansible/group_vars/alpha-datadog.yml new file mode 100644 index 00000000..439006e1 --- /dev/null +++ b/ansible/group_vars/alpha-datadog.yml @@ -0,0 +1,15 @@ +name: datadog + +container_image: datadog/docker-dd-agent +container_tag: latest-dogstatsd +hosted_ports: ["{{ datadog_port }}"] +protocol: UDP + 
+container_envs: + - name: TAGS + value: "env:{{ node_env }}" + - name: API_KEY + value: "{{ datadog_api_key }}" + +advance_arg: true +container_run_args: "sed -i -e \\\"s/^# tags:.*$/tags: ${TAGS}/\\\" /etc/dd-agent/datadog.conf && supervisord -n -c /etc/dd-agent/supervisor.conf" diff --git a/ansible/group_vars/alpha-deployer.yml b/ansible/group_vars/alpha-deployer.yml new file mode 100644 index 00000000..31f5dec8 --- /dev/null +++ b/ansible/group_vars/alpha-deployer.yml @@ -0,0 +1,18 @@ +--- +name: deployer + +repo: git@github.com:CodeNow/devops-scripts.git + +container_envs: >- + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + +container_run_opts: > + -h {{ name }} + -d + -v {{ secret_root }}/{{ env }}.pem:/root/.ssh/{{ env }}.pem + -v {{ secret_root }}/vault-pass:/root/.ssh/vault-pass + -v {{ secret_root }}/id_rsa:/root/.ssh/id_rsa + {{ container_envs }} diff --git a/ansible/group_vars/alpha-detention.yml b/ansible/group_vars/alpha-detention.yml new file mode 100644 index 00000000..b295ece4 --- /dev/null +++ b/ansible/group_vars/alpha-detention.yml @@ -0,0 +1,24 @@ +name: "detention" + +repo: "git@github.com:CodeNow/{{ name }}.git" +hosted_ports: ["{{ detention_port }}"] +node_version: "0.12.0" +npm_version: "2.1.18" + +dockerfile_post_install_commands: [ + "wget -nv https://github.com/eSlider/sassc-binaries/raw/develop/dist/sassc -O /usr/bin/sass", + "chmod +x /usr/bin/sass", + "npm run grunt" +] + +container_envs: + - name: ABSOLUTE_URL + value: "{{ detention_hostname }}" + - name: API_URL + value: "{{ api_url }}" + - name: HELLO_RUNNABLE_GITHUB_TOKEN + value: "{{ api_hello_runnable_github_token }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: PORT + value: "{{ detention_port }}" diff --git a/ansible/group_vars/alpha-dock-init.yml b/ansible/group_vars/alpha-dock-init.yml new file mode 100644 index 00000000..2a7effcd --- 
/dev/null +++ b/ansible/group_vars/alpha-dock-init.yml @@ -0,0 +1,8 @@ +name: dock-init +app_name: "{{ name }}" +app_repo: git@github.com:CodeNow/{{ name }}.git + +# consul values +consul_values: + - key: "{{ name }}/version" + value: "{{ git_branch }}" diff --git a/ansible/group_vars/alpha-docker-listener.yml b/ansible/group_vars/alpha-docker-listener.yml new file mode 100644 index 00000000..d4c0d478 --- /dev/null +++ b/ansible/group_vars/alpha-docker-listener.yml @@ -0,0 +1,44 @@ +name: docker-listener + +inject_ca: false +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" +npm_version: "2.14.7" + +volume_mounts: + - name: "{{ name }}-docker-ssl-certs" + path: /etc/ssl/docker + kind: configMap + +container_envs: + - name: CONSUL_HOST + value: "{{ consul_internal_host_address }}:{{ consul_api_port }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: DOCKER_CERT_PATH + value: "/etc/ssl/docker" + - name: LOGGLY_TOKEN + value: "{{ loggly_token }}" + - name: LOG_LEVEL + value: "debug" + - name: STDOUT_LOG_LEVEL + value: "debug" + - name: NODE_ENV + value: "{{ node_env }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: ROLLBAR_KEY + value: "{{ docker_listener_rollbar_key | default('ansible_undefined') }}" + - name: SWARM_HOST + value: "{{ swarm_host_address }}:{{ swarm_master_port }}" + - name: IMAGE_INSPECT_LIST + value: "localhost,runnable/image-builder" + diff --git a/ansible/group_vars/alpha-drake-base.yml b/ansible/group_vars/alpha-drake-base.yml new file mode 100644 index 00000000..82733390 --- /dev/null +++ b/ansible/group_vars/alpha-drake-base.yml @@ -0,0 +1,33 @@ +node_version: "4.4.7" +npm_version: 2 + +inject_ca: false +repo: "git@github.com:CodeNow/drake.git" + 
+dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +drake_base_container_envs: + - name: API_URL + value: "{{ api_url }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: LOG_LEVEL + value: "info" diff --git a/ansible/group_vars/alpha-drake-http.yml b/ansible/group_vars/alpha-drake-http.yml new file mode 100644 index 00000000..86021c68 --- /dev/null +++ b/ansible/group_vars/alpha-drake-http.yml @@ -0,0 +1,16 @@ +name: drake-http +service_name: drake + +hosted_ports: ["{{ drake_port }}"] + +envs: + - name: DATADOG_SECRET + value: "I_solemnly_swear_that_I_am_up_to_no_good" + - name: ROLLBAR_KEY + value: "{{ drake_http_rollbar_token | default('ansible_undefined') }}" + - name: PORT + value: "{{ drake_port }}" + +container_envs: "{{ drake_base_container_envs + envs }}" + +container_run_args: npm run start-http diff --git a/ansible/group_vars/alpha-drake-worker.yml b/ansible/group_vars/alpha-drake-worker.yml new file mode 100644 index 00000000..dccdd330 --- /dev/null +++ b/ansible/group_vars/alpha-drake-worker.yml @@ -0,0 +1,9 @@ +name: drake-worker + +envs: + - name: ROLLBAR_KEY + value: "{{ drake_worker_rollbar_token | default('ansible_undefined') }}" + +container_envs: "{{ drake_base_container_envs + envs }}" + +container_run_args: npm run start-worker diff --git a/ansible/group_vars/alpha-enterprise-sign-in.yml b/ansible/group_vars/alpha-enterprise-sign-in.yml new file mode 100644 index 00000000..91f378ed --- /dev/null +++ b/ansible/group_vars/alpha-enterprise-sign-in.yml @@ -0,0 +1,35 @@ +name: 
"enterprise-sign-in" + +inject_ca: false +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.8.0" +npm_version: "2.15.11" + +restart_policy: "Never" + +dockerfile_post_install_commands: [ + "wget -nv https://github.com/eSlider/sassc-binaries/raw/develop/dist/sassc -O /usr/bin/sass", + "chmod +x /usr/bin/sass", + "npm install -g gulp", + "npm install" +] + +advance_arg: true + +container_envs: + - name: API_URL + value: "https://{{ api_hostname }}" + - name: ANGULAR_URL + value: "{{ angular_url }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: AWS_REGION + value: "{{ web_aws_bucket_region | default('us-east-1') }}" + - name: AWS_BUCKET_NAME + value: "{{ domain }}" + - name: AWS_ACCESS_KEY_ID + value: "{{ aws_access_key }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ aws_secret_key }}" + +container_run_args: gulp build:dev && gulp push diff --git a/ansible/group_vars/alpha-eru.yml b/ansible/group_vars/alpha-eru.yml new file mode 100644 index 00000000..f0ee37c6 --- /dev/null +++ b/ansible/group_vars/alpha-eru.yml @@ -0,0 +1,91 @@ +--- +name: eru + +inject_ca: false +repo: git@github.com:CodeNow/{{ name }}.git +hosted_ports: [ "5501", "5502" ] +node_version: 4.4.4 +npm_version: 3.8 +has_shrinkwrap: true + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}", + "BIG_POPPA_HOST {{ big_poppa_host_address }}:{{ big_poppa_port }}", + "RABBITMQ_HOSTNAME {{ rabbit_host_address }}", + "RABBITMQ_PASSWORD {{ rabbit_password }}", + "RABBITMQ_PORT {{ rabbit_port }}" +] + +dockerfile_pre_install_commands: [ + 'echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc' +] + +dockerfile_post_install_commands: [ + apt-get update && apt-get install -y supervisor, + npm run build +] + +container_envs: + - name: AWS_ACCESS_KEY + value: "{{ eru_aws_access_key_id }}" + - name: AWS_ENVIRONMENT + value: "{{ eru_aws_environment }}" + - name: AWS_SECRET_KEY + value: "{{ eru_aws_secret_access_key }}" + - name: BIG_POPPA_HOST + value: "{{ big_poppa_host_address 
}}:{{ big_poppa_port }}" + - name: CONSUL_HOST + value: "{{ consul_host_address }}:{{ consul_api_port }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: DOMAIN + value: "{{ eru_subdomain }}.{{ domain }}" + - name: GITHUB_CLIENT_ID + value: "{{ eru_github_id }}" + - name: GITHUB_CLIENT_SECRET + value: "{{ eru_github_secret }}" + - name: GITHUB_VARNISH_HOST + value: "{{ github_varnish_host }}" + - name: GITHUB_VARNISH_PORT + value: "{{ github_varnish_port }}" + - name: INTERCOM_API_KEY + value: "{{ eru_intercom_key }}" + - name: INTERCOM_APP_ID + value: "{{ eru_intercom_id }}" + - name: LOG_ENVIRONMENT + value: "{{ node_env }}" + - name: LOG_LEVEL + value: "trace" + - name: MONGODB_DATABASE + value: "{{ eru_mongodb_database }}" + - name: MONGODB_HOSTS + value: "{{ mongo_hosts }}" + - name: MONGODB_PASSWORD + value: "{{ eru_mongodb_password }}" + - name: MONGODB_REPLSET + value: "{{ eru_mongodb_replset }}" + - name: MONGODB_USERNAME + value: "{{ eru_mongodb_username }}" + - name: NODE_ENV + value: "production" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: REDIS_HOSTNAME + value: "{{ redis_host_address }}" + - name: REDIS_PORT + value: "{{ redis_port }}" + - name: RUNNABLE_DOMAIN + value: "{{ domain }}" + - name: USER_CONTENT_DOMAIN + value: "{{ user_content_domain }}" + +advance_arg: true +container_run_args: supervisord --configuration supervisord.conf && sleep 10 && tail -n 100 -qf /tmp/*std*.log /tmp/supervisord.log diff --git a/ansible/group_vars/alpha-github-varnish.yml b/ansible/group_vars/alpha-github-varnish.yml new file mode 100644 index 00000000..f01d5624 --- /dev/null +++ b/ansible/group_vars/alpha-github-varnish.yml @@ -0,0 +1,19 @@ +--- +name: github-varnish + +inject_ca: false +repo: 
git@github.com:CodeNow/{{ name }}.git +hosted_ports: ["{{ github_varnish_port }}"] +container_run_args: '' + +container_envs: + - name: VARNISH_PORT + value: "{{ github_varnish_port }}" + - name: VARNISH_MALLOC + value: "100M" + - name: GITHUB_DOMAIN + value: "{{ github_domain }}" + - name: IS_GITHUB_ENTERPRISE + value: "{{ is_github_enterprise }}" + - name: GITHUB_PROTOCOL + value: "{{ github_protocol }}" diff --git a/ansible/group_vars/alpha-gke-dock.yml b/ansible/group_vars/alpha-gke-dock.yml new file mode 100644 index 00000000..6ffb1dcf --- /dev/null +++ b/ansible/group_vars/alpha-gke-dock.yml @@ -0,0 +1,6 @@ +name: gke-dock-{{ target_host | replace('.', '-') }} + +container_image: runnable/swarm +container_tag: v1.2.3-0 + +container_run_args: join --addr={{ target_host }}:{{ docker_port }} consul://{{ consul_host_address }}:{{ consul_api_port }}/swarm diff --git a/ansible/group_vars/alpha-image-builder.yml b/ansible/group_vars/alpha-image-builder.yml new file mode 100644 index 00000000..d2a1e158 --- /dev/null +++ b/ansible/group_vars/alpha-image-builder.yml @@ -0,0 +1,9 @@ +app_name: image-builder +name: "{{ app_name }}" + +repo: git@github.com:CodeNow/{{ name }}.git + +# consul values +consul_values: + - key: "{{ name }}/version" + value: "{{ git_branch }}" diff --git a/ansible/group_vars/alpha-ingress-proxy.yml b/ansible/group_vars/alpha-ingress-proxy.yml new file mode 100644 index 00000000..133a7c0b --- /dev/null +++ b/ansible/group_vars/alpha-ingress-proxy.yml @@ -0,0 +1,60 @@ +--- +name: ingress-proxy + +container_image: "nginx" +container_tag: "1.10" +hosted_ports: ["80", "443"] +container_run_args: '' + +service_type: "LoadBalancer" + +volume_mounts: + - name: "{{ name }}-{{ domain | replace('.', '-') }}-certs" + path: /etc/ssl/certs/{{ domain }} + kind: configMap + - name: "{{ name }}-base-conf" + path: /etc/nginx/ + kind: configMap + - name: "{{ name }}-mixpanel-config" + path: /etc/nginx/other-sites-enabled + kind: configMap + - name: "{{ name 
}}-sites-enabled-config" + path: /etc/nginx/sites-enabled + kind: configMap + +services: + - name: api + upstreams: + - name: base + route: "/" + port: "{{ api_port }}" + - name: drake + upstreams: + - name: base + route: "/" + port: "{{ drake_port }}" + - name: optimus + upstreams: + - name: base + route: "/" + port: "{{ optimus_port }}" + - name: apisock + upstreams: + - name: base + route: "/" + port: "{{ api_port }}" + - name: eru + include: "{{ include_eru_proxy | default(true) }}" + upstreams: + - name: base + route: "/" + port: "{{ eru_http_port }}" + - name: graphql + route: "/graphql" + port: "{{ eru_graphql_port }}" + - name: egret + include: "{{ include_ergre_proxy | default(true) }}" + upstreams: + - name: base + route: "/" + port: "{{ egret_port }}" diff --git a/ansible/group_vars/alpha-kartographer.yml b/ansible/group_vars/alpha-kartographer.yml new file mode 100644 index 00000000..f06fec0c --- /dev/null +++ b/ansible/group_vars/alpha-kartographer.yml @@ -0,0 +1,40 @@ +name: kartographer + +repo: "git@github.com:CodeNow/{{ name }}.git" + +kube_config: /keys +kube_config_name: kube-config +google_key_name: google-key.json + +container_envs: + - name: CONFIG_FILE_PATH + value: "{{ kube_config }}/{{ kube_config_name }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "{{ kube_config }}/{{ google_key_name }}" + - name: HELLO_RUNNABLE_GITHUB_TOKEN + value: "{{ kartographer_hello_runnable_github_token }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: ROLLBAR_KEY + value: "{{ kartographer_rollbar_token | default('ansible_undefined') }}" + - name: RUNNABLE_API_URL + value: "{{ api_url }}" + - name: 
WORKER_MAX_NUM_RETRIES + value: "2" + +volume_mounts: + - name: "{{ name }}-keys" + path: /keys + kind: configMap diff --git a/ansible/group_vars/alpha-keymaker-base.yml b/ansible/group_vars/alpha-keymaker-base.yml new file mode 100644 index 00000000..1c20d7d5 --- /dev/null +++ b/ansible/group_vars/alpha-keymaker-base.yml @@ -0,0 +1,38 @@ +node_version: 6.10.2 + +inject_ca: false +repo: "git@github.com:CodeNow/keymaker.git" + +keymaker_base_container_envs: + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: POSTGRES_CONNECT_STRING + value: "postgres://{{ keymaker_pg_user }}:{{ keymaker_pg_pass }}@{{ keymaker_pg_host }}/{{ keymaker_pg_database }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: GITHUB_VARNISH_HOST + value: "{{ github_varnish_host }}" + - name: GITHUB_VARNISH_PORT + value: "{{ github_varnish_port }}" + - name: GITHUB_PROTOCOL + value: "http" + - name: NEW_RELIC_APP_NAME + value: "{{ keymaker_new_relic_app_name | default('ansible_undefined') }}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ new_relic_license_key | default('ansible_undefined') }}" + - name: NEW_RELIC_LOG_LEVEL + value: "fatal" + - name: NEW_RELIC_NO_CONFIG_FILE + value: "true" + - name: LOG_LEVEL + value: "{{ keymaker_log_level }}" diff --git a/ansible/group_vars/alpha-keymaker-http.yml b/ansible/group_vars/alpha-keymaker-http.yml new file mode 100644 index 00000000..801736ed --- /dev/null +++ b/ansible/group_vars/alpha-keymaker-http.yml @@ -0,0 +1,14 @@ +name: keymaker-http +service_name: keymaker + +hosted_ports: ["{{ keymaker_port }}"] + +envs: + - name: PORT + value: "{{ keymaker_port }}" + - name: ROLLBAR_KEY + value: "{{ keymaker_http_rollbar_token | 
default('ansible_undefined') }}" + +container_envs: "{{ keymaker_base_container_envs + envs }}" + +container_run_args: npm run start-http diff --git a/ansible/group_vars/alpha-keymaker-worker.yml b/ansible/group_vars/alpha-keymaker-worker.yml new file mode 100644 index 00000000..ff29b291 --- /dev/null +++ b/ansible/group_vars/alpha-keymaker-worker.yml @@ -0,0 +1,10 @@ +name: keymaker-worker + +envs: + - name: ROLLBAR_KEY + value: "{{ big_poppa_worker_rollbar_token | default('ansible_undefined') }}" + +container_envs: "{{ keymaker_base_container_envs + envs }}" + +container_run_args: npm run start-worker + diff --git a/ansible/group_vars/alpha-khronos.yml b/ansible/group_vars/alpha-khronos.yml new file mode 100644 index 00000000..20138665 --- /dev/null +++ b/ansible/group_vars/alpha-khronos.yml @@ -0,0 +1,119 @@ +name: khronos + +inject_ca: false +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.4.7" +npm_version: 2 + +volume_mounts: + - name: "{{ name }}-docker-ssl-certs" + path: /etc/ssl/docker + kind: configMap + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] +dockerfile_pre_install_commands: [ + 'echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc' +] + +prod_cron_queues: + - cron_queue: canary.build.run + cron_scedule: "*/5 * * * *" + - cron_queue: canary.github-branch.run + cron_scedule: "*/5 * * * *" + - cron_queue: canary.log.run + cron_scedule: "*/5 * * * *" + - cron_queue: canary.network.run + cron_scedule: "*/5 * * * *" + - cron_queue: metrics.container-status + cron_scedule: "*/5 * * * *" + - cron_queue: canary.failover.run + cron_scedule: "*/30 * * * *" + +cron_queues: + - cron_queue: containers.image-builder.prune + cron_scedule: "13 1,4,7,10,13,16,19,22 * * *" + - cron_queue: containers.orphan.prune + cron_scedule: "13 1,4,7,10,13,16,19,22 * * *" + - cron_queue: context-versions.prune-expired + cron_scedule: "13 1,4,7,10,13,16,19,22 * * *" + - cron_queue: images.prune + cron_scedule: "13 1,4,7,10,13,16,19,22 * * *" 
+ - cron_queue: instances.cleanup + cron_scedule: "13 1,4,7,10,13,16,19,22 * * *" + - cron_queue: weave.prune + cron_scedule: "13 1,4,7,10,13,16,19,22 * * *" + +cron_events: + - cron_event: time.one-day.passed + cron_scedule: "0 15 * * *" + - cron_event: time.four-hours.passed + cron_scedule: "13 1,4,7,10,13,16,19,22 * * *" + - cron_event: time.one-hour.passsed + cron_scedule: "49 * * * *" + - cron_event: time.thirty-minutes.passed + cron_scedule: "*/30 * * * *" + - cron_event: time.five-minutes.passed + cron_scedule: "*/5 * * * *" + +cron_rabbit_host_address: "{{ rabbit_host_address }}:{{ rabbit_port }}" +cron_rabbit_auth: --username {{ rabbit_username }} --password {{ rabbit_password }} + +container_envs: + - name: API_SOCKET_SERVER + value: "https://{{ api_socket_server_hostname }}" + - name: API_URL + value: "{{ api_url }}" + - name: BIG_POPPA_HOST + value: "http://{{ big_poppa_host_address }}:{{ big_poppa_port }}" + - name: CONSUL_HOST + value: "{{ consul_internal_host_address }}:{{ consul_api_port }}" + - name: CANARY_API_TOKEN + value: "{{ khronos_canary_token | default('undefined') }}" + - name: CANARY_API_FAILOVER_TOKEN + value: "{{ khronos_canary_failover_token | default('ansible_undefined')}}" + - name: CANARY_GITHUB_BRANCHES_INSTANCE_ID + value: "{{ khronos_canary_github_branches_instance_id | default('undefined') }}" + - name: CANARY_LOG_INSTANCE_ID + value: "{{ khronos_canary_logs_instance_id | default('undefined') }}" + - name: CANARY_LOG_TERMINAL_SLEEP + value: "10" + - name: CANARY_REBUILD_INSTANCE_ID + value: "{{ khronos_canary_rebuild_instance_id | default('undefined') }}" + - name: CANARY_REBUILD_NAVI_URL + value: "{{ khronos_canary_rebuild_navi_url | default('undefined') }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: INTERCOM_API_KEY + value: "{{ khronos_intercom_api_key | default('undefined') }}" + - name: INTERCOM_APP_ID + value: "{{ khronos_intercom_app_id | 
default('undefined') }}" + - name: KHRONOS_MONGO + value: "mongodb://{{ api_mongo_auth }}@{{ mongo_hosts }}/{{ api_mongo_database }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: SWARM_HOST + value: "http://{{ swarm_host_address }}:{{ swarm_master_port }}" + - name: USER_CONTENT_DOMAIN + value: "{{ user_content_domain }}" + - name: WORKER_MAX_RETRY_DELAY + value: "3600000" + - name: GITHUB_VARNISH_HOST + value: "{{ github_varnish_host }}" + - name: GITHUB_VARNISH_PORT + value: "{{ github_varnish_port }}" + - name: GITHUB_PROTOCOL + value: "http" + +container_run_args: timeout 1h npm start diff --git a/ansible/group_vars/alpha-krain.yml b/ansible/group_vars/alpha-krain.yml new file mode 100644 index 00000000..454c1792 --- /dev/null +++ b/ansible/group_vars/alpha-krain.yml @@ -0,0 +1,15 @@ +# krain options +name: krain + +krain_env: default + +# upstart template variables +app_name: krain +app_repo: git@github.com:CodeNow/krain.git + +enviroment_vars: {} + +# consul values +consul_values: + - key: "{{ name }}/version" + value: "{{ git_branch }}" diff --git a/ansible/group_vars/alpha-link.yml b/ansible/group_vars/alpha-link.yml new file mode 100644 index 00000000..c61dd9ff --- /dev/null +++ b/ansible/group_vars/alpha-link.yml @@ -0,0 +1,30 @@ +name: link + +inject_ca: false +repo: git@github.com:CodeNow/{{ name }}.git +node_version: "4.2.1" +npm_version: "2.14.7" + +container_envs: + - name: API_URL + value: "{{ api_url }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: HELLO_RUNNABLE_GITHUB_TOKEN + value: "{{ link_hello_runnable_github_token }}" + - name: MONGO + value: "mongodb://{{ navi_mongo_auth }}@{{ navi_mongo_hosts }}/{{ navi_mongo_database }}" + - 
name: NODE_ENV + value: "{{ node_env }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: USER_CONTENT_DOMAIN + value: "{{ user_content_domain }}" diff --git a/ansible/group_vars/alpha-marketing.yml b/ansible/group_vars/alpha-marketing.yml new file mode 100644 index 00000000..118561a3 --- /dev/null +++ b/ansible/group_vars/alpha-marketing.yml @@ -0,0 +1,20 @@ +name: "runnable.com" + +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.3.1" +npm_version: "3.7.5" +do_not_push: yes + +dockerfile_enviroment: [ + "API_URL https://{{ api_hostname }}", + "AWS_ACCESS_KEY {{ marketing_aws_access_key }}", + "AWS_SECRET_KEY {{ marketing_aws_secret_key }}", + "ANGULAR_URL {{ angular_url }}", + "AWS_BUCKET {{ marketing_bucket }}", + "AWS_REGION {{ web_aws_bucket_region | default('us-standard') }}", + "NODE_ENV {{ node_env }}" +] + +dockerfile_post_install_commands: [ + "npm run deploy" +] diff --git a/ansible/group_vars/alpha-metabase.yml b/ansible/group_vars/alpha-metabase.yml new file mode 100644 index 00000000..79fb41cb --- /dev/null +++ b/ansible/group_vars/alpha-metabase.yml @@ -0,0 +1,21 @@ +name: metabase + +container_image: metabase/{{ name }} +container_tag: v0.19.3 +hosted_ports: ["{{ metabase_port }}"] + +memory_hard_limit: 20G + +container_envs: + - name: MB_DB_TYPE + value: "postgres" + - name: MB_DB_DBNAME + value: "{{ metabase_pg_database }}" + - name: MB_DB_PORT + value: "{{ pg_port }}" + - name: MB_DB_USER + value: "{{ metabase_pg_user }}" + - name: MB_DB_PASS + value: "{{ metabase_pg_pass }}" + - name: MB_DB_HOST + value: "{{ pg_host }}" diff --git a/ansible/group_vars/alpha-mongo-create-users.yml b/ansible/group_vars/alpha-mongo-create-users.yml new file mode 100644 index 00000000..abaf4885 --- /dev/null +++ 
b/ansible/group_vars/alpha-mongo-create-users.yml @@ -0,0 +1,19 @@ +name: mongo-create-users + +container_image: mongo +container_tag: "3.2.6" + +advance_arg: true +restart_policy: "Never" + +admin_mongo_query: "db.createUser({user:'{{ api_mongo_user }}',pwd:'{{ api_mongo_password }}',roles:[{role:'root',db:'admin'}]});" +api_mongo_query: "db.createUser({user:'{{ api_mongo_user }}',pwd:'{{ api_mongo_password }}',roles:[{role:'readWrite',db:'api'}]});" +clio_mongo_query: "db.createUser({user:'{{ clio_mongo_user }}',pwd:'{{ clio_mongo_password }}',roles:[{role:'readWrite',db:'clio'}]});" +navi_mongo_query: "db.createUser({user:'{{ navi_mongo_user }}',pwd:'{{ navi_mongo_password }}',roles:[{role:'readWrite',db:'navi'}]});" + +container_run_args: > + mongo {{ mongo_hosts }}/admin --eval \"{{ admin_mongo_query }}\" && + mongo {{ mongo_hosts }}/{{ api_mongo_database }} --eval \"{{ api_mongo_query }}\" && + mongo {{ mongo_hosts }}/{{ clio_mongo_database }} --eval \"{{ clio_mongo_query }}\" && + mongo {{ navi_mongo_hosts }}/{{ navi_mongo_database }} --eval \"{{ navi_mongo_query }}\" && + echo Finished diff --git a/ansible/group_vars/alpha-mongo-seed-db.yml b/ansible/group_vars/alpha-mongo-seed-db.yml new file mode 100644 index 00000000..523aee37 --- /dev/null +++ b/ansible/group_vars/alpha-mongo-seed-db.yml @@ -0,0 +1,123 @@ +name: mongo-seed-db + +container_image: quay.io/runnable/api + +container_run_args: npm run migrate-up + +container_envs: + - name: LOG_LEVEL + value: trace + - name: DOCKER_IMAGE_BUILDER_CACHE + value: "/home/anandkumarpatel/cache" + - name: VALID_REDIR_TLDS + value: "{{ domain }},runnablecloud.com" + - name: FULL_API_DOMAIN + value: "{{ api_url }}" + - name: FULL_FRONTEND_DOMAIN + value: "https://{{ domain }}" + - name: AWS_ACCESS_KEY_ID + value: "{{ api_aws_access_key_id }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ api_aws_secret_access_key }}" + - name: BIG_POPPA_HOST + value: "http://{{ big_poppa_host_address }}:{{ big_poppa_port }}" + - 
name: CLIO_HTTP_URL + value: "http://{{ clio_host_address }}:{{ clio_port }}/" + - name: COOKIE_DOMAIN + value: "{{ domain }}" + - name: CREAM_HOST + value: "http://{{ cream_host_address }}:{{ cream_port }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: DOCKER_BUILD_LINE_TIMEOUT_MS + value: "3600000" + - name: DOMAIN + value: "{{ domain }}" + - name: KEYMAKER_HTTP_URL + value: "http://{{ keymaker_host_address }}:{{ keymaker_port }}/" + - name: GITHUB_CALLBACK_URL + value: "{{ api_url }}/auth/github/callback" + - name: GITHUB_CLIENT_ID + value: "{{ api_github_client_id }}" + - name: GITHUB_CLIENT_SECRET + value: "{{ api_github_client_secret }}" + - name: GITHUB_DEPLOY_KEYS_BUCKET + value: "{{ api_github_deploy_keys_bucket }}" + - name: GITHUB_HOOK_URL + value: "{{ api_url }}/actions/github" + - name: GITHUB_UPGRADE_REDIRECT_URL + value: "https://app.{{ domain }}/githubAuthUpgrade/" + - name: GITHUB_WEBHOOK_URL + value: "https://{{ drake_hostname }}/github" + - name: GITHUB_VARNISH_HOST + value: "{{ github_varnish_host }}" + - name: GITHUB_VARNISH_PORT + value: "{{ github_varnish_port }}" + - name: GITHUB_PROTOCOL + value: "http" + - name: HELLO_RUNNABLE_GITHUB_ID + value: "{{ hello_runnable_github_id }}" + - name: HELLO_RUNNABLE_GITHUB_TOKEN + value: "{{ api_hello_runnable_github_token }}" + - name: KRAIN_PORT + value: "{{ krain_port }}" + - name: MIXPANEL_APP_ID + value: "{{ api_mixpanel_app_id | default('ansible_undefined') }}" + - name: MONGO_AUTH + value: "{{ api_mongo_auth }}" + - name: MONGO_DB + value: "{{ api_mongo_database }}" + - name: MONGO_HOSTS + value: "{{ mongo_hosts }}" + - name: MONGO_REPLSET_NAME + value: "{{ api_mongo_replset_name | default('ansible_undefined') }}" + - name: MONGO + value: "mongodb://{{ api_mongo_auth }}@{{ mongo_hosts }}/{{ api_mongo_database }}" + - name: NAVI_HOST + value: "http://{{ navi_host_address }}:{{ navi_http_port }}" + - name: NODE_ENV + value: "{{ 
node_env }}" + - name: NUM_WORKERS + value: "1" + - name: OPTIMUS_HOST + value: "http://{{ optimus_hostname }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: REDIS_IPADDRESS + value: "{{ redis_host_address }}" + - name: REDIS_PORT + value: "{{ redis_port }}" + - name: S3_CONTEXT_RESOURCE_BUCKET + value: "{{ api_s3_context_bucket }}" + - name: S3_LOG_BUCKET + value: "{{ api_s3_log_bucket }}" + - name: SENDGRID_KEY + value: "{{ sendgrid_key }}" + - name: SWARM_HOST + value: "http://{{ swarm_host_address }}:{{ swarm_master_port }}" + - name: USER_CONTENT_DOMAIN + value: "{{ user_content_domain }}" + - name: AWS_ALIAS_HOST + value: "{{ api_aws_alias_host }}" + - name: NEW_RELIC_APP_NAME + value: "{{ api_new_relic_app_name | default('ansible_undefined') }}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ new_relic_license_key | default('ansible_undefined') }}" + - name: NEW_RELIC_LOG_LEVEL + value: "fatal" + - name: NEW_RELIC_NO_CONFIG_FILE + value: "true" + - name: INTERCOM_APP_ID + value: "{{ api_intercom_app_id | default('ansible_undefined') }}" + - name: INTERCOM_API_KEY + value: "{{ api_intercom_api_key | default('ansible_undefined') }}" + - name: USER_VAULT_ENDPOINT + value: "http://{{ user_vault_load_balancer }}:{{ user_vault_port }}" diff --git a/ansible/group_vars/alpha-mongo.yml b/ansible/group_vars/alpha-mongo.yml new file mode 100644 index 00000000..d7c68cbe --- /dev/null +++ b/ansible/group_vars/alpha-mongo.yml @@ -0,0 +1,17 @@ +name: mongo +hosted_ports: ["{{ mongo_port }}"] +container_run_args: '' + +storage_size_gb: "{{ mongo_storage_size_gb }}" + +volume_id: "{{ mongo_volume_id }}" + +volume_mounts: + - name: "{{ name }}-db-claim" + path: /data/db + kind: persistent + +container_image: mongo +container_tag: "3.2.6" + +container_run_args: mongod diff --git 
a/ansible/group_vars/alpha-navi-proxy.yml b/ansible/group_vars/alpha-navi-proxy.yml new file mode 100644 index 00000000..ea613d66 --- /dev/null +++ b/ansible/group_vars/alpha-navi-proxy.yml @@ -0,0 +1,21 @@ +--- +name: navi-proxy + +container_image: "nginx" +container_tag: "1.10" + +hosted_ports: "{{ navi_extra_ports + [80, 443] }}" + +service_type: "LoadBalancer" +container_run_args: '' + +volume_mounts: + - name: "{{ name }}-{{ user_content_domain | replace('.', '-') }}-certs" + path: /etc/ssl/certs/{{ user_content_domain }} + kind: configMap + - name: "{{ name }}-base-conf" + path: /etc/nginx/ + kind: configMap + - name: "{{ name }}-navi-conf" + path: /etc/nginx/sites-enabled + kind: configMap diff --git a/ansible/group_vars/alpha-navi.yml b/ansible/group_vars/alpha-navi.yml new file mode 100644 index 00000000..ca0896f8 --- /dev/null +++ b/ansible/group_vars/alpha-navi.yml @@ -0,0 +1,56 @@ +name: navi + +inject_ca: false +repo: git@github.com:CodeNow/{{ name }}.git +hosted_ports: [ "{{ navi_http_port }}" ] +node_version: "4.2.4" +npm_version: "2.8.3" +num_replicas: 8 + +container_envs: + - name: API_URL + value: "{{ api_url }}" + - name: COOKIE_DOMAIN + value: ".{{ user_content_domain }}" + - name: COOKIE_SECRET + value: "{{ navi_cookie_secret }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: ENABLE_LRU_CACHE + value: "1" + - name: ERROR_URL + value: "http://{{ detention_host_address }}:{{ detention_port }}" + - name: HTTP_PORT + value: "{{ hosted_ports[0] }}" + - name: LOG_LEVEL_STDOUT + value: "trace" + - name: MONGO + value: "mongodb://{{ navi_mongo_auth }}@{{ navi_mongo_hosts }}/{{ navi_mongo_database }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - 
name: REDIS_IPADDRESS + value: "{{ redis_host_address }}" + - name: REDIS_PORT + value: "{{ redis_port }}" + - name: INTERCOM_API_KEY + value: "{{ navi_intercom_api_key | default('ansible_undefined')}}" + - name: INTERCOM_APP_ID + value: "{{ navi_intercom_app_id | default('ansible_undefined')}}" + - name: NEW_RELIC_APP_NAME + value: "{{ navi_new_relic_app_name | default('ansible_undefined')}}" + - name: NEW_RELIC_LICENSE_KEY + value: "{{ new_relic_license_key | default('ansible_undefined')}}" + - name: NEW_RELIC_LOG_LEVEL + value: "fatal" + - name: NEW_RELIC_NO_CONFIG_FILE + value: "true" diff --git a/ansible/group_vars/alpha-node-exporter.yml b/ansible/group_vars/alpha-node-exporter.yml new file mode 100644 index 00000000..0aee53ca --- /dev/null +++ b/ansible/group_vars/alpha-node-exporter.yml @@ -0,0 +1,15 @@ +name: node-exporter + +container_image: prom/{{ name }} +container_tag: "0.12.0" + +memory_hard_limit: 100M + +container_run_opts: > + --name={{ name }} + --net=host + --memory-reservation=50mb + +container_run_args: > + --web.listen-address=:{{ node_exporter_port }} + --collectors.enabled=conntrack,diskstats,filefd,filesystem,loadavg,meminfo,netdev,netstat,stat,time \ diff --git a/ansible/group_vars/alpha-optimus.yml b/ansible/group_vars/alpha-optimus.yml new file mode 100644 index 00000000..8fd61db1 --- /dev/null +++ b/ansible/group_vars/alpha-optimus.yml @@ -0,0 +1,27 @@ +name: optimus + +inject_ca: false +repo: git@github.com:CodeNow/{{ name }}.git +hosted_ports: ["{{ optimus_port }}"] +node_version: "4.3.2" +npm_version: "2.8.3" + +container_envs: + - name: NODE_ENV + value: "{{ node_env }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: PORT + value: "{{ optimus_port }}" + - name: AWS_ACCESS_KEY_ID + value: "{{ optimus_aws_access_id }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ optimus_aws_secret_id }}" + - name: S3_DEPLOY_KEY_BUCKET + value: "{{ optimus_github_deploy_keys_bucket 
}}" + - name: ROLLBAR_KEY + value: "{{ optimus_rollbar_token | default('ansible_undefined') }}" + - name: LOG_LEVEL + value: "trace" diff --git a/ansible/group_vars/alpha-palantiri.yml b/ansible/group_vars/alpha-palantiri.yml new file mode 100644 index 00000000..ef57eba4 --- /dev/null +++ b/ansible/group_vars/alpha-palantiri.yml @@ -0,0 +1,39 @@ +name: palantiri + +inject_ca: false +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" +npm_version: "2.14.7" + +volume_mounts: + - name: "{{ name }}-docker-ssl-certs" + path: /etc/ssl/docker + kind: configMap + +container_envs: + - name: CONSUL_HOST + value: "{{ consul_internal_host_address }}:{{ consul_api_port }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: ROLLBAR_KEY + value: "{{ palantiri_rollbar_key | default('ansible_undefined') }}" + - name: RSS_LIMIT + value: "2000000" + - name: LOG_LEVEL_STDOUT + value: "trace" + - name: SWARM_HOSTNAME + value: "{{ swarm_host_address }}" + - name: SWARM_PORT + value: "{{ swarm_master_port }}" diff --git a/ansible/group_vars/alpha-pheidi.yml b/ansible/group_vars/alpha-pheidi.yml new file mode 100644 index 00000000..2ea154c6 --- /dev/null +++ b/ansible/group_vars/alpha-pheidi.yml @@ -0,0 +1,66 @@ +name: pheidi + +inject_ca: false +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" +npm_version: "2.14.7" + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +container_envs: + - name: BIG_POPPA_HOST + value: "http://{{ big_poppa_host_address }}:{{ big_poppa_port }}" + - name: 
DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: DOMAIN + value: "{{ domain }}" + - name: EMAIL_GITHUB_ACCESS_TOKEN + value: "{{ pheidi_email_github_token }}" + - name: FULL_API_DOMAIN + value: "https://api.{{ domain }}" + - name: INTERCOM_ADMIN_ID + value: "{{ pheidi_intercom_admin_id }}" + - name: INTERCOM_API_KEY + value: "{{ pheidi_intercom_key }}" + - name: INTERCOM_APP_ID + value: "{{ pheidi_intercom_id }}" + - name: LOGGLY_TOKEN + value: "{{ loggly_token }}" + - name: MONGO + value: "mongodb://{{ api_mongo_auth }}@{{ mongo_hosts }}/{{ api_mongo_database }}" + - name: MONGO_REPLSET_NAME + value: "{{ api_mongo_replset_name | default('ansible_undefined') }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: ROLLBAR_KEY + value: "{{ pheidi_rollbar_token | default('ansible_undefined') }}" + - name: RUNNABOT_GITHUB_ACCESS_TOKENS + value: "{{ pheidi_runnabot_tokens }}" + - name: SENDGRID_KEY + value: "{{ sendgrid_key }}" + - name: USER_CONTENT_DOMAIN + value: "{{ user_content_domain }}" + - name: WEB_URL + value: "https://app.{{ domain }}" + - name: GITHUB_VARNISH_HOST + value: "{{ github_varnish_host }}" + - name: GITHUB_VARNISH_PORT + value: "{{ github_varnish_port }}" + - name: GITHUB_PROTOCOL + value: "http" diff --git a/ansible/group_vars/alpha-postgres.yml b/ansible/group_vars/alpha-postgres.yml new file mode 100644 index 00000000..79284dc5 --- /dev/null +++ b/ansible/group_vars/alpha-postgres.yml @@ -0,0 +1,22 @@ +name: postgres +hosted_ports: ["{{ pg_port }}"] +container_run_args: '' + +storage_size_gb: "{{ postgres_storage_size_gb }}" + +volume_mounts: + - name: "{{ name }}-db-claim" + path: /data/db + kind: persistent + - name: "{{ name }}-init-user-db" + 
path: /docker-entrypoint-initdb.d/ + kind: configMap + +container_image: postgres +container_tag: "9.5.2" + +container_envs: + - name: POSTGRES_PASSWORD + value: "{{ pg_pass }}" + - name: POSTGRES_USER + value: "{{ pg_user }}" diff --git a/ansible/group_vars/alpha-prometheus-alerts.yml b/ansible/group_vars/alpha-prometheus-alerts.yml new file mode 100644 index 00000000..79d9cb25 --- /dev/null +++ b/ansible/group_vars/alpha-prometheus-alerts.yml @@ -0,0 +1,19 @@ +name: prometheus-alerts + +container_image: prom/alertmanager +container_tag: v0.6.2 +hosted_ports: ["{{ prometheus_alert_port }}"] + +memory_hard_limit: 10G + +volume_mounts: + - name: "{{ name }}-conf" + path: /prometheus-alerts + kind: configMap + +container_run_args: > + -config.file=/prometheus-alerts/prometheus-alerts.yml + -data.retention 720h0m0s + -storage.path "data" + -web.external-url http://localhost + -web.listen-address ":{{ prometheus_alert_port }}" diff --git a/ansible/group_vars/alpha-prometheus.yml b/ansible/group_vars/alpha-prometheus.yml new file mode 100644 index 00000000..77af5c80 --- /dev/null +++ b/ansible/group_vars/alpha-prometheus.yml @@ -0,0 +1,27 @@ +name: prometheus + +container_image: prom/prometheus +container_tag: v1.4.1 +hosted_ports: ["{{ prometheus_port }}"] + +memory_hard_limit: 15G + +storage_size_gb: "{{ prometheus_storage_size_gb }}" + +volume_id: "{{ prometheus_volume_id }}" + +volume_mounts: + - name: "{{ name }}-conf" + path: /prometheus + kind: configMap + - name: "{{ name }}-db-claim" + path: /data + kind: persistent + +container_run_args: > + -alertmanager.url "{{ prometheus_alert_url }}" + -config.file=/prometheus/prometheus.yml + -storage.local.path "/data" + -storage.local.retention 168h0m0s + -web.external-url http://localhost + -web.listen-address ":{{ prometheus_port }}" diff --git a/ansible/group_vars/alpha-rabbit-create-exchanges.yml b/ansible/group_vars/alpha-rabbit-create-exchanges.yml new file mode 100644 index 00000000..53e3bf3b --- /dev/null +++ 
b/ansible/group_vars/alpha-rabbit-create-exchanges.yml @@ -0,0 +1,13 @@ +name: rabbit-create-exchanges + +container_image: tutum/curl +container_tag: "trusty" + +advance_arg: true + +container_run_args: > + curl -o exchanges.txt https://gist.githubusercontent.com/thejsj/a9d32a8c01cdd12e8e8e7e09b58935c4/raw/e37d7d7b8942120b30d1ba82f65173b8f16f112a/gistfile1.txt && + EXCHANGES=$(cat exchanges.txt) && + echo 'Exchanges: $EXCHANGES' && + for exchange in $EXCHANGES; do curl -i -u {{ rabbit_username }}:{{ rabbit_password }} -H 'content-type:application/json' -XPUT -d '{\"type\":\"fanout\",\"auto_delete\":false,\"durable\":true,\"arguments\":[]}' http://rabbitmq:15672/api/exchanges/%2f/$exchange; done && + echo Finished diff --git a/ansible/group_vars/alpha-rabbitmq.yml b/ansible/group_vars/alpha-rabbitmq.yml new file mode 100644 index 00000000..47d3adae --- /dev/null +++ b/ansible/group_vars/alpha-rabbitmq.yml @@ -0,0 +1,22 @@ +name: rabbitmq +service_name: rabbitmq + +hosted_ports: [ "{{ rabbit_port }}", 15672 ] + +storage_size_gb: "{{ rabbit_storage_size_gb }}" + +volume_id: "{{ rabbit_volume_id }}" + +volume_mounts: + - name: "{{ name }}-db-claim" + path: /var/lib/rabbitmq + kind: persistent + - name: "{{ name }}-conf" + path: /etc/rabbitmq/ + kind: configMap + +container_image: rabbitmq +container_tag: "3.5.3-management" +container_run_args: '' + +post_start_command: sleep 5; rabbitmq-plugins enable rabbitmq_management; true diff --git a/ansible/group_vars/alpha-redis.yml b/ansible/group_vars/alpha-redis.yml new file mode 100644 index 00000000..6922dcb0 --- /dev/null +++ b/ansible/group_vars/alpha-redis.yml @@ -0,0 +1,18 @@ +name: redis +hosted_ports: ["{{ redis_port }}"] + +storage_size_gb: "{{ redis_storage_size_gb }}" + +volume_id: "{{ redis_volume_id }}" + +volume_mounts: + - name: "{{ name }}-db-claim" + path: /data + kind: persistent + +container_image: redis +container_tag: "3.0.5" +container_run_args: '' + +service_type: "LoadBalancer" 
+service_load_balancer_ranges: ["{{ docks_ip_range }}"] diff --git a/ansible/group_vars/alpha-sauron.yml b/ansible/group_vars/alpha-sauron.yml new file mode 100644 index 00000000..70fd3a68 --- /dev/null +++ b/ansible/group_vars/alpha-sauron.yml @@ -0,0 +1,49 @@ +name: sauron + +inject_ca: false +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" +npm_version: "2.14.7" + +weave_path: "/bin/weave" +docker_path: "/bin/docker" + +volume_mounts: + - name: "{{ name }}-docker-ssl-certs" + path: /etc/ssl/docker + kind: configMap + +dockerfile_pre_install_commands: [ + "curl -L https://get.docker.com/builds/Linux/x86_64/docker-1.6.2 -o {{ docker_path }}", + "chmod a+x {{ docker_path }}", + "curl -L https://github.com/weaveworks/weave/releases/download/v1.5.0/weave -o {{ weave_path }}", + "chmod a+x {{ weave_path }}" +] + +container_envs: + - name: CONSUL_HOST + value: "{{ consul_internal_host_address }}:{{ consul_api_port }}" + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: LOGGLY_TOKEN + value: "{{ loggly_token }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: ROLLBAR_KEY + value: "{{ sauron_rollbar_key | default('ansible_undefined') }}" + - name: SWARM_HOSTNAME + value: "{{ swarm_host_address }}" + - name: SWARM_PORT + value: "{{ swarm_master_port }}" + - name: WEAVE_PATH + value: "{{ weave_path }}" diff --git a/ansible/group_vars/alpha-shiva.yml b/ansible/group_vars/alpha-shiva.yml new file mode 100644 index 00000000..d073ccf4 --- /dev/null +++ b/ansible/group_vars/alpha-shiva.yml @@ -0,0 +1,36 @@ +name: shiva + +inject_ca: false +repo: git@github.com:CodeNow/astral.git +node_version: "4.2.1" +npm_version: "2.8.3" + +container_run_args: npm run 
shiva-start + +container_envs: + - name: DATADOG_HOST + valueFrom: "spec.nodeName" + - name: DATADOG_PORT + value: "{{ datadog_port }}" + - name: RABBITMQ_HOSTNAME + value: "{{ rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" + - name: AWS_ACCESS_KEY_ID + value: "{{ aws_access_key_id }}" + - name: AWS_SECRET_ACCESS_KEY + value: "{{ aws_secret_access_key }}" + - name: NODE_ENV + value: "{{ node_env }}" + - name: REGISTRY_HOST + value: "{{ registry_host }}" + - name: ROLLBAR_KEY + value: "{{ shiva_rollbar_token | default('ansible_undefined') }}" + - name: DOCKER_PORT + value: "{{ docker_port }}" + - name: USER_VAULT_LOAD_BALANCER + value: "{{ user_vault_load_balancer }}" diff --git a/ansible/group_vars/alpha-socket-server.yml b/ansible/group_vars/alpha-socket-server.yml new file mode 100644 index 00000000..c741a336 --- /dev/null +++ b/ansible/group_vars/alpha-socket-server.yml @@ -0,0 +1,11 @@ +name: api-socket-server +service_name: apisock +num_replicas: 8 + +hosted_ports: [ "{{ api_port }}" ] + +envs: + - name: ROLLBAR_KEY + value: "{{ api_socket_server_rollbar_key | default('ansible_undefined') }}" + +container_envs: "{{ api_base_container_envs + envs }}" diff --git a/ansible/group_vars/alpha-starlord.yml b/ansible/group_vars/alpha-starlord.yml new file mode 100644 index 00000000..544d399d --- /dev/null +++ b/ansible/group_vars/alpha-starlord.yml @@ -0,0 +1,23 @@ +name: starlord + +container_image: "{{ registry_host }}/runnable/{{ name }}" +container_tag: "{{ git_branch }}" +inject_ca: false +repo: git@github.com:CodeNow/{{ name }}.git +node_version: "6.10.2" + +container_envs: + - name: NODE_ENV + value: "{{ node_env }}" + - name: VAULT_ENDPOINT + value: "http://{{ user_vault_host_address }}:{{ user_vault_port }}" + - name: VAULT_TOKEN + value: "{{starlord_vault_token}}" + - name: RABBITMQ_HOSTNAME + value: "{{ 
rabbit_host_address }}" + - name: RABBITMQ_PASSWORD + value: "{{ rabbit_password }}" + - name: RABBITMQ_PORT + value: "{{ rabbit_port }}" + - name: RABBITMQ_USERNAME + value: "{{ rabbit_username }}" diff --git a/ansible/group_vars/alpha-swarm-daemon.yml b/ansible/group_vars/alpha-swarm-daemon.yml new file mode 100644 index 00000000..643d73f3 --- /dev/null +++ b/ansible/group_vars/alpha-swarm-daemon.yml @@ -0,0 +1,20 @@ +name: "swarm-deamon" + +# container_kill_start settings +container_image: swarm +container_tag: 1.2.3 + +container_run_opts: > + --name {{ swarm_container_name }} + -d + +container_run_args: > + join + --heartbeat 15s --ttl 30s + --addr={{ ansible_default_ipv4.address }}:{{ docker_port }} + consul://{{ consul_host_address }}:{{ consul_api_port }}/swarm + +# consul values +consul_values: + - key: "swarm/version" + value: "{{ container_tag }}" diff --git a/ansible/group_vars/alpha-swarm-manager-metrics.yml b/ansible/group_vars/alpha-swarm-manager-metrics.yml new file mode 100644 index 00000000..dee7712c --- /dev/null +++ b/ansible/group_vars/alpha-swarm-manager-metrics.yml @@ -0,0 +1,36 @@ +--- +name: swarm-cloudwatch-reporter + +repository_name: furry-cactus +container_image: "{{ registry_address }}/runnable/{{ repository_name }}" +repo: git@github.com:CodeNow/{{ repository_name }}.git +node_version: "4.4.7" +npm_version: 3 + +inject_ca: false + +dockerfile_pre_install_commands: +- npm config set progress false + +cron_scedule: "*/5 * * * *" + +inject_ca: false + +volume_mounts: + - name: "{{ name }}-docker-ssl-certs" + path: /etc/ssl/docker + kind: configMap + +container_envs: + - name: AWS_ACCESS_KEY + value: "{{ aws_access_key }}" + - name: AWS_SECRET_KEY + value: "{{ aws_secret_key }}" + - name: DOCKER_CERT_PATH + value: "/etc/ssl/docker" + - name: ENVIRONMENT + value: "{{ env }}" + - name: SWARM_HOSTNAME + value: "{{ swarm_host_address }}" + - name: SWARM_PORT + value: "{{ swarm_master_port }}" diff --git 
a/ansible/group_vars/alpha-swarm-manager.yml b/ansible/group_vars/alpha-swarm-manager.yml new file mode 100644 index 00000000..c496f191 --- /dev/null +++ b/ansible/group_vars/alpha-swarm-manager.yml @@ -0,0 +1,22 @@ +name: swarm-manager +service_name: swarm + +hosted_ports: ["{{ swarm_master_port }}"] +container_image: runnable/swarm +container_tag: "v1.2.3-0" + +memory_hard_limit: 10G +memory_request: 2G + +volume_mounts: + - name: "{{ name }}-docker-ssl-certs" + path: /etc/ssl/docker + kind: configMap + +container_run_args: > + manage + --tlsverify + --tlscacert=/etc/ssl/docker/ca.pem + --tlscert=/etc/ssl/docker/cert.pem + --tlskey=/etc/ssl/docker/key.pem + consul://{{ consul_internal_host_address }}:{{ consul_api_port }}/swarm diff --git a/ansible/group_vars/alpha-user-vault.yml b/ansible/group_vars/alpha-user-vault.yml new file mode 100644 index 00000000..d1e37fa9 --- /dev/null +++ b/ansible/group_vars/alpha-user-vault.yml @@ -0,0 +1,20 @@ +name: user-vault + +container_image: vault +container_tag: 0.7.0 +hosted_ports: ["{{ user_vault_port }}"] + +service_type: "LoadBalancer" + +volume_mounts: + - name: "{{ name }}" + path: /config + kind: configMap + +container_run_args: > + vault server + -log-level=warn + -config=/config/vault.hcl + +add_capabilities: + - IPC_LOCK diff --git a/ansible/group_vars/alpha-vault-single.yml b/ansible/group_vars/alpha-vault-single.yml new file mode 100644 index 00000000..961a57bb --- /dev/null +++ b/ansible/group_vars/alpha-vault-single.yml @@ -0,0 +1,24 @@ +name: vault + +container_image: quay.io/runnable/vault +container_tag: v0.5.1 +hosted_ports: ["{{ vault_api_port }}"] + +volume_mounts: + - name: "{{ name }}" + path: /config + kind: configMap + +container_run_args: > + vault server + -log-level=warn + -config=/config/vault.hcl + +add_capabilities: + - IPC_LOCK + +service_type: "LoadBalancer" + +# Describes policy needed by Vault to create IAM users for orgs to get their org ids +# Docks will not come up if policy is incorrect 
+vault_seed_policy: "{\\\"Version\\\": \\\"2012-10-17\\\", \\\"Statement\\\": [{\\\"Action\\\": [\\\"ec2:DescribeInstances\\\", \\\"ec2:DescribeTags\\\"], \\\"Resource\\\": [\\\"*\\\"], \\\"Effect\\\": \\\"Allow\\\", \\\"Sid\\\": \\\"Stmt1445655064000\\\"}]}" diff --git a/ansible/group_vars/alpha-vault.yml b/ansible/group_vars/alpha-vault.yml new file mode 100644 index 00000000..50f7ee4b --- /dev/null +++ b/ansible/group_vars/alpha-vault.yml @@ -0,0 +1,41 @@ +name: vault + +# for database role +db_path: /opt/runnable/vault + +container_image: "{{ registry_host }}/runnable/vault" +container_tag: v0.5.1 + +container_run_opts: > + -d + -h {{ inventory_hostname }} + -v /opt/runnable/vault/vault.hcl:/vault.hcl:ro + -v /opt/vault/client-consul:/opt/vault/client-consul:ro + -v /opt/vault/server:/opt/vault/server:ro + -v {{ app_log_dir }}:{{ app_log_dir }}:rw + -p {{ ansible_default_ipv4.address }}:{{ vault_port }}:{{ vault_port }} + -p {{ ansible_default_ipv4.address }}:8201:8201 + --cap-add IPC_LOCK + --restart=always + +container_run_args: > + vault server + -log-level=warn + -config=/vault.hcl + +# vault seed data +# pulled 2015/16/12 - Bryan +vault_seed_values: + - key: secret/loggly + data: + token: "{{ loggly_token }}" + - key: secret/rabbitmq + data: + username: "{{ rabbit_username }}" + password: "{{ rabbit_password }}" + - key: secret/github/hellorunnable + data: + token: "{{ vault_hello_runnable_github_token }}" + +# for the love of all that you find holy, don't change the following unless you _KNOW WHAT YOU ARE DOING_. 
+vault_seed_policy: "{\\\"Version\\\": \\\"2012-10-17\\\", \\\"Statement\\\": [{\\\"Action\\\": [\\\"ec2:DescribeInstances\\\", \\\"ec2:DescribeTags\\\"], \\\"Resource\\\": [\\\"*\\\"], \\\"Effect\\\": \\\"Allow\\\", \\\"Sid\\\": \\\"Stmt1445655064000\\\"}]}" diff --git a/ansible/group_vars/alpha-web.yml b/ansible/group_vars/alpha-web.yml new file mode 100644 index 00000000..6686a7b0 --- /dev/null +++ b/ansible/group_vars/alpha-web.yml @@ -0,0 +1,41 @@ +name: "runnable-angular" + +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "0.10.38" +npm_version: "2.1.18" + +restart_policy: "Never" + +container_envs: + - name: API_SOCK_URL + value: "https://{{ api_socket_server_hostname }}" + - name: API_URL + value: "https://{{ api_hostname }}" + - name: MIXPANEL_PROXY_URL + value: "{{ mixpanel_proxy_url }}" + - name: SUPER_USER + value: "{{ super_user }}" + - name: AWS_ACCESS_KEY + value: "{{ aws_access_key }}" + - name: AWS_BUCKET + value: "app.{{ domain }}" + - name: AWS_REGION + value: "{{ web_aws_bucket_region | default('us-east-1') }}" + - name: AWS_SECRET_KEY + value: "{{ aws_secret_key }}" + - name: INTERCOM_APP_ID + value: "{{ web_intercom_id }}" + - name: MARKETING_URL + value: "{{ marketing_url }}" + - name: NODE_ENV + value: production + - name: SIFT_API_KEY + value: "{{ web_sift_public_key | default('ansible_undefined') }}" + - name: STRIPE_TOKEN + value: "{{ cream_stripe_publishable_key | default('ansible_undefined') }}" + - name: USER_CONTENT_DOMAIN + value: "{{ user_content_domain }}" + +npm_start_command: run gruntDeploy + +container_run_args: npm {{ npm_start_command }} diff --git a/ansible/group_vars/alpha-workers.yml b/ansible/group_vars/alpha-workers.yml new file mode 100644 index 00000000..22ac1dc3 --- /dev/null +++ b/ansible/group_vars/alpha-workers.yml @@ -0,0 +1,13 @@ +name: api-worker + +memory_hard_limit: 5G +memory_request: 1G +num_replicas: 4 + +envs: + - name: IS_QUEUE_WORKER + value: "true" + - name: ROLLBAR_KEY + value: "{{ 
api_workers_rollbar_key | default('ansible_undefined') }}" + +container_envs: "{{ api_base_container_envs + envs }}" diff --git a/ansible/group_vars/current_versions.yml b/ansible/group_vars/current_versions.yml new file mode 100644 index 00000000..5f5fe7e2 --- /dev/null +++ b/ansible/group_vars/current_versions.yml @@ -0,0 +1,13 @@ +detention_branch=v1.3.2 +eru_branch=v1.1.0 +khronos_branch=v2.5.2 +link_branch=v1.3.1 +metis_branch=v7.0.6 +optimus_branch=v4.0.3 +palantiri_branch=v0.1.10 +sauron_branch=v4.0.2 +shiva_branch=v7.0.6 +angular_branch=v4.2.0 +api_branch=v6.32.4 +navi_branch=v5.0.2 + diff --git a/ansible/group_vars/migration-router.yml b/ansible/group_vars/migration-router.yml new file mode 100644 index 00000000..e4b95db5 --- /dev/null +++ b/ansible/group_vars/migration-router.yml @@ -0,0 +1 @@ +name: migration-router diff --git a/ansible/group_vars/node-base.yml b/ansible/group_vars/node-base.yml new file mode 100644 index 00000000..cc9ba621 --- /dev/null +++ b/ansible/group_vars/node-base.yml @@ -0,0 +1,4 @@ +name: "node_base" +container_image: "{{ registry_host }}/runnable/{{ name }}" +dockerfile: "node_base/Dockerfile" +container_tag: "latest" diff --git a/ansible/image-builder.yml b/ansible/image-builder.yml new file mode 100644 index 00000000..3fca438c --- /dev/null +++ b/ansible/image-builder.yml @@ -0,0 +1,13 @@ +--- +- hosts: user-local + vars_files: + - group_vars/alpha-image-builder.yml + roles: + - { role: build_with_dockerfile } + +- hosts: "{{ dock | default('docks') }}" + vars_files: + - group_vars/alpha-image-builder.yml + roles: + - { role: pull-image } + - { role: consul_value, tags: [consul_value] } diff --git a/ansible/ingress-proxy.yml b/ansible/ingress-proxy.yml new file mode 100644 index 00000000..233cba9d --- /dev/null +++ b/ansible/ingress-proxy.yml @@ -0,0 +1,9 @@ +--- +- hosts: ingress + vars_files: + - group_vars/alpha-ingress-proxy.yml + roles: + - role: runnable-domain-proxy + + - role: k8-deployment + - role: k8-service diff 
--git a/ansible/keymaker-http.yml b/ansible/keymaker-http.yml new file mode 100644 index 00000000..baa18b45 --- /dev/null +++ b/ansible/keymaker-http.yml @@ -0,0 +1,9 @@ +--- +- hosts: keymaker + vars_files: + - group_vars/alpha-keymaker-base.yml + - group_vars/alpha-keymaker-http.yml + roles: + - role: builder + - role: k8-deployment + - role: k8-service diff --git a/ansible/keymaker-worker.yml b/ansible/keymaker-worker.yml new file mode 100644 index 00000000..2fe0189f --- /dev/null +++ b/ansible/keymaker-worker.yml @@ -0,0 +1,8 @@ +--- +- hosts: keymaker + vars_files: + - group_vars/alpha-keymaker-base.yml + - group_vars/alpha-keymaker-worker.yml + roles: + - role: builder + - role: k8-deployment diff --git a/ansible/keymaker.yml b/ansible/keymaker.yml new file mode 100644 index 00000000..942890b7 --- /dev/null +++ b/ansible/keymaker.yml @@ -0,0 +1,2 @@ +- include: keymaker-http.yml +- include: keymaker-worker.yml diff --git a/ansible/khronos.yml b/ansible/khronos.yml new file mode 100644 index 00000000..cb26be0b --- /dev/null +++ b/ansible/khronos.yml @@ -0,0 +1,13 @@ +--- +- hosts: khronos + vars_files: + - group_vars/alpha-khronos.yml + roles: + - role: notify + rollbar_token: "{{ rollbar_khronos_token }}" + + - role: builder + + - role: khronos + - role: docker_client + - role: k8-deployment diff --git a/ansible/krain.yml b/ansible/krain.yml new file mode 100644 index 00000000..10722e83 --- /dev/null +++ b/ansible/krain.yml @@ -0,0 +1,11 @@ +--- +- hosts: "{{ dock | default('docks') }}" + vars_files: + - group_vars/alpha-krain.yml + roles: + - { role: notify, tags: [notify] } + - { role: build_essential } + - { role: git_repo } + - { role: node_service } + - { role: loggly } + - { role: consul_value, tags: [consul_value] } diff --git a/ansible/library/nodejs b/ansible/library/nodejs new file mode 100644 index 00000000..506c0cb5 --- /dev/null +++ b/ansible/library/nodejs @@ -0,0 +1,82 @@ +#!/bin/bash + +# From 
http://jpmens.net/2012/07/05/shell-scripts-as-ansible-modules/ +eval $(sed -e "s/\s?\([^=]+\)\s?=\s?\(\x22\([^\x22]+\)\x22|\x27\([^\x27]+\)\x27|\(\S+\)\)\s?/\1='\2'/p" $1) + +if [ -z $version ] +then + echo "{\"failed\": true, \"msg\": \"version is required. E.g. 0.10.8\"}" + exit 1 +fi + +have=$(node -v 2>/dev/null) + +# check if node is already installed +if [ ! $have ] +then + # NO NODE + cd /tmp + file="node-v$version-linux-x64" + wget http://nodejs.org/dist/v$version/$file.tar.gz &>/dev/null + if [ ! -f $file.tar.gz ] + then + echo "{\"failed\": true, \"msg\": \"Failed to download node.js binary\"}" + exit + fi + tar xf $file.tar.gz + cd $file + ./bin/npm install n &>/dev/null + if [ ! $? -eq 0 ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install 'n' locally\"}" + exit 1 + fi + ./node_modules/.bin/n $version &>/dev/null + if [ ! $? -eq 0 ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install node.js to system\"}" + exit 1 + fi + installed=$(node -v 2>/dev/null) + if [ $installed != "v$version" ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install node.js version $version\"}" + exit 1 + else + echo "{\"changed\": true, \"version\": \"$version\"}" + exit + fi +else + # WRONG VERSION + if [ $have != "v$version" ] + then + # do we have n? + nversion=$(n -V 2> /dev/null) + if [ ! $nversion ] + then + npm install -g n &> /dev/null + if [ ! $? -eq 0 ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install 'n' globally\"}" + exit 1 + fi + fi + n $version &> /dev/null + if [ ! $? 
-eq 0 ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install node.js version $version\"}" + exit 1 + fi + installed=$(node -v 2>/dev/null) + if [ $installed != "v$version" ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install node.js version $version\"}" + exit 1 + else + echo "{\"changed\": true, \"version\": \"$version\"}" + exit + fi + else + echo "{\"changed\": false, \"version\": \"$version\"}" + fi +fi \ No newline at end of file diff --git a/ansible/library/rollbar/rollbar.py b/ansible/library/rollbar/rollbar.py new file mode 100644 index 00000000..bbc2aa5d --- /dev/null +++ b/ansible/library/rollbar/rollbar.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014, Max Riveiro, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: rollbar_deployment +version_added: 1.6 +author: "Max Riveiro (@kavu)" +short_description: Notify Rollbar about app deployments +description: + - Notify Rollbar about app deployments + (see https://rollbar.com/docs/deploys_other/) +options: + token: + description: + - Your project access token. + required: true + environment: + description: + - Name of the environment being deployed, e.g. 'production'. + required: true + revision: + description: + - Revision number/sha being deployed. + required: true + user: + description: + - User who deployed. 
+ required: false + rollbar_user: + description: + - Rollbar username of the user who deployed. + required: false + comment: + description: + - Deploy comment (e.g. what is being deployed). + required: false + url: + description: + - Optional URL to submit the notification to. + required: false + default: 'https://api.rollbar.com/api/1/deploy/' + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. + This should only be used on personally controlled sites using + self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] +''' + +EXAMPLES = ''' +- rollbar_deployment: token=AAAAAA + environment='staging' + user='ansible' + revision=4.2, + rollbar_user='admin', + comment='Test Deploy' +''' + +import urllib + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True), + environment=dict(required=True), + revision=dict(required=True), + user=dict(required=False), + rollbar_user=dict(required=False), + comment=dict(required=False), + url=dict( + required=False, + default='https://api.rollbar.com/api/1/deploy/' + ), + validate_certs=dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + if module.check_mode: + module.exit_json(changed=True) + + params = dict( + access_token=module.params['token'], + environment=module.params['environment'], + revision=module.params['revision'] + ) + + if module.params['user']: + params['local_username'] = module.params['user'] + + if module.params['rollbar_user']: + params['rollbar_username'] = module.params['rollbar_user'] + + if module.params['comment']: + params['comment'] = module.params['comment'] + + url = module.params.get('url') + + try: + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + except Exception, e: + module.fail_json(msg='Unable to notify Rollbar: %s' % e) + else: + if info['status'] == 200: + module.exit_json(changed=True) + else: + 
module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() \ No newline at end of file diff --git a/ansible/link.yml b/ansible/link.yml new file mode 100644 index 00000000..b8652fe0 --- /dev/null +++ b/ansible/link.yml @@ -0,0 +1,10 @@ +--- +- hosts: link + vars_files: + - group_vars/alpha-link.yml + roles: + - role: notify + + - role: builder + + - role: k8-deployment diff --git a/ansible/marketing.yml b/ansible/marketing.yml new file mode 100644 index 00000000..64a8a07b --- /dev/null +++ b/ansible/marketing.yml @@ -0,0 +1,7 @@ +--- +- hosts: marketing + vars_files: + - "group_vars/alpha-marketing.yml" + roles: + - { role: notify, tags: "notify" } + - { role: builder, tags: "build" } diff --git a/ansible/metabase.yml b/ansible/metabase.yml new file mode 100644 index 00000000..18535379 --- /dev/null +++ b/ansible/metabase.yml @@ -0,0 +1,9 @@ +--- +- hosts: metabase + vars_files: + - group_vars/alpha-metabase.yml + roles: + - role: notify + + - role: k8-deployment + - role: k8-service diff --git a/ansible/migration-router.yml b/ansible/migration-router.yml new file mode 100644 index 00000000..a62438e7 --- /dev/null +++ b/ansible/migration-router.yml @@ -0,0 +1,4 @@ +--- +- hosts: migration-router + roles: + - { role: nginx } diff --git a/ansible/mongo-create-users.yml b/ansible/mongo-create-users.yml new file mode 100644 index 00000000..bb642031 --- /dev/null +++ b/ansible/mongo-create-users.yml @@ -0,0 +1,6 @@ +--- +- hosts: mongo + vars_files: + - group_vars/alpha-mongo-create-users.yml + roles: + - role: k8-job diff --git a/ansible/mongo-seed-db.yml b/ansible/mongo-seed-db.yml new file mode 100644 index 00000000..41f6cc82 --- /dev/null +++ b/ansible/mongo-seed-db.yml @@ -0,0 +1,6 @@ +--- +- hosts: api + vars_files: + - group_vars/alpha-mongo-seed-db.yml + roles: + - role: k8-job diff --git a/ansible/mongo.yml b/ansible/mongo.yml new file mode 
100644 index 00000000..abfa555d --- /dev/null +++ b/ansible/mongo.yml @@ -0,0 +1,10 @@ +--- +- hosts: mongo + vars_files: + - group_vars/alpha-mongo.yml + roles: + - role: notify + + - role: database + - role: k8-deployment + - role: k8-service diff --git a/ansible/navi-port-router.yml b/ansible/navi-port-router.yml new file mode 100644 index 00000000..837cde38 --- /dev/null +++ b/ansible/navi-port-router.yml @@ -0,0 +1,39 @@ +--- +- hosts: navi-port-router + tasks: + - name: ensure target_ip is defined + fail: msg="`target_ip` needs to be defined to run this role. Ip of navi proxy" + when: target_ip is not defined + + - name: ensure ip_forward is one + become: yes + shell: echo 1 > /proc/sys/net/ipv4/ip_forward + + - name: proxy 80:442, 444:65535 to 80 and 443 to 443 + tags: deploy + become: yes + iptables: + table=nat + chain=PREROUTING + in_interface=eth0 + protocol=tcp + destination_port={{ item.from }} + jump=DNAT + to_destination={{ target_ip }}:{{ item.to }} + comment="Redirect {{ item.from }} traffic to navi" + with_items: + - from: 80:442 + to: 80 + - from: 444:65535 + to: 80 + - from: 443 + to: 443 + + - name: masquerade forwarded traffic + tags: deploy + become: yes + iptables: + table=nat + chain=POSTROUTING + jump=MASQUERADE + comment="MASQUERADE" diff --git a/ansible/navi-proxy.yml b/ansible/navi-proxy.yml new file mode 100644 index 00000000..ac29def3 --- /dev/null +++ b/ansible/navi-proxy.yml @@ -0,0 +1,9 @@ +--- +- hosts: navi-proxy + vars_files: + - group_vars/alpha-navi-proxy.yml + roles: + - role: content-domain-proxy + + - role: k8-deployment + - role: k8-service diff --git a/ansible/navi.yml b/ansible/navi.yml new file mode 100644 index 00000000..fd02a52f --- /dev/null +++ b/ansible/navi.yml @@ -0,0 +1,12 @@ +--- +- hosts: navi + vars_files: + - group_vars/alpha-navi.yml + roles: + - role: notify + rollbar_token: "{{ navi_rollbar_token }}" + + - role: builder + + - role: k8-deployment + - role: k8-service diff --git a/ansible/node-base.yml 
b/ansible/node-base.yml new file mode 100644 index 00000000..4c638deb --- /dev/null +++ b/ansible/node-base.yml @@ -0,0 +1,6 @@ +--- +- hosts: "{{ target_host }}" + vars_files: + - "group_vars/node-base.yml" + roles: + - { role: builder, tags: "build" } diff --git a/ansible/node-exporter.yml b/ansible/node-exporter.yml new file mode 100644 index 00000000..20a3768f --- /dev/null +++ b/ansible/node-exporter.yml @@ -0,0 +1,7 @@ +--- +- hosts: docks + vars_files: + - "group_vars/alpha-node-exporter.yml" + roles: + - { role: notify, tags: "notify" } + - { role: container_kill_start } diff --git a/ansible/oneOffScripts/add-new-ssh-key.yml b/ansible/oneOffScripts/add-new-ssh-key.yml new file mode 100644 index 00000000..b52024ea --- /dev/null +++ b/ansible/oneOffScripts/add-new-ssh-key.yml @@ -0,0 +1,16 @@ +--- +- name: Rotate SSH Keys + hosts: all + gather_facts: no + vars: + - new_pub_key: "{{ pub_key_path }}.pub" + - new_priv_key: "{{ pub_key_path }}.pem" + + tasks: + - name: Add new key to authorized_keys + authorized_key: key="{{ lookup('file', new_pub_key) }}" + user=ubuntu state=present + + - name: Make use of new private key when connecting + set_fact: ansible_private_ssh_key={{ new_priv_key }} + diff --git a/ansible/oneOffScripts/changeCharonLogLevel.yml b/ansible/oneOffScripts/changeCharonLogLevel.yml new file mode 100644 index 00000000..d6fcdc49 --- /dev/null +++ b/ansible/oneOffScripts/changeCharonLogLevel.yml @@ -0,0 +1,21 @@ +- hosts: docks + gather_facts: no + tasks: + - fail: msg="`log_level` needs to be defined for this role" + when: log_level is not defined + + - name: put log level in charon upstart file + become: yes + lineinfile: + dest: /etc/init/charon.conf + state: present + backup: yes + regexp: ^env LOG_LEVEL=.+$ + insertafter: ^env DATADOG_PORT=[0-9]+$ + line: env LOG_LEVEL={{ log_level }} + + - name: restart charon + become: yes + service: + name: charon + state: restarted diff --git a/ansible/oneOffScripts/killWeaveContainers.yml 
b/ansible/oneOffScripts/killWeaveContainers.yml new file mode 100644 index 00000000..b43c1826 --- /dev/null +++ b/ansible/oneOffScripts/killWeaveContainers.yml @@ -0,0 +1,7 @@ +- hosts: docks + gather_facts: no + serial: 1 + tasks: + - name: kill weave containers + become: true + shell: 'sudo docker ps | grep weaveworks/weave | cut -d" " -f 1 | xargs sudo docker kill' diff --git a/ansible/oneOffScripts/ntp.yml b/ansible/oneOffScripts/ntp.yml new file mode 100644 index 00000000..74f80749 --- /dev/null +++ b/ansible/oneOffScripts/ntp.yml @@ -0,0 +1,6 @@ +--- +- hosts: all + gather_facts: no + + roles: + - { role: package_ntp, tags: "ntp" } diff --git a/ansible/oneOffScripts/ntpForceUpdate.yml b/ansible/oneOffScripts/ntpForceUpdate.yml new file mode 100644 index 00000000..45a92f8f --- /dev/null +++ b/ansible/oneOffScripts/ntpForceUpdate.yml @@ -0,0 +1,10 @@ +--- +- hosts: all + gather_facts: no + tasks: + +# pl test on test box + - name: Force ntp update + become: true + shell: + "service ntp stop && /usr/sbin/ntpdate pool.ntp.org && service ntp start" diff --git a/ansible/oneOffScripts/pullWeaveImage.yml b/ansible/oneOffScripts/pullWeaveImage.yml new file mode 100644 index 00000000..7633bf89 --- /dev/null +++ b/ansible/oneOffScripts/pullWeaveImage.yml @@ -0,0 +1,6 @@ +- hosts: docks + gather_facts: no + tasks: + - name: pull weave image + become: true + shell: 'sudo docker pull weaveworks/weave:1.4.6' diff --git a/ansible/oneOffScripts/removeDockService.yml b/ansible/oneOffScripts/removeDockService.yml new file mode 100644 index 00000000..5ebd99ab --- /dev/null +++ b/ansible/oneOffScripts/removeDockService.yml @@ -0,0 +1,22 @@ +- hosts: docks + gather_facts: no + tasks: + - fail: msg="`name` needs to be defined for this role" + when: name is not defined + + - name: stop service + become: true + service: + name={{ name }} + state=stopped + + - name: remove files + become: true + file: + path={{ item }} + state=absent + with_items: + - /etc/init/{{ name }}.conf + - 
/etc/init/{{ name }}.conf.bak + - /etc/init/{{ name }}.override + - /opt/runnable/{{ name }} diff --git a/ansible/oneOffScripts/removeOldRsyslogConfigs.yml b/ansible/oneOffScripts/removeOldRsyslogConfigs.yml new file mode 100644 index 00000000..cdd42858 --- /dev/null +++ b/ansible/oneOffScripts/removeOldRsyslogConfigs.yml @@ -0,0 +1,23 @@ +- hosts: "{{ host }}" + gather_facts: no + tasks: + - fail: msg="`host` needs to be defined for this role" + when: host is not defined + - fail: msg="`name` needs to be defined for this role" + when: name is not defined + + - name: stop service + become: true + service: + name=rsyslog + state=stopped + + - name: remove files + become: true + file: + path="{{ item }}" + state=absent + with_items: + - /etc/rsyslog.d/21-filemonitoring-{{ name }}.conf + - /etc/rsyslog.d/21-rotated-{{ name }}.conf + - /etc/rsyslog.d/15-loggly-{{ name }}.conf diff --git a/ansible/oneOffScripts/reset-registry.sh.j2 b/ansible/oneOffScripts/reset-registry.sh.j2 new file mode 100644 index 00000000..49a2c12a --- /dev/null +++ b/ansible/oneOffScripts/reset-registry.sh.j2 @@ -0,0 +1,29 @@ +#!/bin/bash +# PLAYBOOK DOCUMENTATION: https://github.com/CodeNow/devops-scripts/wiki/Reset-Customer-Local-Registry +set -e + +export DOCK_INIT_BASE="/opt/runnable/dock-init" +export CONSUL_HOSTNAME="{{ consul_host_address }}" +export CONSUL_PORT="8500" +export ORG_ID="$(cat /etc/default/docker | perl -n -e'/org=(\d+)/ && print $1')" +export VAULT_TOKEN="{{ vault_token }}" + +REGISTRY_STATUS=$(curl localhost/debug/health) +if [[ "$REGISTRY_STATUS" == "404 page not found" ]]; then + echo "do not need to fix registry" + exit 0 +fi + +CURRENT_REGISTRY=$(docker ps | awk '/bin\/registry/{ print $1 }') +docker stop -t 5 "$CURRENT_REGISTRY" +docker rm "$CURRENT_REGISTRY" + +source "$DOCK_INIT_BASE/lib/container.sh" + +echo "starting container" +container::_start_registry_container + +sleep 5 + +echo "checking container" +curl localhost/debug/health diff --git 
a/ansible/oneOffScripts/resetRegistry.yml b/ansible/oneOffScripts/resetRegistry.yml new file mode 100644 index 00000000..49e5b73a --- /dev/null +++ b/ansible/oneOffScripts/resetRegistry.yml @@ -0,0 +1,25 @@ +--- +# DOCUMENTATION https://github.com/CodeNow/devops-scripts/wiki/Reset-Customer-Local-Registry +- hosts: docks + gather_facts: no + tasks: + - fail: msg="`vault_token` needs to be defined for this role" + when: vault_token is not defined + + - fail: msg="`consul_host_address` needs to be defined for this role" + when: consul_host_address is not defined + + - name: put script on machine + become: yes + template: + dest: /tmp/reset-registry.sh + src: reset-registry.sh.j2 + mode: 0550 + + - name: run script + become: yes + command: /tmp/reset-registry.sh + + - name: remove script + become: yes + command: shred -u /tmp/reset-registry.sh diff --git a/ansible/oneOffScripts/resetWeave.yml b/ansible/oneOffScripts/resetWeave.yml new file mode 100644 index 00000000..ef8ebe32 --- /dev/null +++ b/ansible/oneOffScripts/resetWeave.yml @@ -0,0 +1,6 @@ +- hosts: docks + gather_facts: no + tasks: + - name: reset weave + become: true + shell: 'sudo weave reset' diff --git a/ansible/oneOffScripts/restartServiceOnAllHosts.yml b/ansible/oneOffScripts/restartServiceOnAllHosts.yml new file mode 100644 index 00000000..f14b9d43 --- /dev/null +++ b/ansible/oneOffScripts/restartServiceOnAllHosts.yml @@ -0,0 +1,12 @@ +--- +- hosts: all + gather_facts: no + tasks: + - fail: msg="`name` needs to be defined for this role" + when: name is not defined + + - name: restart {{ name }} + become: true + service: + name={{ name }} + state=restarted diff --git a/ansible/oneOffScripts/runSingleRole.yml b/ansible/oneOffScripts/runSingleRole.yml new file mode 100644 index 00000000..e8536afe --- /dev/null +++ b/ansible/oneOffScripts/runSingleRole.yml @@ -0,0 +1,5 @@ +--- +- hosts: "{{ host }}" + gather_facts: no + roles: + - { role: "../roles/{{ role }}" } diff --git 
a/ansible/oneOffScripts/set-ssh-key.yml b/ansible/oneOffScripts/set-ssh-key.yml new file mode 100644 index 00000000..1fa77239 --- /dev/null +++ b/ansible/oneOffScripts/set-ssh-key.yml @@ -0,0 +1,12 @@ +--- +- name: Rotate SSH Keys + hosts: all + gather_facts: no + vars: + - new_pub_key: "{{ pub_key_path }}.pub" + - new_priv_key: "{{ pub_key_path }}.pem" + + tasks: + - name: Make our new key exclusive + authorized_key: key="{{ lookup('file', new_pub_key) }}" + user=ubuntu state=present exclusive=yes diff --git a/ansible/optimus.yml b/ansible/optimus.yml new file mode 100644 index 00000000..491c8e1f --- /dev/null +++ b/ansible/optimus.yml @@ -0,0 +1,12 @@ +--- +- hosts: optimus + vars_files: + - group_vars/alpha-optimus.yml + roles: + - role: notify + rollbar_token: "{{ optimus_rollbar_token }}" + + - role: builder + + - role: k8-deployment + - role: k8-service diff --git a/ansible/palantiri.yml b/ansible/palantiri.yml new file mode 100644 index 00000000..4fd29f7f --- /dev/null +++ b/ansible/palantiri.yml @@ -0,0 +1,12 @@ +--- +- hosts: palantiri + vars_files: + - group_vars/alpha-palantiri.yml + roles: + - role: notify + rollbar_token: "{{ palantiri_rollbar_token }}" + + - role: builder + + - role: docker_client + - role: k8-deployment diff --git a/ansible/pheidi.yml b/ansible/pheidi.yml new file mode 100644 index 00000000..f7c2807f --- /dev/null +++ b/ansible/pheidi.yml @@ -0,0 +1,11 @@ +--- +- hosts: pheidi + vars_files: + - group_vars/alpha-pheidi.yml + roles: + - role: notify + rollbar_token: "{{ pheidi_rollbar_token }}" + + - role: builder + + - role: k8-deployment diff --git a/ansible/postgres.yml b/ansible/postgres.yml new file mode 100644 index 00000000..01f9a403 --- /dev/null +++ b/ansible/postgres.yml @@ -0,0 +1,11 @@ +--- +- hosts: postgres + vars_files: + - group_vars/alpha-postgres.yml + roles: + - role: notify + + - role: database + - role: postgres + - role: k8-deployment + - role: k8-service diff --git a/ansible/prometheus-alerts.yml 
b/ansible/prometheus-alerts.yml new file mode 100644 index 00000000..6f8126e3 --- /dev/null +++ b/ansible/prometheus-alerts.yml @@ -0,0 +1,11 @@ +--- +- hosts: prometheus + vars_files: + - group_vars/alpha-prometheus-alerts.yml + roles: + - role: notify + + - role: prometheus-alerts + + - role: k8-deployment + - role: k8-service diff --git a/ansible/prometheus.yml b/ansible/prometheus.yml new file mode 100644 index 00000000..4760e5e8 --- /dev/null +++ b/ansible/prometheus.yml @@ -0,0 +1,12 @@ +--- +- hosts: prometheus + vars_files: + - group_vars/alpha-prometheus.yml + roles: + - role: notify + + - role: prometheus + + - role: database + - role: k8-deployment + - role: k8-service diff --git a/ansible/rabbit-create-exchanges.yml b/ansible/rabbit-create-exchanges.yml new file mode 100644 index 00000000..8f0f894b --- /dev/null +++ b/ansible/rabbit-create-exchanges.yml @@ -0,0 +1,6 @@ +--- +- hosts: rabbitmq + vars_files: + - group_vars/alpha-rabbit-create-exchanges.yml + roles: + - role: k8-job diff --git a/ansible/rabbitmq.yml b/ansible/rabbitmq.yml new file mode 100644 index 00000000..fb87be41 --- /dev/null +++ b/ansible/rabbitmq.yml @@ -0,0 +1,11 @@ +--- +- hosts: rabbitmq + vars_files: + - group_vars/alpha-rabbitmq.yml + roles: + - role: notify + + - role: database + - role: rabbitmq + - role: k8-deployment + - role: k8-service diff --git a/ansible/redis.yml b/ansible/redis.yml new file mode 100644 index 00000000..fc9b52fc --- /dev/null +++ b/ansible/redis.yml @@ -0,0 +1,10 @@ +--- +- hosts: redis + vars_files: + - group_vars/alpha-redis.yml + roles: + - role: notify + + - role: database + - role: k8-deployment + - role: k8-service diff --git a/ansible/roles/add_image_tags/tasks/main.yml b/ansible/roles/add_image_tags/tasks/main.yml new file mode 100644 index 00000000..8ed70278 --- /dev/null +++ b/ansible/roles/add_image_tags/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: add additional tags + become: true + shell: docker tag "{{ container_image }}:{{ container_tag 
}}" "registry.runnable.com/runnable/{{ tag_name_item }}:{{ container_tag }}" + when: additional_tags is defined + with_items: "{{ additional_tags }}" + loop_control: + loop_var: tag_name_item diff --git a/ansible/roles/apt_update/tasks/main.yml b/ansible/roles/apt_update/tasks/main.yml new file mode 100644 index 00000000..36fd4b1f --- /dev/null +++ b/ansible/roles/apt_update/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: update cache for apt repository + become: true + apt: + update_cache=yes diff --git a/ansible/roles/apt_upgrade/tasks/main.yml b/ansible/roles/apt_upgrade/tasks/main.yml new file mode 100644 index 00000000..77cd827e --- /dev/null +++ b/ansible/roles/apt_upgrade/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: upgrade a server + become: true + apt: + upgrade=full diff --git a/ansible/roles/bash_aliases/tasks/main.yml b/ansible/roles/bash_aliases/tasks/main.yml new file mode 100644 index 00000000..cbd9b465 --- /dev/null +++ b/ansible/roles/bash_aliases/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: copy bash aliases to ubuntu + tags: [ loggly, bash_aliases ] + template: + src=dot_bash_aliases.sh.j2 + dest=/home/ubuntu/.bash_aliases + owner=ubuntu + group=ubuntu + mode=0700 diff --git a/ansible/roles/bash_aliases/templates/dot_bash_aliases.sh.j2 b/ansible/roles/bash_aliases/templates/dot_bash_aliases.sh.j2 new file mode 100644 index 00000000..84eb9c94 --- /dev/null +++ b/ansible/roles/bash_aliases/templates/dot_bash_aliases.sh.j2 @@ -0,0 +1,51 @@ +# Follows the logfile for a given app_name interpolating the datetime string into the logpath (/var/log/runnable/YYYY/MM/DD/HH/.log) +# Usage: logtail + +logpath() { + local app_name="$1" + local datetime=`date +%Y/%m/%d/%H` + local app_log_dir="{{ app_log_dir }}" + echo "${app_log_dir}/${datetime}/${app_name}.log" +} + +logtail() { + local app_name="$1" + tail -f "$(logpath ${app_name})" | bunyan +} + +# Follows the logfile for a given app_name interpolating the datetime string into the logpath 
(/var/log/runnable/YYYY/MM/DD/HH/.log) +# Usage: lograw +lograw() { + local app_name="$1" + tail -f "$(logpath ${app_name})" +} + +# Just display the last few lines of a log (2nd arg specifies # of lines or tail default if left blank) +# Usage: loglast [ <#_of_lines> ] +loglast() { + local app_name="$1" + local tailopts="" + # do not check contents of $2, just if exists, escape jinja2 keyword. + if [ 2 -eq "{{ '${#}' }}" ] ; then + tailopts="-${2}" + fi + tail "${tailopts}" "$(logpath ${app_name})" | bunyan +} + +# Follow a log, grep for "${regexp}" +# Usage: greplog +greplog() { + local app_name="$1" + local regexp="" + # again, not checking arg2, just making sure it exists, and espace jinja2 keyword. + if [ 2 -eq "{{ '${#}' }}" ] ; then + regexp="${2}" + fi + tail -f "$(logpath ${app_name})" | grep "${regexp}" | bunyan +} + +# Outputs contents of an npm start log for , if it exists, into a pager for reading. +# Usage: npmlog +npmlog() { + less "$(logpath ${app_name})" +} diff --git a/ansible/roles/bastion_sshd/tasks/main.yml b/ansible/roles/bastion_sshd/tasks/main.yml new file mode 100644 index 00000000..5c3c80d8 --- /dev/null +++ b/ansible/roles/bastion_sshd/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Install Bastion SSHd Configuration + tags: bastion-sshd + become: yes + template: + src=sshd_config.j2 + dest=/etc/ssh/sshd_config + owner=root + group=root + mode=444 + +- name: Restart SSHd + tags: bastion-sshd + become: yes + service: + name=ssh + state=restarted diff --git a/ansible/roles/bastion_sshd/templates/sshd_config.j2 b/ansible/roles/bastion_sshd/templates/sshd_config.j2 new file mode 100644 index 00000000..6620a031 --- /dev/null +++ b/ansible/roles/bastion_sshd/templates/sshd_config.j2 @@ -0,0 +1,90 @@ +# Package generated configuration file +# See the sshd_config(5) manpage for details + +# What ports, IPs and protocols we listen for +# - always use 22 as a backup, block via SG +Port 22 +Port {{ bastion_sshd_port }} +# Use these options to restrict 
which interfaces/protocols sshd will bind to +#ListenAddress :: +#ListenAddress 0.0.0.0 +Protocol 2 +# HostKeys for protocol version 2 +HostKey /etc/ssh/ssh_host_rsa_key +HostKey /etc/ssh/ssh_host_dsa_key +HostKey /etc/ssh/ssh_host_ecdsa_key +HostKey /etc/ssh/ssh_host_ed25519_key +#Privilege Separation is turned on for security +UsePrivilegeSeparation yes + +# Lifetime and size of ephemeral version 1 server key +KeyRegenerationInterval 3600 +ServerKeyBits 1024 + +# Logging +SyslogFacility AUTH +LogLevel INFO + +# Authentication: +LoginGraceTime 120 +PermitRootLogin no +StrictModes yes + +RSAAuthentication yes +PubkeyAuthentication yes +#AuthorizedKeysFile %h/.ssh/authorized_keys + +# Don't read the user's ~/.rhosts and ~/.shosts files +IgnoreRhosts yes +# For this to work you will also need host keys in /etc/ssh_known_hosts +RhostsRSAAuthentication no +# similar for protocol version 2 +HostbasedAuthentication no +# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication +#IgnoreUserKnownHosts yes + +# To enable empty passwords, change to yes (NOT RECOMMENDED) +PermitEmptyPasswords no + +# Change to yes to enable challenge-response passwords (beware issues with +# some PAM modules and threads) +ChallengeResponseAuthentication no + +# Change to no to disable tunnelled clear text passwords +PasswordAuthentication no + +# Kerberos options +#KerberosAuthentication no +#KerberosGetAFSToken no +#KerberosOrLocalPasswd yes +#KerberosTicketCleanup yes + +# GSSAPI options +#GSSAPIAuthentication no +#GSSAPICleanupCredentials yes + +X11Forwarding yes +X11DisplayOffset 10 +PrintMotd no +PrintLastLog yes +TCPKeepAlive yes +#UseLogin no + +#MaxStartups 10:30:60 +#Banner /etc/issue.net + +# Allow client to pass locale environment variables +AcceptEnv LANG LC_* + +Subsystem sftp /usr/lib/openssh/sftp-server + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. 
If this is enabled, PAM authentication will +# be allowed through the ChallengeResponseAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via ChallengeResponseAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and ChallengeResponseAuthentication to 'no'. +UsePAM yes diff --git a/ansible/roles/build_essential/tasks/main.yml b/ansible/roles/build_essential/tasks/main.yml new file mode 100644 index 00000000..852c4429 --- /dev/null +++ b/ansible/roles/build_essential/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: install build essentials + become: yes + action: apt + pkg=build-essential + state=latest + update_cache=yes + cache_valid_time=604800 diff --git a/ansible/roles/build_with_dockerfile/defaults/main.yml b/ansible/roles/build_with_dockerfile/defaults/main.yml new file mode 100644 index 00000000..90b7599d --- /dev/null +++ b/ansible/roles/build_with_dockerfile/defaults/main.yml @@ -0,0 +1,2 @@ +--- +build_dir: /opts/builds/docker_build diff --git a/ansible/roles/build_with_dockerfile/tasks/main.yml b/ansible/roles/build_with_dockerfile/tasks/main.yml new file mode 100644 index 00000000..f2e76689 --- /dev/null +++ b/ansible/roles/build_with_dockerfile/tasks/main.yml @@ -0,0 +1,49 @@ +--- +- name: Ensure Tag Deploy For Prod + tags: deploy, build + when: node_env=="production-delta" and not git_branch | match("^v([0-9]+)\.([0-9]+)\.([0-9]+)$") + fail: msg="only tag can be deployed on prod not {{ git_branch }}" + +- name: create build folder + delegate_to: "{{ builder }}" + tags: deploy, build + become: true + file: + path: "{{ build_dir }}/{{ name }}" + state: directory + +- name: pull the git repository + delegate_to: "{{ builder }}" + tags: deploy, build + become: true + git: + repo: "{{ repo }}" + dest: "{{ build_dir }}/{{ name }}/repo" + 
version: "{{ git_branch }}" + update: yes + accept_hostkey: yes + force: yes + +- name: build docker image and tag + delegate_to: "{{ builder }}" + tags: deploy, build + become: yes + command: docker build {{ build_args | default("") }} --tag="{{ container_image }}:{{ container_tag }}" "{{ build_dir }}/{{ name }}/repo" + +- name: login to registry + delegate_to: "{{ builder }}" + tags: deploy, build + become: yes + command: docker login -u {{ registry_username }} -p {{ registry_token }} -e "info@runnable.com" {{ registry_host }} + +- name: push docker image + delegate_to: "{{ builder }}" + tags: deploy, build + become: yes + command: docker push {{ container_image }}:{{ container_tag }} + +- name: logout of registry + delegate_to: "{{ builder }}" + tags: deploy, build + become: yes + command: docker logout {{ registry_host }} diff --git a/ansible/roles/builder/defaults/main.yml b/ansible/roles/builder/defaults/main.yml new file mode 100644 index 00000000..ad8e029c --- /dev/null +++ b/ansible/roles/builder/defaults/main.yml @@ -0,0 +1,3 @@ +--- +build_dir: /opts/builds/docker_build +npm_start_command: start diff --git a/ansible/roles/builder/tasks/main.yml b/ansible/roles/builder/tasks/main.yml new file mode 100644 index 00000000..21d242b8 --- /dev/null +++ b/ansible/roles/builder/tasks/main.yml @@ -0,0 +1,196 @@ +--- +# commands to build an image +# +# commands to build an image +- name: check if image is a tag and check environment + tags: deploy, build + set_fact: + is_image_tag: '{{ git_branch | match("^v([0-9]+)\.([0-9]+)\.([0-9]+)$") }}' + is_production_delta: '{{ node_env is defined and node_env=="production-delta" }}' + +- name: Ensure Tag Deploy For Prod + tags: deploy, build + when: (is_production_delta and not is_image_tag) + fail: msg="only tag can be deployed on prod not {{ container_tag }}" + +- name: set if is image tag and production delta + tags: deploy, build + set_fact: + is_prod_and_tag: "{{ is_image_tag and is_production_delta }}" + +- name: 
query registry for tag + tags: deploy, build + uri: + # Overwrite the name of the image repository (`runnable-angular`, big-poppa-http) with `repository_name` + url: https://quay.io/api/v1/repository/runnable/{{ repository_name | default(name) }}/tag/?limit=1&specificTag={{ container_tag }} + method: GET + headers: + Authorization: 'Bearer {{ quay_api_token }}' + register: currently_built_tags + +- name: set number of images built + tags: deploy, build + set_fact: + # Quay IO includes deleted images in this query through the `end_ts` property + no_images_found: "{{ currently_built_tags|json_query('json.tags')|length == 0 }}" + +- name: set number of images built + tags: deploy, build + set_fact: + # Only build the image if no images are found or image is not a tag + # Use built tag images in staging/gamma unless forced + build_image: '{{ no_images_found or not is_image_tag or (force_image_push is defined and force_image_push) }}' + +- name: create build folder + delegate_to: "{{ builder }}" + tags: deploy, build + become: true + when: build_image + file: + path: "{{ build_dir }}/{{ name }}" + state: directory + +- name: pull the git repository + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image + become: true + git: + repo: "{{ repo }}" + dest: "{{ build_dir }}/{{ name }}/repo" + version: "{{ container_tag }}" + update: yes + accept_hostkey: yes + force: yes + +- name: get new tags from remote + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image + become: true + shell: "git fetch --tags" + args: + chdir: "{{ build_dir }}/{{ name }}/repo" + +- name: get latest tag name + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image + become: true + shell: "git describe --tags `git rev-list --tags --max-count=1`" + args: + chdir: "{{ build_dir }}/{{ name }}/repo" + register: latest_tag + +- name: ensure latest tag is deployed + delegate_to: "{{ builder }}" + tags: deploy, build + fail: msg="Cannot deploy {{ 
container_tag}} because latest is {{latest_tag.stdout}}. Bypass with `-t i_am_deploying_an_old_tag`" + when: build_image and is_production_delta and (latest_tag.stdout != container_tag and i_am_deploying_an_old_tag is not defined) + +- name: copy dockerfile to build folder + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image + become: true + template: + src: "{{ dockerfile }}" + dest: "{{ build_dir }}/{{ name }}" + +- name: copy .dockerignore file into build folder + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image + become: true + template: + src: ".dockerignore" + dest: "{{ build_dir }}/{{ name }}" + +- name: copy secrets into build dir + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image and inject_ca + become: true + copy: + src=./{{ docker_client_root }}/{{ file_name_item }} + dest={{ build_dir }}/{{ name }} + owner=ubuntu + group=ubuntu + mode=0700 + with_items: + - id_rsa + - known_hosts + loop_control: + loop_var: file_name_item + +- name: build docker image and tag + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image + become: yes + command: docker build {{ build_args | default("") }} --tag="{{ container_image }}:{{ container_tag }}" "{{ build_dir }}/{{ name }}" + +- name: remove secrets from build dir + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image and inject_ca + become: true + file: + path: "{{ build_dir }}/{{ name }}/{{ file_name_item }}" + state: absent + with_items: + - id_rsa + - known_hosts + loop_control: + loop_var: file_name_item + +- name: get image id for newly created image + delegate_to: "{{ builder }}" + tags: deploy, build + become: true + when: build_image + shell: docker images -a | grep "{{ container_image }}" | grep "{{ container_tag }}" | awk '{print $3}' + register: unsquahed_image_id + +- name: squash newly created image (Expect this to take some time [5 min. 
approx]) + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image and inject_ca + become: true + shell: docker-squash "{{ unsquahed_image_id.stdout_lines[0] }}" -t "{{ container_image }}:{{ container_tag }}" + +- name: get number of layers + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image and inject_ca + become: true + shell: docker history "{{ container_image }}:{{ container_tag }}" | wc -l + register: number_of_layers + +- name: Ensure only squashed images are pushed + delegate_to: "{{ builder }}" + tags: deploy, build + # One line for layer and one line for headers + when: build_image and inject_ca and (number_of_layers is defined and number_of_layers.stdout != "2") + fail: msg="Only squashed layers can be pushed to registry" + +- name: login to registry + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image + become: yes + command: docker login -u {{ registry_username }} -p {{ registry_token }} -e "info@runnable.com" {{ registry_host }} + +- name: push docker image + delegate_to: "{{ builder }}" + tags: deploy, build + become: yes + when: build_image + command: docker push {{ container_image }}:{{ container_tag }} + +- name: logout of registry + delegate_to: "{{ builder }}" + tags: deploy, build + when: build_image + become: yes + command: docker logout {{ registry_host }} diff --git a/ansible/roles/builder/templates/.dockerignore b/ansible/roles/builder/templates/.dockerignore new file mode 100644 index 00000000..f3b64113 --- /dev/null +++ b/ansible/roles/builder/templates/.dockerignore @@ -0,0 +1 @@ +**/.git diff --git a/ansible/roles/builder/templates/basic_node/Dockerfile b/ansible/roles/builder/templates/basic_node/Dockerfile new file mode 100644 index 00000000..14bc94fa --- /dev/null +++ b/ansible/roles/builder/templates/basic_node/Dockerfile @@ -0,0 +1,61 @@ +FROM node:{{ node_version }} + +{% if npm_version is defined %} +RUN npm install npm@{{ npm_version }} -g +{% endif %} + +{% if hosted_ports 
is defined %} +# Expose port to Host +EXPOSE {% for hosted_port in hosted_ports %}{{ hosted_port }} {% endfor %} +{% endif %} + +{% if dockerfile_enviroment is defined %} +# Envs +{% for env in dockerfile_enviroment %} +ENV {{ env }} +{% endfor %} +{% endif %} + +WORKDIR /{{ name }} +{% if dockerfile_pre_install_commands is defined %} +{% for command in dockerfile_pre_install_commands %} +RUN {{ command }} +{% endfor %} +{% endif %} + +{% if inject_ca %} +RUN mkdir /root/.ssh/ +ADD id_rsa /root/.ssh/id_rsa +ADD known_hosts /root/.ssh/known_hosts +RUN chmod 600 /root/.ssh/id_rsa +{% endif %} + +# Add package.json from the current build context (`.` is the repo) second +ADD ./repo/package.json /{{ name }}/package.json + +# Add shrinkwrap from the current build context (`.` is the repo) first +# If we change a non first-level dependency, this will break cache as expected +{% if has_shrinkwrap is defined and has_shrinkwrap %} +ADD ./repo/npm-shrinkwrap.json /{{ name }}/npm-shrinkwrap.json +{% endif %} + +# install, should will skip if no package.json change +WORKDIR /{{ name }} +RUN npm install --production + +# move the current build context (`.` is the repo) to /{{ name }} +ADD ./repo /{{ name }} + +{% if dockerfile_post_install_commands is defined %} +{% for command in dockerfile_post_install_commands %} +RUN {{ command }} +{% endfor %} +{% endif %} + +{% if inject_ca %} +RUN rm /root/.ssh/id_rsa +RUN rm /root/.ssh/known_hosts +{% endif %} + +# Define default command. +CMD ulimit -c unlimited && /usr/local/bin/npm {{ npm_start_command | default('start') }} diff --git a/ansible/roles/builder/templates/node_base/Dockerfile b/ansible/roles/builder/templates/node_base/Dockerfile new file mode 100644 index 00000000..3dbe7bb4 --- /dev/null +++ b/ansible/roles/builder/templates/node_base/Dockerfile @@ -0,0 +1,28 @@ +# +# node_base Dockerfile +# +# Pull base image. 
+FROM {{ registry_address }}/runnable/base:latest + +# Node.js @ 0.10.28 +# npm @ 2.1.18 +# curl +# lsof +# sass +# ruby + +WORKDIR / + +ENV PATH=./node_modules/.bin:$PATH +# HACK FOR SASS +ENV LC_ALL en_US.UTF-8 + +RUN apt-get update && \ + apt-get install -y curl && \ + curl -sL https://deb.nodesource.com/setup | sudo bash - && \ + apt-get install -y nodejs lsof ruby && \ + locale-gen en_US.UTF-8 && \ + gem install sass && \ + npm install -g n && \ + n 0.10.28 && \ + npm install -g npm@2.1.18 diff --git a/ansible/roles/consul-services/tasks/main.yml b/ansible/roles/consul-services/tasks/main.yml new file mode 100644 index 00000000..9f0f2a32 --- /dev/null +++ b/ansible/roles/consul-services/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: make /etc/consul.d folder + become: true + file: + path=/etc/consul.d + state=directory + +- name: remove all current configs + become: true + shell: rm -f /etc/consul.d/*.json + +- name: put service files in place + become: true + run_once: true + template: + dest=/etc/consul.d/{{ item.name }}.json + src=service.json + with_items: + - name: 'datadog' + host_address: '{{ datadog_host_address }}' + tags: ['master'] + port: '{{ datadog_port }}' + - name: 'rabbitmq' + host_address: '{{ rabbit_host_address }}' + tags: ['master'] + port: '{{ rabbit_port }}' + - name: 'redis' + host_address: '{{ redis_host_address }}' + tags: ['master'] + port: '{{ redis_port }}' + +- name: send consul SIGUP to reload services + become: true + shell: pkill --signal SIGHUP consul diff --git a/ansible/roles/consul-services/templates/service.json b/ansible/roles/consul-services/templates/service.json new file mode 100644 index 00000000..c875e3c5 --- /dev/null +++ b/ansible/roles/consul-services/templates/service.json @@ -0,0 +1,8 @@ +{ + "service": { + "name": "{{ item.name }}", + "tags": [ "{{ item.tags | join('","') }}" ], + "address": "{{ item.host_address }}", + "port": {{ item.port }} + } +} diff --git a/ansible/roles/consul/meta/main.yml 
b/ansible/roles/consul/meta/main.yml new file mode 100644 index 00000000..bb817eb0 --- /dev/null +++ b/ansible/roles/consul/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: tls-server-cert } diff --git a/ansible/roles/consul/tasks/main.yml b/ansible/roles/consul/tasks/main.yml new file mode 100644 index 00000000..d759b59c --- /dev/null +++ b/ansible/roles/consul/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: create configuration directory + become: yes + file: + path: /opt/runnable/consul + state: directory + recurse: yes + +- name: create server tls certificate directory + become: yes + file: + path: /opt/consul/server + state: directory + recurse: yes + +- name: install server certificates (3) + become: yes + copy: + content: "{{ item.content }}" + dest: /opt/consul/server/{{ item.file }} + mode: 0400 + owner: root + group: root + with_items: + - file: ca.pem + content: "{{ new_certs.data.issuing_ca }}" + - file: cert.pem + content: "{{ new_certs.data.certificate }}" + - file: key.pem + content: "{{ new_certs.data.private_key }}" + +- name: copy vault config + become: yes + template: + src: consul.json.j2 + dest: /opt/runnable/consul/consul.json + +- name: add datadog monitoring + become: true + tags: datadog + template: + src: datadog-consul.yaml.j2 + dest: /etc/dd-agent/conf.d/consul.yaml + mode: 0444 + owner: root + group: root + +- name: restart datadog agent + become: true + tags: datadog + service: + name: datadog-agent + state: restarted diff --git a/ansible/roles/consul/templates/consul.json.j2 b/ansible/roles/consul/templates/consul.json.j2 new file mode 100644 index 00000000..5516b137 --- /dev/null +++ b/ansible/roles/consul/templates/consul.json.j2 @@ -0,0 +1,32 @@ +{ + "advertise_addr": "{{ ansible_default_ipv4.address }}", + "bind_addr": "0.0.0.0", + {% if consul_host_address == ansible_default_ipv4.address %} + "bootstrap_expect": {{ groups['consul'] | length }}, + {% endif %} + "ca_file": "/opt/consul/server/ca.pem", + "cert_file": 
"/opt/consul/server/cert.pem", + "client_addr": "0.0.0.0", + "data_dir": "/data", + "key_file": "/opt/consul/server/key.pem", + "log_level": "info", + "node_name": "{{ inventory_hostname }}", + "ports": { + "https": {{ consul_https_port }} + }, + "recursors": [ + "8.8.8.8" + ], + "dogstatsd_addr": "{{ ansible_default_ipv4.address }}:{{ datadog_port }}", + {% if consul_host_address != ansible_default_ipv4.address %} + "retry_join": [ + "{{ consul_host_address }}" + ], + {% endif %} + "server": true, + {% if consul_host_address == ansible_default_ipv4.address %} + "ui_dir": "/ui", + {% endif %} + "verify_incoming": true, + "verify_outgoing": true +} diff --git a/ansible/roles/consul/templates/datadog-consul.yaml.j2 b/ansible/roles/consul/templates/datadog-consul.yaml.j2 new file mode 100644 index 00000000..6ac41458 --- /dev/null +++ b/ansible/roles/consul/templates/datadog-consul.yaml.j2 @@ -0,0 +1,22 @@ +init_config: + +instances: + # Where your Consul HTTP Server Lives + - url: http://{{ ansible_default_ipv4.address }}:8500 + + # Whether to perform checks against the Consul service Catalog + # catalog_checks: yes + + # Whether to enable new leader checks from this agent + # Note: if this is set on multiple agents in the same cluster + # you will receive one event per leader change per agent + new_leader_checks: {% if consul_host_address == ansible_default_ipv4.address %}yes{% else %}no{% endif %} + + # Services to restrict catalog querying to + # The default settings query up to 50 services. 
So if you have more than + # this many in your Consul service catalog, you will want to fill in the + # whitelist + # service_whitelist: + # - zookeeper + # - haproxy + # - redis diff --git a/ansible/roles/consul_value/tasks/main.yml b/ansible/roles/consul_value/tasks/main.yml new file mode 100644 index 00000000..e92441f5 --- /dev/null +++ b/ansible/roles/consul_value/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Debug role + when: debug_info is defined + debug: + msg="Consul Server {{ consul_host_address }}:{{ consul_api_port }}" + +- name: make sure httplib2 is installed + run_once: true + become: true + apt: + package=python-httplib2 + state=present + update_cache=yes + cache_valid_time=604800 + +- name: put values into consul + tags: deploy, consul_values + run_once: true + when: consul_host_address is defined and consul_api_port is defined and consul_values is defined and dock is not defined + uri: + method=PUT + url=http://{{ consul_host_address }}:{{ consul_api_port }}/v1/kv/{{ item.key }} + body="{{ item.value }}" + with_items: "{{ consul_values }}" diff --git a/ansible/roles/container_kill_start/README.md b/ansible/roles/container_kill_start/README.md new file mode 100644 index 00000000..85ed4801 --- /dev/null +++ b/ansible/roles/container_kill_start/README.md @@ -0,0 +1,10 @@ +Role Name +======== + +Ansible Role to Install base_centos deps + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/container_kill_start/defaults/main.yml b/ansible/roles/container_kill_start/defaults/main.yml new file mode 100644 index 00000000..9701f0e1 --- /dev/null +++ b/ansible/roles/container_kill_start/defaults/main.yml @@ -0,0 +1,2 @@ +--- +container_tag: latest diff --git a/ansible/roles/container_kill_start/files/findTagRunning.sh b/ansible/roles/container_kill_start/files/findTagRunning.sh new file mode 100755 index 00000000..1cfaee40 --- /dev/null +++ b/ansible/roles/container_kill_start/files/findTagRunning.sh @@ -0,0 +1,23 
@@ +#!/usr/bin/env bash + +IMAGE_NAME="$1" +CONTAINERS=`docker ps -a | grep -v '^CONTAINER' | awk '{print $1}'` + +if [ "" = "${CONTAINERS}" ] ; then + exit 0 +else + for container in ${CONTAINERS} ; do + docker inspect "${container}" 2>/dev/null| grep -q '"Image": "'"${IMAGE_NAME}": > /dev/null 2>&1 + if [ ${?} -eq 0 ] ; then + if [ -z "${RUNNING_CONTAINERS}" ] ; then + RUNNING_CONTAINERS="${container}" + else + RUNNING_CONTAINERS="${RUNNING_CONTAINERS} ${container}" + fi + fi + done +fi + +if [ ! -z "${RUNNING_CONTAINERS}" ] ; then + echo "${RUNNING_CONTAINERS//['\t\r\n']}" +fi diff --git a/ansible/roles/container_kill_start/files/normalize.sh b/ansible/roles/container_kill_start/files/normalize.sh new file mode 100755 index 00000000..d1a68a0f --- /dev/null +++ b/ansible/roles/container_kill_start/files/normalize.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# $1 should be image id to keep $2 = image name +for CONT in `sudo docker ps --no-trunc -q ` +do + IMAGE_NAME=`sudo docker inspect $CONT | grep Image | grep $2` + if [[ $IMAGE_NAME ]]; then + IMAGE_ID=`sudo docker inspect $CONT | grep Image | grep -v $2 | awk -F\" '{print $4}'` + if [[ "$IMAGE_ID" != "$1" ]]; then + echo "kill" + sudo docker kill $CONT + sudo docker rm $CONT + fi + fi +done \ No newline at end of file diff --git a/ansible/roles/container_kill_start/handlers/main.yml b/ansible/roles/container_kill_start/handlers/main.yml new file mode 100644 index 00000000..5572ad47 --- /dev/null +++ b/ansible/roles/container_kill_start/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: get new container ports + tags: deploy + when: hosted_ports is defined + become: yes + shell: docker port {{new_container_id.stdout}} {{ hosted_ports[0] }} | awk --field-separator ':' '{print $2}' + register: container_ports + +# this assumes only one container is running, ever +- name: update redis key + tags: deploy + when: container_ports is defined and is_redis_update_required is defined + become: yes + command: docker run --rm redis 
redis-cli -h {{redis_host_address}} lset {{redis_key}} 1 {{hosted_protocol | default('http') }}://{{ansible_default_ipv4.address}}:{{container_ports.stdout}} diff --git a/ansible/roles/container_kill_start/meta/main.yml b/ansible/roles/container_kill_start/meta/main.yml new file mode 100644 index 00000000..d24b245d --- /dev/null +++ b/ansible/roles/container_kill_start/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { role: docker, tags: docker } + - { role: loggly } diff --git a/ansible/roles/container_kill_start/tasks/main.yml b/ansible/roles/container_kill_start/tasks/main.yml new file mode 100644 index 00000000..aba821e9 --- /dev/null +++ b/ansible/roles/container_kill_start/tasks/main.yml @@ -0,0 +1,85 @@ +- name: look for stopped {{ container_image }} containers + tags: deploy + become: yes + shell: > + docker ps -a | + grep Exited | + grep {{ container_image }} | + awk '{print $1}' + register: stopped_containers + changed_when: stopped_containers.stdout != '' + +- name: removed stopped {{ container_image }} containers + tags: deploy + become: yes + when: stopped_containers.changed + shell: docker rm {{ item }} + with_items: "{{ stopped_containers.stdout_lines }}" + +# get current running container with this image +- name: look for running containers running {{ container_image }} + tags: deploy + become: true + script: findTagRunning.sh {{ container_image }} + register: old_containers_id + changed_when: old_containers_id.stdout | length > 4 + +# get latest image +# tag will default to latest +- name: pull down docker image {{ container_image }}:{{ container_tag }} + tags: deploy + become: yes + command: docker pull {{ container_image }}:{{ container_tag }} + when: not do_not_push + +# get id of new image +- name: get id of the docker image + tags: deploy + become: yes + shell: > + docker images + --no-trunc | + grep {{ container_image }}.*{{ container_tag }} | + awk '{print $3}' + register: new_image_id + +# stop the old containers that were running 
this previously +- name: stop old containers + tags: deploy + when: old_containers_id.changed + become: yes + command: docker stop -t {{ stop_time }} {{ old_containers_id.stdout }} + +# remove the old containers that were running previously +- name: remove old containers + tags: deploy + when: old_containers_id.changed + become: yes + command: docker rm {{ old_containers_id.stdout }} + +- name: pause to allow for any odd conditions in system caching tables + tags: deploy + when: pause_length_minutes is defined and old_containers_id.changed + pause: minutes={{ pause_length_minutes }} + +# start our new container with options and args +- name: start container + tags: deploy + become: yes + command: | + docker run \ + --memory {{ memory_hard_limit | default ( "1g" ) }} \ + --log-driver={{ log_driver }} \ + {% if log_driver == "syslog" %} + --log-opt syslog-facility={{ log_facility }} \ + --log-opt tag="{{ log_tag | default ( name ) }}" \ + {% endif %} + -v {{ app_log_dir }}:{{ app_log_dir }}:rw \ + --restart={{ restart_policy | default('always') }} \ + {{ container_run_opts | trim }} \ + {{ container_image }}:{{ container_tag }} \ + {{ container_run_args | trim }} + register: new_container_id + notify: + - get new container ports + - update redis key diff --git a/ansible/roles/container_restart/handlers/main.yml b/ansible/roles/container_restart/handlers/main.yml new file mode 100644 index 00000000..c576eed5 --- /dev/null +++ b/ansible/roles/container_restart/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: clean up stopped containers + tags: [ deploy ] + become: true + shell: docker ps --filter status=exited -q | xargs docker rm diff --git a/ansible/roles/container_restart/meta/main.yml b/ansible/roles/container_restart/meta/main.yml new file mode 100644 index 00000000..cb7d8e04 --- /dev/null +++ b/ansible/roles/container_restart/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: docker diff --git a/ansible/roles/container_restart/tasks/main.yml 
b/ansible/roles/container_restart/tasks/main.yml new file mode 100644 index 00000000..200e57fe --- /dev/null +++ b/ansible/roles/container_restart/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: look for current container + become: yes + shell: docker ps | grep {{ docker_image }} || true + register: running_containers + changed_when: "running_containers.stdout_lines | length > 0" + tags: [ deploy ] + +- name: get version of running container + when: running_containers.changed + set_fact: + running_container_version: "{{ running_containers.stdout_lines[0] | split_regex('\\s+') | split_get_index(1) | split_regex(':') | split_get_index(1) }}" + running_container_id: "{{ running_containers.stdout_lines[0] | split_regex('\\s+') | split_get_index(0) }}" + tags: [ deploy ] + +- name: restart container if it is the same version + when: running_containers.changed and running_container_version == docker_image_version + become: yes + shell: docker {{ docker_restart_command | default('restart') }} {{ running_container_id }} + register: restarted_container + changed_when: "restarted_container.rc == 0" + tags: [ deploy ] + +- name: stop old container + when: force_stop is defined and running_container_id != '' or (running_containers.changed and running_container_version != docker_image_version) + become: yes + shell: docker stop {{ running_container_id }} + register: stopped_container + tags: [ deploy ] + notify: + - clean up stopped containers + +- name: start new container + when: force_stop is defined or (stopped_container.skipped and not running_containers.changed) + tags: [ deploy ] + become: yes + shell: docker run {{ docker_container_run_opts | trim }} {{ docker_image }}:{{ docker_image_version }} diff --git a/ansible/roles/container_start/README.md b/ansible/roles/container_start/README.md new file mode 100644 index 00000000..85ed4801 --- /dev/null +++ b/ansible/roles/container_start/README.md @@ -0,0 +1,10 @@ +Role Name +======== + +Ansible Role to Install base_centos 
deps + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/container_start/files/findTagRunning.sh b/ansible/roles/container_start/files/findTagRunning.sh new file mode 100755 index 00000000..5f7f98a0 --- /dev/null +++ b/ansible/roles/container_start/files/findTagRunning.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +IMAGE_NAME="$1" +CONTAINERS=`docker ps | grep -v '^CONTAINER' | awk '{print $1}'` + +if [ "" = "${CONTAINERS}" ] ; then + exit 0 +else + for container in ${CONTAINERS} ; do + docker inspect "${container}" | grep -q '"Image": "'"${IMAGE_NAME}": + if [ ${?} -eq 0 ] ; then + if [ -z "${RUNNING_CONTAINERS}" ] ; then + RUNNING_CONTAINERS="${container}" + else + RUNNING_CONTAINERS="${RUNNING_CONTAINERS} ${container}" + fi + fi + done +fi + +if [ ! -z "${RUNNING_CONTAINERS}" ] ; then + echo "${RUNNING_CONTAINERS}" +fi diff --git a/ansible/roles/container_start/files/normalize.sh b/ansible/roles/container_start/files/normalize.sh new file mode 100755 index 00000000..ac5521c4 --- /dev/null +++ b/ansible/roles/container_start/files/normalize.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# $1 should be image id to keep $2 = image name +echo "ARGS" $1 $2 +for CONT in `sudo docker ps --no-trunc -q ` +do + IMAGE_NAME=`sudo docker inspect $CONT | grep Image | grep $2` + if [[ $IMAGE_NAME ]]; then + echo "image found $IMAGE_NAME" + IMAGE_ID=`sudo docker inspect $CONT | grep Image | grep -v $2 | awk -F\" '{print $4}'` + if [[ "$IMAGE_ID" != "$1" ]]; then + echo "stoping $IMAGE_ID does not match $1" + sudo docker kill $CONT + fi + fi +done \ No newline at end of file diff --git a/ansible/roles/container_start/handlers/main.yml b/ansible/roles/container_start/handlers/main.yml new file mode 100644 index 00000000..ceb3ba0c --- /dev/null +++ b/ansible/roles/container_start/handlers/main.yml @@ -0,0 +1,31 @@ +--- +- name: get new container ports + delegate_to: "{{ builder }}" + tags: deploy + when: hosted_ports is defined + become: yes + shell: 
docker port {{ item }} {{ hosted_ports[0] }} | awk --field-separator ':' '{ print $2 }' + register: container_ports + with_items: "{{ new_container_ids.stdout_lines }}" + +- name: remove all hosts from redis key + delegate_to: "{{ builder }}" + tags: deploy + become: yes + command: docker run --rm redis redis-cli -h {{ redis_host_address }} ltrim {{ redis_key }} 0 0 + when: is_redis_update_required is defined and container_ports is defined + +- name: put new hosts on redis keys + delegate_to: "{{ builder }}" + tags: deploy + become: yes + command: docker run --rm redis redis-cli -h {{ redis_host_address }} rpush {{ redis_key }} {{ hosted_protocol | default('http') }}://{{ ansible_default_ipv4.address }}:{{ item.stdout }} + when: is_redis_update_required is defined and container_ports is defined + with_items: "{{ container_ports.results }}" + +- name: stop old containers + delegate_to: "{{ builder }}" + tags: deploy + become: yes + command: docker stop --time={{ stop_time }} {{ item }} + with_items: "{{ old_containers_ids.stdout_lines }}" diff --git a/ansible/roles/container_start/tasks/main.yml b/ansible/roles/container_start/tasks/main.yml new file mode 100644 index 00000000..27628bc6 --- /dev/null +++ b/ansible/roles/container_start/tasks/main.yml @@ -0,0 +1,82 @@ +--- +# get current running container with this image +- name: "look for running containers running {{ container_image }}" + delegate_to: "{{ builder }}" + tags: deploy + become: true + script: findTagRunning.sh {{ container_image }} + register: old_containers_ids + changed_when: old_containers_ids.stdout != '' + +- name: get latest images + delegate_to: "{{ builder }}" + tags: deploy + when: not do_not_push + become: yes + command: docker pull {{ container_image }}:{{ container_tag }} + +- name: get id of latest image + delegate_to: "{{ builder }}" + tags: deploy + become: yes + shell: > + docker images --no-trunc | + grep {{ container_image }}.*{{ container_tag }} | + awk '{print $3}' + register: 
new_image_id + +- name: default number of containers + delegate_to: "{{ builder }}" + tags: deploy + when: number_of_containers is undefined + set_fact: + number_of_containers: 1 + +- name: print number of contaienrs + delegate_to: "{{ builder }}" + tags: deploy + debug: + msg: starting this many containers -- {{ number_of_containers }} + +- name: login to registry + delegate_to: "{{ builder }}" + tags: deploy + become: yes + command: docker login -u {{ registry_username }} -p {{ registry_token }} -e "info@runnable.com" {{ registry_host }} + +- name: start new container + delegate_to: "{{ builder }}" + tags: deploy + become: yes + shell: | + for i in {{ '{' }}1..{{ number_of_containers }}{{ '}' }}; do + docker run \ + {% if memory_hard_limit is defined %} + --memory {{ memory_hard_limit }} \ + {% endif %} + --log-driver={{ log_driver }} \ + {% if log_driver == "syslog" %} + --log-opt syslog-facility={{ log_facility }} \ + --log-opt tag="{{ log_tag | default ( name ) }}" \ + {% endif %} + -v {{ app_log_dir }}:{{ app_log_dir }}:rw \ + -v /dev/log:/dev/log:rw \ + --restart={{ restart_policy | default('no') }} \ + {{ container_run_opts | trim }} \ + {{ container_image }}:{{ container_tag }} \ + {{ container_run_args | trim }} + done + args: + executable: /bin/bash + register: new_container_ids + notify: + - get new container ports + - remove all hosts from redis key + - put new hosts on redis keys + - stop old containers + +- name: logout of registry + delegate_to: "{{ builder }}" + tags: deploy + become: yes + command: docker logout {{ registry_host }} diff --git a/ansible/roles/content-domain-proxy/tasks/main.yml b/ansible/roles/content-domain-proxy/tasks/main.yml new file mode 100644 index 00000000..ec1b0ca4 --- /dev/null +++ b/ansible/roles/content-domain-proxy/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: create chained cert + tags: [ certs ] + shell: | + cat {{ domains_root }}/{{ user_content_domain }}/cert.pem > {{ domains_root }}/{{ user_content_domain 
}}/chained.pem + echo "" >> {{ domains_root }}/{{ user_content_domain }}/chained.pem # Add newline + cat {{ domains_root }}/{{ user_content_domain }}/ca.pem >> {{ domains_root }}/{{ user_content_domain }}/chained.pem + +- name: create dhparam.pem + tags: [ certs ] + command: openssl dhparam -out {{ domains_root }}/{{ user_content_domain }}/dhparam.pem 2048 + +- set_fact: + ca_data: "{{ lookup('file', '{{ domains_root }}/{{ user_content_domain }}/ca.pem') }}" +- set_fact: + cert_data: "{{ lookup('file', '{{ domains_root }}/{{ user_content_domain }}/cert.pem') }}" +- set_fact: + key_data: "{{ lookup('file', '{{ domains_root }}/{{ user_content_domain }}/key.pem') }}" +- set_fact: + chained_data: "{{ lookup('file', '{{ domains_root }}/{{ user_content_domain }}/chained.pem') }}" +- set_fact: + dhparam_data: "{{ lookup('file', '{{ domains_root }}/{{ user_content_domain }}/dhparam.pem') }}" + +- name: create configMap folder + file: + state: directory + path: "{{ config_maps_path }}" + +- name: create cert config map + tags: [ configure_proxy, configure_files ] + template: + src: certs.yml + dest: "{{ config_maps_path }}/{{ name }}-certs.yml" + +- name: create proxy template + tags: [ configure_proxy, configure_files ] + template: + src: proxy-nginx.conf + dest: "{{ config_maps_path }}/{{ name }}-base-config.yml" + +- name: create navi template + tags: [ configure_proxy, configure_files ] + template: + src: navi.tmpl + dest: "{{ config_maps_path }}/{{ name }}-navi-config.yml" diff --git a/ansible/roles/content-domain-proxy/templates/certs.yml b/ansible/roles/content-domain-proxy/templates/certs.yml new file mode 100644 index 00000000..7baa880f --- /dev/null +++ b/ansible/roles/content-domain-proxy/templates/certs.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-{{ user_content_domain |replace('.', '-') }}-certs +data: + ca.pem: | + {{ ca_data | indent(4) }} + cert.pem: | + {{ cert_data | indent(4) }} + chained.pem: | + {{ chained_data | 
indent(4) }} + dhparam.pem: | + {{ dhparam_data | indent(4) }} + key.pem: | + {{ key_data | indent(4) }} diff --git a/ansible/roles/content-domain-proxy/templates/navi.tmpl b/ansible/roles/content-domain-proxy/templates/navi.tmpl new file mode 100644 index 00000000..f2f7725d --- /dev/null +++ b/ansible/roles/content-domain-proxy/templates/navi.tmpl @@ -0,0 +1,97 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-navi-conf +data: + status.conf: | + server { + listen 80; + server_name status.{{ user_content_domain }}; + + location /nginx_status { + access_log off; + stub_status on; + } + } + + + navi.conf: | + map $http_upgrade $connection_upgrade { + default upgrade; + '' close; + } + + upstream navi { + server navi:{{ navi_http_port }} max_fails=0 fail_timeout=1s; + } + + server { + listen 80; +{% for port in navi_extra_ports %} + listen {{ port }}; +{% endfor %} + client_max_body_size 200m; + server_name {{ user_content_domain }}; + access_log /var/log/nginx/navi.access.log; + + location / { + proxy_pass http://navi; + proxy_http_version 1.1; + proxy_set_header upgrade $http_upgrade; + proxy_set_header connection $connection_upgrade; + + proxy_set_header Host $http_host; + proxy_set_header x-forwarded-host $http_host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol $scheme; + proxy_set_header x-forwarded-proto $scheme; + + proxy_buffer_size 128k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + } + } + + server { + listen 443 ssl; + client_max_body_size 200m; + server_name {{ user_content_domain }}; + access_log /var/log/nginx/navi.ssl.access.log; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ user_content_domain }}/chained.pem; + ssl_certificate_key /etc/ssl/certs/{{ user_content_domain }}/key.pem; + ssl_trusted_certificate /etc/ssl/certs/{{ user_content_domain }}/ca.pem; + ssl_dhparam /etc/ssl/certs/{{ user_content_domain }}/dhparam.pem; + + 
ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + + ssl_stapling on; + ssl_stapling_verify on; + resolver 8.8.8.8 8.8.4.4 valid=300s; + resolver_timeout 5s; + + location / { + proxy_pass http://navi; + proxy_http_version 1.1; + proxy_set_header upgrade $http_upgrade; + proxy_set_header connection $connection_upgrade; + + proxy_set_header Host $http_host; + proxy_set_header x-forwarded-host $http_host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol $scheme; + proxy_set_header x-forwarded-proto $scheme; + + proxy_buffer_size 128k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + } + } diff --git a/ansible/roles/content-domain-proxy/templates/proxy-nginx.conf b/ansible/roles/content-domain-proxy/templates/proxy-nginx.conf new file mode 100644 index 00000000..24cf96cf --- /dev/null +++ b/ansible/roles/content-domain-proxy/templates/proxy-nginx.conf @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-base-conf +data: + nginx.conf: | + user www-data; + worker_processes 4; + pid /run/nginx.pid; + + events { + worker_connections 5000; + } + + http { + ## + # Basic Settings + ## + tcp_nodelay on; + keepalive_timeout 65; + server_tokens off; + + ## + # Logging Settings + ## + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + ## + # Virtual Host Configs + ## + + include /etc/nginx/sites-enabled/*; + } diff --git a/ansible/roles/copy_secret_file/tasks/main.yaml b/ansible/roles/copy_secret_file/tasks/main.yaml new file mode 100644 index 00000000..58b264fc --- /dev/null +++ b/ansible/roles/copy_secret_file/tasks/main.yaml @@ -0,0 +1,18 @@ +--- +- name: create secret folder + tags: [ secrets ] + become: true + file: + path: "{{ secret_root }}" + state: directory + +- 
name: copy {{ file_names }} into secrets + tags: [ secrets ] + become: true + copy: + src=./secrets/{{ item }} + dest={{ secret_root }} + owner=ubuntu + group=ubuntu + mode=0700 + with_items: "{{ file_names }}" diff --git a/ansible/roles/database/tasks/main.yml b/ansible/roles/database/tasks/main.yml new file mode 100644 index 00000000..e4d4d156 --- /dev/null +++ b/ansible/roles/database/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: create volume folder + file: + state: directory + path: "{{ volumes_path }}" + +- name: ensure volume id exist + fail: msg="`volume_id` is note defined. Must have created a volume of size {{ storage_size_gb }} aws ec2 create-volume --region us-west-2 --availability-zone us-west-2a --size {{ storage_size_gb }} --volume-type gp2" + when: volume_id is not defined + +- name: create volume yaml + template: + dest: "{{ volumes_path }}/{{ name }}.yml" + src: volume.yml + +- name: create volume claim yaml + template: + dest: "{{ volumes_path }}/{{ name }}-claim.yml" + src: volume-claim.yml diff --git a/ansible/roles/database/templates/volume-claim.yml b/ansible/roles/database/templates/volume-claim.yml new file mode 100644 index 00000000..c4f8065a --- /dev/null +++ b/ansible/roles/database/templates/volume-claim.yml @@ -0,0 +1,12 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ name }}-db-claim + labels: + type: amazonEBS +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: {{ storage_size_gb }}Gi diff --git a/ansible/roles/database/templates/volume.yml b/ansible/roles/database/templates/volume.yml new file mode 100644 index 00000000..fda21184 --- /dev/null +++ b/ansible/roles/database/templates/volume.yml @@ -0,0 +1,14 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: {{ name }}-db + labels: + type: amazonEBS +spec: + capacity: + storage: {{ storage_size_gb }}Gi + accessModes: + - ReadWriteOnce + awsElasticBlockStore: + volumeID: {{ volume_id }} + fsType: ext4 diff --git 
a/ansible/roles/datadog-daemon/tasks/main.yml b/ansible/roles/datadog-daemon/tasks/main.yml new file mode 100644 index 00000000..a857388d --- /dev/null +++ b/ansible/roles/datadog-daemon/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: create daemon set folder + file: + state: directory + path: "{{ daemon_sets_path }}" + +- name: create datadog daemonset + tags: [ deploy ] + template: + dest: "{{ daemon_sets_path }}/{{ name }}" + src: datadog-deamon.yml diff --git a/ansible/roles/datadog-daemon/templates/datadog-deamon.yml b/ansible/roles/datadog-daemon/templates/datadog-deamon.yml new file mode 100644 index 00000000..5c288a21 --- /dev/null +++ b/ansible/roles/datadog-daemon/templates/datadog-deamon.yml @@ -0,0 +1,48 @@ +apiVersion: extensions/v1beta1 +kind: DaemonSet +metadata: + name: dd-agent +spec: + template: + metadata: + labels: + app: dd-agent + name: dd-agent + spec: + containers: + - image: datadog/docker-dd-agent:latest + imagePullPolicy: Always + name: dd-agent + ports: + - containerPort: 8125 + hostPort: 8125 + name: dogstatsdport + protocol: UDP + env: + - name: API_KEY + value: {{ datadog_api_key }} + - name: KUBERNETES + value: "yes" + - name: SD_BACKEND + value: docker + - name: TAGS + value: "env:{{ env }}" + volumeMounts: + - name: dockersocket + mountPath: /var/run/docker.sock + - name: procdir + mountPath: /host/proc + readOnly: true + - name: cgroups + mountPath: /host/sys/fs/cgroup + readOnly: true + volumes: + - hostPath: + path: /var/run/docker.sock + name: dockersocket + - hostPath: + path: /proc + name: procdir + - hostPath: + path: /sys/fs/cgroup + name: cgroups diff --git a/ansible/roles/datadog/handlers/main.yml b/ansible/roles/datadog/handlers/main.yml new file mode 100644 index 00000000..4c23028e --- /dev/null +++ b/ansible/roles/datadog/handlers/main.yml @@ -0,0 +1,5 @@ +- name: restart datadog-agent + become: true + service: + name=datadog-agent + state=restarted diff --git a/ansible/roles/datadog/tasks/main.yml 
b/ansible/roles/datadog/tasks/main.yml new file mode 100644 index 00000000..9d85651a --- /dev/null +++ b/ansible/roles/datadog/tasks/main.yml @@ -0,0 +1,77 @@ +--- +- name: add https transport + become: true + apt: + name=apt-transport-https + state=latest + +- name: add datadog apt key + become: true + apt_key: + id=C7A7DA52 + keyserver=keyserver.ubuntu.com + state=present + +- name: add datadog repository + become: true + apt_repository: + repo='deb http://apt.datadoghq.com/ stable main' + state=present + update_cache=yes + +- name: install dd agent + become: true + apt: + name=datadog-agent + state=latest + +- name: add docker to dd-agent group + become: true + when: no_datadog_docker_monitoring is not defined + user: + name=dd-agent + groups=docker + +- name: create dd configdir + become: true + file: + path=/etc/dd-agent/conf.d + owner=root + mode=0555 + state=directory + +- name: Create main Datadog agent configuration file + become: true + template: + src=datadog.conf.j2 + dest=/etc/dd-agent/datadog.conf + notify: restart datadog-agent + +- name: install network checks for dock services + become: true + template: + src=tcp_check.yaml.j2 + dest=/etc/dd-agent/conf.d/tcp_check.yaml + notify: restart datadog-agent + +- name: install docker integration + become: true + when: no_datadog_docker_monitoring is not defined + template: + src=docker_daemon.yaml.j2 + dest=/etc/dd-agent/conf.d/docker_daemon.yaml + notify: restart datadog-agent + +# assumes a config for {{ name }} exists +- name: install datadog integrations + become: true + when: has_dd_integration is defined + template: + src="{{ name }}.yaml.j2" + dest="/etc/dd-agent/conf.d/{{ name }}.yaml" + notify: restart datadog-agent + +- name: force restart + command: echo restart datadog + when: force_restart is defined + notify: restart datadog-agent diff --git a/ansible/roles/datadog/templates/datadog.conf.j2 b/ansible/roles/datadog/templates/datadog.conf.j2 new file mode 100644 index 00000000..a3d0b063 --- 
/dev/null +++ b/ansible/roles/datadog/templates/datadog.conf.j2 @@ -0,0 +1,10 @@ +# Managed by Ansible +[Main] + +dd_url: https://app.datadoghq.com + +api_key: {{ datadog_api_key }} + +use_mount: no + +bind_host: 0.0.0.0 diff --git a/ansible/roles/datadog/templates/docker_daemon.yaml.j2 b/ansible/roles/datadog/templates/docker_daemon.yaml.j2 new file mode 100644 index 00000000..bdf73b33 --- /dev/null +++ b/ansible/roles/datadog/templates/docker_daemon.yaml.j2 @@ -0,0 +1,12 @@ +init_config: + +instances: + - url: "unix://var/run/docker.sock" + + collect_labels_as_tags: ["instanceName"] + + collect_container_size: false + + performance_tags: ["container_id", "image_name", "image_tag"] + + container_tags: ["image_name", "image_tag"] diff --git a/ansible/roles/datadog/templates/mongo.yaml.j2 b/ansible/roles/datadog/templates/mongo.yaml.j2 new file mode 100644 index 00000000..03a1b1b2 --- /dev/null +++ b/ansible/roles/datadog/templates/mongo.yaml.j2 @@ -0,0 +1,4 @@ +init_config: + +instances: + - server: mongodb://{{ datadog_mongodb_user }}:{{ datadog_mongodb_pwd }}@localhost:27017 diff --git a/ansible/roles/datadog/templates/nginx.yaml.j2 b/ansible/roles/datadog/templates/nginx.yaml.j2 new file mode 100644 index 00000000..2ef098e0 --- /dev/null +++ b/ansible/roles/datadog/templates/nginx.yaml.j2 @@ -0,0 +1,9 @@ +init_config: + +instances: + # For every instance, you have an `nginx_status_url` and (optionally) + # a list of tags. 
+ - nginx_status_url: http://localhost/nginx_status + tags: + - service:ingress + - env:delta diff --git a/ansible/roles/datadog/templates/rabbitmq.yaml.j2 b/ansible/roles/datadog/templates/rabbitmq.yaml.j2 new file mode 100644 index 00000000..db5221e0 --- /dev/null +++ b/ansible/roles/datadog/templates/rabbitmq.yaml.j2 @@ -0,0 +1,6 @@ +init_config: + +instances: + - {{ name }}_api_url: http://localhost:54320/api/ + {{ name }}_user: {{ rabbit_username }} + {{ name }}_pass: {{ rabbit_password }} diff --git a/ansible/roles/datadog/templates/tcp_check.yaml.j2 b/ansible/roles/datadog/templates/tcp_check.yaml.j2 new file mode 100644 index 00000000..924c0406 --- /dev/null +++ b/ansible/roles/datadog/templates/tcp_check.yaml.j2 @@ -0,0 +1,14 @@ +init_config: + +instances: + - name: docker-listener + host: 127.0.0.1 + port: 3000 + collect_response_time: true + skip_event: true + + - name: krain + host: 127.0.0.1 + port: 3100 + collect_response_time: true + skip_event: true diff --git a/ansible/roles/dock-images/tasks/main.yml b/ansible/roles/dock-images/tasks/main.yml new file mode 100644 index 00000000..774a8f2e --- /dev/null +++ b/ansible/roles/dock-images/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- name: starting docker + become: true + service: + name=docker + state=started + +- name: pulling docker images + become: true + command: docker pull {{ item }} + with_items: + - "swarm:1.2.5" + - "registry:2.3.1" + - "google/cadvisor:v0.24.1" + - "prom/node-exporter:v0.13.0" + - "weaveworks/weaveexec:1.5.0" + - "weaveworks/weavedb:latest" + - "weaveworks/weave:1.5.0" + - "node:argon" + - "ruby:2.3" + - "python:2.7" + - "php:7.0-apache" + - "runnable/node-starter" + - "runnable/rails-starter" + - "runnable/django-starter" + - "runnable/laravel-starter" + - "runnable/mysql:5.6" + - "runnable/postgres:9.4" + - "runnable/mongo:3.2" + - "runnable/redis:3.2" + +- name: stopping docker + become: true + service: + name=docker + state=stopped + +- name: removing docker key file + become: 
true + file: + path: /etc/docker/key.json + state: absent + +- name: removing docker pid file + become: true + file: + path: /var/run/docker.pid + state: absent diff --git a/ansible/roles/dock-init/tasks/main.yml b/ansible/roles/dock-init/tasks/main.yml new file mode 100644 index 00000000..eb1ef0f9 --- /dev/null +++ b/ansible/roles/dock-init/tasks/main.yml @@ -0,0 +1,65 @@ +--- +- fail: msg="value tokens need to be defined for this role" + tags: vault_files + when: vault_auth_token is not defined or vault_token_01 is not defined or vault_token_02 is not defined or vault_token_03 is not defined + +- name: create vault auth directory + tags: vault_files + become: true + file: + dest="/opt/runnable/dock-init/consul-resources/vault/{{ node_env }}" + state=directory + owner=ubuntu + group=ubuntu + mode="0711" + +- name: copy vault auth files + tags: vault_files + become: true + copy: + dest="/opt/runnable/dock-init/consul-resources/vault/{{ node_env }}/{{ item.file_name }}" + content="{{ item.value }}" + mode="0400" + with_items: + - { file_name: 'auth-token', value: "{{ vault_auth_token }}" } + - { file_name: 'user-vault-auth-token', value: "{{ dock_vault_user_creation_access_token }}" } + - { file_name: 'token-01', value: "{{ vault_token_01 }}" } + - { file_name: 'token-02', value: "{{ vault_token_02 }}" } + - { file_name: 'token-03', value: "{{ vault_token_03 }}" } + +- name: copy rollbar token + tags: rollbar + become: true + when: docks_rollbar_key is defined + lineinfile: + dest="/opt/runnable/dock-init/key/rollbar.token" + line="{{ docks_rollbar_key }}" + create=yes + +- name: docker upstart override + become: true + lineinfile: + dest="/etc/init/docker.override" + line="manual" + create=yes + +- name: create ssh config for root + become: true + lineinfile: + dest="/root/.ssh/config" + line="StrictHostKeyChecking no" + create=yes + +- name: create ssh config for ubuntu + lineinfile: + dest="/home/ubuntu/.ssh/config" + line="StrictHostKeyChecking no" + create=yes 
+ +- name: enforce sane permissions for dock-init RSA keys + become: true + file: + owner="root" + group="root" + path="/opt/runnable/dock-init/key/id_rsa_runnabledock" + mode="0400" diff --git a/ansible/roles/dock_launch_config/tasks/main.yml b/ansible/roles/dock_launch_config/tasks/main.yml new file mode 100644 index 00000000..92393ef7 --- /dev/null +++ b/ansible/roles/dock_launch_config/tasks/main.yml @@ -0,0 +1,95 @@ +--- +- name: load variables + include_vars: "group_vars/alpha-krain.yml" + +- name: create new config file for krain + template: + src=upstart.conf + dest=~/{{ app_name }}.conf + backup=yes + vars: + - app_name: "krain" + - enviroment_vars: enviroment_vars + +# this requires the use of gnu/coreutils base64 (`brew install coreutils`) +# Allows us to use same base64 locally and in host +- name: encode krain config to base64 + shell: cat ~/{{ app_name }}.conf | gbase64 -w 0 + register: krain_base64 + vars: + - app_name: "krain" + +- name: load variables + include_vars: "group_vars/alpha-charon.yml" + +- name: create new config file for charon + template: + src=upstart.conf + dest=~/{{ app_name }}.conf + backup=yes + vars: + - app_name: "charon" + - enviroment_vars: enviroment_vars + +# this requires the use of gnu/coreutils base64 (`brew install coreutils`) +# Allows us to use same base64 locally and in host +- name: encode charon config to base64 + shell: cat ~/{{ app_name }}.conf | gbase64 -w 0 + register: charon_base64 + vars: + - app_name: "charon" + +- name: register tokens + set_fact: + vault_tokens: + - { file_name: 'auth-token', value: "{{ vault_auth_token }}" } + - { file_name: 'token-01', value: "{{ vault_token_01 }}" } + - { file_name: 'token-02', value: "{{ vault_token_02 }}" } + - { file_name: 'token-03', value: "{{ vault_token_03 }}" } + +- name: Get stats for CA + stat: + path: "./{{ certs_root }}/ca.pem" + register: ca_stats + +- name: Get stats for CA key + stat: + path: "./{{ certs_root }}/ca-key.pem" + register: ca_key_stats + +- 
name: Get stats for pass file + stat: + path: "./{{ certs_root }}/pass" + register: pass_stats + +- fail: + msg: "Whoops! file ownership has changed" + when: not ca_stats.stat.exists or not ca_key_stats.stat.exists or not pass_stats.stat.exists + +- name: encode ca.pem to base64 + shell: "cat ./{{ certs_root }}/ca.pem | gbase64 -w 0" + register: ca_pem_base64 + +- name: encode ca-key.pem to base64 + shell: "cat ./{{ certs_root }}/ca-key.pem | gbase64 -w 0" + register: ca_key_pem_base64 + +- name: encode pass to base64 + shell: "cat ./{{ certs_root }}/pass | gbase64 -w 0" + register: pass_base64 + +- name: Generate dock script + template: + src=init.tmpl + dest="{{ environment_root }}/dock.sh" + vars: + tokens: "{{ vault_tokens }}" + is_dock_pool: false + +- name: Generate dock script + template: + src=init.tmpl + dest="{{ environment_root }}/dock-pool.sh" + vars: + tokens: "{{ vault_tokens }}" + is_dock_pool: true diff --git a/ansible/roles/dock_launch_config/templates/init.tmpl b/ansible/roles/dock_launch_config/templates/init.tmpl new file mode 100644 index 00000000..b89f1cdd --- /dev/null +++ b/ansible/roles/dock_launch_config/templates/init.tmpl @@ -0,0 +1,51 @@ +#!/bin/bash + +{% set has_token = ( ec2_describe_aws_access_key is defined and + ec2_describe_aws_secret_key is defined and + dock_registry_s3_access_key is defined and + dock_registry_s3_secret_key is defined ) %} + +{% if not has_token %} +# Create directory for env +mkdir -p /opt/runnable/dock-init/consul-resources/vault/{{ node_env }} +chown ubuntu:ubuntu /opt/runnable/dock-init/consul-resources/vault/{{ node_env }} +chmod 0711 /opt/runnable/dock-init/consul-resources/vault/{{ node_env }} + +# Set Vault Tokens (Used for fetching templates) +{% for item in tokens %} +echo {{ item.value }} > /opt/runnable/dock-init/consul-resources/vault/{{ node_env }}/{{ item.file_name }} +{% endfor %} +{% endif %} + +# Add upstart files for charon and krain +mkdir -p /docker/app-logs/ +echo {{ 
krain_base64['stdout'] }} | base64 --decode > /etc/init/krain.conf +echo {{ charon_base64['stdout'] }} | base64 --decode > /etc/init/charon.conf + +# Add Certs (Used for genereting Docker client keys + certs) +mkdir -p /etc/ssl/docker/ +echo {{ ca_pem_base64['stdout'] }} | base64 --decode > /etc/ssl/docker/ca.pem +echo {{ ca_key_pem_base64['stdout'] }} | base64 --decode > /etc/ssl/docker/ca-key.pem +echo {{ pass_base64['stdout'] }} | base64 --decode > /etc/ssl/docker/pass +chmod -R 0440 /etc/ssl/docker/ + +# Start services +{% if is_dock_pool %} +start amazon-ssm-agent +{% endif %} +service krain start +service charon start +{% if not is_dock_pool %} +CONSUL_HOSTNAME={{ consul_host_address }} \ +CONSUL_PORT={{ consul_api_port }} \ +VAULT_PORT={{ vault_port }} \ +USER_VAULT_HOSTNAME={{ user_vault_load_balancer }} \ +USER_VAULT_PORT={{ user_vault_port }} \ +{% if has_token %} +S3_ACCESS_KEY={{ dock_registry_s3_access_key }} \ +S3_SECRET_KEY={{ dock_registry_s3_secret_key }} \ +AWS_ACCESS_KEY={{ ec2_describe_aws_access_key }} \ +AWS_SECRET_KEY={{ ec2_describe_aws_secret_key }} \ +{% endif %} +bash /opt/runnable/dock-init/init.sh | tee /var/log/user-script-dock-init.log +{% endif %} diff --git a/ansible/roles/dock_launch_config/templates/upstart.conf b/ansible/roles/dock_launch_config/templates/upstart.conf new file mode 100644 index 00000000..38e0e7ad --- /dev/null +++ b/ansible/roles/dock_launch_config/templates/upstart.conf @@ -0,0 +1,41 @@ +#!upstart +description "{{ app_name }}" +author "Jorge Silva" + +env NPM_BIN=/usr/local/bin/npm +env APP_DIR=/opt/runnable/{{ app_name }} +env LOG_FILE={{ app_log_dir }}/{{ app_name }}.log +env NODE_ENV={{ node_env }} + +{% if enviroment_vars is defined %} +{% for name, value in enviroment_vars.iteritems() %} +env {{ name }}={{ value }} +{% endfor %} +{% endif %} + +start on (local-filesystems and net-device-up IFACE=eth0) +stop on shutdown + +script + touch $LOG_FILE + chdir $APP_DIR + echo $$ > /var/run/{{ app_name }}.pid + 
exec $NPM_BIN start >> $LOG_FILE 2>&1 +end script + +pre-start script + # Date format same as (new Date()).toISOString() for consistency + echo "[`date -u +%Y-%m-%dT%T.%3NZ`] (sys) Starting" >> $LOG_FILE +end script + +pre-stop script + rm /var/run/{{ app_name }}.pid + echo "[`date -u +%Y-%m-%dT%T.%3NZ`] (sys) Stopping" >> $LOG_FILE +end script + +post-start script + echo "===== App restarted =====" >> $LOG_FILE +end script + +respawn +respawn limit 5 1 # give up restart after 5 respawns in 1 seconds diff --git a/ansible/roles/docker/README.md b/ansible/roles/docker/README.md new file mode 100644 index 00000000..72610499 --- /dev/null +++ b/ansible/roles/docker/README.md @@ -0,0 +1,39 @@ +# Role Name + +Ansible Role to Install Docker on Ubuntu + +## Manual Setup + +*Important: You must set up the following certificates on new boxes manually (for now):* + +For the Docker daemon: +- `/etc/ssl/docker/`: + - `ca.pem`: CA certificate that also signed the client keys + - `cert.pem`: Docker _server_ certificate + - `key.pem`: Key used to sign the Docker server certificate + +For the Docker client: +- `/home/ubuntu/.docker/`: + - `ca.pem`: CA certificate that also signed the client keys (should be the same one as in `/etc/ssl/docker`) + - `cert.pem`: Docker _client_ certificate + - `key.pem`: Key used to sign the Docker client certificate + +To ensure docker verifies the local client, you need to either pass `--tlsverify` to the docker command, or you need to set `DOCKER_TLSVERIFY=1` in the environment. 
+ +## Role Variables + +``` +docker_centos_packages: + - { package: "docker" } +``` + +## Example Playbook + + - hosts: docker-servers + roles: + - { role: docker-centos, + tags: ["docker"] } + +## Author Information + +anandkumarpatel diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 00000000..d9c35d9f --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,2 @@ +--- +docker_package_name: docker-engine=1.12.0-0~trusty diff --git a/ansible/roles/docker/files/docker.conf b/ansible/roles/docker/files/docker.conf new file mode 100644 index 00000000..3b0e0877 --- /dev/null +++ b/ansible/roles/docker/files/docker.conf @@ -0,0 +1,61 @@ +description "Docker daemon" + +start on (local-filesystems and net-device-up IFACE!=lo) +stop on runlevel [!2345] +limit nofile 524288 1048576 +limit nproc 524288 1048576 + +respawn + +kill timeout 20 + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKER=/usr/bin/$UPSTART_JOB + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + # redirect STDOUT/STDERR to logger app for rsyslog/loggly + exec "$DOCKER" daemon $DOCKER_OPTS 2>&1 | logger -t docker_engine -p local7.info +end script + +# Don't emit "started" event until docker.sock is ready. 
+# See https://github.com/docker/docker/issues/6647 +post-start script + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then + while ! [ -e /var/run/docker.sock ]; do + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 + echo "Waiting for /var/run/docker.sock" + sleep 0.1 + done + echo "/var/run/docker.sock is up" + fi +end script diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 00000000..77116784 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,89 @@ +--- +- name: create docker cert directory + become: true + when: dock is defined + file: + path=/etc/ssl/docker + state=directory + +- name: copy docker upstart config + become: true + copy: + src=docker.conf + dest=/etc/init/docker.conf + mode=0444 + owner=root + group=root + +- name: install aufs with linux-image-extra-{{ ansible_kernel }} + become: true + tags: aufs + apt: + pkg="linux-image-extra-{{ ansible_kernel }}" + state=present + update_cache=true + cache_valid_time=604800 + +- name: install apt-transport-https + become: true + apt: + pkg="apt-transport-https" + state=present + update_cache=yes + cache_valid_time=604800 + +- name: add docker repository key + become: true + apt_key: + id: 2C52609D + url: https://apt.dockerproject.org/gpg + state: present + +- name: add docker repository + become: true + apt_repository: + repo: "deb https://apt.dockerproject.org/repo ubuntu-{{ ansible_distribution_release }} main" + state: present + update_cache: yes + +- name: copy docker config file + become: true + template: + src=dock + dest=/etc/default/docker + register: copied_config + tags: genDockerConfig + +- name: apt-get update + become: true + apt: + update_cache=yes + +- name: install docker + become: true + when: restart is defined or dock is defined + apt: + pkg={{ docker_package_name }} + state=present + 
force=yes + update_cache=yes + cache_valid_time=604800 + +- name: enforce modes on /docker + become: true + file: + path=/docker + state=directory + owner=root + group=root + mode=0755 + +- name: restart docker + when: (copied_config.changed and restart is defined) or dock is defined + command: sudo service docker restart + +- name: create docker group + become: true + group: + name: docker + state: present diff --git a/ansible/roles/docker/templates/dock b/ansible/roles/docker/templates/dock new file mode 100644 index 00000000..c3f219ba --- /dev/null +++ b/ansible/roles/docker/templates/dock @@ -0,0 +1,6 @@ +DOCKER_OPTS="-H=unix:///var/run/docker.sock -H=0.0.0.0:4242" +DOCKER_OPTS="$DOCKER_OPTS --tlsverify --tlscacert=/etc/ssl/docker/ca.pem" +DOCKER_OPTS="$DOCKER_OPTS --tlscert=/etc/ssl/docker/cert.pem --tlskey=/etc/ssl/docker/key.pem" +DOCKER_OPTS="$DOCKER_OPTS -g /docker --insecure-registry registry.runnable.com --icc=false" +DOCKER_OPTS="$DOCKER_OPTS --bip 172.17.42.1/16" +DOCKER_OPTS="$DOCKER_OPTS --dns=172.17.42.1 --dns={{ ansible_dns.nameservers[0] }} --dns=8.8.8.8" diff --git a/ansible/roles/docker/templates/runnable b/ansible/roles/docker/templates/runnable new file mode 100644 index 00000000..3408514d --- /dev/null +++ b/ansible/roles/docker/templates/runnable @@ -0,0 +1 @@ +DOCKER_OPTS="-g /docker --icc=false" diff --git a/ansible/roles/docker_client/README.md b/ansible/roles/docker_client/README.md new file mode 100644 index 00000000..3b95b530 --- /dev/null +++ b/ansible/roles/docker_client/README.md @@ -0,0 +1,14 @@ +# Role Name + +Ansible Role to Install Docker Client Certs on Ubuntu + +## Manual Setup + +Creating new docker client certs: +1. cd into this dir ```cd ``` +2. ensure you have ca-key.pem here `roles/docker_client/ca-key.pem` +3. 
run cert generator ```sudo ./scripts/genClientCert.sh ``` + +## Author Information + +anandkumarpatel diff --git a/ansible/roles/docker_client/ca.pem b/ansible/roles/docker_client/ca.pem new file mode 100644 index 00000000..d6b36004 --- /dev/null +++ b/ansible/roles/docker_client/ca.pem @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEnjCCA4agAwIBAgIJANIFZy8wwSmYMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAwMTQxNFoXDTI2MDUyMDAwMTQxNFowgZAxCzAJBgNVBAYTAlVTMRMwEQYD +VQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMREwDwYDVQQK +EwhSdW5uYWJsZTEMMAoGA1UECxMDT3BzMRIwEAYDVQQDEwlsb2NhbGhvc3QxHzAd +BgkqhkiG9w0BCQEWEG9wc0BydW5uYWJsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCkX4cwQDcimGvnJg0HBl+A9da0zpUjJJVPbba3A2wJ/S7l +gKlYID5TXNYpSAepdmmWO+NEXcNVPUYVhoBe4DWkJFc+lxtLPy0UOseZ+TvMac7i +Zp0k/GSLl3ASloDPfKsBlpOpM+OhWvl5jzAzSJ1l6dGcCEAXE6dhtEUgPMUzfAfl +bUuQ7ri8iMB67Ktix8FJCEpwczlKfebzmxw3VxwGiNQSGbbyIknuCk5eGbMVPtdY +DBl+5R7h0S0enXxYtPtL7CRKs0uHxm8Kmvvo2htSf9bdOSsjnFzQvZdBLrrQipN+ +i8m/ZOL8IOzV/Wfwqd7Zo3w3hUE8rzrBP0Ce0f0BAgMBAAGjgfgwgfUwHQYDVR0O +BBYEFKoY1K08hkkW4dt/bo0153ccq9sMMIHFBgNVHSMEgb0wgbqAFKoY1K08hkkW +4dt/bo0153ccq9sMoYGWpIGTMIGQMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2Fs +aWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzERMA8GA1UEChMIUnVubmFi +bGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJbG9jYWxob3N0MR8wHQYJKoZIhvcN +AQkBFhBvcHNAcnVubmFibGUuY29tggkA0gVnLzDBKZgwDAYDVR0TBAUwAwEB/zAN +BgkqhkiG9w0BAQsFAAOCAQEAg9gyj48wThPw61lxZ/KpQsb5Vhme/EQD0FE8UR74 +uLXFBw0KOvmwd5MXUDJBmS2Qzcy7ja86YETUnYX6AWpyKaS5ahsVHxzsNlG8IwHO +lh9gMR1ZCiwYRiVUEq4d7PvwsgZ/xbzi9i4OeXQSsPGDcD2gSO7fqE+uQI5JSTM3 +RP961DrpabOUYVd8/B+TA0coYke+VgHNPMWzCAfKQv9SUCqzykJ8Gx5cKsofQAcV +Us2OqQgIbJtca4eds2bz6pDxfRux+7A5n/hfj86YqzQvrHUVRtzsL0ukIOM5G31Z +D80lBQLQV7QbTVu8plmZ+is8v76BS5eCLmKC0UnjEve7Jg== +-----END CERTIFICATE----- diff 
--git a/ansible/roles/docker_client/files/certs/api-socket-server/cert.pem b/ansible/roles/docker_client/files/certs/api-socket-server/cert.pem new file mode 100644 index 00000000..3dcf0172 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api-socket-server/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaTCCAlGgAwIBAgIJANNqTOArCKxOMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE3 +MDUxNzIxNDIyOVoXDTI3MDUxNTIxNDIyOVowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx0f3i6G3dpdExN5tW5Pj2yW2 +Gi5CgZuV8cve06N6pnytMuIss6CqnC/BMpB9GZFSKwJbZyRg7dCD8pe/UTK19b3G +cfeGQX6xTZi18fKz176OTNSzf0YfKDVCTj2drgM+TLNhDZnzNlj8j4pnZzRWEB0R +/Rni4nG30jk69gHsv4J8CJasDHzSrL4JskSvI848Glx2p5foHrAtMs10tYRTppja +8GcF+bV9CyEpfBrXQ0RuqLDAGpFvmrSlHbr+NijgkwymZHxGFPMG6b8eeFuVNB2N +hCM2Fp63iVFbV+wejUH+vfoKFOU4NJvTtHLvxKIMI8VMzc1fvjIw2VFCBuX3KwID +AQABo0QwQjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwIQYDVR0RBBow +GIcEfwAAAYIJbG9jYWxob3N0ggVzd2FybTANBgkqhkiG9w0BAQsFAAOCAQEAefuS +dqVi35UNOd0MElbDeGwUaTOTahvZ4X9aNFUjBxBNRgp9U+5vYa43S9H+WTeXXXih +7vkP+Mk17COTWu/N2ajbDT0Y9wurDkQjg4gAPtJR+6RXL2H315B1+VSfi/cjVRuf +HMsVt7OpJuno/pzwK2OPjZdqrJrC0tRYLbfcl+Z1m9k+ysRKC76MhvT6VT+6vHMT +GvfuyeX1W0ShiZbFJ+oUzTwYBfgWKSkJQLa1VUKpI+LkZyiSbo9jQ9aa6eTxExbB +FqHPp94C33BUHW90Yxu5a49tiqrgxpa4DTiMShjxaU21hhUTlQCJfhj0rsvGCcCE +OMr50/wktPSE56hUsg== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/api-socket-server/key.pem b/ansible/roles/docker_client/files/certs/api-socket-server/key.pem new file mode 100644 index 00000000..1582f190 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api-socket-server/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAx0f3i6G3dpdExN5tW5Pj2yW2Gi5CgZuV8cve06N6pnytMuIs 
+s6CqnC/BMpB9GZFSKwJbZyRg7dCD8pe/UTK19b3GcfeGQX6xTZi18fKz176OTNSz +f0YfKDVCTj2drgM+TLNhDZnzNlj8j4pnZzRWEB0R/Rni4nG30jk69gHsv4J8CJas +DHzSrL4JskSvI848Glx2p5foHrAtMs10tYRTppja8GcF+bV9CyEpfBrXQ0RuqLDA +GpFvmrSlHbr+NijgkwymZHxGFPMG6b8eeFuVNB2NhCM2Fp63iVFbV+wejUH+vfoK +FOU4NJvTtHLvxKIMI8VMzc1fvjIw2VFCBuX3KwIDAQABAoIBAQCIBLsV91999mnd +5ePGqf7QScEtxX1ncYa672g8FE1W7apaHL5eLL/Gs2Gu3ahFl0b6TDWPBOmT6Mrm +/mR7S52fCdU1XLmqYosqZ51oczwFee8uyc9qg7Yof5GuTageaeBQAe+wlO/InJxm +bRV+/93uSalJlC2kWfIZ7y4rU80AOR8JjbYtT0LCTBSYj8l0UkUZSm3DFgy2+sJQ +kaOIm0XUTMlh/E/MkHr0GeRQhtQ7UXhczYIgVsrP1FvFA8mPdlPtyhua1R6wz5XZ +qr2bNrFB70M651o7roS15EzcAIQW7Syv03ayBOJgUl9jxgZyrCXSMAP1bXuxDQGo +qPLl6B5BAoGBAO+PJCaYrYPWCt9PRZGnaM7j5pjEpq2j/A5c2dR2ENKtY5tmfxAr +onvbFtcZMsLUlcyNG5rInZk6CFvtuzf2waT4o4uG99onA2+JBwqKXFqC97D6vpqe +IBIBL2dtiJ+2HMgUT/0xmjqYQL8hjY+IXOLnrDdnnkSnSKwbZkMdbqMDAoGBANT1 +LHlBsPPy7qUhHIrolbhqRHiyF5LQL1cP5CxhEDUuTV3axgBUiiMEvzYOJI5L5k7h ++dSXvp5eeukOfOjdT+jTTuLAfHgqYV8l7eadjZ03D/44nZoC2VePROSZY6e6gBi2 +5RjUNac1uibruzgf9f+DKrK0NtY9NNE8raXkWA65AoGAIalygf3oO+NCydhAxVPJ +1XCOwZ64cSPe0kHX1hP1DzWQDpEbf7Ex9sd1cSo4SMpIdmLJozlXc+WEsS45pYaF +9WF+0adYgo4C3JRbqe6BRbKrjXHegKmBhiYmfBkXHqIfbznzd3eV3W/xgSn801Nt +AfFd8LcdtTXwb0a2Wu3y0vMCgYBhtdSgFkoQxGhHD/g+rBT2NpwQHPbAABGYiswQ +qMB9Bi+tXpH22NNA2g65yKdxOtYDX5/3sWBe3c7bXDBn0BO4KixJ0jPKFvfyZj7P +z36IohkOdf+qCKCckjn0AYU+Ek5wCIr5Z3TusbIl5YdTaya+A/r1X8bBaffwAT1T +nn/g0QKBgDHghXLtUBkSQlPO6ZuDlnwcSI4KYaAH+Zu50JA/jYFVWjtJ0R9fWbrE +tM9YSMAj4neQ9zHpjlHQGdmxjiM7HTUNCrGX/WdEHW0+EQY6FHazcl59+eGmxf6M +TkCATsp7LQAMG/FO+AuLCPn5dKIth8GPc/lhhycvpti2M8B5Awbm +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/api-worker/cert.pem b/ansible/roles/docker_client/files/certs/api-worker/cert.pem new file mode 100644 index 00000000..159364f8 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api-worker/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaTCCAlGgAwIBAgIJANNqTOArCKxPMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD 
+VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE3 +MDUxNzIxNDIzMFoXDTI3MDUxNTIxNDIzMFowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAl4TupbNqRMSk4QgU1fWPyQiU +obqinHxmFyhgLmk0u5TdPrLPvpTFqyiU6m8aNgO09ar5salrCD7AmtmevznKvukq +3uYGO31XWhu8UluyJUWtu4MDBWjOL3zC/Up1bHHjI/PaXiQjeS6vGJeiw+ebbxjc +k0alQCxaZOHZwMwd7y91IPwDgcM3WDlXd2QS+4ldrUoNKEq+gZ2uH38OInwZS5JF +ndC5H8vVqv1oKU+97lT8jOsUs83rePoPt5fRpjFd1Ev6d7Bpwu+YmD5t3Nhdj26d +++uJchsWcwB3hlJEA/PS6rOTdx5X/ZySuc0rBztNoxhmx17t6+UjawijaWdQewID +AQABo0QwQjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwIQYDVR0RBBow +GIcEfwAAAYIJbG9jYWxob3N0ggVzd2FybTANBgkqhkiG9w0BAQsFAAOCAQEAUvPS +HIJDk5UL3eqqR64r7tnjhLL3F5cQywuKIlfJD/OJDW/eymTSdhCn3cmwi9S+EPlj +LI+a/T0j5SsiIzQGzh3LihoTAwsjUGgfLZFABSdfliT4I5GMomuRMPHMATVYW8MZ +ngAiUIJlK0XQ3MSLmh8yzgCwOZ9riTTk7W6v9dzW71n6q0JQXJMkKsXSvFHhAvue +pUIokDtnz2uURORWGcDA71n+2oRCfTleD9mPWYCTqb3ZQWMK59HTzDPbLMpauxb+ +CKXdt4GqttawL6HfgIPyELHUQtJBmC5tru9kh8OsZWJqANjIDIB1mOmXpxsYExjh +nsNjj4eKqrt+rZjfFg== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/api-worker/key.pem b/ansible/roles/docker_client/files/certs/api-worker/key.pem new file mode 100644 index 00000000..2a2500b0 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api-worker/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAl4TupbNqRMSk4QgU1fWPyQiUobqinHxmFyhgLmk0u5TdPrLP +vpTFqyiU6m8aNgO09ar5salrCD7AmtmevznKvukq3uYGO31XWhu8UluyJUWtu4MD +BWjOL3zC/Up1bHHjI/PaXiQjeS6vGJeiw+ebbxjck0alQCxaZOHZwMwd7y91IPwD +gcM3WDlXd2QS+4ldrUoNKEq+gZ2uH38OInwZS5JFndC5H8vVqv1oKU+97lT8jOsU +s83rePoPt5fRpjFd1Ev6d7Bpwu+YmD5t3Nhdj26d++uJchsWcwB3hlJEA/PS6rOT +dx5X/ZySuc0rBztNoxhmx17t6+UjawijaWdQewIDAQABAoIBAGdJe/FCy8dTEV5B +RwrPWZPeIIvbESyBdIqGwIICv6q7gNJTzGALRHA3usPQttppTntFcO8WIVs+g8dL 
+X/bVK4tWoR3PSiJVgC1xyL+PuHHF/PddCLswRZWDg9xsXH1vaTpB6fLQycm69Ctc +etz2zjM+jGZs+wsy8O/iMbFGs4UvFS+2HgP4NIY1orU5tr6rohWrakznsJoUpMvE +dvMxKhyfi6scFqtCoNkNJckt60u0cSqofjGuoJ2LzLNNR09KxVQGv19QFLh9Ms0m +aNxPusPsPhnoCUb6aTd22vIu/AaLAgoJ/g1Sz8B2hqwbHrBGJWpV4ODoTxHQoQ8n +xo/2rCkCgYEAxrXS5nW5y2q1eEVj0teU+rzdxBY0OH58tpP8jfbrhtEfsm/9TLXo +s47TftsZW/9Zy6F18Yg5Zpc9Y0nQ3p/7DnGIIOaR4PsZmGPuAez88vEWEBrcxV7O +EhFWRRmZ9txE8uCPidetqEobcTo8irtd3jDCcTGqR+Zim+NaLTQ+3WUCgYEAwzQX +RtTpCXfEvqjclGtLXKsiqEWHWYSY4SAyisL+B7cXz7oglrCIueDPP3VUN6VwvKhb +pmR7mK3LoM6nPX3cHK4FMhUCqXxZ/ArMDta5+WJz+qrje+5gcoYY9fhWpRVClMK/ +Qjxx2tk8lxvKZOw3WFnR6lZlggGL14I4s1QICF8CgYEAlz5padQNOFDeEs7t7dr8 +n+QE/2zPDnjnv6ffwMZB+nlNoOkwaKTEm3aIb7OgF6/BzwXz2YUP+4/9v4a9ndUy +p4muTSxxuEYm8yCMVZbl6JuraVKo2ukP/ByKB0o3L/fz2tElL4ujt8XAIWxsDEQU +bDu8o59jthoa0AA58NIlTykCgYAVxF1EHMHPuHdL+3B9bGl+7+ZxdS9eoBacJAb+ +yjehjJ0vIMY5h333nEYAAYlemygzIWL27wWJRPgL+n9TsBE3cYBK0WT2mVM/c8TF +ctRSJtRgA2ZSj4ZZvvJRm1uuFOzTwdOqAyt/B/Jlt0eecWjKhVmPFAtFr/50DaVc ++eXbaQKBgAK79c5JnjR+qKuEVG9J2HA9UXjD4lm0ZzYfs+E4LeFY9lISL5swgDTF +1HlLChI1l+PhFseuyBmv4ORh3qHTkTpf6d8bvqDcoKcBz3nGyODl1r3y4eO50tj0 +cNMRnM3EAUiYUH6nY6SytSA1XkSyQBfvcnfwIOMzvpCCbNLFIoJ7 +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/api/cert.pem b/ansible/roles/docker_client/files/certs/api/cert.pem new file mode 100644 index 00000000..8b4d5b5a --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaTCCAlGgAwIBAgIJANNqTOArCKxNMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE3 +MDUxNzIxNDIyOFoXDTI3MDUxNTIxNDIyOFowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr26EFQVGPUpSjNdwAQFbb8/X +0ptPTnkgPMXmTJTNbKwvAiS+tq+0sE+Tb7ACYw8Nx/qBwzG/buClldJmv2PthaFz 
+aVL0Mjb+/1YPY1TYO869buf9n5hqluguUIMyo1+UpzhfVVxaJa5e/uiNn/1Ru1DR +rftDlPesZGfR0Lvl2qcjHTLy1oArHl6Ztt+kQo4fJSHFHTXNEqTA4bjY/15KtktK +rUnC4GB9xPRwRcoEy1GjbLb5zLaND7yyCN3l5iObyft2dKn8T5soQULNQxiSpzDL +MQlj4dxevkHjooD2v3P2cydss+HImNfX83lxjKbukGE5TtoWk2YMzjut4tzPLwID +AQABo0QwQjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwIQYDVR0RBBow +GIcEfwAAAYIJbG9jYWxob3N0ggVzd2FybTANBgkqhkiG9w0BAQsFAAOCAQEAXI5F +mqWsydSTcAoXMHLp9RHIpg6vMmIXy+KhPnI74gK8PSJfitdRt4I/0IJLwQNlhlhZ +ZxndTSGNbJ8MuXTA61aV0o4d1UC27nrL4hmPYy/++4vh/sKRESV1a/8IzKPjwQwe +epvKJpwpkksI4PD086ZMwwGj8oll1SofrCn2WiTWD9fa0sHcsuiR7heMiMfq3o0+ +m8yb2A3cu0DgIqTtDBO2XBicwyqWjusA6qbCK5FzVS6GI2onNPr1mmXd3Jby6Jwz +Se2d/ORLqNVGJJg32op5JWaDriPPBMTpWbHk0O2Gps6wmKB2d7CPTSpqR0uRtzmB +dMfckpQ/ihazeHKSnA== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/api/key.pem b/ansible/roles/docker_client/files/certs/api/key.pem new file mode 100644 index 00000000..eb266b01 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAr26EFQVGPUpSjNdwAQFbb8/X0ptPTnkgPMXmTJTNbKwvAiS+ +tq+0sE+Tb7ACYw8Nx/qBwzG/buClldJmv2PthaFzaVL0Mjb+/1YPY1TYO869buf9 +n5hqluguUIMyo1+UpzhfVVxaJa5e/uiNn/1Ru1DRrftDlPesZGfR0Lvl2qcjHTLy +1oArHl6Ztt+kQo4fJSHFHTXNEqTA4bjY/15KtktKrUnC4GB9xPRwRcoEy1GjbLb5 +zLaND7yyCN3l5iObyft2dKn8T5soQULNQxiSpzDLMQlj4dxevkHjooD2v3P2cyds +s+HImNfX83lxjKbukGE5TtoWk2YMzjut4tzPLwIDAQABAoIBAAel0Lo93Z8EY27i +YW6ZyYupSuZSgPWR0Db2BaEnB4GKTA5S3cGHDWB2R54h1SjOWjJymD+TJp5FytIq +NvJ2/77ImAhqZlzeCuXEsa0J4NdSnu17zZGsilED1CkMXyPCIe/cREOrX/6uiFOi +fbIQ0ARTdKCaCIsw9OH3qKzp1A5hEmiv+jD4x/UB0VqVD5I957fZFkHFS1MdnRGR ++YuqlBumRLwBf5nY69OFHmXvKKLPFuk2WvTzHANwfoQDvsvgfhynWxQ10NcFozOd +CHlh4isSciWazh6AZ93ubgQi6XvXBBeyzyqc65eIQIDCIVnBWPkJf/IVJdBcUJ/W +SRKadwECgYEA25KcM1ZTJdlPNzbKgQBY0O3G1NUqcumA1BVPxHDCTy4s+nwE9GJu +Lr60vspJ1d5xtSjMWTd0fQbgfi04nmA3FLsuguvNHMruIKgbFUxxGp/TSS0334m8 +VxvyhAc3Zx4IkgmsDb0RSGVH5eJEysFVLLVu4mt7/HADauORmYRyc68CgYEAzIk1 
+zo8NfHZzzI5h003nWmyv9r7mbTWwxEfhrXRYFSNehnfbMrp47IJyDu9mJZv8iiZp +q9iLiqdqI9zgb25VAeR3IljYHp1GO7nACdMjhZAi4w1SH8fQm3OCitW3RV7jYPFr +s8gfbImYuUHN3Y2E8h4bjxuKjiFnBkKTDy6EvIECgYBRCQPC4pofPttAemU0ROeC +fC7Ef8OB03GrVA4zZpYf2N/atgiyFN2rPNDCFTA40TbP8DWyu4evWOljR9c8hC/v +48EfqlzxwmcuADy2mJaph9DXkeusAU5UK8lONseYs/nfw9gO3fdR66ckTWSsG7wO +mwtIXvjMSp7IRSKIMNx4mwKBgQCeuiCKkxNpMld9OEWMgNl4n/4SY2VY/SCXwhzN +ygsNJGyXt+3guTtcd4LiOkCwERRzdYnt9VedG3zQJfn6H8pSdKVLf10ArRazBNyW +A0d+wDOCtnSz+xHVgVeePGpEqHWWZuQpGbXhOgrOQdXsCIZSirDiZwIoeJ2GCyYq +0vPpgQKBgCKjiEml0UZmDIyZ8JD7ScDSZ3QtiOfjGO6pLWWZvQy+O80zLzL0bCwh +kFFBGF6KW+QjQGC/CW7gLB6zC//nCsTsCl2itZYdO4+19MSa65g6LSDikyIyVEGw +BXEZF42VRmyS6c0S/pJE4BrFP86fEP9O+YxmO5oIBM+kC7QaCp+L +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/ca.pem b/ansible/roles/docker_client/files/certs/ca.pem new file mode 100644 index 00000000..d6b36004 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/ca.pem @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEnjCCA4agAwIBAgIJANIFZy8wwSmYMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAwMTQxNFoXDTI2MDUyMDAwMTQxNFowgZAxCzAJBgNVBAYTAlVTMRMwEQYD +VQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMREwDwYDVQQK +EwhSdW5uYWJsZTEMMAoGA1UECxMDT3BzMRIwEAYDVQQDEwlsb2NhbGhvc3QxHzAd +BgkqhkiG9w0BCQEWEG9wc0BydW5uYWJsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCkX4cwQDcimGvnJg0HBl+A9da0zpUjJJVPbba3A2wJ/S7l +gKlYID5TXNYpSAepdmmWO+NEXcNVPUYVhoBe4DWkJFc+lxtLPy0UOseZ+TvMac7i +Zp0k/GSLl3ASloDPfKsBlpOpM+OhWvl5jzAzSJ1l6dGcCEAXE6dhtEUgPMUzfAfl +bUuQ7ri8iMB67Ktix8FJCEpwczlKfebzmxw3VxwGiNQSGbbyIknuCk5eGbMVPtdY +DBl+5R7h0S0enXxYtPtL7CRKs0uHxm8Kmvvo2htSf9bdOSsjnFzQvZdBLrrQipN+ +i8m/ZOL8IOzV/Wfwqd7Zo3w3hUE8rzrBP0Ce0f0BAgMBAAGjgfgwgfUwHQYDVR0O 
+BBYEFKoY1K08hkkW4dt/bo0153ccq9sMMIHFBgNVHSMEgb0wgbqAFKoY1K08hkkW +4dt/bo0153ccq9sMoYGWpIGTMIGQMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2Fs +aWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzERMA8GA1UEChMIUnVubmFi +bGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJbG9jYWxob3N0MR8wHQYJKoZIhvcN +AQkBFhBvcHNAcnVubmFibGUuY29tggkA0gVnLzDBKZgwDAYDVR0TBAUwAwEB/zAN +BgkqhkiG9w0BAQsFAAOCAQEAg9gyj48wThPw61lxZ/KpQsb5Vhme/EQD0FE8UR74 +uLXFBw0KOvmwd5MXUDJBmS2Qzcy7ja86YETUnYX6AWpyKaS5ahsVHxzsNlG8IwHO +lh9gMR1ZCiwYRiVUEq4d7PvwsgZ/xbzi9i4OeXQSsPGDcD2gSO7fqE+uQI5JSTM3 +RP961DrpabOUYVd8/B+TA0coYke+VgHNPMWzCAfKQv9SUCqzykJ8Gx5cKsofQAcV +Us2OqQgIbJtca4eds2bz6pDxfRux+7A5n/hfj86YqzQvrHUVRtzsL0ukIOM5G31Z +D80lBQLQV7QbTVu8plmZ+is8v76BS5eCLmKC0UnjEve7Jg== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/docker-listener/cert.pem b/ansible/roles/docker_client/files/certs/docker-listener/cert.pem new file mode 100644 index 00000000..f4386944 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/docker-listener/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaTCCAlGgAwIBAgIJANNqTOArCKxQMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE3 +MDUxNzIxNDIzMFoXDTI3MDUxNTIxNDIzMFowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1qdlcLUDuVyLuRpU/jUxtbzX +e7mgdsIjSS9n0GXAKnK7Mr0pj/YfiSoCThpgBm/pvOO7v4LvWwwM8SLr/nC5mmjr +xxeEuaZQVEo0R1l0RgBAa9Hx51zIEKkG7canb4XMPnxLjd4ijBgWOPuqmD3IX5qa +5EKiEaCEtZGHPa0yI3r5XdY5Ek6dKNMQNqhJSC0oONmJRhEaSfsRgXk6uaouS7c0 +3YUtkIqvIhRU5ZUHUnW5Zgv5jLIfFoVaF9tTYycM9GKExW8NQU3ItrcigDrTkb6y +ozkiqaNEvyQe1FH6t2PIeqIdkFgkGEzfR+XqJ2INp40xEUGS1fYQaQDjOGIQxwID +AQABo0QwQjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwIQYDVR0RBBow +GIcEfwAAAYIJbG9jYWxob3N0ggVzd2FybTANBgkqhkiG9w0BAQsFAAOCAQEAOc1t +IydxH8YAzp60T+H9yRdxeKj7vnbQL2OutaG/ZbZJRopxvOgiPNTeXmP04mn4sJoL 
+iaKVxaIb8zyKi8LFA/yhDl7pvH0bo992HdodmtnqOAy0D1dpuR2m39bA/REOIWzE +Wph67b5XJ+noTjoNGfLQ9/iSuY34PBvE6dtLxLAVdTNQYOhHKaP+TaJCxFVV/kt5 +gd+NUwYjN5D2sAfSaQiS0M6lkUMHmHRKkGb1tJ3RI6ku0CoFnfPUfASo88rfW+ri +XdeVdvj3wg4YlHQsLl1hDOb0S3gkPzeWELeOUF+khH7cOFbm9bm19DlYqs2JaHrJ +XvD61+GYzQScdejQQg== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/docker-listener/key.pem b/ansible/roles/docker_client/files/certs/docker-listener/key.pem new file mode 100644 index 00000000..232c4616 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/docker-listener/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA1qdlcLUDuVyLuRpU/jUxtbzXe7mgdsIjSS9n0GXAKnK7Mr0p +j/YfiSoCThpgBm/pvOO7v4LvWwwM8SLr/nC5mmjrxxeEuaZQVEo0R1l0RgBAa9Hx +51zIEKkG7canb4XMPnxLjd4ijBgWOPuqmD3IX5qa5EKiEaCEtZGHPa0yI3r5XdY5 +Ek6dKNMQNqhJSC0oONmJRhEaSfsRgXk6uaouS7c03YUtkIqvIhRU5ZUHUnW5Zgv5 +jLIfFoVaF9tTYycM9GKExW8NQU3ItrcigDrTkb6yozkiqaNEvyQe1FH6t2PIeqId +kFgkGEzfR+XqJ2INp40xEUGS1fYQaQDjOGIQxwIDAQABAoIBAGzA2xBDZeOgE9pw +kOdKpCjjyHkTD02cBqypq/tV0gqvnlCSrmAV3txrwEc57bTVNqNQ7mqVOBf0/wmd +G9wJ2L92sR5puAEnVkOeZOZRWCuvLPRzphAGXgay3KPTpjlGnLh72PvDs9UuzvOH +7mK7yq1Fc3x6lgpAPXyQguiou2Jlmh1GQ7VAuDoaaX3lXzqa9avg/8tOa4zCDdwX +XcOUVUgP1kFdY0rC50kRg+lrqvsJtOMUh5iZgZgPnuW8dPZ3x6Ie0QyORWZ0Lnzd +p3PKQriBcIbG+MIMPy+Hb4Q++W9TtlyBh4XQcmv5pz2sR4Cqq4pEiT7V6zGG8Ww4 +vW3JSyECgYEA8GzUMEKOoQc8bSKZgAasRp9oz2F1jwOg4QvsmFXQEiVZIVjUTjPV +S8Ge0j1iOLgPM6Md9ExhgtwoL8Xwt9NzbNmYdjlCJKX/0lQ8b+UsDw4RHz1KkUu2 +prttuqEoKStlUh3s/CvUnj4FdkiY0kY60DQO6kBILZC2KITZrwfozZsCgYEA5I8u +f2TLciH/9SPrCMkmdQ2ADlRiYEppKD1B7SdMLeEvfZEPc+KBQN4QEjlNY0KgIa5B +my9YNfObvzv2rQ5DC2GUCDry8iFp/Qm4vp89GawTpadQ239gqEksi6VvmIGvSK/a +I0dmv2VtSYvHjGpPszh3L/Ao15julvlyWO5FUkUCgYEA5qjSTdac9dp2W5HVSKGe +SEcJi+RJn8JwZ1hETcYOUEpqn3aUgM5ztTF2COgBQr87PmQhCc75wk0rdOHnVsVT +uLtt7QZz8ascqxJLxWJNicgKHzlzP8Au5PluhZ21ZQIQO98gDLKUUpjYLqnpi9r6 +8+Lt6Elxtd1fFbKXEbh53j8CgYEAvXNdlGVFKca69kSehnrNcMsO8VHK9YvYXG1i 
+XyRqxcDZb5i1GVaP0uf+pExxNCy+tQxO/0YgNSBHYh1fHBUtR5rLbIMIa+F8jCzp +wMAXklcLNg1ZPYf446U70kkAFoawd2ykQQfnSBDXpNXDQ0W/6IuOZ+MGx6bRZb1A +pixis40CgYAqNumRtBkAin+rJ7/B755rugYtGLjVDrqqZR0et0rrZU4PyHEr4gA3 +AmWbnrBn6roSMFyRFoqUf1Rl0QyBWrplCtaLYJeEvxkaKgJa3gzyVEcZ1d+LQL1u +E+MGvD2I9SLDTSCaugUAtoKCA7oHwNINy2IXfAeFRN856FFxnjcmNg== +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/khronos/cert.pem b/ansible/roles/docker_client/files/certs/khronos/cert.pem new file mode 100644 index 00000000..bfb96079 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/khronos/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaTCCAlGgAwIBAgIJANNqTOArCKxRMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE3 +MDUxNzIxNDIzMVoXDTI3MDUxNTIxNDIzMVowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3PDIwGrm8nz4t+bgZZHlAGQR +M5TbOGuTbNkX268+b2M4kBD4xOoWn/iQIjWOerGjAdCH0ACSOXwncVnep4y/oF6J +TMCkR1/hRT4Ey/4op0FJdOYKwK5rMaMwcEBNdSOxoZsqDscZocRYOFI7X+DHM0K4 +TSuqypAgx+B73vOrBSGYc2h0Lyn5LO7vflfCDsgX90/cJ77d1VFm2QvWfs79ih6p +c6wnIx3fOfI7kE47tc/HQWth4bUKkCWUYF/JgP1I5KvTIJFMjcF8Db9NeWxELvON +EeYC45Aw2vWxxJVCGChWHP8YS4r3PGM0nZOeP2IER66DPIcVMajdPNDjjWuGAQID +AQABo0QwQjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwIQYDVR0RBBow +GIcEfwAAAYIJbG9jYWxob3N0ggVzd2FybTANBgkqhkiG9w0BAQsFAAOCAQEAU7o5 +T05mZyC5Vo8nu1YKeYxItDlTodlHt4bt3st1g+l62kSnACo3Cw3UpwbP9bl+4ld+ +QBG9HwJv4GWdNOBetxfIZZKpciZUGU57L286zhQXxr54hRnBst3Hb+g9FoPKfF5z +AWmHqBElEkgAv99WIdKlhwmTBx98ZwF2wHwGu2p1AxwYm4g9egIbAar4aPfJu7xO +Gg1PXMriwEqRWSYbEnUW7cZ2h5wp2W/vpQo4MitL1RwUzRbs3SyUWwgyOR4Z6epk +eMGKjOWlrnfzBOp5aEJt5nSkf2B9FvFPuSmpKmCHc6SLYk0ZNQtlw2bj1R+1k/u1 +3dksn0PI1SDeNi4cQg== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/khronos/key.pem 
b/ansible/roles/docker_client/files/certs/khronos/key.pem new file mode 100644 index 00000000..1af9ffad --- /dev/null +++ b/ansible/roles/docker_client/files/certs/khronos/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA3PDIwGrm8nz4t+bgZZHlAGQRM5TbOGuTbNkX268+b2M4kBD4 +xOoWn/iQIjWOerGjAdCH0ACSOXwncVnep4y/oF6JTMCkR1/hRT4Ey/4op0FJdOYK +wK5rMaMwcEBNdSOxoZsqDscZocRYOFI7X+DHM0K4TSuqypAgx+B73vOrBSGYc2h0 +Lyn5LO7vflfCDsgX90/cJ77d1VFm2QvWfs79ih6pc6wnIx3fOfI7kE47tc/HQWth +4bUKkCWUYF/JgP1I5KvTIJFMjcF8Db9NeWxELvONEeYC45Aw2vWxxJVCGChWHP8Y +S4r3PGM0nZOeP2IER66DPIcVMajdPNDjjWuGAQIDAQABAoIBAQCztrfUrCAHBlhZ +XlcQx0d6teq4BlD656YXV87sRvOGLzMHHIb9xVt56/lxmha2/l8VTsXqD0zhrXd3 +4B4YM4h7reoIoHoABKF8bSIBvE7dHWHe0G/WowlWauMvq15V9h660Te4pYLX7pnD +1j+tkWz26UJ4oxhdhDbl3ldj1tg5ZsNYWaALf30O0FPBY6g37i1UJ+DObs5CFC4r +QK2YPkOGdilI138iI3TR04R8jAdS1m78DnARpjNry38iX5LclqNb88p0PRqRKmcb +ozwA8kbhUtNj91zD8Yx5yODvZN/lN18HMxC+T7bayzcYQ87GMBRzJ9p4k7Kn25sc +5oTsMzGxAoGBAPMUlo96UOgJUuxjcNuHZ1Riw6Dnv/SHEXfCLzod6W+d69apfdvE +piA7Lr2G4lqy+KdwJi6GNloUiyVAhvrQqhh/LhxkzHSxSkyS5SbAWCzaoZMs3Xvu +3S1i9COO48XI+J6EfEs3itH0A7tkCXRYugQ3cIV0Jhz25mGcM9mrcDX7AoGBAOiu +9Kj17kB/bCFyf5YLaPXR9BwZ1D6b8F5Z7SVVy2SGBGCAEXsYiLtgsKztDAuVflmk +6Nyby88SyMWJiq2NYFj5D0TclIm6Dq+/peKMhYWXd5WNT6yb7DxK5YQuZ4PAnqr7 +cnRTSt0QRblnqoQ2dNpXJD5I717dsyiy/R0n2D8zAoGAF5E9b7e6NGUDByw0+vPL +zuCseuM9NyYJSYUoMwhHY6z5+pwAOdh9Cs7yHyn+tDB57zHuYcBdcKphO0FgVNz6 +QNnDzLjxIbc2runnEbTAV8jytWQ6LRPhmc7oQLJP3xM3F5dcvaYq7WQtIHP4frFD +h7kgTbAPKeu+NmSAmnAGtxUCgYEAlJL6iFSYsDhs3CKXU0Cv4Z8RZ0wa2WuLHUQA +iAFzYRmt+8xq3RX6YONJjPHD6jWnuyfwxMT6a5vL9U5w7YbDiaxm0oqikhrqu8/8 +5V7i9z2TTzwvLsFVI96TFUboNBmvdvAd0idJO0TjsJKAL/HxxWU1MEw0BHg9qPQ5 +aP+DNj8CgYBQTNXSxsGCSPcNDGNOhtY+71k+ceHfmMUcadgZhJuaCdiAnnVFwXvV +Z7QygH8dI2AnvnIORD6F6xRz5aJ7xKAYLbw2S+ixh+P5zGedY4zAuBDrt4PgEAx9 +kh96w2iQtJR5kSRz+WLDUqJJvZG5bP7f8nVfBRRRi/eT8myQLgZZQg== +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/palantiri/cert.pem 
b/ansible/roles/docker_client/files/certs/palantiri/cert.pem new file mode 100644 index 00000000..ed05d911 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/palantiri/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaTCCAlGgAwIBAgIJANNqTOArCKxSMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE3 +MDUxNzIxNDIzMloXDTI3MDUxNTIxNDIzMlowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArnQ1Ax0MTsHG6T6xIjAKbbUd +AbvLtubOFVmKzBLCgZ2dlDMsepVkgtVyLRtwm4Q69gdlQvt+OyVbUnk+x89mNnRl +BFU+J1AHJRlA0lqRE6vgvVLjOlxbMtRx0Worl493PrkEfW7Rh/R72+Kt9Sht631P +AO13dFqTEt/FzLTHxyFMc831ooBaPUyauM4MW+gcrBWCh0XGiy0K8Y7J6aXoZfIz +CpKuZL1vz/ijyTmsUdLvCATaO+aVHMWDOL/QXJEM2AeFzd97YwDT7EHiyn8InUhB +duZfsUU3Pq8Gr7WnPDhgYQXmoFa3ZE3/2hUN/nF3OWbysN/+ZqiLMEE7ofITWwID +AQABo0QwQjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwIQYDVR0RBBow +GIcEfwAAAYIJbG9jYWxob3N0ggVzd2FybTANBgkqhkiG9w0BAQsFAAOCAQEALBuR +sz/X5YYfe50Dnd/4pxseE4UkVuQxAr23fSq/K4MjXy9M1HnBWAjfB/RifmuYs8iM +U3kaIz838FHlmF2AxPjNWztvjs6Hc7uB5YDR8uus4/Csrg9NRTKvgpC+jZQ8s5bP +ZZ4iEjkqhSx9YrI0H5P8YD+HTfNajl/vjHU/uSUbtAXifn9PEn3emRAxKV6J2w04 +mxM2phvNAEfgg1kerKInkByyVsCWcPtf8MGBKPPQEKnGMRFH3d+0/x5KLgPF1wPB +Uq7NASAeLxSKOpGodI8oapL7uNMWMpIuoh+388RjwYATISkfjpQFM7CveHkQqqWy +nZKNjmYVPQ9up3nhZA== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/palantiri/key.pem b/ansible/roles/docker_client/files/certs/palantiri/key.pem new file mode 100644 index 00000000..2f4b98fc --- /dev/null +++ b/ansible/roles/docker_client/files/certs/palantiri/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEArnQ1Ax0MTsHG6T6xIjAKbbUdAbvLtubOFVmKzBLCgZ2dlDMs +epVkgtVyLRtwm4Q69gdlQvt+OyVbUnk+x89mNnRlBFU+J1AHJRlA0lqRE6vgvVLj +OlxbMtRx0Worl493PrkEfW7Rh/R72+Kt9Sht631PAO13dFqTEt/FzLTHxyFMc831 
+ooBaPUyauM4MW+gcrBWCh0XGiy0K8Y7J6aXoZfIzCpKuZL1vz/ijyTmsUdLvCATa +O+aVHMWDOL/QXJEM2AeFzd97YwDT7EHiyn8InUhBduZfsUU3Pq8Gr7WnPDhgYQXm +oFa3ZE3/2hUN/nF3OWbysN/+ZqiLMEE7ofITWwIDAQABAoIBACtkUi9FEWOhhUVM +x6hWRLHHA8EDogcLX6Ul0YUzAVhWCPTsyZpwAEEesDiSA69a84NwkXpYrawufYmF +CN2QM79Fsj6iqUQD/tL2dg4cYst+y/arYEOK51A8UVciTldzYtiiEoDUiJ+Fay1l +M5mwBa82SulTJ4w0XzKpoDxrlulrcMK2ADlTEfVuv98fNFCmlbBuHd3HyL6wo5tN +fm6RpKhIxskx9A3M2do1T93JwgoKKBAUv+FfRef0nLSKd5KQP2WWutab61aNuIMO +Q5HKAvmIUPaa5eMcYS+aVhRr78KZV4p8e//L0hmaIWerh7j2TV6oCsbGbeBp4xaL +yMQz3aECgYEA1dNl5IFedOZSsYZiKgq0al/TxFL8EIEZDoNbBL2gdesMrXc6ibmF +v6tXVuw+JmRtvDj/eJVmCax6Sa7YPOLq0HrnsatmxiWLbuDZqnjRq3w6v4iR8JUO +PROAFzMmq7A/9nV/Sp1Hsy5JyB9u/FStAGA3o5d2NBSKHfyab0cPLdECgYEA0NzT +YI7W/xJWD3yXxXxkG7jrcRizVOo5I20B0XOyGXvE6wMB//SXOW5wXRmjsLWwK6DC +Ipg9b0rgoKcEpLfjrDLQc3vFfQaqGSHaOas3KMb7iKNXvW2/pjpkklCyCEZVslYg +wfsthC5JRUD8jD7FmwA8TjkB8Qj0fM4nrj/lXWsCgYBdS86yhCvlj7VXFg5wz5jm +rQcmKxsmTF3VckTdbau8p6ExcstM2nDcL3N9TrjTYZBrxubZAz6xS9tTmQFql4vj +CS1c3S+Ll0geSedPNUYur5amiX073p2G0qJZ/QyLbuyJqbyvnq5xJOo2clinqn6N +om3EaEq/8JJPAS+VDAk4cQKBgAsfV1pTAPwAW/jjJ6nqn+IiDNeu+w3Qk05hpbEj +uh5FSw1A0cBxpAP4757lmKP2aPVJ0EzLUqo0kJW9jy++Yy9uXU9pDfFEg5ZnCRn9 +Fijjv81c9XUu/P+7KFPqXDC4niOrFn9+AsSBOoGnnktRpHBGqO95dbMCSRLEX7SV +zn8VAoGBAJ8QspHft1KN1bAqFetHAgvZwtjc94MJRvaTBVu7wf7dEAX6U64s0k8F +PT+q1fINa2fab8u1lweemEIehc8XXO3eiMFDjt3/ytqT1weI7F5x+OYJg7/tRdP3 +jrQ0xTMaXXoJRqN7qbR7yCi/kgCKrZKW/ez56r04ExW4O52Vg8LO +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/sauron/cert.pem b/ansible/roles/docker_client/files/certs/sauron/cert.pem new file mode 100644 index 00000000..f73a3e1d --- /dev/null +++ b/ansible/roles/docker_client/files/certs/sauron/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaTCCAlGgAwIBAgIJANNqTOArCKxTMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ 
+bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE3 +MDUxNzIxNDIzM1oXDTI3MDUxNTIxNDIzM1owETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5dj5OU4OGn99HEDBEYyp/JlV +3J3bcI+sLeJWQ2F39Kk0T7rrQkX6g5OnAlTqOI3v6oWZ/90QrduPqeGHxoNipzs1 +w2SYg2A0bHTKafvXRjdFOkcQbmHKn/gEWuoTMvNzUl0Dr793z+o/KUghMiTff+1c +K7RPIAyw0euIh3QyEg0KY8+VO46hiDGZyaD/J6mckKWmpt3R0WiPsT+cZsREXEXZ +i/j8YwvPtV0GT5BVKPOfKyblxzVDGVzoplwnivBebLyi8jzbxXArKLFePtPCCtjb +WvJa2gn2cBmiDCW6nAa/uzx3cp9S1kOth+3O55Hu1IsaacE0+aE0RizuUL1JEQID +AQABo0QwQjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwIQYDVR0RBBow +GIcEfwAAAYIJbG9jYWxob3N0ggVzd2FybTANBgkqhkiG9w0BAQsFAAOCAQEACM9z +Cq8BK5B7ZBWCqKq1xQjlDxoppvFWt53qk+/4UZttRThhL4EeUHA8TzrE1RlK2LP2 +rMN2WurvVJWdTyurfaYxV9xEe5d2h9jviQDEPhw2cTIpw6ieWCIm8YcNdwDcN8Cr +oLYFWFps406uPW7TjisigH0ddTz35Qdlr0nDplTsHnHinnii4YDMhyChI+2+iNNW +4VabYJjJfSD91f3hsTc+phQCksj2fE4/cYdCjSJFCIl6Nv8Z8aWEvlQQxPdNnzJ9 +pOEgiETEPAAvh/yKkUo8EpdH4KDIEyORIxO3/XRqEEATLY4+AdK5/3NySOHGUwCJ +nGNFkC3b8TWpX5F6MA== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/sauron/key.pem b/ansible/roles/docker_client/files/certs/sauron/key.pem new file mode 100644 index 00000000..83a8d899 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/sauron/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA5dj5OU4OGn99HEDBEYyp/JlV3J3bcI+sLeJWQ2F39Kk0T7rr +QkX6g5OnAlTqOI3v6oWZ/90QrduPqeGHxoNipzs1w2SYg2A0bHTKafvXRjdFOkcQ +bmHKn/gEWuoTMvNzUl0Dr793z+o/KUghMiTff+1cK7RPIAyw0euIh3QyEg0KY8+V +O46hiDGZyaD/J6mckKWmpt3R0WiPsT+cZsREXEXZi/j8YwvPtV0GT5BVKPOfKybl +xzVDGVzoplwnivBebLyi8jzbxXArKLFePtPCCtjbWvJa2gn2cBmiDCW6nAa/uzx3 +cp9S1kOth+3O55Hu1IsaacE0+aE0RizuUL1JEQIDAQABAoIBAGQ3cZsx1GZrVgKe +nU/olCmGNfDGh2qRylILUO96LRJpQFzPNEqmNm9uTrBuk91qzcPk0tvD6MTVadN5 +eXjtynL+FxgF4ZXOuazcU3FYlGmSKcYaPCjUzMNpIYT+cLaTfaHY2oWjpzrpePan +84lomGav+LM5oBJqViKBXvsoh7hkUazS0UK77NbaDxjMZT0As5EKaVmAXYxohsYJ +VdL48YRLW7o3kkxhDKS4UJYyy+UByDmJ5j153npg3FX5g21UWukw7bkDnJzVzpnI 
+TRo93KvUywXkuaDVOS4EIh5oxkLIbIrLsV3X0VdX6FZ9O3SXFuJCjkBjtSY1+tsM +gq1EBgECgYEA9E64364je0mcJM/g3B0YkkUW3L9mR8Q/hF4D4na8w7G9UXUn8Hje +/+zOzuQn89nLb1+dw6eNw2BYj1Ol6hda3beiN2+fyg+l7Iz+uTR5RLW59r7z0WTU +YOLyjcuu7PGaUB2jNf0y6XQDgNtrAEP2Misc1lClRZrInodXnn0xZTECgYEA8NkW +MFZ/u1TI6s3WsexCW7Tw9y7hd/coGhfhN0vlT6ZlO8Eas6uThBwdibPIeOQHkwVP +dT/uogGSaZW+Wmq+WBnUTvxTbjcrn6nV+Hg5ukzpAbOfAPSR8PBTHF0bSEyAYm9l +Rlq5cD6ug9KCadpw1myooOdTMpJgWOMz+UtmqeECgYEAoFAsFnORyuwRr0JN3oKl +Rg5aIz7hogRQXc7Ho8qgsZWnkpVbQPw/8RZ22uJyqRxo10uaHfKMHwHQ+zQ7snHi +6Yz/wHraEuDT03buC6A4V3McdaL2IZiVmII0AgH/ZnJf/WSFXf8pcnHzfpH9E+6E +1oTd5nCDEzqvnP9Y6qJiUHECgYBBxmei59yWYn9EcNaTXLx6ox2zw9/bjWosRh1B +pr6HVCANg50xOEhfKfZ/JtQ2/795zVd34+/d2qQLpdOIieJV2Sf8Mjjuvq5xOkav +RtYtJNf1mQJxlllM/bUU5/CNfCcLuvRjlF9UYiny4kivc/Kg/KfI2aQFCai6XcJR +8N+mwQKBgApvV7LQMq7t7AizlnjcI8/9k8iBQzB45+MJ7w1HX/Ftzd9HUMJhyPKj +K7UqKb0LYD1zK4h+f+64WWd8DO3D6dVeKDoJUNrZPcobUCTfj4MfxHmtVOrelLy9 +TFzgoOKof7xaQiu5yfv3ugSk73WePQC4JC40dHSbwuIrg8xkg3pE +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/swarm-manager/cert.pem b/ansible/roles/docker_client/files/certs/swarm-manager/cert.pem new file mode 100644 index 00000000..8e5606b3 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/swarm-manager/cert.pem @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDaTCCAlGgAwIBAgIJANNqTOArCKxVMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE3 +MDUxNzIxNDIzNFoXDTI3MDUxNTIxNDIzNFowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAq6H2/VkzMgBu05RvyaF8cOWG +j+bFl5bfZx5FGbi0+v/kxP6BABcC2jxR9+JQ7jf+CNaOmkKzOXTvBQrG5inlKdN/ +FOp9YA/7AkqaxqGuQMg3Fxa5497qaZskbr6r0XP9xytJwAkeLCubGsJj5nCPl5HP +uThkGhRdOlWsz1JgEgfN+2tUslsKYYOnQCaZH4VH0YvMB/UP91pq7HKiLkO5Zu6H +1rj/l0vZxZZZPvPb2qP9YM6/t9IfA6aqNT8BsbJJCEGeiWZTN6hiXy60aKTm97T4 
+AB0cGEBAu+CsDHf8DTdkvouZLmLSV3SQl/0XnPyVsimmzcYQryMERmZ0RUhAVQID +AQABo0QwQjAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwEwIQYDVR0RBBow +GIcEfwAAAYIJbG9jYWxob3N0ggVzd2FybTANBgkqhkiG9w0BAQsFAAOCAQEAcd8N +FazFT6aQ9Sqxj6dYPzVfTQFMw1o32pNjfY0mzCf3AHuzCeIBr4Xd5LrpT9pmPfqt +2Dq2JQZ6UgxIM+vaXN7WljdfuMLXDXimHglJ6SXgdO+dpcUMiwen7xsSPwQesVqZ +HJs5IzwexdvzDulmD9JcCbEbQwKXOfyQputyQwH3u6Z1EofD4vO6xQxMJ/j7mPyb +uX2gQ7M61lCocu7/+NhESn0uhiuuube9XWx507kBzEfJSzzcLbcshQrLeoHt5dIW +AaPvl+rpV63fJJZGKWoQh29xK1FPKn48b3zLnDESehGQz1um9H6ijeYqRBBbq4ag +75yBdFobnROjwaV6wA== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/swarm-manager/key.pem b/ansible/roles/docker_client/files/certs/swarm-manager/key.pem new file mode 100644 index 00000000..4ff3da4a --- /dev/null +++ b/ansible/roles/docker_client/files/certs/swarm-manager/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAq6H2/VkzMgBu05RvyaF8cOWGj+bFl5bfZx5FGbi0+v/kxP6B +ABcC2jxR9+JQ7jf+CNaOmkKzOXTvBQrG5inlKdN/FOp9YA/7AkqaxqGuQMg3Fxa5 +497qaZskbr6r0XP9xytJwAkeLCubGsJj5nCPl5HPuThkGhRdOlWsz1JgEgfN+2tU +slsKYYOnQCaZH4VH0YvMB/UP91pq7HKiLkO5Zu6H1rj/l0vZxZZZPvPb2qP9YM6/ +t9IfA6aqNT8BsbJJCEGeiWZTN6hiXy60aKTm97T4AB0cGEBAu+CsDHf8DTdkvouZ +LmLSV3SQl/0XnPyVsimmzcYQryMERmZ0RUhAVQIDAQABAoIBAGFBwWUk88bEyghp +F8OJF6wd1Fec7KEO5H9ZfsyphxyofulGUrFLWNlzzvsJqtSis/gsJNizLT5EzeDs +j3MDkJFYAK5Ihk02IQXWYshl/AlHqzgRL7s5qVm4iTecIl7pPIhtNjh0sj1TZGwQ +dW2yoRvBi5UoqEjC+2VAAVHmGvx7s/mVrTGJVdUL0jWU8eXfxBLOi9Re+twk2oyT +NvmpBWtNCejAIZ+++WINEWgRbtNLQKMtH6mrQ4nDGVDxe4LG1gNysMGw5WDg91RD +LGfp19tv+oKpys2eDaou0Yk/v6pEZi7mGbigtPsT3DcpB+tKcpo3TosXX8ngqUgC +gTVs+AECgYEA2aPq68iIpT6t67habcjPOZPmWu56LN9uvVT1VBjL1iSyT4Uc2yCh +WuTbjkLexgHHpQLZRiLmofUtT/dtGByr1LALgYWOVQfI1WclXOA7aI1Tf9KWVaR7 +nZE+pA+wAxtG3TyYaFLjY9zEF/d0BBvvdajXaM6u8zsKPDkrVdqyIlUCgYEAyeIm +njRpYOhlJBYxffNNx5ReJx7EwU7c6iBoX3TBd59/7OuYJEV4eevcAIuhq4Lw38qt +H3rGE51wS0aP6ziRkIsiiqUacdwWqzctMJryW1qTSA65hWrHtbqT1S7BnEUpv5AJ 
+wWuw3CJL4z19TT2vvbYa1bCTOXy5Z0HEWBgHpgECgYEAnuMpbUswXxr8U9t5wS5a +Atnz0SzK6xmPOg/dLrEPlt2hg6ZVY7BVvAraMuikxH+mJdSHlMmEAHcCxp+G+JRt +6c9AByXevDr7prgvsbdOB80tSWMPN6TmTiksRw/g2tO+D2PejEL/GPeh/TE9dIaE +cix6XlyNoqrWlRLTjVEkvSUCgYBOjGJQLb5OiT1I9CWfhrndjFmxyOPu/psvsRpL +AaeAvptMeEIC4tDER0ObJrjpfZHw0O/ZDrDg0uHtN/ApKiJg5fGzlhea83ISfe5k +OfVQHAawA6KlSW2YxJZeXMnR3osm0IkrK5ZtoB58tOD626R0INWCDSjLNj8A8PLO +/9KCAQKBgEsgo1IIRlOLpwScr8cWsYB4KR7Z+qtndJwozzkiX8YCJVyJQs1EaxcB +t5b9001bQ6iSuOXGksTAI4dSw2JI0JZsDPinS2iQEAGIHjsbwd3yl4NUS5q4RRwA +f+A6yeXOMO6KDF/7cFLECzZXCUKN8Jv0Phe6SCawY/r60sXlfsz1 +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/scripts/genClientCert.sh b/ansible/roles/docker_client/scripts/genClientCert.sh new file mode 100755 index 00000000..cf9047e4 --- /dev/null +++ b/ansible/roles/docker_client/scripts/genClientCert.sh @@ -0,0 +1,56 @@ +#!/bin/bash +if [[ $1 = '' ]]; then + echo 'script requires a client name' + exit 1 +fi + +echo 'WARN: hard coded alpha-api-old gamma-services and beta-services for SWARM' +if [[ $2 = '' ]]; then + echo 'script requires a path for secrets' + exit 1 +fi + +CERT_PATH=$2 +if [ ! 
-d "$CERT_PATH" ]; then + echo 'The specified directory for certs does not exist' +fi + +CLIENT=./$CERT_PATH/$1 +mkdir $CLIENT + +# generate key for client +openssl genrsa -out "$CLIENT/key.pem" 2048 +chmod 400 "$CLIENT/key.pem" + +# generate CSR for client +openssl req \ + -subj '/CN=client' \ + -new \ + -key "$CLIENT/key.pem" \ + -out "$CLIENT/client.csr" + +chmod 400 "$CLIENT/client.csr" + +echo extendedKeyUsage=clientAuth,serverAuth > "$CLIENT/extfile.cnf" +echo subjectAltName=IP:127.0.0.1,DNS:localhost,DNS:swarm >> "$CLIENT/extfile.cnf" + +# generate cert for client +openssl x509 \ + -req \ + -days 3650 \ + -sha256 \ + -in "$CLIENT/client.csr" \ + -CA $CERT_PATH/ca.pem \ + -CAkey $CERT_PATH/ca-key.pem \ + -passin file:$CERT_PATH/pass \ + -CAcreateserial \ + -out "$CLIENT/cert.pem" \ + -extfile "$CLIENT/extfile.cnf" + +# set permissions for deploy +chmod 644 "$CLIENT/cert.pem" +chmod 644 "$CLIENT/key.pem" + +# cleanup files we do not need +rm $CLIENT/extfile.cnf +rm $CLIENT/client.csr diff --git a/ansible/roles/docker_client/tasks/main.yml b/ansible/roles/docker_client/tasks/main.yml new file mode 100644 index 00000000..ffe5f0a8 --- /dev/null +++ b/ansible/roles/docker_client/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- set_fact: + ca_data: "{{ lookup('file', '{{ docker_client_root }}/ca.pem') }}" + cert_data: "{{ lookup('file', '{{ docker_client_root }}/{{ name }}/cert.pem') }}" + key_data: "{{ lookup('file', '{{ docker_client_root }}/{{ name }}/key.pem') }}" + +- name: create configMap folder + file: + state: directory + path: "{{ config_maps_path }}" + +- name: create docker ssl cert config maps + template: + dest: "{{ config_maps_path }}/{{ name }}-docker-ssl-certs.yml" + src: certs.yml diff --git a/ansible/roles/docker_client/templates/certs.yml b/ansible/roles/docker_client/templates/certs.yml new file mode 100644 index 00000000..228710b4 --- /dev/null +++ b/ansible/roles/docker_client/templates/certs.yml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ConfigMap 
+metadata: + name: {{ name }}-docker-ssl-certs +data: + ca.pem: | + {{ ca_data | indent(4) }} + cert.pem: | + {{ cert_data | indent(4) }} + key.pem: | + {{ key_data | indent(4) }} diff --git a/ansible/roles/docker_squash/tasks/main.yml b/ansible/roles/docker_squash/tasks/main.yml new file mode 100644 index 00000000..db727e42 --- /dev/null +++ b/ansible/roles/docker_squash/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: Check to see if pip is already installed. + become: true + delegate_to: "{{ builder }}" + command: "pip --version" + ignore_errors: true + changed_when: false # read-only task + check_mode: no + register: pip_is_installed + +- name: Download pip. + become: true + delegate_to: "{{ builder }}" + get_url: url=https://bootstrap.pypa.io/get-pip.py dest=./ + when: pip_is_installed.rc != 0 + +- name: Install pip. + delegate_to: "{{ builder }}" + command: "python ./get-pip.py" + become: true + when: pip_is_installed.rc != 0 + +- name: Install docker-squash + delegate_to: "{{ builder }}" + become: true + pip: + name: docker-squash + executable: pip diff --git a/ansible/roles/docks-psad/README.md b/ansible/roles/docks-psad/README.md new file mode 100644 index 00000000..e54c80b2 --- /dev/null +++ b/ansible/roles/docks-psad/README.md @@ -0,0 +1,6 @@ +iptables +=========== + +This role installs and configure psad and syscfg + +This role is to be run on docks to effectivly help limit ratelimiting and stop containers from accessing things they shouldn't diff --git a/ansible/roles/docks-psad/defaults/main.yml b/ansible/roles/docks-psad/defaults/main.yml new file mode 100644 index 00000000..b08ddcc8 --- /dev/null +++ b/ansible/roles/docks-psad/defaults/main.yml @@ -0,0 +1,3 @@ +error_mail: ops@runnable.com + +psad_script_folder: /opt/runnable/psad diff --git a/ansible/roles/docks-psad/handlers/main.yml b/ansible/roles/docks-psad/handlers/main.yml new file mode 100644 index 00000000..2215c843 --- /dev/null +++ b/ansible/roles/docks-psad/handlers/main.yml @@ -0,0 +1,11 
@@ +--- +- name: apply sysctl by service procps start + become: true + command: service procps start + +- name: restart psad + become: true + service: + name=psad + state=restarted + enabled=true diff --git a/ansible/roles/docks-psad/tasks/main.yml b/ansible/roles/docks-psad/tasks/main.yml new file mode 100644 index 00000000..b092936b --- /dev/null +++ b/ansible/roles/docks-psad/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- name: create folder + become: true + file: + path={{ psad_script_folder }} + state=directory + recurse=yes + +- name: create sysctl config file + become: true + tags: [ sysctl ] + template: + src=60-runnable_sysctl.conf.j2 + dest=/etc/sysctl.d/60-runnable_sysctl.conf + mode=644 + group=root + owner=root + notify: + - apply sysctl by service procps start + +- name: installing psad to latest version + tags: [ psad ] + become: true + apt: + pkg=psad + state=latest + update_cache=yes + cache_valid_time=3600 + notify: + - restart psad + +- name: psad signature update + become: true + shell: psad --sig-update && psad -H + register: command_result + failed_when: "'FAILED' in command_result.stderr" + + +- name: copy psad detected script + become: true + tags: [ psad, psad_script ] + template: + src=psad-script.sh + dest={{ psad_script_folder }}/psad-script.sh + mode=u+x + +- name: copy pasd config script + become: true + tags: [ psad, psad_config ] + template: + dest=/etc/psad/psad.conf + src=psad.conf.j2 + owner=root + mode=0644 + notify: + - restart psad diff --git a/ansible/roles/docks-psad/templates/60-runnable_sysctl.conf.j2 b/ansible/roles/docks-psad/templates/60-runnable_sysctl.conf.j2 new file mode 100644 index 00000000..0ba78208 --- /dev/null +++ b/ansible/roles/docks-psad/templates/60-runnable_sysctl.conf.j2 @@ -0,0 +1,37 @@ +# Disable ping +net.ipv4.icmp_echo_ignore_all=1 + +# Turn on Source Address Verification in all interfaces to prevent some +# spoofing attacks +net.ipv4.conf.default.rp_filter=1 +net.ipv4.conf.all.rp_filter=1 + +# Do not accept 
IP source route packets (we are not a router) +net.ipv4.conf.default.accept_source_route=0 +net.ipv4.conf.all.accept_source_route=0 +net.ipv6.conf.default.accept_source_route=0 +net.ipv6.conf.all.accept_source_route=0 + +# Disable ICMP redirects. ICMP redirects are rarely used but can be used in +# MITM (man-in-the-middle) attacks. Disabling ICMP may disrupt legitimate +# traffic to those sites. +net.ipv4.conf.default.accept_redirects=0 +net.ipv4.conf.all.accept_redirects=0 +net.ipv6.conf.default.accept_redirects=0 +net.ipv6.conf.all.accept_redirects=0 + +# Ignore bogus ICMP errors +net.ipv4.icmp_echo_ignore_broadcasts=1 +net.ipv4.icmp_ignore_bogus_error_responses=1 +net.ipv4.icmp_echo_ignore_all=0 + +# Do not log Martian Packets (impossible packets) +net.ipv4.conf.default.log_martians=0 +net.ipv4.conf.all.log_martians=0 + +# Change to 1 to enable TCP/IP SYN cookies This disables TCP Window Scaling +# (http://lkml.org/lkml/2008/2/5/167) +net.ipv4.tcp_syncookies=0 +net.ipv4.tcp_max_syn_backlog = 2048 +net.ipv4.tcp_synack_retries = 2 +net.ipv4.tcp_syn_retries = 5 diff --git a/ansible/roles/docks-psad/templates/psad-script.sh b/ansible/roles/docks-psad/templates/psad-script.sh new file mode 100644 index 00000000..4292aab9 --- /dev/null +++ b/ansible/roles/docks-psad/templates/psad-script.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +alerted_ipaddress="${1}" +echo "looking for container with ip = ${alerted_ipaddress}" + +for container_id in $(docker ps -qa); do + container_ipaddress="$(docker inspect --format "{{ '{{' }} .NetworkSettings.IPAddress {{ '}}' }}" ${container_id})" + echo "checking ${container_id}: ${container_ipaddress}" + + if [[ "${container_ipaddress}" == "${alerted_ipaddress}" ]]; then + psad_logs="" + psad_logs_files="$(ls /var/log/psad/$alerted_ipaddress/*_email_alert)" + echo "found container_id, getting logs for ${container_id}: ${container_ipaddress} from ${psad_logs_files}" + + for log_file in "${psad_logs_files}"; do + psad_logs="${psad_logs}$(sed '/Whois 
Information/,$d' ${log_file})" + done + + data_file=`tempfile` + echo "generating data file at ${data_file}" + echo '{' > ${data_file} + echo '"containerId": "'"${container_id}"'",' >> ${data_file} + echo '"hostnames": "'`hostname -I | cut -d' ' -f1`'",' >> ${data_file} + echo '"logs": "'${psad_logs}'"'>> ${data_file} + echo '}' >> ${data_file} + + echo "sending alert" `cat ${data_file}` + curl --header "Content-Type: application/json" \ + -X POST \ + --data "@${data_file}" \ + "http://{{ drake_hostname }}/psad" + + rm "${data_file}" + fi +done diff --git a/ansible/roles/docks-psad/templates/psad.conf.j2 b/ansible/roles/docks-psad/templates/psad.conf.j2 new file mode 100644 index 00000000..c4dbf69b --- /dev/null +++ b/ansible/roles/docks-psad/templates/psad.conf.j2 @@ -0,0 +1,582 @@ +### Supports multiple email addresses (as a comma separated +### list). +EMAIL_ADDRESSES {{ error_mail }}; + +### Machine hostname +HOSTNAME {{ ansible_hostname }}; + +### Specify the home and external networks. Note that by default the +### ENABLE_INTF_LOCAL_NETS is enabled, so psad automatically detects +### all of the directly connected subnets and uses this information as +### the HOME_NET variable. +HOME_NET any; +EXTERNAL_NET any; + +### The FW_SEARCH_ALL variable controls how psad will parse iptables +### messages. If it is set to "Y" then psad will parse all iptables +### messages for evidence of scan activity. If it is set to "N" then +### psad will only parse those iptables messages that contain logging +### prefixes specified by the FW_MSG_SEARCH variable below. Logging +### prefixes are set with the --log-prefix command line option to iptables. +### Setting FW_SEARCH_ALL to "N" is useful for having psad only analyze +### iptables messages that are logged out of a specific iptables chain +### (multiple strings can be searched for, see the comment above the +### FW_MSG_SEARCH variable below) or a specific logging rule for example. 
+### FW_SEARCH_ALL is set to "Y" by default since usually people want psad +### to parse all iptables messages. +FW_SEARCH_ALL Y; + +### The FW_MSG_SEARCH variable can be modified to look for logging messages +### that are specific to your firewall configuration (specified by the +### "--log-prefix" option. For example, if your firewall uses the +### string "Audit" for packets that have been blocked, then you could +### set FW_MSG_SEARCH to "Audit"; The default string to search for is +### "DROP". Both psad and kmsgsd reference this file. NOTE: You can +### specify this variable multiple times to have psad search for multiple +### strings. For example to have psad search for the strings "Audit" and +### "Reject", you would use the following two lines: +#FW_MSG_SEARCH Audit; +#FW_MSG_SEARCH REJECT; +FW_MSG_SEARCH DROP; + +### Set the type of syslog daemon that is used. The SYSLOG_DAEMON +### variable accepts four possible values: syslogd, syslog-ng, ulogd, +### or metalog. Note: this variable is only used if ENABLE_SYSLOG_FILE is +### disabled, and this in turn will mean that the legacy kmsgsd daemon will +### collect firewall logs from syslog via the old named pipe mechanism. +SYSLOG_DAEMON syslogd; + +### What type of interface configuration do you use? Set this variable to +### "iproute2" if you want to use the iproute2 type configuration. +### iproute2 does not use aliases for multi-homed interfaces and +### ifconfig does not show secondary addresses for multi-homed interfaces. +#IFCFGTYPE iproute2; +IFCFGTYPE ifconfig; + +### Danger levels. These represent the total number of +### packets required for a scan to reach each danger level. +### A scan may also reach a danger level if the scan trips +### a signature or if the scanning ip is listed in +### auto_ips so a danger level is automatically +### assigned. +DANGER_LEVEL1 5; ### Number of packets. 
+DANGER_LEVEL2 15; +DANGER_LEVEL3 150; +DANGER_LEVEL4 1500; +DANGER_LEVEL5 10000; + +### Set the interval (in seconds) psad will use to sleep before +### checking for new iptables log messages +CHECK_INTERVAL 5; + +### Search for snort "sid" values generated by fwsnort +### or snort2iptables +SNORT_SID_STR SID; + +### Set the minimum range of ports that must be scanned before +### psad will send an alert. The default is 1 so that at +### least two port must be scanned (p2-p1 >= 1). This can be set +### to 0 if you want psad to be extra paranoid, or 30000 if not. +PORT_RANGE_SCAN_THRESHOLD 5; + +### For IP protocol scan detection (nmap -sO). While it may be relatively +### common for a host to trigger on tcp, udp, and icmp, it is more unusual if +### a host triggers on, say, five different IP protocols +PROTOCOL_SCAN_THRESHOLD 5; + +### If "Y", means that scans will never timeout. This is useful +### for catching scans that take place over long periods of time +### where the attacker is trying to slip beneath the IDS thresholds. +ENABLE_PERSISTENCE Y; + +### This is used only if ENABLE_PERSISTENCE = "N"; +SCAN_TIMEOUT 3600; ### seconds + +### Specify how often to timeout old scan data relative to CHECK_INTERVAL +### iterations. This feature is only used if ENABLE_PERSISTENCE is disabled. +### Note that for psad processes that have tracked a lot of scans, it is +### advisable to leave this threshold at the default value of 5 or greater +### because the scan tracking hash may be quite large. +PERSISTENCE_CTR_THRESHOLD 5; + +### Limit the number of src->dst IP pairs that psad will track. The default +### is zero (i.e. unlimited), but if psad is running on a system with limited +### memory, this can be handy to restrict psad's memory usage. It is best to +### combine this option with disabling ENABLE_PERSISTENCE so that older scans +### are deleted and therefore newer scans will on average continue to be +### tracked. 
A good non-zero value is, say, 50000, but this will vary +### depending on available system memory. +MAX_SCAN_IP_PAIRS 0; + +### If "Y", means all signatures will be shown since +### the scan started instead of just the current ones. +SHOW_ALL_SIGNATURES N; + +### Allow reporting methods to be enabled/restricted. This keyword can +### accept values of "nosyslog" (don't write any messages to syslog), +### "noemail" (don't send any email messages), or "ALL" (to generate both +### syslog and email messages). "ALL" is the default. Both "nosyslog" +### and "noemail" can be combined with a comma to disable all logging +### and alerting. +ALERTING_METHODS noemail; + +### By default, psad acquires iptables log data from the /var/log/messages +### file which the local syslog daemon (usually) writes iptables log messages +### to. If the ENABLE_SYSLOG_FILE variable below is set to "N", then psad +### reconfigures syslog to write iptables log data to the +### /var/lib/psad/psadfifo fifo file where the messages are picked up by kmsgsd +### written to the file /var/log/psad/fwdata for analysis by psad. On some +### systems, having syslog communicate log data to kmsgsd can be problematic +### (syslog configs and external factors such as Apparmor and SELinux can play +### a role here), so leaving the ENABLE_SYSLOG_FILE variable set to "Y" is +### usually recommended. +ENABLE_SYSLOG_FILE Y; +IPT_WRITE_FWDATA Y; +IPT_SYSLOG_FILE /var/log/syslog; + +### When enabled, this instructs psad to write the "msg" field +### associated with Snort rule matches to syslog. +ENABLE_SIG_MSG_SYSLOG Y; +SIG_MSG_SYSLOG_THRESHOLD 10; +SIG_SID_SYSLOG_THRESHOLD 10; + +### TTL values are decremented depending on the number of hops +### the packet has taken before it hits the firewall. We will +### assume packets will not jump through more than 20 hops on +### average. 
+MAX_HOPS 20; + +### Do not include any timestamp included within kernel logging +### messages (Ubuntu systems commonly have this) +IGNORE_KERNEL_TIMESTAMP Y; + +### FIXME: try to mitigate the affects of the iptables connection +### tracking bug by ignoring tcp packets that have the ack bit set. +### Read the "BUGS" section of the psad man page. Note that +### if a packet matches a snort SID generated by fwsnort (see +### http://www.cipherdyne.org/fwsnort/) +### then psad will see it even if the ack bit is set. See the +### SNORT_SID_STR variable. +IGNORE_CONNTRACK_BUG_PKTS Y; + +### define a set of ports to ignore (this is useful particularly +### for port knocking applications since the knock sequence will +### look to psad like a scan). This variable may be defined as +### a comma-separated list of port numbers or port ranges and +### corresponding protocol, For example, to have psad ignore all +### tcp in the range 61000-61356 and udp ports 53 and 5000, use: +### IGNORE_PORTS tcp/61000-61356, udp/53, udp/5000; +IGNORE_PORTS NONE; + +### allow entire protocols to be ignored. This keyword can accept +### a comma separated list of protocols. Each protocol must match +### the protocol that is specified in an iptables log message (case +### insensitively, so both "TCP" or "tcp" is ok). +### IGNORE_PROTOCOL tcp,udp; +IGNORE_PROTOCOLS NONE; + +### allow packets to be ignored based on interface (this is the +### "IN" interface in iptables logging messages). +IGNORE_INTERFACES NONE; + +### Ignore these specific logging prefixes +IGNORE_LOG_PREFIXES NONE; + +### Minimum danger level a scan must reach before any logging or +### alerting is done. The EMAIL_ALERT_DANGER_LEVEL variable below +### only refers to email alerts; the MIN_DANGER_LEVEL variable +### applies to everything from email alerts to whether or not the +### IP directory is created within /var/log/psad/. 
Hence +### MIN_DANGER_LEVEL should be set less than or equal to the value +### assigned to the EMAIL_ALERT_DANGER_LEVEL variable. +MIN_DANGER_LEVEL 1; + +### Only send email alert if danger level >= to this value. +EMAIL_ALERT_DANGER_LEVEL 1; + +### Enable detection of malicious activity that is delivered via IPv6. If +### ip6tables is not logging any traffic, then psad won't know anything +### about IPv6, or this variable can be set to "N" (this would be slightly +### faster if ip6tables isn't logging anything). +ENABLE_IPV6_DETECTION Y; + +### Treat all subnets on local interfaces as part of HOME_NET (this +### means that these networks do not have to be manually defined) +ENABLE_INTF_LOCAL_NETS Y; + +### Include MAC addresses in email alert +ENABLE_MAC_ADDR_REPORTING N; + +### Look for the iptables logging rule (fwcheck_psad is executed) +ENABLE_FW_LOGGING_CHECK Y; + +### Send no more than this number of emails for a single +### scanning source IP. Note that enabling this feature may cause +### alerts for real attacks to not be generated if an attack is sent +### after the email threshold has been reached for an IP address. +### This is why the default is set to "0". +EMAIL_LIMIT 0; + +### By default, psad maintains a counter for each scanning source address, +### but by enabling this variable psad will maintain email counters for +### each victim address that is scanned as well. +ENABLE_EMAIL_LIMIT_PER_DST N; + +### If "Y", send a status email message when an IP has reached the +### EMAIL_LIMIT threshold. +EMAIL_LIMIT_STATUS_MSG Y; + +### This variable is used to have psad throttle the email alerts it sends, +### and implemented as a per-IP threshold. That is, if EMAIL_THROTTLE +### is set to "10", then psad will only send 1/10th as many emails for each +### scanning IP as it would have normally. All other variables also apply, +### so this throttle value is taken into account after everything else. The +### default of zero means to not apply any throttling. 
+EMAIL_THROTTLE 0; + +### If "Y", send email for all newly logged packets from the same +### source ip instead of just when a danger level increases. +ALERT_ALL Y; + +### If "Y", then psad will import old scan source ip directories +### as current scans instead of moving the directories into the +### archive directory. +IMPORT_OLD_SCANS N; + +### syslog facility and priority (the defaults are usually ok) +### The SYSLOG_FACILITY variable can be set to one of LOG_LOCAL{0-7}, and +### SYSLOG_PRIORITY can be set to one of LOG_INFO, LOG_DEBUG, LOG_NOTICE, +### LOG_WARNING, LOG_ERR, LOG_CRIT, LOG_ALERT, or LOG_EMERG +SYSLOG_IDENTITY psad; +SYSLOG_FACILITY LOG_LOCAL7; +SYSLOG_PRIORITY LOG_INFO; + +### Port thresholds for logging and -S and -A output. +TOP_PORTS_LOG_THRESHOLD 500; +STATUS_PORTS_THRESHOLD 20; + +### Signature thresholds for logging and -S and -A output. +TOP_SIGS_LOG_THRESHOLD 500; +STATUS_SIGS_THRESHOLD 50; + +### Attackers thresholds for logging and -S and -A output. +TOP_IP_LOG_THRESHOLD 500; +STATUS_IP_THRESHOLD 25; + +### Specify how often to log the TOP_* information (i.e. how many +### CHECK_INTERVAL iterations before the data is logged again). +TOP_SCANS_CTR_THRESHOLD 1; + +### Send scan logs to dshield.org. This is disabled by default, +### but is a good idea to enable it (subject to your site security +### policy) since the DShield service helps to track the bad guys. +### For more information visit http://www.dshield.org +ENABLE_DSHIELD_ALERTS N; + +### dshield.org alert email address; this should not be changed +### unless the guys at DShield have changed it. +DSHIELD_ALERT_EMAIL reports@dshield.org; + +### Time interval (hours) to send email alerts to dshield.org. +### The default is 6 hours, and cannot be less than 1 hour or +### more than 24 hours. +DSHIELD_ALERT_INTERVAL 6; ### hours + +### If you have a DShield user id you can set it here. The +### default is "0". 
+DSHIELD_USER_ID 0; + +### If you want the outbound DShield email to appear as though it +### is coming from a particular user address then set it here. +DSHIELD_USER_EMAIL NONE; + +### Threshold danger level for DShield data; a scan must reach this +### danger level before associated packets will be included in an +### alert to DShield. Note that zero is the default since this +### will allow DShield to apply its own logic to determine what +### constitutes a scan (_all_ iptables log messages will be included +### in DShield email alerts). +DSHIELD_DL_THRESHOLD 0; + +### List of servers. Fwsnort supports the same variable resolution as +#### Snort. +HTTP_SERVERS $HOME_NET; +SMTP_SERVERS $HOME_NET; +DNS_SERVERS $HOME_NET; +SQL_SERVERS $HOME_NET; +TELNET_SERVERS $HOME_NET; + +#### AOL AIM server nets +AIM_SERVERS [64.12.24.0/24, 64.12.25.0/24, 64.12.26.14/24, 64.12.28.0/24, 64.12.29.0/24, 64.12.161.0/24, 64.12.163.0/24, 205.188.5.0/24, 205.188.9.0/24]; + +### Configurable port numbers +HTTP_PORTS 80; +SHELLCODE_PORTS !80; +ORACLE_PORTS 1521; + +### If this is enabled, then psad will die if a rule in the +### /etc/psad/signatures file contains an unsupported option (otherwise +### a syslog warning will be generated). +ENABLE_SNORT_SIG_STRICT Y; + +### If "Y", enable automated IDS response (auto manages +### firewall rulesets). +ENABLE_AUTO_IDS N; + +### Block all traffic from offending IP if danger +### level >= to this value +AUTO_IDS_DANGER_LEVEL 5; + +### Set the auto-blocked timeout in seconds (the default +### is one hour). +AUTO_BLOCK_TIMEOUT 3600; + +### Set the auto-blocked timeout in seconds for each danger +### level - zero means to block permanently. 
Each of these +### can be set independently +AUTO_BLOCK_DL1_TIMEOUT $AUTO_BLOCK_TIMEOUT; +AUTO_BLOCK_DL2_TIMEOUT $AUTO_BLOCK_TIMEOUT; +AUTO_BLOCK_DL3_TIMEOUT $AUTO_BLOCK_TIMEOUT; +AUTO_BLOCK_DL4_TIMEOUT $AUTO_BLOCK_TIMEOUT; +AUTO_BLOCK_DL5_TIMEOUT 0; ### permanent + +### Enable regex checking on log prefixes for active response +ENABLE_AUTO_IDS_REGEX N; + +### Only block if the iptables log message matches the following regex +AUTO_BLOCK_REGEX ESTAB; ### from fwsnort logging prefixes + +### Control whether "renew" auto-block emails get sent. This is disabled +### by default because lots of IPs could have been blocked, and psad +### should not generate a renew email for each of them. +ENABLE_RENEW_BLOCK_EMAILS N; + +### By setting this variable to N, all auto-blocking emails can be +### suppressed. +ENABLE_AUTO_IDS_EMAILS Y; + +### Enable iptables blocking (only gets enabled if +### ENABLE_AUTO_IDS is also set) +IPTABLES_BLOCK_METHOD Y; + +### Specify chain names to which iptables blocking rules will be +### added with the IPT_AUTO_CHAIN{n} keyword. There is no limit on the +### number of IPT_AUTO_CHAIN{n} keywords; just increment the {n} number +### to add an additional IPT_AUTO_CHAIN requirement. The format for this +### variable is: ,,,,, \ +### ,. +### "Target": Can be any legitimate iptables target, but should usually +### just be "DROP". +### "Direction": Can be "src", "dst", or "both", which correspond to the +### INPUT, OUTPUT, and FORWARD chains. +### "Table": Can be any iptables table, but the default is "filter". +### "From_chain": Is the chain from which packets will be jumped. +### "Jump_rule_position": Defines the position within the From_chain where +### the jump rule is added. +### "To_chain": Is the chain to which packets will be jumped. This is the +### main chain where psad rules are added. +### "Rule_position": Defines the position where rule are added within the +### To_chain. 
+### +### The following defaults make sense for most installations, but note +### it is possible to include blocking rules in, say, the "nat" table +### using this functionality as well. The following three lines provide +### usage examples: +#IPT_AUTO_CHAIN1 DROP, src, filter, INPUT, 1, PSAD_BLOCK_INPUT, 1; +#IPT_AUTO_CHAIN2 DROP, dst, filter, OUTPUT, 1, PSAD_BLOCK_OUTPUT, 1; +#IPT_AUTO_CHAIN3 DROP, both, filter, FORWARD, 1, PSAD_BLOCK_FORWARD, 1; +IPT_AUTO_CHAIN1 DROP, src, filter, INPUT, 1, PSAD_BLOCK_INPUT, 1; +IPT_AUTO_CHAIN2 DROP, dst, filter, OUTPUT, 1, PSAD_BLOCK_OUTPUT, 1; +IPT_AUTO_CHAIN3 DROP, both, filter, FORWARD, 1, PSAD_BLOCK_FORWARD, 1; + +### Flush all existing rules in the psad chains at psad start time. +FLUSH_IPT_AT_INIT Y; + +### Prerequisite check for existence of psad chains and jump rules +IPTABLES_PREREQ_CHECK 1; + +### Enable tcp wrappers blocking (only gets enabled if +### ENABLE_AUTO_IDS is also set) +TCPWRAPPERS_BLOCK_METHOD N; + +### Set the whois timeout +WHOIS_TIMEOUT 60; ### seconds + +### Set the number of times an ip can be seen before another whois +### lookup is issued. +WHOIS_LOOKUP_THRESHOLD 20; + +### Use this option to force all whois information to contain ascii-only data. +### Sometime whois information for IP addresses in China and other countries +### can contain non-ascii data. If this option is enabled, then any non- +### ascii characters will be replaced with "NA". +ENABLE_WHOIS_FORCE_ASCII N; + +### This variable forces all whois lookups to be done against the source IP +### even when they are associated with a directly connected local network. IT +### is usually a good idea to leave this setting as the default of 'N'. +ENABLE_WHOIS_FORCE_SRC_IP N; + +### Set the number of times an ip can be seen before another dns +### lookup is issued. +DNS_LOOKUP_THRESHOLD 20; + +### Enable psad to run an external script or program (use at your +### own risk!) 
+ENABLE_EXT_SCRIPT_EXEC Y; + +### Define an external program to run after a scan is caught. +### Note that the scan source ip can be specified on the command +### line to the external program through the use of the "SRCIP" +### string (along with some appropriate switch for the program). +### Of course this is only useful if the external program knows +### what to do with this information. +### Example: EXTERNAL_SCRIPT /path/to/script --ip SRCIP -v; +EXTERNAL_SCRIPT {{ psad_script_folder }}/psad-script.sh SRCIP >> {{ psad_script_folder }}/psad-script.log 2>&1; + +### Control execution of EXTERNAL_SCRIPT (only once per IP, or +### every time a scan is detected for an ip). +EXEC_EXT_SCRIPT_PER_ALERT Y; + +### Disk usage variables +DISK_CHECK_INTERVAL 300; ### seconds + +### This can be set to 0 to disable disk checking altogether +DISK_MAX_PERCENTAGE 95; + +### This can be set to 0 to have psad not place any limit on the +### number of times it will attempt to remove data from +### /var/log/psad/. +DISK_MAX_RM_RETRIES 10; + +### Enable archiving of old scan directories at psad startup. +ENABLE_SCAN_ARCHIVE N; + +### Truncate fwdata file at startup +TRUNCATE_FWDATA Y; + +### Only archive scanning IP directories that have reached a danger +### level greater than or equal to this value. Archiving old +### scanning ip directories only takes place at psad startup. +MIN_ARCHIVE_DANGER_LEVEL 1; + +### Email subject line config. Change these prefixes if you want +### psad to generate email alerts that say something other than +### the following. 
+MAIL_ALERT_PREFIX [psad-alert]; +MAIL_STATUS_PREFIX [psad-status]; +MAIL_ERROR_PREFIX [psad-error]; +MAIL_FATAL_PREFIX [psad-fatal]; + +### URL for getting the latest psad signatures +SIG_UPDATE_URL http://www.cipherdyne.org/psad/signatures; + +### These next two are psadwatchd vars +PSADWATCHD_CHECK_INTERVAL 5; ### seconds +PSADWATCHD_MAX_RETRIES 10; + +### Directories +INSTALL_ROOT /; +PSAD_DIR $INSTALL_ROOT/var/log/psad; +PSAD_RUN_DIR $INSTALL_ROOT/var/run/psad; +PSAD_FIFO_DIR $INSTALL_ROOT/var/lib/psad; +PSAD_LIBS_DIR $INSTALL_ROOT/usr/lib/psad; +PSAD_CONF_DIR $INSTALL_ROOT/etc/psad; +PSAD_ERR_DIR $PSAD_DIR/errs; +CONF_ARCHIVE_DIR $PSAD_CONF_DIR/archive; +SCAN_DATA_ARCHIVE_DIR $PSAD_DIR/scan_archive; +ANALYSIS_MODE_DIR $PSAD_DIR/ipt_analysis; +SNORT_RULES_DIR $PSAD_CONF_DIR/snort_rules; +FWSNORT_RULES_DIR /etc/fwsnort/snort_rules; ### may not exist + +### Files +FW_DATA_FILE $PSAD_DIR/fwdata; +ULOG_DATA_FILE $PSAD_DIR/ulogd.log; +FW_CHECK_FILE $PSAD_DIR/fw_check; +DSHIELD_EMAIL_FILE $PSAD_DIR/dshield.email; +SIGS_FILE $PSAD_CONF_DIR/signatures; +PROTOCOLS_FILE $PSAD_CONF_DIR/protocols; +ICMP_TYPES_FILE $PSAD_CONF_DIR/icmp_types; +ICMP6_TYPES_FILE $PSAD_CONF_DIR/icmp6_types; +AUTO_DL_FILE $PSAD_CONF_DIR/auto_dl; +SNORT_RULE_DL_FILE $PSAD_CONF_DIR/snort_rule_dl; +POSF_FILE $PSAD_CONF_DIR/posf; +P0F_FILE $PSAD_CONF_DIR/pf.os; +IP_OPTS_FILE $PSAD_CONF_DIR/ip_options; +PSAD_FIFO_FILE $PSAD_FIFO_DIR/psadfifo; +ETC_HOSTS_DENY_FILE /etc/hosts.deny; +ETC_SYSLOG_CONF /etc/syslog.conf; +ETC_RSYSLOG_CONF /etc/rsyslog.conf; +ETC_SYSLOGNG_CONF /etc/syslog-ng/syslog-ng.conf; +ETC_METALOG_CONF /etc/metalog/metalog.conf; +STATUS_OUTPUT_FILE $PSAD_DIR/status.out; +ANALYSIS_OUTPUT_FILE $PSAD_DIR/analysis.out; +INSTALL_LOG_FILE $PSAD_DIR/install.log; + +### PID files +PSAD_PID_FILE $PSAD_RUN_DIR/psad.pid; +PSAD_CMDLINE_FILE $PSAD_RUN_DIR/psad.cmd; +KMSGSD_PID_FILE $PSAD_RUN_DIR/kmsgsd.pid; +PSADWATCHD_PID_FILE $PSAD_RUN_DIR/psadwatchd.pid; + +### List of ips that have been auto 
blocked by iptables +### or tcpwrappers (the auto blocking feature is disabled by +### default, see the psad man page and the ENABLE_AUTO_IDS +### variable). +AUTO_BLOCK_IPT_FILE $PSAD_DIR/auto_blocked_iptables; +AUTO_BLOCK_TCPWR_FILE $PSAD_DIR/auto_blocked_tcpwr; + +### File used internally by psad to add iptables blocking +### rules to a running psad process +AUTO_IPT_SOCK $PSAD_RUN_DIR/auto_ipt.sock; + +FW_ERROR_LOG $PSAD_ERR_DIR/fwerrorlog; +PRINT_SCAN_HASH $PSAD_DIR/scan_hash; + +### /proc interface for controlling ip forwarding +PROC_FORWARD_FILE /proc/sys/net/ipv4/ip_forward; + +### Packet counters for tcp, udp, and icmp protocols +PACKET_COUNTER_FILE $PSAD_DIR/packet_ctr; + +### Top scanned ports +TOP_SCANNED_PORTS_FILE $PSAD_DIR/top_ports; + +### Top signature matches +TOP_SIGS_FILE $PSAD_DIR/top_sigs; + +### Top attackers +TOP_ATTACKERS_FILE $PSAD_DIR/top_attackers; + +### Counter file for Dshield alerts +DSHIELD_COUNTER_FILE $PSAD_DIR/dshield_ctr; + +### Counter file for iptables prefixes +IPT_PREFIX_COUNTER_FILE $PSAD_DIR/ipt_prefix_ctr; + +### iptables command output and error collection files; these are +### used by IPTables::ChainMgr +IPT_OUTPUT_FILE $PSAD_DIR/psad.iptout; +IPT_ERROR_FILE $PSAD_DIR/psad.ipterr; + +### system binaries +iptablesCmd /sbin/iptables; +ip6tablesCmd /sbin/ip6tables; +shCmd /bin/sh; +wgetCmd /usr/bin/wget; +gzipCmd /bin/gzip; +mknodCmd /bin/mknod; +psCmd /bin/ps; +mailCmd /bin/mail; +sendmailCmd /usr/sbin/sendmail; +ifconfigCmd /sbin/ifconfig; +ipCmd /sbin/ip; +killallCmd /usr/bin/killall; +netstatCmd /bin/netstat; +unameCmd /bin/uname; +whoisCmd $INSTALL_ROOT/usr/bin/whois_psad; +dfCmd /bin/df; +fwcheck_psadCmd $INSTALL_ROOT/usr/sbin/fwcheck_psad; +psadwatchdCmd $INSTALL_ROOT/usr/sbin/psadwatchd; +kmsgsdCmd $INSTALL_ROOT/usr/sbin/kmsgsd; +psadCmd $INSTALL_ROOT/usr/sbin/psad; diff --git a/ansible/roles/git/README.md b/ansible/roles/git/README.md new file mode 100644 index 00000000..3c7dcd9f --- /dev/null +++ 
b/ansible/roles/git/README.md @@ -0,0 +1,26 @@ +Role Name +======== + +Ansible Role to Install Docker on CentOS 6.5 + +Role Variables +-------------- + +``` +docker_centos_packages: + - { package: "docker" } +``` + +Example Playbook +------------------------- + + - hosts: docker-servers + roles: + - { role: docker-centos, + tags: ["docker"] } + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/git/tasks/main.yml b/ansible/roles/git/tasks/main.yml new file mode 100644 index 00000000..16d60db9 --- /dev/null +++ b/ansible/roles/git/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: install git + apt: name=git state=present + become: true + when: "ansible_distribution == 'Ubuntu'" diff --git a/ansible/roles/git_repo/meta/main.yml b/ansible/roles/git_repo/meta/main.yml new file mode 100644 index 00000000..1a46c351 --- /dev/null +++ b/ansible/roles/git_repo/meta/main.yml @@ -0,0 +1,3 @@ +--- + dependencies: + - { role: git } diff --git a/ansible/roles/git_repo/tasks/main.yml b/ansible/roles/git_repo/tasks/main.yml new file mode 100644 index 00000000..39a370b0 --- /dev/null +++ b/ansible/roles/git_repo/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- name: Ensure Tag Deploy For Prod + tags: deploy + when: node_env=="production-delta" and not git_branch | match("^v([0-9]+)\.([0-9]+)\.([0-9]+)$") + fail: msg="only tag can be deployed on prod not {{ git_branch }}" + +- name: Display Git Repo Name + debug: + msg: "application Installed: {{ app_name }}, branch : {{ git_branch }} " + +- name: create repository dir + become: true + file: + path=/opt/runnable/{{ app_name }} + state=directory + owner={{ ansible_env.USER }} + +- name: pull the git repository + tags: deploy + git: + repo={{ app_repo }} + dest=/opt/runnable/{{ app_name }} + version={{ git_branch }} + update=yes + accept_hostkey=True + force=yes diff --git a/ansible/roles/install-ssm/tasks/main.yml b/ansible/roles/install-ssm/tasks/main.yml new file mode 100644 index 00000000..01a710ad 
--- /dev/null +++ b/ansible/roles/install-ssm/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: get amazon simple systems management + get_url: + url=https://amazon-ssm-us-west-2.s3.amazonaws.com/latest/debian_amd64/amazon-ssm-agent.deb + dest=/tmp + +- name: start amazon simple systems management + command: dpkg -i amazon-ssm-agent.deb + become: true + args: + chdir: /tmp diff --git a/ansible/roles/k8-deployment/tasks/main.yml b/ansible/roles/k8-deployment/tasks/main.yml new file mode 100644 index 00000000..28a44c5c --- /dev/null +++ b/ansible/roles/k8-deployment/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: create deployment folder + file: + state: directory + path: "{{ deployments_path }}" + +- name: create deployment yaml + template: + dest: "{{ deployments_path }}/{{ name }}.yml" + src: deployment.yml diff --git a/ansible/roles/k8-deployment/templates/deployment.yml b/ansible/roles/k8-deployment/templates/deployment.yml new file mode 100644 index 00000000..ead87a60 --- /dev/null +++ b/ansible/roles/k8-deployment/templates/deployment.yml @@ -0,0 +1,102 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: {{ name }} +spec: + replicas: {% if env == "delta" %} {{ num_replicas | default(1) }} {% else %} 1 {% endif %} + + template: + metadata: + labels: + app: {{ name }} + spec: + imagePullSecrets: + - name: {{ image_pull_secret_name }} + hostname: {{ name }} + containers: + - image: {{ container_image }}:{{ container_tag }} + imagePullPolicy: Always + name: {{ name }} + resources: + requests: + cpu: "250m" + memory: {{ memory_request | default("1500M") }} + limits: + cpu: "1550m" + memory: {{ memory_hard_limit | default("1500M") }} +{% if post_start_command is defined %} + lifecycle: + postStart: + exec: + command: ["/bin/bash", "-c", "{{ post_start_command }}"] +{% endif %} +{% if tcp_readiness_probe_port is defined %} + readinessProbe: + tcpSocket: + port: {{ tcp_readiness_probe_port }} + initialDelaySeconds: 5 + periodSeconds: 5 +{% endif %} +{% 
if container_run_args != '' %} + args: +{% if advance_arg is defined and advance_arg == true %} + - bash + - -c + - "{{ container_run_args }}" +{% else %} +{% for arg in container_run_args.split(' ') %} + - {{ arg }} +{% endfor %} +{% endif %} +{% endif %} +{% if container_envs is defined %} + env: +{% for env in container_envs %} +{% if (env.value is defined and env.value != 'ansible_undefined') or env.valueFrom is defined %} + - name: {{ env.name }} +{% if env.value is defined %} + value: "{{ env.value }}" +{% endif %} +{% if env.valueFrom is defined %} + valueFrom: + fieldRef: + fieldPath: {{ env.valueFrom }} +{% endif %} +{% endif %} +{% endfor %} +{% endif %} +{% if add_capabilities is defined %} + securityContext: + capabilities: + add: +{% for cap in add_capabilities %} + - {{ cap }} +{% endfor %} +{% endif %} +{% if hosted_ports is defined %} + ports: +{% for port in hosted_ports %} + - containerPort: {{ port }} +{% endfor %} +{% endif %} +{% if volume_mounts is defined %} + volumeMounts: +{% for volume in volume_mounts %} + - name: {{ volume.name }} + mountPath: {{ volume.path }} +{% endfor %} +{% endif %} +{% if volume_mounts is defined %} + volumes: +{% for volume in volume_mounts %} + - name: {{ volume.name }} +{% if volume.kind == "configMap" %} + configMap: + name: {{ volume.name }} +{% endif %} +{% if volume.kind == "persistent" %} + persistentVolumeClaim: + claimName: {{ volume.name }} +{% endif %} +{% endfor %} +{% endif %} diff --git a/ansible/roles/k8-job/tasks/main.yml b/ansible/roles/k8-job/tasks/main.yml new file mode 100644 index 00000000..3d4a70bf --- /dev/null +++ b/ansible/roles/k8-job/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: create job folder + file: + state: directory + path: "{{ jobs_path }}" + +- name: create job yaml + template: + dest: "{{ jobs_path }}/{{ name }}.yml" + src: job.yml diff --git a/ansible/roles/k8-job/templates/job.yml b/ansible/roles/k8-job/templates/job.yml new file mode 100644 index 00000000..3f4afaf5 --- 
/dev/null +++ b/ansible/roles/k8-job/templates/job.yml @@ -0,0 +1,96 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ name }} + labels: + type: job +spec: + template: + metadata: + labels: + app: {{ name }} + spec: + imagePullSecrets: + - name: {{ image_pull_secret_name }} + hostname: {{ name }} + restartPolicy: {{ restart_policy | default("OnFailure") }} + containers: + - image: {{ container_image }}:{{ container_tag }} + imagePullPolicy: Always + name: {{ name }} + resources: + requests: + cpu: "250m" + memory: "500M" + limits: + cpu: "1550m" + memory: {{ memory_hard_limit | default("1500M") }} +{% if post_start_command is defined %} + lifecycle: + postStart: + exec: + command: ["/bin/bash", "-c", "{{ post_start_command }}"] +{% endif %} +{% if container_run_args != '' %} + args: +{% if advance_arg is defined and advance_arg == true %} + - bash + - -c + - "{{ container_run_args }}" +{% else %} +{% for arg in container_run_args.split(' ') %} + - {{ arg }} +{% endfor %} +{% endif %} +{% endif %} +{% if container_envs is defined %} + env: +{% for env in container_envs %} +{% if (env.value is defined and env.value != 'ansible_undefined') or env.valueFrom is defined %} + - name: {{ env.name }} +{% if env.value is defined %} + value: "{{ env.value }}" +{% endif %} +{% if env.valueFrom is defined %} + valueFrom: + fieldRef: + fieldPath: {{ env.valueFrom }} +{% endif %} +{% endif %} +{% endfor %} +{% endif %} +{% if add_capabilities is defined %} + securityContext: + capabilities: + add: +{% for cap in add_capabilities %} + - {{ cap }} +{% endfor %} +{% endif %} +{% if hosted_ports is defined %} + ports: +{% for port in hosted_ports %} + - containerPort: {{ port }} +{% endfor %} +{% endif %} +{% if volume_mounts is defined %} + volumeMounts: +{% for volume in volume_mounts %} + - name: {{ volume.name }} + mountPath: {{ volume.path }} +{% endfor %} +{% endif %} +{% if volume_mounts is defined %} + volumes: +{% for volume in volume_mounts %} + - name: {{ 
volume.name }} +{% if volume.kind == "configMap" %} + configMap: + name: {{ volume.name }} +{% endif %} +{% if volume.kind == "persistent" %} + persistentVolumeClaim: + claimName: {{ volume.name }} +{% endif %} +{% endfor %} +{% endif %} diff --git a/ansible/roles/k8-service/tasks/main.yml b/ansible/roles/k8-service/tasks/main.yml new file mode 100644 index 00000000..8cbbe947 --- /dev/null +++ b/ansible/roles/k8-service/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: create service folder + file: + state: directory + path: "{{ services_path }}" + +- name: create service yaml + template: + dest: "{{ services_path }}/{{ name }}.yml" + src: service.yml diff --git a/ansible/roles/k8-service/templates/service.yml b/ansible/roles/k8-service/templates/service.yml new file mode 100644 index 00000000..833a9ae7 --- /dev/null +++ b/ansible/roles/k8-service/templates/service.yml @@ -0,0 +1,23 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ service_name | default(name) }} +spec: + selector: + app: {{ name }} + ports: +{% for port in hosted_ports %} + - port: {{ port }} + protocol: {{ protocol | default("TCP") }} + name: '{{ port }}' +{% endfor %} +{% if service_type is defined %} + type: {{ service_type }} +{% endif %} +{% if service_load_balancer_ranges is defined %} + loadBalancerSourceRanges: +{% for range in service_load_balancer_ranges %} + - {{ range }} +{% endfor %} +{% endif %} + diff --git a/ansible/roles/kartographer/tasks/main.yml b/ansible/roles/kartographer/tasks/main.yml new file mode 100644 index 00000000..7d305f27 --- /dev/null +++ b/ansible/roles/kartographer/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: create configMap folder + file: + state: directory + path: "{{ config_maps_path }}" + +- name: create prometheus config + tags: [ deploy ] + template: + dest: "{{ config_maps_path }}/{{ name }}.yml" + src: keys.yml diff --git a/ansible/roles/kartographer/templates/keys.yml b/ansible/roles/kartographer/templates/keys.yml new file mode 100644 index 
00000000..f0d684b5 --- /dev/null +++ b/ansible/roles/kartographer/templates/keys.yml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-keys +data: + {{ kube_config_name }}: | + {{ lookup('file', '{{ secrets_path }}/{{ name }}/{{ kube_config_name }}') | indent(4) }} + {{ google_key_name }}: | + {{ lookup('file', '{{ secrets_path }}/{{ name }}/{{ google_key_name }}') | indent(4) }} diff --git a/ansible/roles/khronos/README.md b/ansible/roles/khronos/README.md new file mode 100644 index 00000000..b36230c3 --- /dev/null +++ b/ansible/roles/khronos/README.md @@ -0,0 +1,3 @@ +Run Khronos CLI tool out of cron once daily. + +`tasks/main.yml` - install cron entry to run `/khronos/bin/cli.js` and output to `{{ app_log_dir }}/khonos_cron.log` diff --git a/ansible/roles/khronos/tasks/main.yml b/ansible/roles/khronos/tasks/main.yml new file mode 100644 index 00000000..ffe20ff8 --- /dev/null +++ b/ansible/roles/khronos/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: create cron folder + file: + state: directory + path: "{{ cron_jobs_path }}" + +- name: create cron_queues + template: + dest: "{{ cron_jobs_path }}/{{ item.cron_queue }}.yml" + src: queue-cron.yml + with_items: "{{ cron_queues }}" + +- name: create cron_queues + when: node_env=="production-delta" + template: + dest: "{{ cron_jobs_path }}/{{ item.cron_queue }}.yml" + src: queue-cron.yml + with_items: "{{ prod_cron_queues }}" + +- name: create cron_events + template: + dest: "{{ cron_jobs_path }}/{{ item.cron_event }}.yml" + src: event-cron.yml + with_items: "{{ cron_events }}" diff --git a/ansible/roles/khronos/templates/event-cron.yml b/ansible/roles/khronos/templates/event-cron.yml new file mode 100644 index 00000000..70995f55 --- /dev/null +++ b/ansible/roles/khronos/templates/event-cron.yml @@ -0,0 +1,31 @@ +apiVersion: batch/v2alpha1 +kind: CronJob +metadata: + name: {{ item.cron_event }} + labels: + type: cron-job +spec: + successfulJobsHistoryLimit: 0 + failedJobsHistoryLimit: 1 + 
schedule: "{{ item.cron_scedule }}" + jobTemplate: + spec: + template: + spec: + imagePullSecrets: + - name: {{ image_pull_secret_name }} + restartPolicy: Never + containers: + - name: {{ item.cron_event | replace('.', '-') }} + image: {{ container_image }}:{{ container_tag }} + env: + - name: DATADOG_HOST + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: DATADOG_PORT + value: "{{ datadog_port }}" + args: + - bash + - -c + - /khronos/bin/cli.js --event {{ item.cron_event }} --job '{}' --host {{ cron_rabbit_host_address }} {{ cron_rabbit_auth }} diff --git a/ansible/roles/khronos/templates/queue-cron.yml b/ansible/roles/khronos/templates/queue-cron.yml new file mode 100644 index 00000000..4d6ffff1 --- /dev/null +++ b/ansible/roles/khronos/templates/queue-cron.yml @@ -0,0 +1,31 @@ +apiVersion: batch/v2alpha1 +kind: CronJob +metadata: + name: {{ item.cron_queue }} + labels: + type: cron-job +spec: + successfulJobsHistoryLimit: 0 + failedJobsHistoryLimit: 1 + schedule: "{{ item.cron_scedule }}" + jobTemplate: + spec: + template: + spec: + imagePullSecrets: + - name: {{ image_pull_secret_name }} + restartPolicy: Never + containers: + - name: {{ item.cron_queue | replace('.', '-') }} + image: {{ container_image }}:{{ container_tag }} + env: + - name: DATADOG_HOST + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: DATADOG_PORT + value: "{{ datadog_port }}" + args: + - bash + - -c + - /khronos/bin/cli.js --queue {{ item.cron_queue }} --job '{}' --host {{ cron_rabbit_host_address }} {{ cron_rabbit_auth }} diff --git a/ansible/roles/load/files/test.sh b/ansible/roles/load/files/test.sh new file mode 100755 index 00000000..dcd2e9ae --- /dev/null +++ b/ansible/roles/load/files/test.sh @@ -0,0 +1,15 @@ +#!/bin/bash +for i in `seq 1 $CUR`; do + CUR=1 + cd /runnable/node-hello-world + echo "" > data + for i in `seq 1 $CUR`; do + ./load.sh & + done + + for job in `jobs -p`; do + wait $job + done + + cat data +done \ No newline at end of file diff --git 
a/ansible/roles/load/tasks/main.yml b/ansible/roles/load/tasks/main.yml new file mode 100644 index 00000000..b154e4e7 --- /dev/null +++ b/ansible/roles/load/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: create node-hello-world dir + become: true + file: + path=/runnable/node-hello-world/ + state=directory + owner={{ ansible_env.USER }} + +- name: git pull repo + git: + repo=https://github.com/anandkumarpatel/node-hello-world.git + dest=/runnable/node-hello-world + version=master + update=yes + accept_hostkey=True + +- script: test.sh + register: out + tags: run + +- debug: var=out + tags: run diff --git a/ansible/roles/local-vault/handlers/main.yml b/ansible/roles/local-vault/handlers/main.yml new file mode 100644 index 00000000..7c055ec5 --- /dev/null +++ b/ansible/roles/local-vault/handlers/main.yml @@ -0,0 +1,8 @@ +--- +- name: stop vault + local_action: + shell kill $(ps aux | grep "vault server" | grep -v grep | cut -d' ' -f3) + +- name: remove vault config + local_action: + command rm -f /tmp/vault.pid /tmp/vault.hcl diff --git a/ansible/roles/local-vault/tasks/main.yml b/ansible/roles/local-vault/tasks/main.yml new file mode 100644 index 00000000..29befa21 --- /dev/null +++ b/ansible/roles/local-vault/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: create vault config + run_once: true + local_action: + template + dest=/tmp/vault.hcl + src=vault.hcl.j2 + notify: + - remove vault config + +- name: start vault daemon + run_once: true + local_action: + shell vault server --config=/tmp/vault.hcl > /tmp/log 2>&1 & + notify: + - stop vault + +- name: pause for start + pause: + seconds: 5 + +- name: check vault seal + tags: [ unseal ] + run_once: true + local_action: + command vault status + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + ignore_errors: True + register: seal_data + changed_when: "'Sealed: true' in seal_data.stdout" + +- name: unseal vault + tags: [ unseal ] + run_once: true + when: seal_data.changed + 
local_action: + command vault unseal {{ item }} + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + with_items: + - "{{ vault_unseal_tokens.one }}" + - "{{ vault_unseal_tokens.two }}" + - "{{ vault_unseal_tokens.three }}" + +- name: pause to unseal + pause: + seconds: 1 diff --git a/ansible/roles/local-vault/templates/vault.hcl.j2 b/ansible/roles/local-vault/templates/vault.hcl.j2 new file mode 100644 index 00000000..f753aa83 --- /dev/null +++ b/ansible/roles/local-vault/templates/vault.hcl.j2 @@ -0,0 +1,13 @@ +disable_mlock = true + +backend "s3" { + bucket = "runnable.vault" + access_key = "{{ vault_aws_access_key }}" + secret_key = "{{ vault_aws_secret_key }}" + region = "us-west-2" +} + +listener "tcp" { + address = "127.0.0.1:{{ vault_local_port }}" + tls_disable = 1 +} diff --git a/ansible/roles/local-vault/vars/main.yml b/ansible/roles/local-vault/vars/main.yml new file mode 100644 index 00000000..dbf9d1f6 --- /dev/null +++ b/ansible/roles/local-vault/vars/main.yml @@ -0,0 +1,34 @@ +$ANSIBLE_VAULT;1.1;AES256 +35373162356535346134653633636333643665633235316564326661643333303038636666323635 +6535373634323066383834393134656362393963626666340a616534346338313735646265353964 +39363062316130666133643866346338366439626236666639383933616164336435353134373833 +3535636133396331370a386633383861336133643534313037353937396662333533303362393235 +35333039623438313030393233373961666464666436626434343332653661373266373736356664 +61663033623038613231373165373362393937356433623865656362626333643461663436396231 +66346264623238613661666463336635646232366639643331626363353131323838643763396635 +66393935643863383531613231346337326662396230313361616563346632353139636337373030 +34663062613565363035663763383065353866356330386639626264666361656436316639663863 +61303464303266316463356163636662353236376566383136333334393462356638316236613565 +33623536663763633263666234653336633962663737303835336161393430316334313031623035 
+32393230633064373839363562366533663838656331323836303735303733313739306362313433 +33343030333535393664616334383430656336633636326233366233656332366665653733623064 +61353933303530306566363533303031333234373038623963323738666135303434373233326537 +37653661626536653230303433663534666439373331346535316438373063323634643662663438 +62383234346336633863343461336662613662323939663263356537393537303661363534333563 +35306262656432343861666261333631323666616636313034323931613937653130313163343633 +61643931303563626561396337303830646663616638366632376432363963353536636633656364 +31363661303635643462643563333733356431353861623432386130646434653264646561366363 +30396263613130643463646464383763623565376533303936646362376437643835303638316232 +35623531366530346335343461653636613930633664646433376665313431363139663530623337 +37343362373535383766373232626337336530356636373138333133383832653831396433393733 +32623963303764663538353335663136383964643031343434663134663964646164393230353337 +66636566346363626434363432323731656630353266653263323231376632613133393965643233 +31303831383463383565666665353136353439613161613864383766333133643837626239643962 +33393733326334323534383564613934323432326536326138356437653363313062366636366261 +65626137303561366630623933353064363438363735363438303735636434353530353563333935 +61396537666464646562323262663164316437623331343430633630343962383533366163316234 +65366134396130383364626436326434646566303330626330623830653162376266316336613365 +38653336363365393064666130363536333638643333663330346132323133663037383032363530 +66653537376665613065353166373433313634656665383964366264613235386165346264306265 +61623335346264393937333761303765613433336531353764386437373432646139313732633131 +6365 diff --git a/ansible/roles/log-mkdir/tasks/main.yml b/ansible/roles/log-mkdir/tasks/main.yml new file mode 100644 index 00000000..942b5d69 --- /dev/null +++ b/ansible/roles/log-mkdir/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: create application logs directory + tags: 
rsyslog + when: app_log_dir != "/var/log" + become: true + file: + path="{{ app_log_dir }}" + state=directory + owner=root + group=syslog + mode=0775 + diff --git a/ansible/roles/loggly/files/20-loggly-tls.conf b/ansible/roles/loggly/files/20-loggly-tls.conf new file mode 100644 index 00000000..1f98fa12 --- /dev/null +++ b/ansible/roles/loggly/files/20-loggly-tls.conf @@ -0,0 +1,6 @@ +#RsyslogGnuTLS +$DefaultNetstreamDriverCAFile /etc/rsyslog.d/keys/ca.d/logs-01.loggly.com_sha12.crt +$ActionSendStreamDriver gtls +$ActionSendStreamDriverMode 1 +$ActionSendStreamDriverAuthMode x509/name +$ActionSendStreamDriverPermittedPeer *.loggly.com diff --git a/ansible/roles/loggly/files/50-default.conf b/ansible/roles/loggly/files/50-default.conf new file mode 100644 index 00000000..fe332701 --- /dev/null +++ b/ansible/roles/loggly/files/50-default.conf @@ -0,0 +1,71 @@ +# Default rules for rsyslog. +# +# For more information see rsyslog.conf(5) and /etc/rsyslog.conf + +# No buffering +$ActionQueueType Direct + +# +# First some standard log files. Log by facility. +# +auth,authpriv.* /var/log/auth.log +*.*;auth,authpriv.none -/var/log/syslog +#cron.* /var/log/cron.log +#daemon.* -/var/log/daemon.log +kern.* -/var/log/kern.log +#lpr.* -/var/log/lpr.log +mail.* -/var/log/mail.log +#user.* -/var/log/user.log + +# +# Logging for the mail system. Split it up so that +# it is easy to write scripts to parse these files. +# +#mail.info -/var/log/mail.info +#mail.warn -/var/log/mail.warn +mail.err /var/log/mail.err + +# +# Logging for INN news system. +# +news.crit /var/log/news/news.crit +news.err /var/log/news/news.err +news.notice -/var/log/news/news.notice + +# +# Some "catch-all" log files. +# +#*.=debug;\ +# auth,authpriv.none;\ +# news.none;mail.none -/var/log/debug +#*.=info;*.=notice;*.=warn;\ +# auth,authpriv.none;\ +# cron,daemon.none;\ +# mail,news.none -/var/log/messages + +# +# Emergencies are sent to everybody logged in. 
+# +*.emerg :omusrmsg:* + +# +# I like to have messages displayed on the console, but only on a virtual +# console I usually leave idle. +# +#daemon,mail.*;\ +# news.=crit;news.=err;news.=notice;\ +# *.=debug;*.=info;\ +# *.=notice;*.=warn /dev/tty8 + +# The named pipe /dev/xconsole is for the `xconsole' utility. To use it, +# you must invoke `xconsole' with the `-file' option: +# +# $ xconsole -file /dev/xconsole [...] +# +# NOTE: adjust the list below, or you'll go crazy if you have a reasonably +# busy site.. +# +daemon.*;mail.*;\ + news.err;\ + *.=debug;*.=info;\ + *.=notice;*.=warn |/dev/xconsole diff --git a/ansible/roles/loggly/files/archiveOldLogs.sh b/ansible/roles/loggly/files/archiveOldLogs.sh new file mode 100644 index 00000000..54bda4cf --- /dev/null +++ b/ansible/roles/loggly/files/archiveOldLogs.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# +# This is to be run only for legacy logs in /var/log, not application logs in /var/log/runnable +# + +# we only want this run as root +if [ "root" != `whoami` ] ; then + echo "${0}: ERROR - This script needs to be run as root." 
+ exit 127 +fi + +# legacy log path +logdir=/var/log +# store log archives here, purge manually +archdir=/docker/archive +datetime=`date +%Y%m%d%H%m` + +# these logfiles haven't been modified in > 24 hours, so moving them without cleanup up filehandles first should be OK: +echo "Compressing logs > 24h" +find "${logdir}" -maxdepth 2 -type f -mmin +1440 -name '*.log' -exec bzip2 -9 {} \; -print +echo "Restarting rsyslogd" +# but we do need to clean the filehandles after, just in case +service rsyslog restart + +# archive anything > 6h +echo "Archiving logs > 6h" +mkdir -p "${archdir}" 2>&1 +find "${logdir}" -maxdepth 2 -type f -mtime +6 -name '*z' | xargs tar jcvpf "${archdir}"/log-archive-"${datetime}".tbz +echo "Purging logs > 1wk" +find "${logdir}" -maxdepth 2 -type f -mtime +6 -exec rm -f {} \; -print diff --git a/ansible/roles/loggly/files/logs-01.loggly.com_sha12.crt b/ansible/roles/loggly/files/logs-01.loggly.com_sha12.crt new file mode 100644 index 00000000..31a70521 --- /dev/null +++ b/ansible/roles/loggly/files/logs-01.loggly.com_sha12.crt @@ -0,0 +1,197 @@ +-----BEGIN CERTIFICATE----- +MIIFYTCCBEmgAwIBAgIIB1i8CkNiTSUwDQYJKoZIhvcNAQELBQAwgcYxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUw +IwYDVQQKExxTdGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTMwMQYDVQQLEypo +dHRwOi8vY2VydHMuc3RhcmZpZWxkdGVjaC5jb20vcmVwb3NpdG9yeS8xNDAyBgNV +BAMTK1N0YXJmaWVsZCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIw +HhcNMTUwOTIzMjI1NjM4WhcNMTgwNDEwMDAxMDQ3WjBAMSEwHwYDVQQLExhEb21h +aW4gQ29udHJvbCBWYWxpZGF0ZWQxGzAZBgNVBAMTEmxvZ3MtMDEubG9nZ2x5LmNv +bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM17p0KBWDUaWuDmjD6U +B3geyK1/LfpZprDHSjJ+7HfdJX22KpEaO6FuLJra9CaSwyq8vwvVko93Is0BkdWt +hcFgE89I5UX16ieFCVb/WfADmEM8lIB3EkTmSkHglbaeI+wYEh5hUzOhavpQVTZ9 +XGvZZLyPvwUaNPwr/PfjTqWIlKLyNYZIfQwuADeb0C5meoSwmI+yC7ca4rCvMDLQ +EyzCJdDYV9/ugJUqGq2uhA2c+EFOP/Mvc51N11upWIMFgGoPWgGTuqbOAKf7w46x +oSNxgNaMFA0VsHW+HmxKnAQ5PuPNsheECQT3NqxvXMqez0voc8QuyBlrKVRrxeu5 
+h00CAwEAAaOCAdYwggHSMAwGA1UdEwEB/wQCMAAwHQYDVR0lBBYwFAYIKwYBBQUH +AwEGCCsGAQUFBwMCMA4GA1UdDwEB/wQEAwIFoDA8BgNVHR8ENTAzMDGgL6Athito +dHRwOi8vY3JsLnN0YXJmaWVsZHRlY2guY29tL3NmaWcyczEtMTYuY3JsMFkGA1Ud +IARSMFAwTgYLYIZIAYb9bgEHFwEwPzA9BggrBgEFBQcCARYxaHR0cDovL2NlcnRp +ZmljYXRlcy5zdGFyZmllbGR0ZWNoLmNvbS9yZXBvc2l0b3J5LzCBggYIKwYBBQUH +AQEEdjB0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5zdGFyZmllbGR0ZWNoLmNv +bS8wRgYIKwYBBQUHMAKGOmh0dHA6Ly9jZXJ0aWZpY2F0ZXMuc3RhcmZpZWxkdGVj +aC5jb20vcmVwb3NpdG9yeS9zZmlnMi5jcnQwHwYDVR0jBBgwFoAUJUWBaFAmOD07 +LSy+zWrZtj2zZmMwNQYDVR0RBC4wLIISbG9ncy0wMS5sb2dnbHkuY29tghZ3d3cu +bG9ncy0wMS5sb2dnbHkuY29tMB0GA1UdDgQWBBRyVaLpluYAt1XsqWNl1g8v68GW +gDANBgkqhkiG9w0BAQsFAAOCAQEAEWHC5d39uw1r2qORpNBLLubB5N0R6dHCzwmH +E1b/TPXIkP3KZXbBGQqezRbRjj5NjxjNIt7C0PI2IzOBpgqscPYQ1JiApok8qiFJ +/pEOgnDwhI4Ao0dd/g0urnD6AmD6l5YjoXdFZtA1s0pa1EUar9/rIkeHbcpRY+je +Z9hi+Yt+NvllAgbML+EyFCp/hB9OSNmYrZGxkz7MeptC2iM4aNtFObmnl1sFec49 +hO766oe06grj/ToKLcCDcKgtsfiTt6yWWcN5c+NPp6LbciUE5VE8qcUSbyu6XjDK +m/vYlGMCycwqmXC/6xXjsfBh+R3smpSFIJvRjqVKzXdx8yc21Q== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFADCCA+igAwIBAgIBBzANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTExMDUwMzA3MDAw +MFoXDTMxMDUwMzA3MDAwMFowgcYxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTMwMQYDVQQLEypodHRwOi8vY2VydHMuc3RhcmZpZWxk +dGVjaC5jb20vcmVwb3NpdG9yeS8xNDAyBgNVBAMTK1N0YXJmaWVsZCBTZWN1cmUg +Q2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDlkGZL7PlGcakgg77pbL9KyUhpgXVObST2yxcT+LBxWYR6ayuF +pDS1FuXLzOlBcCykLtb6Mn3hqN6UEKwxwcDYav9ZJ6t21vwLdGu4p64/xFT0tDFE +3ZNWjKRMXpuJyySDm+JXfbfYEh/JhW300YDxUJuHrtQLEAX7J7oobRfpDtZNuTlV +Bv8KJAV+L8YdcmzUiymMV33a2etmGtNPp99/UsQwxaXJDgLFU793OGgGJMNmyDd+ 
+MB5FcSM1/5DYKp2N57CSTTx/KgqT3M0WRmX3YISLdkuRJ3MUkuDq7o8W6o0OPnYX +v32JgIBEQ+ct4EMJddo26K3biTr1XRKOIwSDAgMBAAGjggEsMIIBKDAPBgNVHRMB +Af8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUJUWBaFAmOD07LSy+ +zWrZtj2zZmMwHwYDVR0jBBgwFoAUfAwyH6fZMH/EfWijYqihzqsHWycwOgYIKwYB +BQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5zdGFyZmllbGR0ZWNo +LmNvbS8wOwYDVR0fBDQwMjAwoC6gLIYqaHR0cDovL2NybC5zdGFyZmllbGR0ZWNo +LmNvbS9zZnJvb3QtZzIuY3JsMEwGA1UdIARFMEMwQQYEVR0gADA5MDcGCCsGAQUF +BwIBFitodHRwczovL2NlcnRzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkv +MA0GCSqGSIb3DQEBCwUAA4IBAQBWZcr+8z8KqJOLGMfeQ2kTNCC+Tl94qGuc22pN +QdvBE+zcMQAiXvcAngzgNGU0+bE6TkjIEoGIXFs+CFN69xpk37hQYcxTUUApS8L0 +rjpf5MqtJsxOYUPl/VemN3DOQyuwlMOS6eFfqhBJt2nk4NAfZKQrzR9voPiEJBjO +eT2pkb9UGBOJmVQRDVXFJgt5T1ocbvlj2xSApAer+rKluYjdkf5lO6Sjeb6JTeHQ +sPTIFwwKlhR8Cbds4cLYVdQYoKpBaXAko7nv6VrcPuuUSvC33l8Odvr7+2kDRUBQ +7nIMpBKGgc0T0U7EPMpODdIm8QC3tKai4W56gf0wrHofx1l7 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEoDCCA4igAwIBAgIDORSEMA0GCSqGSIb3DQEBCwUAMGgxCzAJBgNVBAYTAlVT +MSUwIwYDVQQKExxTdGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQL +EylTdGFyZmllbGQgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NDAxMDEwNzAwMDBaFw0zMTA1MzAwNzAwMDBaMIGPMQswCQYDVQQGEwJVUzEQMA4G +A1UECBMHQXJpem9uYTETMBEGA1UEBxMKU2NvdHRzZGFsZTElMCMGA1UEChMcU3Rh +cmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UEAxMpU3RhcmZpZWxkIFJv +b3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQC97cED/PaP/AKxb1ufSNmdeeKitwNhVhjDR7bXyj01LolD +96Fpm96KGv0TIJy0SXcyKVb9ueyM3SL6ctwnYZfu9lqE7G4ZuYks3IRb1XT7a1/F +iaUQUolGVfS4dRzmf+RUrkv4VXJXAhn4F3FZ6x4oB3TFnUi+bLT0pLDzZDd5ksDs +Rl5/4W1TTGKvzR8LY7s6nfv8eQCYYXTPJoJAY/OycmoZDZnK1A51zDf7i4nBWfFi +f1+zX2Uw+Ke3TXZaHnZeNMDollaZirPwf6TNvdwyMXyRz+BfEfhrqklc0ZmU0aLj +Y1sJdrVWYuFLdB2W1CbUCARZ0JgODube/MPsH5DxAgMBAAGjggEpMIIBJTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfAwyH6fZMH/E +fWijYqihzqsHWycwHwYDVR0jBBgwFoAUv1+30c7dH4b0W1Ws3NcQwg6piOcwOgYI 
+KwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5zdGFyZmllbGR0 +ZWNoLmNvbS8wOAYDVR0fBDEwLzAtoCugKYYnaHR0cDovL2NybC5zdGFyZmllbGR0 +ZWNoLmNvbS9zZnJvb3QuY3JsMEwGA1UdIARFMEMwQQYEVR0gADA5MDcGCCsGAQUF +BwIBFitodHRwczovL2NlcnRzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkv +MA0GCSqGSIb3DQEBCwUAA4IBAQCFY8HZ3bn/qb2mGdy/EzoROCJUsawFEPt8s5Y/ +MYtm/4jz4b/7xx8A/0Zqi2EyyQFRdvuaxvogUchGxJjXeaPjBHI/i000U2fsMyx7 +6JQBKHw6NFsCdxaNQCUzsLxsl9cFev+Mhc5voFMAF24ebL0i1wqIN/Z965lB7yfL +jGBrTAF+ZVALT7iVmppuNP1zOjPxkdXzTi106O/TkDXxBmhk1NAT/VLTxm3BOoox +3QUmNUqMZbhSa4Hs0py1NBCXnD7GL+2OQkIkLulzmiX5EfHyI2nL5ZRpoNLcsPxE +iawXqMzVN3cWxYC5DI9XAlWZhXtJ8C5boMJXU12i6KY3wwH6 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl 
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFBzCCA++gAwIBAgICAgEwDQYJKoZIhvcNAQEFBQAwaDELMAkGA1UEBhMCVVMx +JTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsT +KVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2 +MTExNjAxMTU0MFoXDTI2MTExNjAxMTU0MFowgdwxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTkwNwYDVQQLEzBodHRwOi8vY2VydGlm +aWNhdGVzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkxMTAvBgNVBAMTKFN0 +YXJmaWVsZCBTZWN1cmUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxETAPBgNVBAUT +CDEwNjg4NDM1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4qddo+1m +72ovKzYf3Y3TBQKgyg9eGa44cs8W2lRKy0gK9KFzEWWFQ8lbFwyaK74PmFF6YCkN +bN7i6OUVTVb/kNGnpgQ/YAdKym+lEOez+FyxvCsq3AF59R019Xoog/KTc4KJrGBt +y8JIwh3UBkQXPKwBR6s+cIQJC7ggCEAgh6FjGso+g9I3s5iNMj83v6G3W1/eXDOS +zz4HzrlIS+LwVVAv+HBCidGTlopj2WYN5lhuuW2QvcrchGbyOY5bplhVc8tibBvX +IBY7LFn1y8hWMkpQJ7pV06gBy3KpdIsMrTrlFbYq32X43or174Q7+edUZQuAvUdF +pfBE2FM7voDxLwIDAQABo4IBRDCCAUAwHQYDVR0OBBYEFElLUifRG7zyoSFqYntR +QnqK19VWMB8GA1UdIwQYMBaAFL9ft9HO3R+G9FtVrNzXEMIOqYjnMBIGA1UdEwEB +/wQIMAYBAf8CAQAwOQYIKwYBBQUHAQEELTArMCkGCCsGAQUFBzABhh1odHRwOi8v +b2NzcC5zdGFyZmllbGR0ZWNoLmNvbTBMBgNVHR8ERTBDMEGgP6A9hjtodHRwOi8v +Y2VydGlmaWNhdGVzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkvc2Zyb290 +LmNybDBRBgNVHSAESjBIMEYGBFUdIAAwPjA8BggrBgEFBQcCARYwaHR0cDovL2Nl +cnRpZmljYXRlcy5zdGFyZmllbGR0ZWNoLmNvbS9yZXBvc2l0b3J5MA4GA1UdDwEB +/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAhlK6sx+mXmuQpmQq/EWyrp8+s2Kv +2x9nxL3KoS/HnA0hV9D4NiHOOiU+eHaz2d283vtshF8Mow0S6xE7cV+AHvEfbQ5f +wezUpfdlux9MlQETsmqcC+sfnbHn7RkNvIV88xe9WWOupxoFzUfjLZZiUTIKCGhL +Indf90XcYd70yysiKUQl0p8Ld3qhJnxK1w/C0Ty6DqeVmlsFChD5VV/Bl4t0zF4o +aRN+0AqNnQ9gVHrEjBs1D3R6cLKCzx214orbKsayUWm/EheSYBeqPVsJ+IdlHaek 
+KOUiAgOCRJo0Y577KM/ozS4OUiDtSss4fJ2ubnnXlSyokfOGASGRS7VApA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFfzCCBGegAwIBAgIILqvAG0gVC3QwDQYJKoZIhvcNAQEFBQAwgdwxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUw +IwYDVQQKExxTdGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTkwNwYDVQQLEzBo +dHRwOi8vY2VydGlmaWNhdGVzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkx +MTAvBgNVBAMTKFN0YXJmaWVsZCBTZWN1cmUgQ2VydGlmaWNhdGlvbiBBdXRob3Jp 
+dHkxETAPBgNVBAUTCDEwNjg4NDM1MB4XDTE1MDQwNjA1NDIzOFoXDTE2MDQwNjA1 +MzgzOFowQDEhMB8GA1UECxMYRG9tYWluIENvbnRyb2wgVmFsaWRhdGVkMRswGQYD +VQQDExJsb2dzLTAxLmxvZ2dseS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCo6JQSJCK8y67Wfg3bVCBndVmjzF52shm/Qn+dDxk6ge06zVgfZ5cP +D35YDQqdbHdyu1Jq59Ak4pu/Ta5uWvOhUuqsDUuYyfu9Bh6NGyCzvUiNFwOa9dH9 +W7JpFz/CUJqpsKAmwYNDeXuB0VFrLRxYCQTzqWBDuXnDvtfiMmvBvFiUKFfm4lUh +WV37ixUiXtx7xu+qJOqBeRwo0X5En5pk1oSTzHZaTtEExbdezV3vOQixHtchkrRN +KlbohdkrUpZZn9Z21K+FhOTmp/u03DhgiQXav6bxkW1Po8ZBPlyJRlHXe27XbqZm +o1yJJn2F33M7gNuKlspO3cdS1UqDqWelAgMBAAGjggHeMIIB2jAMBgNVHRMBAf8E +AjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAOBgNVHQ8BAf8EBAMC +BaAwOQYDVR0fBDIwMDAuoCygKoYoaHR0cDovL2NybC5zdGFyZmllbGR0ZWNoLmNv +bS9zZnMxLTI3LmNybDBZBgNVHSAEUjBQME4GC2CGSAGG/W4BBxcBMD8wPQYIKwYB +BQUHAgEWMWh0dHA6Ly9jZXJ0aWZpY2F0ZXMuc3RhcmZpZWxkdGVjaC5jb20vcmVw +b3NpdG9yeS8wgY0GCCsGAQUFBwEBBIGAMH4wKgYIKwYBBQUHMAGGHmh0dHA6Ly9v +Y3NwLnN0YXJmaWVsZHRlY2guY29tLzBQBggrBgEFBQcwAoZEaHR0cDovL2NlcnRp +ZmljYXRlcy5zdGFyZmllbGR0ZWNoLmNvbS9yZXBvc2l0b3J5L3NmX2ludGVybWVk +aWF0ZS5jcnQwHwYDVR0jBBgwFoAUSUtSJ9EbvPKhIWpie1FCeorX1VYwNQYDVR0R +BC4wLIISbG9ncy0wMS5sb2dnbHkuY29tghZ3d3cubG9ncy0wMS5sb2dnbHkuY29t +MB0GA1UdDgQWBBT0MM8oRzFYKeB0kuQkvExGRBXwWjANBgkqhkiG9w0BAQUFAAOC +AQEAQ9JNeNIPx+DacFSPG+AV3blBhgfZQXfLO2Wbls2Vuol7PtDKHuaoBSQE1RYE +A/iyXI3OJnNivGU/V2p4weHgitpNpQ8AJ7uZVERIUCOlCYJaDSevpFfoALQK2rWr +gegZZ6gVkdFanhHCRW4a2apLCRnUbt//7k1G6Fw8v+YCzyVtf31AnY/bhknWAfDc +oldME9cCeAPT8WvCC3Xmrrd1FxlVkEGyshAzEpA1BNeVQM4iB17Up2tXQIv+ehsU +cUJz4IKut0lglszuanEfAazOzEn37n/2Q3cNx5IDEHv3z4fBLwNfd9yT14izqKLJ +ODRffuOanfiyg+bXxdmuhfXUqQ== +-----END CERTIFICATE----- + diff --git a/ansible/roles/loggly/files/purgeLogs.sh b/ansible/roles/loggly/files/purgeLogs.sh new file mode 100644 index 00000000..cb000fc2 --- /dev/null +++ b/ansible/roles/loggly/files/purgeLogs.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# +# This is to be run logs in {{ app_log_dir }}. +# Runs from crontab. 
+# + +logdir="${1}" + +# We can compress anything older than 6 hours +find "${logdir}" -mindepth 2 -type f -mmin +360 -name '*.log' -exec bzip2 -9 {} \; + +# We automatically purge anything > 1wk +find "${logdir}" -maxdepth 2 -type f -mtime +7 -exec rm -f {} \; diff --git a/ansible/roles/loggly/files/rsyslog.conf b/ansible/roles/loggly/files/rsyslog.conf new file mode 100644 index 00000000..2301c962 --- /dev/null +++ b/ansible/roles/loggly/files/rsyslog.conf @@ -0,0 +1,71 @@ +# /etc/rsyslog.conf Configuration file for rsyslog. +# +# For more information see +# /usr/share/doc/rsyslog-doc/html/rsyslog_conf.html +# +# Default logging rules can be found in /etc/rsyslog.d/50-default.conf +$MaxMessageSize 1024k + +################# +#### MODULES #### +################# + +$ModLoad imuxsock # provides support for local system logging +$ModLoad imklog # provides kernel logging support +#$ModLoad immark # provides --MARK-- message capability + +# provides UDP syslog reception +$ModLoad imudp +$UDPServerRun 514 + +# provides TCP syslog reception +$ModLoad imtcp +$InputTCPServerRun 514 + +# Enable non-kernel facility klog messages +$KLogPermitNonKernelFacility on + +########################### +#### GLOBAL DIRECTIVES #### +########################### + +# +# Set main message queue to use direct queuing (not queued) mode +# + +$MainMsgQueueType Direct + +# +# Use traditional timestamp format. +# To enable high precision timestamps, comment out the following line. +# +$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat + +# Runnable JSON logging format +# Creates an "output formatter" template that accepts as input JSON and prints it out without any further processing ("raw JSON"). +# The formatting around the %msg% string is as such: start printing at the second character "2" until the end of the line "$" using the raw JSON format type. 
+$template RunnableJSON,"%msg:2:$:%\n" + +# Filter duplicated messages +$RepeatedMsgReduction on + +# +# Set the default permissions for all log files. +# +$FileOwner syslog +$FileGroup adm +$FileCreateMode 0644 +$DirCreateMode 0755 +$Umask 0022 +$PrivDropToUser syslog +$PrivDropToGroup adm + +# +# Where to place spool and state files +# +$WorkDirectory /var/spool/rsyslog + +# +# Include all config files in /etc/rsyslog.d/ +# +$IncludeConfig /etc/rsyslog.d/*.conf diff --git a/ansible/roles/loggly/meta/main.yml b/ansible/roles/loggly/meta/main.yml new file mode 100644 index 00000000..1d1930df --- /dev/null +++ b/ansible/roles/loggly/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: bash_aliases } diff --git a/ansible/roles/loggly/tasks/main.yml b/ansible/roles/loggly/tasks/main.yml new file mode 100644 index 00000000..2d29fd14 --- /dev/null +++ b/ansible/roles/loggly/tasks/main.yml @@ -0,0 +1,156 @@ +--- +- name: Install rsyslog-gnutls + become: yes + tags: loggly + apt: + pkg=rsyslog-gnutls + state=latest + update_cache=yes + cache_valid_time=604800 + install_recommends=yes + +- name: copy rsyslog default config + tags: loggly + become: true + copy: + src=50-default.conf + dest=/etc/rsyslog.d/50-default.conf + owner=syslog + group=syslog + +- name: copy loggly TLS config + tags: loggly + become: true + copy: + src=20-loggly-tls.conf + dest=/etc/rsyslog.d/20-loggly-tls.conf + owner=syslog + group=syslog + +- name: create loggly TLS cert dir + tags: loggly + become: true + file: + path=/etc/rsyslog.d/keys/ca.d + state=directory + owner=syslog + group=syslog + +- name: copy loggly TLS certs + tags: loggly + become: true + copy: + src=logs-01.loggly.com_sha12.crt + dest=/etc/rsyslog.d/keys/ca.d/logs-01.loggly.com_sha12.crt + owner=syslog + group=syslog + mode=400 + +- name: copy loggly config + tags: loggly + become: true + template: + src=22-loggly.conf.j2 + dest=/etc/rsyslog.d/22-loggly.conf + owner=syslog + group=syslog + +- name: copy docker upstart 
logs config + when: node_env != 'default' + tags: loggly + become: true + template: + src=21-rotated-docker.conf.j2 + dest=/etc/rsyslog.d/21-rotated-docker.conf + owner=syslog + group=syslog + +- name: copy app config + when: name is defined + tags: loggly + become: true + template: + src=21-output-syslog.conf.j2 + dest=/etc/rsyslog.d/21-rotated-{{ name }}.conf + owner=syslog + group=syslog + +- name: copy rsyslog config + tags: loggly + become: true + copy: + src=rsyslog.conf + dest=/etc/rsyslog.conf + owner=syslog + group=syslog + +- name: stop rsyslog + tags: [loggly, deploy] + become: true + service: name=rsyslog state=stopped + +- name: clear rsyslog state file + when: name is defined + tags: [loggly, deploy] + become: true + file: + path=/var/spool/rsyslog/stat-{{ name }} + state=absent + +- name: ensure log path + tags: loggly + become: true + file: + path="{{ app_log_dir }}" + state=directory + owner=syslog + group=adm + mode=0755 + recurse=yes + +- name: restart rsyslog + tags: [ loggly, deploy ] + become: true + service: name=rsyslog state=restarted + +- name: add loggly monitor + tags: cron + cron: + name="loggly monitor" + minute="*/2" + job="echo 'loggly monitoring' 2>&1 | /usr/bin/logger -t LogMonitor" + state=present + +- name: runnable bin directory + tags: [ loggly, clean ] + become: true + file: + path=/opt/runnable/bin + state=directory + owner=ubuntu + group=ubuntu + mode=0700 + +- name: copy log purge script + tags: [ loggly, clean ] + copy: + src=purgeLogs.sh + dest=/opt/runnable/bin/purgeLogs.sh + owner=ubuntu + group=ubuntu + mode=0700 + +- name: purge log files + tags: [ loggly, clean ] + become: true + cron: + name="purge log files" + minute=0 + job="/opt/runnable/bin/purgeLogs.sh {{ app_log_dir }}" + state=present + +- name: clean legacy log files + tags: [ loggly, clean ] + become: true + script: archiveOldLogs.sh + register: purge_out diff --git a/ansible/roles/loggly/templates/21-output-syslog.conf.j2 
b/ansible/roles/loggly/templates/21-output-syslog.conf.j2 new file mode 100644 index 00000000..b384c89a --- /dev/null +++ b/ansible/roles/loggly/templates/21-output-syslog.conf.j2 @@ -0,0 +1,12 @@ +$WorkDirectory /var/spool/rsyslog + +# Rotate per hour +$ActionQueueType Direct +$template RotateHourly_{{ name }},"{{ app_log_dir }}/%$YEAR%/%$MONTH%/%$DAY%/%$HOUR%/{{ name }}.log" +if $syslogtag contains '{{ name }}' and $syslogfacility-text == 'local7' then { action (type="omfile" DynaFile="RotateHourly_{{ name }}" template="RunnableJSON" dirCreateMode="0755" FileCreateMode="0644") } + +# Loggly: Add a tag for {{ name }} events +$ActionQueueType LinkedList +$template LogglyFormat_{{ name }},"<%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% %procid% %syslogtag% [{{ loggly_token }}@41058 tag=\"runnable\" tag=\"{{ node_env }}\"] %msg%\n" +if $syslogtag contains '{{ name }}' and $syslogfacility-text == 'local7' then @@logs-01.loggly.com:6514;LogglyFormat_{{ name }} +if $syslogtag contains '{{ name }}' and $syslogfacility-text == 'local7' then stop diff --git a/ansible/roles/loggly/templates/21-rotated-docker.conf.j2 b/ansible/roles/loggly/templates/21-rotated-docker.conf.j2 new file mode 100644 index 00000000..abcbaad2 --- /dev/null +++ b/ansible/roles/loggly/templates/21-rotated-docker.conf.j2 @@ -0,0 +1,12 @@ +$WorkDirectory /var/spool/rsyslog + +# Rotate per hour +$ActionQueueType Direct +$template RotateHourly_docker_engine,"{{ app_log_dir }}/%$YEAR%/%$MONTH%/%$DAY%/%$HOUR%/docker_engine.log" +if $syslogtag contains 'docker_engine' and $syslogfacility-text == 'local7' then { action (type="omfile" DynaFile="RotateHourly_docker_engine" template="RunnableJSON" dirCreateMode="0755" FileCreateMode="0644") } + +# Loggly: Add a tag for docker_engine events +$ActionQueueType LinkedList +$template LogglyFormat_docker_engine,"<%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% %procid% %syslogtag% [{{ loggly_token }}@41058 
tag=\"runnable\" tag=\"{{ node_env }}\"] %msg%\n" +if $syslogtag contains 'docker_engine' and $syslogfacility-text == 'local7' then @@logs-01.loggly.com:6514;LogglyFormat_docker_engine +if $syslogtag contains 'docker_engine' and $syslogfacility-text == 'local7' then stop diff --git a/ansible/roles/loggly/templates/22-loggly.conf.j2 b/ansible/roles/loggly/templates/22-loggly.conf.j2 new file mode 100644 index 00000000..3ee1a95b --- /dev/null +++ b/ansible/roles/loggly/templates/22-loggly.conf.j2 @@ -0,0 +1,18 @@ +# ------------------------------------------------------- +# Syslog Logging Directives for Loggly (sandboxes.loggly.com) +# ------------------------------------------------------- + +# Define the template used for sending logs to Loggly. Do not change this format. +$template LogglyFormat,"<%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% %procid% %msgid% [{{ loggly_token }}@41058 tag=\"{{ node_env }}\"] %msg%\n" + +$WorkDirectory /var/spool/rsyslog # where to place spool files +$ActionQueueFileName fwdRule1 # unique name prefix for spool files +$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible) +$ActionQueueSaveOnShutdown on # save messages to disk on shutdown +$ActionQueueType LinkedList # run asynchronously +$ActionResumeRetryCount -1 # infinite retries if host is down + +# Send messages to Loggly over TCP using the template. 
+*.* @@logs-01.loggly.com:6514;LogglyFormat + +# ------------------------------------------------------- diff --git a/ansible/roles/mongo-server/meta/main.yml b/ansible/roles/mongo-server/meta/main.yml new file mode 100644 index 00000000..a86d80f9 --- /dev/null +++ b/ansible/roles/mongo-server/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- { role: tls-server-cert, name: mongodb } diff --git a/ansible/roles/mongo-server/tasks/main.yml b/ansible/roles/mongo-server/tasks/main.yml new file mode 100644 index 00000000..9368e698 --- /dev/null +++ b/ansible/roles/mongo-server/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- name: make folder for certificates + tags: [ tls ] + become: true + file: + dest: /opt/ssl/mongo-server + state: directory + owner: mongodb + group: mongodb + +- name: create server pem file + tags: [ tls ] + become: true + copy: + dest: /opt/ssl/mongo-server/mongo.pem + content: "{{ new_certs.data.private_key }}\n{{ new_certs.data.certificate }}\n" + owner: mongodb + group: mongodb + mode: 0440 + +- name: create server CA file + tags: [ tls ] + become: true + copy: + dest: /opt/ssl/mongo-server/ca.pem + content: "{{ new_certs.data.issuing_ca }}\n" + owner: mongodb + group: mongodb + mode: 0440 diff --git a/ansible/roles/nginx/files/genNginxConf.js b/ansible/roles/nginx/files/genNginxConf.js new file mode 100644 index 00000000..19eefd46 --- /dev/null +++ b/ansible/roles/nginx/files/genNginxConf.js @@ -0,0 +1,138 @@ +var fs = require('fs'); +var xml2js = require('xml2js'); +var request = require('request'); +var exists = require('101/exists'); +var async = require('async'); +var parser = new xml2js.Parser(); + +var paths = { + 'login': true, + 'signup': true, + 'jobs': true, + 'about': true, + 'privacy': true +}; + +async.waterfall([ + nginxWebServicesRewrite, + nginxConfigHeader, + nginxManualLocations, + fs.readFile.bind(fs, 'sitemap.xml'), + parser.parseString.bind(parser), + function (result, cb) { + result.urlset.url.forEach(function (o) { + var path = 
o.loc[0].replace('http://runnable.com/', ''); + if (path.indexOf('/') !== -1) { + path = path.split('/').shift(); + } + // escape " + path = path.replace(/"/g, '\\"'); + // drop any including $ (messes w/ nginx) + if (path.indexOf('$') !== -1) { return; } + // drop ones where path now is undefined + if (!exists(path) || path.length === 0) { return; } + // monthly are our manual pages + if (o.changefreq[0] === 'monthly') { return; } + // keep track of dupes + if (paths[path]) { return; } + else { paths[path] = true; } + if (o.changefreq[0] === 'daily') { + if (o.priority[0] === '0.5') { + console.log('# CHANNEL:', path); + locationEqualsDirective(path); + } + } + else if (o.changefreq[0] === 'weekly') { + if (o.priority[0] === '0.6') { + console.log('# EXAMPLE:', path); + locationRegexDirective(path); + } + } + }); + cb(); + }, + nginxConfigFooter +], function (err) { + if (err) { + console.error('error:', err); + return process.exit(1); + } + process.exit(0); +}); + +function nginxWebServicesRewrite (cb) { + console.log([ + 'server {', + '\tserver_name ~^web-(?.+)\.runnable.com$;', + '\tlocation = / {', + '\t\treturn 301 "$scheme://web-$token.runnablecodesnippets.com$request_uri";', + '\t}', + '}', + 'server {', + '\t server_name ~^services-(?.+)\.runnable.com$;', + '\tlocation = / {', + '\t\treturn 301 "$scheme://services-$token.runnablecodesnippets.com$request_uri";', + '\t}', + '}', + '' + ].join('\n')); + cb(); +} + +function nginxConfigHeader (cb) { + console.log([ + 'server {', + '\tserver_name runnable.com;' + ].join('\n')); + cb(); +} + +function nginxManualLocations (cb) { + console.log([ + '### Directives we do not want to move to sandbox app yet', + 'location = / {', + '\treturn 302 $scheme://runnable.io;', + '}', + 'location = /login {', + '\treturn 302 $scheme://code.runnable.com/login;', + '}', + 'location = /signup {', + '\treturn 302 $scheme://code.runnable.com/signup;', + '}', + 'location = /about {', + '\treturn 302 
$scheme://code.runnable.com/about;', + '}', + 'location = /jobs {', + '\treturn 302 $scheme://code.runnable.com/jobs;', + '}', + 'location = /privacy {', + '\treturn 302 $scheme://code.runnable.com/privacy;', + '}', + ].join('\n')); + cb(); +} + +function nginxConfigFooter (cb) { + console.log([ + '} # !server', + ].join('\n')); + cb(); +} + +function locationEqualsDirective (path) { + var quote = (path.indexOf(' ') !== -1) ? '"' : ''; + console.log([ + '\tlocation = ' + quote + '/' + path + quote + ' {', + '\t\treturn 301 ' + quote + '$scheme://code.runnable.com/' + path + quote + ';', + '\t}' + ].join('\n')); +} + +function locationRegexDirective (path) { + // these won't need any quotes to escape + console.log([ + '\tlocation ~ ^\\/' + path + '(\\/.+)?$ {', + '\t\treturn 301 $scheme://code.runnable.com/' + path + '$1;', + '\t}' + ].join('\n')); +} diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml new file mode 100644 index 00000000..d315b284 --- /dev/null +++ b/ansible/roles/nginx/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- name: install nginx + become: true + apt: + update_cache=yes + cache_valid_time=604800 + pkg=nginx + state=present + +- name: install node dependencies (build-essential) + become: true + apt: + update_cache=yes + cache_valid_time=604800 + pkg=build-essential + state=present + +- name: get node install script + become: true + get_url: + url=https://deb.nodesource.com/setup + dest=~/node-install.sh + mode=744 + +- name: check for node PPA + become: true + register: ppa_check + file: + path=/etc/apt/sources.list.d/nodesource.list + +- name: install node PPA + become: true + when: ppa_check.changed + shell: ~/node-install.sh + +- name: install node + become: true + apt: + update_cache=yes + cache_valid_time=604800 + pkg=nodejs + state=present + +- name: copy node script for nginx + become: true + copy: + src=genNginxConf.js + dest=/tmp/genNginxConf.js + +# - name: download sitemap +# become: true +# get_url: +# 
url=http://runnable.com/sitemap.xml +# dest=/tmp/sitemap.xml +# mode=444 +# timeout=30 +# force=yes + +- name: install packages + become: true + shell: + npm install 101 request xml2js async + chdir=/tmp + +- name: generate nginx config + become: true + shell: node /tmp/genNginxConf.js > /etc/nginx/conf.d/runnable-migration.conf + chdir=/tmp + +- name: remove default nginx config + become: true + file: + path=/etc/nginx/sites-enabled/default + state=absent + +- name: test nginx config + become: true + shell: nginx -t + +- name: reload nginx + become: true + shell: nginx -s reload diff --git a/ansible/roles/node/README.md b/ansible/roles/node/README.md new file mode 100644 index 00000000..3c7dcd9f --- /dev/null +++ b/ansible/roles/node/README.md @@ -0,0 +1,26 @@ +Role Name +======== + +Ansible Role to Install Docker on CentOS 6.5 + +Role Variables +-------------- + +``` +docker_centos_packages: + - { package: "docker" } +``` + +Example Playbook +------------------------- + + - hosts: docker-servers + roles: + - { role: docker-centos, + tags: ["docker"] } + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/node/tasks/main.yml b/ansible/roles/node/tasks/main.yml new file mode 100644 index 00000000..752373c8 --- /dev/null +++ b/ansible/roles/node/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: install node.js + become: true + nodejs: + version={{node_version|default('0.10.40')}} + +- name: upgrade npm + become: true + npm: + name=npm + version={{npm_version|default('2.15.3')}} + global=yes diff --git a/ansible/roles/node_service/meta/main.yml b/ansible/roles/node_service/meta/main.yml new file mode 100644 index 00000000..26ed4014 --- /dev/null +++ b/ansible/roles/node_service/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: node } diff --git a/ansible/roles/node_service/tasks/main.yml b/ansible/roles/node_service/tasks/main.yml new file mode 100644 index 00000000..48c77851 --- /dev/null +++ 
b/ansible/roles/node_service/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: remove node_modules + tags: deploy + when: remove_node_modules is defined + file: + path=/opt/runnable/{{ app_name }}/node_modules + state=absent + +- name: npm install {{ app_name }} + tags: deploy + npm: + path=/opt/runnable/{{ app_name }} + state=latest + production=yes + +- name: make override file + tags: deploy + become: true + lineinfile: + dest="/etc/init/{{ app_name }}.override" + line="manual" + create=yes + +- name: create new config file + tags: deploy,render_node_service_config + become: true + template: + src=upstart.conf + dest=/etc/init/{{ app_name }}.conf + backup=yes + +- name: restart service {{ app_name }} + tags: deploy,render_node_service_config + become: true + when: dock is not defined + service: + name={{ app_name }} + state=restarted + enabled=yes diff --git a/ansible/roles/node_service/templates/upstart.conf b/ansible/roles/node_service/templates/upstart.conf new file mode 100644 index 00000000..7fac67c5 --- /dev/null +++ b/ansible/roles/node_service/templates/upstart.conf @@ -0,0 +1,41 @@ +#!upstart +description "{{ app_name }}" +author "Anandkumar Patel" + +env NPM_BIN=/usr/local/bin/npm +env APP_DIR=/opt/runnable/{{ app_name }} +env LOG_FILE={{ app_log_dir }}/{{ app_name }}.log +env NODE_ENV={{ node_env }} + +{% if enviroment_vars is defined %} +{% for name, value in enviroment_vars.iteritems() %} +env {{ name }}={{ value }} +{% endfor %} +{% endif %} + +start on (local-filesystems and net-device-up IFACE=eth0) +stop on shutdown + +script + touch $LOG_FILE + chdir $APP_DIR + echo $$ > /var/run/{{ app_name }}.pid + exec $NPM_BIN start >> $LOG_FILE 2>&1 +end script + +pre-start script + # Date format same as (new Date()).toISOString() for consistency + echo "[`date -u +%Y-%m-%dT%T.%3NZ`] (sys) Starting" >> $LOG_FILE +end script + +pre-stop script + rm /var/run/{{ app_name }}.pid + echo "[`date -u +%Y-%m-%dT%T.%3NZ`] (sys) Stopping" >> $LOG_FILE +end script + 
+post-start script + echo "===== App restarted =====" >> $LOG_FILE +end script + +respawn +respawn limit 5 1 # give up restart after 5 respawns in 1 seconds diff --git a/ansible/roles/notify/handlers/main.yml b/ansible/roles/notify/handlers/main.yml new file mode 100644 index 00000000..dd956a7a --- /dev/null +++ b/ansible/roles/notify/handlers/main.yml @@ -0,0 +1,51 @@ +--- +- name: send done message + tags: + - always + slack: + token={{ slack_token }} + channel={{ item }} + msg="{{ local_username.stdout }} is {{ notify_end_message }}" + with_items: + - "{{ slack_channels }}" + - "{{ extra_channels | default([]) }}" + changed_when: true + +- name: send rollbar message for deploy + when: rollbar_token is defined and node_env is defined and git_branch is defined + tags: + - rollbar + - always + rollbar_deployment: + token={{ rollbar_token }} + environment={{ node_env }} + revision={{ git_branch }} + rollbar_user={{ local_username.stdout }} + changed_when: true + +- name: send datadog message for deploy + when: datadog_api_key is defined and datadog_tags is defined and git_branch is defined + ignore_errors: true + tags: + - datadog + - always + datadog_event: + api_key={{ datadog_api_key }} + title="{{ name }} Deployment" + text="{{ local_username.stdout }} deployed {{ name }}@{{ git_branch }} to {{ node_env }}" + alert_type=success + tags="{{ datadog_tags }},host:{{ ansible_hostname }}" + changed_when: true + +- name: put deployed version in consul + delegate_to: "{{ groups['consul'][0] }}" + run_once: true + when: git_branch is defined and name is defined and consul_host_address is defined + tags: + - consul-environment + - always + uri: + method=PUT + url=http://{{ consul_host_address }}:8500/v1/kv/runnable/environment/{{ name }} + body="{{ git_branch }}" + changed_when: true diff --git a/ansible/roles/notify/tasks/main.yml b/ansible/roles/notify/tasks/main.yml new file mode 100644 index 00000000..b73f0cb2 --- /dev/null +++ b/ansible/roles/notify/tasks/main.yml @@ 
-0,0 +1,25 @@ +--- +# - name: get the username running the deploy +# tags: +# - always +# local_action: command whoami +# register: local_username +# delegate_to: 127.0.0.1 + +# - name: send start message +# run_once: yes +# changed_when: True +# tags: +# - always +# slack: +# token={{ slack_token }} +# channel={{ item }} +# msg="{{ local_username.stdout }} is {{ notify_start_message }}" +# with_items: +# - "{{ slack_channels }}" +# - "{{ extra_channels | default([]) }}" +# notify: +# - send done message +# - send rollbar message for deploy +# - send datadog message for deploy +# - put deployed version in consul diff --git a/ansible/roles/notify/vars/main.yml b/ansible/roles/notify/vars/main.yml new file mode 100644 index 00000000..a544062c --- /dev/null +++ b/ansible/roles/notify/vars/main.yml @@ -0,0 +1,4 @@ +# message to say when starting deploy +notify_start_message: "starting to deploy {{ container_image | default(app_name) }} {{ container_tag | default(git_branch) }} to {{ ansible_hostname }}" +# message printed when deploy finished +notify_end_message: "finished deploying {{ container_image | default(app_name) }} {{ container_tag | default(git_branch) }} to {{ ansible_hostname }}" diff --git a/ansible/roles/package-aws/tasks/main.yml b/ansible/roles/package-aws/tasks/main.yml new file mode 100644 index 00000000..34ac6d50 --- /dev/null +++ b/ansible/roles/package-aws/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- name: Install ec2-metadata in /usr/local/bin + become: true + get_url: + url=http://s3.amazonaws.com/ec2metadata/ec2-metadata + dest=/usr/local/bin + mode=0755 + +- name: Download the zip file for ec2-api-tools + become: true + get_url: + url=http://s3.amazonaws.com/ec2-downloads/ec2-api-tools.zip + dest=/usr/local + mode=0755 + +- name: Unzip the ec2-api-tool + become: true + unarchive: + src: /usr/local/ec2-api-tools.zip + dest: /usr/local + copy: no + mode: 0755 + +- name: remove old ec2 dir/link + become: true + file: + path: /usr/local/ec2 + state: 
absent + +- name: Link to ec2 + become: true + file: + src=/usr/local/ec2-api-tools-1.7.5.1 + dest=/usr/local/ec2 + state=link diff --git a/ansible/roles/package-dock/tasks/main.yml b/ansible/roles/package-dock/tasks/main.yml new file mode 100644 index 00000000..880f453f --- /dev/null +++ b/ansible/roles/package-dock/tasks/main.yml @@ -0,0 +1,69 @@ +--- +- name: Install make + become: true + apt: + pkg=make + state=latest + update_cache=yes + cache_valid_time=604800 + +- name: Install unzip + become: true + apt: + pkg=unzip + state=latest + update_cache=yes + cache_valid_time=604800 + +- name: Install openjdk-7-jdk + become: true + apt: + pkg=openjdk-7-jdk + state=latest + update_cache=yes + cache_valid_time=604800 + +- name: Install jq + become: true + tags: "Install jq" + apt: + pkg=jq + state=latest + update_cache=yes + cache_valid_time=604800 + + +- name: Download Vault 041 + become: true + get_url: + url=https://releases.hashicorp.com/vault/0.4.1/vault_0.4.1_linux_amd64.zip + dest=/usr/local/bin + +- name: unzip vault_0.4.1_linux_amd64.zip + become: true + unarchive: + src: /usr/local/bin/vault_0.4.1_linux_amd64.zip + dest: /usr/local/bin + copy: no + mode: 0755 + +- name: Download Consul-Template + become: true + get_url: + url=https://releases.hashicorp.com/consul-template/0.11.1/consul-template_0.11.1_linux_amd64.zip + dest=/usr/local/bin + +- name: unzip + become: true + unarchive: + src: /usr/local/bin/consul-template_0.11.1_linux_amd64.zip + dest: /usr/local/bin + copy: no + mode: 0755 + +- name: Download weave 1.5.0 + become: true + get_url: + url=https://github.com/weaveworks/weave/releases/download/v1.5.0/weave + dest=/usr/local/bin + mode=0755 diff --git a/ansible/roles/package_ntp/defaults/main.yml b/ansible/roles/package_ntp/defaults/main.yml new file mode 100644 index 00000000..6cf338f4 --- /dev/null +++ b/ansible/roles/package_ntp/defaults/main.yml @@ -0,0 +1,6 @@ +--- +ntp_servers: +- 0.north-america.pool.ntp.org +- 1.north-america.pool.ntp.org 
+- 2.north-america.pool.ntp.org +- 3.north-america.pool.ntp.org diff --git a/ansible/roles/package_ntp/handlers/main.yml b/ansible/roles/package_ntp/handlers/main.yml new file mode 100644 index 00000000..067ffc8d --- /dev/null +++ b/ansible/roles/package_ntp/handlers/main.yml @@ -0,0 +1,7 @@ +--- +- name: Start and Enable ntp + become: true + service: + name=ntp + state=started + enabled=yes diff --git a/ansible/roles/package_ntp/tasks/main.yml b/ansible/roles/package_ntp/tasks/main.yml new file mode 100644 index 00000000..f9a2a526 --- /dev/null +++ b/ansible/roles/package_ntp/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: Install ntp package + become: true + apt: + name=ntp + state=present + force=yes + update_cache=yes + cache_valid_time=604800 + +- name: Copy the ntp.conf template file + become: true + template: + src=ntp.conf.j2 + dest=/etc/ntp.conf + mode=0644 + notify: Start and Enable ntp + diff --git a/ansible/roles/package_ntp/templates/ntp.conf.j2 b/ansible/roles/package_ntp/templates/ntp.conf.j2 new file mode 100644 index 00000000..149c6c52 --- /dev/null +++ b/ansible/roles/package_ntp/templates/ntp.conf.j2 @@ -0,0 +1,19 @@ +# {{ ansible_managed }} + +driftfile /var/lib/ntp/ntp.drift + +statsdir /var/log/ntpstats/ +statistics loopstats peerstats clockstats +filegen loopstats file loopstats type day enable +filegen peerstats file peerstats type day enable +filegen clockstats file clockstats type day enable + +restrict -4 default kod notrap nomodify nopeer noquery +restrict -6 default kod notrap nomodify nopeer noquery + +restrict 127.0.0.1 +restrict ::1 + +{% for server in ntp_servers %} +server {{ server }} iburst +{% endfor %} diff --git a/ansible/roles/postgres/tasks/main.yml b/ansible/roles/postgres/tasks/main.yml new file mode 100644 index 00000000..f35993d7 --- /dev/null +++ b/ansible/roles/postgres/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: create configMap folder + file: + state: directory + path: "{{ config_maps_path }}" + +- name: create cert
config map + tags: [ configure_proxy, configure_files ] + template: + src: init-user-db.sh + dest: "{{ config_maps_path }}/{{ name }}-init-user-db.yml" diff --git a/ansible/roles/postgres/templates/init-user-db.sh b/ansible/roles/postgres/templates/init-user-db.sh new file mode 100644 index 00000000..0838bde8 --- /dev/null +++ b/ansible/roles/postgres/templates/init-user-db.sh @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-init-user-db +data: + init-user-db.sh: | + #!/bin/bash + set -e + + psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL + CREATE DATABASE {{ egret_pg_database }}; + GRANT ALL PRIVILEGES ON DATABASE {{ egret_pg_database }} TO {{ pg_user }}; + CREATE DATABASE {{ arithmancy_pg_database }}; + GRANT ALL PRIVILEGES ON DATABASE {{ arithmancy_pg_database }} TO {{ pg_user }}; + CREATE DATABASE {{ big_poppa_pg_database }}; + GRANT ALL PRIVILEGES ON DATABASE {{ big_poppa_pg_database }} TO {{ pg_user }}; + EOSQL diff --git a/ansible/roles/prometheus-alerts/tasks/main.yml b/ansible/roles/prometheus-alerts/tasks/main.yml new file mode 100644 index 00000000..8e0d2f42 --- /dev/null +++ b/ansible/roles/prometheus-alerts/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: create configMap folder + file: + state: directory + path: "{{ config_maps_path }}" + +- name: create prometheus config + tags: [ deploy ] + template: + dest: "{{ config_maps_path }}/{{ name }}.yml" + src: prometheus-alerts.yml diff --git a/ansible/roles/prometheus-alerts/templates/prometheus-alerts.yml b/ansible/roles/prometheus-alerts/templates/prometheus-alerts.yml new file mode 100644 index 00000000..1da9c399 --- /dev/null +++ b/ansible/roles/prometheus-alerts/templates/prometheus-alerts.yml @@ -0,0 +1,56 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-conf +data: + prometheus-alerts.yml: | + global: + # ResolveTimeout is the time after which an alert is declared resolved + # if it has not been
updated. + resolve_timeout: 10m + + slack_api_url: "{{ ops_slack_channel_url }}" + + # The root node of the routing tree. + route: + # A default receiver + receiver: slack + + routes: + - match_re: + reportTo: .*pagerduty.* + receiver: pagerduty + + - match_re: + reportTo: .*drake.* + receiver: drake + + # A list of notification receivers. + receivers: + - name: "pagerduty" + pagerduty_configs: + - send_resolved: true + service_key: "{{ pager_duty_key }}" + description: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.description{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + slack_configs: + - send_resolved: true + channel: ops + title: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.summary{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + text: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.description{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + + - name: "slack" + slack_configs: + - send_resolved: true + channel: ops + title: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.summary{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + text: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.description{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + + - name: "drake" + webhook_configs: + - send_resolved: true + url: "http://{{ drake_hostname }}/prometheus" + slack_configs: + - send_resolved: true + channel: ops + title: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.summary{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + text: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.description{{ '}}' }} {{ '{{' }} end {{ '}}' }}" diff --git a/ansible/roles/prometheus/files/alerts.conf b/ansible/roles/prometheus/files/alerts.conf new file mode 100644 index 00000000..33a1238c --- /dev/null +++ b/ansible/roles/prometheus/files/alerts.conf @@ -0,0 +1,102 @@ +ALERT HookDockUnresponsive + IF up == 0 + FOR 10m + LABELS { + reportTo = "drake", + type = "unresponsive" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock unresponsive host={{ 
$labels.hostIp }} labels={{ $labels }}", + description = "(hook) Dock unresponsive host={{ $labels.hostIp }} labels={{ $labels }}" + } + +ALERT DockUnresponsive + IF up == 0 + FOR 1h + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock unresponsive host={{ $labels.hostIp }} labels={{ $labels }}", + description = "Dock unresponsive host={{ $labels.hostIp }} labels={{ $labels }}" + } + +ALERT HookDockDockerDiskFull + IF (node_filesystem_size{device="/dev/xvdb"} - node_filesystem_free{device="/dev/xvdb"}) / node_filesystem_size{device="/dev/xvdb"} * 100 > 70 + FOR 5m + LABELS { + reportTo = "drake", + type = "disk_filled" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock /docker disk 70% host={{ $labels.hostIp }} labels={{ $labels }}", + description = "(hook) Dock /docker disk 70% host={{ $labels.hostIp }} labels={{ $labels }}" + } + +ALERT DockDockerDiskFull + IF (node_filesystem_size{device="/dev/xvdb"} - node_filesystem_free{device="/dev/xvdb"}) / node_filesystem_size{device="/dev/xvdb"} * 100 > 90 + FOR 30m + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock /docker disk 90% host={{ $labels.hostIp }} labels={{ $labels }}", + description = "Playbook here: https://github.com/CodeNow/devops-scripts/wiki/server-out-of-disk" + } + +ALERT DockRootDiskFull + IF (node_filesystem_size{device="/dev/xvda1"} - node_filesystem_free{device="/dev/xvda1"}) / node_filesystem_size{device="/dev/xvda1"} * 100 > 90 + FOR 5m + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock root disk 90% host={{ $labels.hostIp }} labels={{ $labels }}", + description = "Playbook here: https://github.com/CodeNow/devops-scripts/wiki/server-out-of-disk" + } + +ALERT HookDockOutOfRam + IF (node_memory_MemFree + node_memory_Buffers + node_memory_Cached) < 150000000 + FOR 5m + LABELS { + reportTo = "drake", + type = "memory_exhausted" + } + ANNOTATIONS { + summary = "({{ 
$labels.env }}) Dock out of ram host={{ $labels.hostIp }} labels={{ $labels }}", + description = "(hook) Dock out of ram host={{ $labels.hostIp }} labels={{ $labels }}" + } + +ALERT DockOutOfRam + IF (node_memory_MemFree + node_memory_Buffers + node_memory_Cached) < 130000000 + FOR 30m + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock out of ram host={{ $labels.hostIp }} labels={{ $labels }}", + description = "unhealthy dock {{ $labels.hostIp }} using dock-cli and message slack #customer channel with labels={{ $labels }}" + } + +ALERT DockHighLoad + IF node_load15 > 90 + FOR 30m + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock is experiencing high load host={{ $labels.hostIp }} labels={{ $labels }}", + description = "ssh {{ $labels.hostIp }} into dock make sure it is responsive, if it is not, unhealthy. `docks unhealthy -e delta {{ $labels.hostIp }}`" + } + +ALERT ContainerUsingTooMuchNetwork + IF container_network_transmit_bytes_total{interface="eth0",container_label_type="user-container"} / 1000000 > 200 + FOR 30m + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) container is using too much network id={{ $labels.id }} org={{ $labels.githubOrgId }} org name: {{ $labels.container_label_ownerUsername }} ", + description = "ssh {{ $labels.hostIp }} and docker rm and docker kill offending container {{ $labels.id }}" + } + diff --git a/ansible/roles/prometheus/tasks/main.yml b/ansible/roles/prometheus/tasks/main.yml new file mode 100644 index 00000000..f6431c13 --- /dev/null +++ b/ansible/roles/prometheus/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: create configMap folder + file: + state: directory + path: "{{ config_maps_path }}" + +- name: create prometheus config + tags: [ deploy ] + template: + dest: "{{ config_maps_path }}/{{ name }}.yml" + src: prometheus.yml diff --git a/ansible/roles/prometheus/templates/prometheus.yml
b/ansible/roles/prometheus/templates/prometheus.yml new file mode 100644 index 00000000..cac8d457 --- /dev/null +++ b/ansible/roles/prometheus/templates/prometheus.yml @@ -0,0 +1,89 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-conf +data: + alerts.conf: | + {{ lookup('file', 'alerts.conf') | indent(4) }} + + prometheus.yml: | + # my global config + global: + scrape_interval: 1m + evaluation_interval: 1m + + rule_files: + - "alerts.conf" + + scrape_configs: + # monitor self to get metrics for prometheus + - job_name: prometheus + metrics_path: /metrics + static_configs: + - targets: [ 'localhost:{{ prometheus_port }}' ] + + + # pulls server list from ec2 and drops all servers that are not in env or a dock + - job_name: container_info + scrape_interval: 30m + # keys to access this region and port of prom + ec2_sd_configs: + - region: us-west-2 + access_key: {{ prometheus_aws_access_key }} + secret_key: {{ prometheus_aws_secret_key }} + port: {{ cadvisor_port }} + + # drop all servers not in this env and not a dock + relabel_configs: + - source_labels: [__meta_ec2_tag_aws_autoscaling_groupName] + regex: {{ env }}-asg-dock-pool + action: drop + + - source_labels: [__meta_ec2_tag_env] + regex: {{ node_env }} + action: keep + + - source_labels: [__meta_ec2_tag_role] + regex: dock + action: keep + + - source_labels: [__meta_ec2_tag_org] + target_label: githubOrgId + + - source_labels: [__meta_ec2_private_ip] + target_label: hostIp + + - source_labels: [__meta_ec2_tag_env] + target_label: env + + # pulls server list from ec2 and drops all servers that are not in env or a dock + - job_name: server_info + # keys to access this region and port of prom + ec2_sd_configs: + - region: us-west-2 + access_key: {{ prometheus_aws_access_key }} + secret_key: {{ prometheus_aws_secret_key }} + port: {{ node_exporter_port }} + + # drop all servers not in this env and not a dock + relabel_configs: + - source_labels: [__meta_ec2_tag_aws_autoscaling_groupName] + regex: 
{{ env }}-asg-dock-pool + action: drop + + - source_labels: [__meta_ec2_tag_env] + regex: {{ node_env }} + action: keep + + - source_labels: [__meta_ec2_tag_role] + regex: dock + action: keep + + - source_labels: [__meta_ec2_tag_org] + target_label: githubOrgId + + - source_labels: [__meta_ec2_private_ip] + target_label: hostIp + + - source_labels: [__meta_ec2_tag_env] + target_label: env diff --git a/ansible/roles/pull-image/tasks/main.yml b/ansible/roles/pull-image/tasks/main.yml new file mode 100644 index 00000000..088a7143 --- /dev/null +++ b/ansible/roles/pull-image/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: login to registry + tags: deploy + become: yes + command: docker login -u {{ registry_username }} -p {{ registry_token }} -e "info@runnable.com" {{ registry_host }} + +- name: pull image + tags: deploy + become: true + command: docker pull {{ container_image }}:{{ container_tag }} + +- name: logout of registry + tags: deploy + become: yes + command: docker logout {{ registry_host }} diff --git a/ansible/roles/rabbitmq/tasks/main.yml b/ansible/roles/rabbitmq/tasks/main.yml new file mode 100644 index 00000000..a5210b27 --- /dev/null +++ b/ansible/roles/rabbitmq/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: create configMap folder + file: + state: directory + path: "{{ config_maps_path }}" + +- name: create rabbit config yaml + template: + dest: "{{ config_maps_path }}/{{ name }}.yml" + src: rabbitmq.config diff --git a/ansible/roles/rabbitmq/templates/rabbitmq.config b/ansible/roles/rabbitmq/templates/rabbitmq.config new file mode 100644 index 00000000..b732bca2 --- /dev/null +++ b/ansible/roles/rabbitmq/templates/rabbitmq.config @@ -0,0 +1,82 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-conf +data: + rabbitmq.config: | + [ + {rabbit, + [ + {default_vhost, <<"/">>}, + {default_user, <<"{{ rabbit_username }}">>}, + {default_pass, <<"{{ rabbit_password }}">>}, + {default_permissions, [<<".*">>, <<".*">>, <<".*">>]} + ]}, + 
{rabbitmq_management, + [ + {load_definitions, "/etc/rabbitmq/schema.json"} + ]} + ]. + + schema.json: | + { + "rabbit_version": "3.5.3", + "users": [], + "vhosts": [ + { + "name": "/" + } + ], + "permissions": [], + "parameters": [], + "policies": [], + "queues": [], + "exchanges": [ + { + "name": "application.url.visited", + "vhost": "/", + "type": "fanout", + "durable": true, + "auto_delete": false, + "internal": false, + "arguments": {} + }, + { + "name": "organization.integration.prbot.disabled", + "vhost": "/", + "type": "fanout", + "durable": true, + "auto_delete": false, + "internal": false, + "arguments": {} + }, + { + "name": "routing.cache.invalidated", + "vhost": "/", + "type": "fanout", + "durable": true, + "auto_delete": false, + "internal": false, + "arguments": {} + }, + { + "name": "asg.update.requested", + "vhost": "/", + "type": "fanout", + "durable": true, + "auto_delete": false, + "internal": false, + "arguments": {} + }, + { + "name": "organization.disallowed", + "vhost": "/", + "type": "fanout", + "durable": true, + "auto_delete": false, + "internal": false, + "arguments": {} + } + ], + "bindings": [] + } diff --git a/ansible/roles/redis_key/README.md b/ansible/roles/redis_key/README.md new file mode 100644 index 00000000..d1c259d0 --- /dev/null +++ b/ansible/roles/redis_key/README.md @@ -0,0 +1,10 @@ +Role Name +======== + +Ansible Role to setup redis key + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/redis_key/meta/main.yml b/ansible/roles/redis_key/meta/main.yml new file mode 100644 index 00000000..4a1f6483 --- /dev/null +++ b/ansible/roles/redis_key/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: docker } diff --git a/ansible/roles/redis_key/tasks/main.yml b/ansible/roles/redis_key/tasks/main.yml new file mode 100644 index 00000000..65819c6b --- /dev/null +++ b/ansible/roles/redis_key/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: check keys + become: yes + command: docker run 
--rm redis redis-cli -h {{ redis_host_address }} LLEN {{ redis_key }} + register: key_length + changed_when: key_length.stdout == "0" + +- name: create key if not exist + when: key_length.changed + become: yes + command: docker run --rm redis redis-cli -h {{ redis_host_address }} RPUSH {{ redis_key }} {{ name }} {{ name }} diff --git a/ansible/roles/runnable-domain-proxy/tasks/main.yml b/ansible/roles/runnable-domain-proxy/tasks/main.yml new file mode 100644 index 00000000..5199dc1b --- /dev/null +++ b/ansible/roles/runnable-domain-proxy/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: create chained cert + tags: [ certs ] + shell: | + cat {{ domains_root }}/{{ domain }}/cert.pem > {{ domains_root }}/{{ domain }}/chained.pem + echo "" >> {{ domains_root }}/{{ domain }}/chained.pem # Add newline + cat {{ domains_root }}/{{ domain }}/ca.pem >> {{ domains_root }}/{{ domain }}/chained.pem + +- name: create dhparam.pem + tags: [ certs ] + command: openssl dhparam -out {{ domains_root }}/{{ domain }}/dhparam.pem 2048 + +- set_fact: + ca_data: "{{ lookup('file', '{{ domains_root }}/{{ domain }}/ca.pem') }}" +- set_fact: + cert_data: "{{ lookup('file', '{{ domains_root }}/{{ domain }}/cert.pem') }}" +- set_fact: + key_data: "{{ lookup('file', '{{ domains_root }}/{{ domain }}/key.pem') }}" +- set_fact: + chained_data: "{{ lookup('file', '{{ domains_root }}/{{ domain }}/chained.pem') }}" +- set_fact: + # NOTE: We currently have no way of generating this file + dhparam_data: "{{ lookup('file', '{{ domains_root }}/{{ domain }}/dhparam.pem') }}" + +- name: create configMap folder + file: + state: directory + path: "{{ config_maps_path }}" + +- name: create cert config map + tags: [ configure_proxy, configure_files ] + template: + src: certs.yml + dest: "{{ config_maps_path }}/{{ name }}-certs.yml" + +- name: create proxy template + tags: [ configure_proxy, configure_files ] + template: + src: proxy-nginx.conf + dest: "{{ config_maps_path }}/{{ name }}-base-config.yml" + +- name: 
create mixpanel template + tags: [ configure_proxy, configure_files ] + template: + src: mixpanel.tmpl + dest: "{{ config_maps_path }}/{{ name }}-mixpanel-config.yml" + +- name: put api template in place + tags: [ configure_proxy, configure_files ] + template: + src: sites-enabled.tmpl + dest: "{{ config_maps_path }}/{{ name }}-sites-enabled-config.yml" diff --git a/ansible/roles/runnable-domain-proxy/templates/certs.yml b/ansible/roles/runnable-domain-proxy/templates/certs.yml new file mode 100644 index 00000000..a1127bd9 --- /dev/null +++ b/ansible/roles/runnable-domain-proxy/templates/certs.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-{{ domain |replace('.', '-') }}-certs +data: + ca.pem: | + {{ ca_data | indent(4) }} + cert.pem: | + {{ cert_data | indent(4) }} + chained.pem: | + {{ chained_data | indent(4) }} + dhparam.pem: | + {{ dhparam_data | indent(4) }} + key.pem: | + {{ key_data | indent(4) }} diff --git a/ansible/roles/runnable-domain-proxy/templates/mixpanel.tmpl b/ansible/roles/runnable-domain-proxy/templates/mixpanel.tmpl new file mode 100644 index 00000000..78dd3ec8 --- /dev/null +++ b/ansible/roles/runnable-domain-proxy/templates/mixpanel.tmpl @@ -0,0 +1,25 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-mixpanel-config +data: + mixpanel.conf: | + server { + listen 80; + listen [::]:80; + server_name mixpanel.{{ domain }}; + return 301 https://$server_name$request_uri; + } + + server { + listen 443 ssl; + server_name mixpanel.{{ domain }}; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ domain }}/chained.pem; + ssl_certificate_key /etc/ssl/certs/{{ domain }}/key.pem; + + location / { + proxy_pass https://api.mixpanel.com; + } + } diff --git a/ansible/roles/runnable-domain-proxy/templates/nginx-status.conf b/ansible/roles/runnable-domain-proxy/templates/nginx-status.conf new file mode 100644 index 00000000..52a85ab8 --- /dev/null +++ 
b/ansible/roles/runnable-domain-proxy/templates/nginx-status.conf @@ -0,0 +1,10 @@ +server { + listen 80; + listen [::]:80; + server_name localhost; + + location /nginx_status { + access_log off; + stub_status on; + } +} diff --git a/ansible/roles/runnable-domain-proxy/templates/proxy-nginx.conf b/ansible/roles/runnable-domain-proxy/templates/proxy-nginx.conf new file mode 100644 index 00000000..5e78853a --- /dev/null +++ b/ansible/roles/runnable-domain-proxy/templates/proxy-nginx.conf @@ -0,0 +1,35 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-base-conf +data: + nginx.conf: | + user www-data; + worker_processes 4; + pid /run/nginx.pid; + + events { + worker_connections 5000; + } + + http { + ## + # Basic Settings + ## + tcp_nodelay on; + keepalive_timeout 65; + server_tokens off; + + ## + # Logging Settings + ## + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + ## + # Virtual Host Configs + ## + include /etc/nginx/sites-enabled/*; + include /etc/nginx/other-sites-enabled/*; + } diff --git a/ansible/roles/runnable-domain-proxy/templates/sites-enabled.tmpl b/ansible/roles/runnable-domain-proxy/templates/sites-enabled.tmpl new file mode 100644 index 00000000..0cd395b8 --- /dev/null +++ b/ansible/roles/runnable-domain-proxy/templates/sites-enabled.tmpl @@ -0,0 +1,139 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }}-sites-enabled-config +data: + status.conf: | + server { + listen 80; + server_name status.{{ domain }}; + + location /nginx_status { + access_log off; + stub_status on; + } + } + + user-pixel.conf: | + server { + listen 80; + server_name blue.{{ domain }}; + location / { + return 404; + } + } + + server { + listen 443 ssl; + server_name blue.{{ domain }}; + gzip off; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ domain }}/chained.pem; + ssl_certificate_key /etc/ssl/certs/{{ domain }}/key.pem; + ssl_trusted_certificate /etc/ssl/certs/{{ domain }}/ca.pem; + ssl_dhparam /etc/ssl/certs/{{ 
domain }}/dhparam.pem; + + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + + ssl_stapling on; + ssl_stapling_verify on; + resolver 8.8.8.8 8.8.4.4 valid=300s; + resolver_timeout 5s; + + location = /pixel.gif { + add_header Set-Cookie "isModerating=1; Domain=.{{ domain }}; Path=/; HttpOnly;"; + empty_gif; + } + + location / { + return 404; + } + } + +{% for service in services %} +{% if 'include' not in service or service.include != false %} + {{ service.name }}.conf: | + map $http_upgrade $connection_upgrade { + default upgrade; + '' close; + } + +{% for upstream in service.upstreams %} + upstream {{ service.name }}_{{ upstream.name }} { + server {{ service.name }}:{{ upstream.port }} max_fails=0 fail_timeout=1s; + } + +{% endfor %} + server { + listen 80; + client_max_body_size 5g; + server_name {{ service.name }}.{{ domain }}; + access_log /var/log/nginx/{{ service.name }}.access.log; + +{% for upstream in service.upstreams %} + location {{ upstream.route }} { + proxy_pass http://{{ service.name }}_{{ upstream.name }}; + proxy_http_version 1.1; + proxy_set_header upgrade $http_upgrade; + proxy_set_header connection $connection_upgrade; + + proxy_set_header Host $http_host; + proxy_set_header x-forwarded-host $http_host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol $scheme; + proxy_set_header x-forwarded-proto $scheme; + } + +{% endfor %} + } + + server { + listen 443 ssl; + client_max_body_size 5g; + server_name {{ service.name }}.{{ domain }}; + access_log /var/log/nginx/{{ service.name }}.ssl.access.log; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ domain }}/chained.pem; + ssl_certificate_key /etc/ssl/certs/{{ domain }}/key.pem; + ssl_trusted_certificate /etc/ssl/certs/{{ domain }}/ca.pem; + ssl_dhparam 
/etc/ssl/certs/{{ domain }}/dhparam.pem; + + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + + ssl_stapling on; + ssl_stapling_verify on; + resolver kube-dns.kube-system.svc.cluster.local valid=5s; + resolver_timeout 5s; + +{% for upstream in service.upstreams %} + location {{ upstream.route }} { + proxy_pass http://{{ service.name }}_{{ upstream.name }}; + proxy_http_version 1.1; + proxy_set_header upgrade $http_upgrade; + proxy_set_header connection $connection_upgrade; + + proxy_set_header Host $http_host; + proxy_set_header x-forwarded-host $http_host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol $scheme; + proxy_set_header x-forwarded-proto $scheme; + } + +{% endfor %} + } +{% endif %} +{% endfor %} diff --git a/ansible/roles/service_node/tasks/main.yml b/ansible/roles/service_node/tasks/main.yml new file mode 100644 index 00000000..1172a255 --- /dev/null +++ b/ansible/roles/service_node/tasks/main.yml @@ -0,0 +1,97 @@ +--- +#- name: Install service-tier node and utilities +# become: true +# script: nodeInstall.sh +- name: "remove ax25-node" + become: yes + apt: + name=node + force=yes + purge=yes + state=absent + +- stat: path=/usr/bin/node + register: bin_node + +- stat: path=/usr/bin/sbin + register: sbin_node + +- name: "remove any linked node in /usr/bin" + become: yes + file: + path=/usr/bin/node + state=absent + when: bin_node.stat.islnk is defined and bin_node.stat.islnk + +- name: "remove any linked node in /usr/sbin" + become: yes + file: + path=/usr/sbin/node + state=absent + when: sbin_node.stat.islnk is defined and sbin_node.stat.islnk + +- name: "download node 4.x dpkg update script" + get_url: + url=https://deb.nodesource.com/setup_4.x + dest=/tmp/setup_4.x + +- name: "update package repo for 
node 4.x" + become: yes + command: /usr/bin/env bash /tmp/setup_4.x + +- name: "clean up node 4.x package repo updater" + file: + path=/tmp/setup_4.x + state=absent + +- name: "install node 4.x" + become: yes + apt: + name=nodejs + state=latest + +- name: "link nodejs to node" + become: yes + file: + src=/usr/bin/nodejs + dest=/usr/bin/node + owner=root + group=root + state=link + +- name: "install build-essential" + become: yes + apt: + name: build-essential + state: latest + +- name: "install jq" + become: yes + apt: + name=jq + state=latest + +- name: "download nvm installer" + get_url: + url=https://raw.githubusercontent.com/creationix/nvm/v0.31.0/install.sh + dest=/tmp/install.sh + +- name: "install nvm" + command: /usr/bin/env bash /tmp/install.sh + +- name: "clean up nvm installer" + file: + path=/tmp/install.sh + state=absent + +- name: "install bunyan globally" + become: yes + npm: + name=bunyan + global=yes + +- name: "install json globally" + become: yes + npm: + name=json + global=yes diff --git a/ansible/roles/squash_image/tasks/main.yml b/ansible/roles/squash_image/tasks/main.yml new file mode 100644 index 00000000..39af03bf --- /dev/null +++ b/ansible/roles/squash_image/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: get image id for newly created image + delegate_to: "{{ builder }}" + become: true + shell: docker images -a | grep "{{ container_image }}" | grep "{{ container_tag }}" | awk '{print $3}' + register: unsquahed_image_id + +- name: squash newly created image + delegate_to: "{{ builder }}" + become: true + ignore_errors: true # Might already be squashed + shell: docker-squash "{{ unsquahed_image_id.stdout_lines[0] }}" -t "{{ container_image }}:{{ container_tag }}" diff --git a/ansible/roles/ssh-keys/README.md b/ansible/roles/ssh-keys/README.md new file mode 100644 index 00000000..56049f55 --- /dev/null +++ b/ansible/roles/ssh-keys/README.md @@ -0,0 +1,3 @@ +# SSH-KEYS + +This is fun. 
In the `vars` file, you can add groups that the user is added to (use comma seperated values) diff --git a/ansible/roles/ssh-keys/tasks/main.yml b/ansible/roles/ssh-keys/tasks/main.yml new file mode 100644 index 00000000..cb39a7f5 --- /dev/null +++ b/ansible/roles/ssh-keys/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- include_vars: users.yml + +- name: ensure runnable admin group exists + become: true + group: + name={{ runnable_admin_group }} + state=present + +- name: ensure 'sudoers' has runnable admin group + become: true + lineinfile: " + dest=/etc/sudoers + regexp='^%{{ runnable_admin_group }}' + line='%{{ runnable_admin_group }} ALL=(ALL) NOPASSWD: ALL' + state=present + " + +- name: make users + become: true + with_dict: "{{ users }}" + user: + name={{ item.key }} + append=yes + groups={{ item.value.groups | default("") }} + shell=/bin/bash + state=present + +- name: "set user's keys" + become: true + with_dict: "{{ users }}" + authorized_key: + exclusive=yes + user={{ item.key }} + key="{{ item.value.pub_key }}" + +- name: remove users + become: true + with_dict: "{{ delete }}" + user: + name={{ item.key }} + state="absent" diff --git a/ansible/roles/ssh-keys/vars/users.yml b/ansible/roles/ssh-keys/vars/users.yml new file mode 100644 index 00000000..13230c3b --- /dev/null +++ b/ansible/roles/ssh-keys/vars/users.yml @@ -0,0 +1,24 @@ +--- +runnable_admin_group: runnadmin + +users: + bkendall: + name: Bryan Kendall + groups: runnadmin + pub_key: > + ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC40ubXFxuA9VmslDPtVSlzjtPNDbq5hcun5Td9Znjt7Lyk8dw2DQrM4wbzbB8dqn+J9yWJGv3xOj/043tpam69rf77VxZOrzNBL3fe9S+1LdQ9c8VBHZDYvSrQKdmAPO61RheCA6P96fUW4MgxsmTovXbF4Bl9RMw1vLE5xJaagtiF3L5LU3SpMt56oPrkUKP9lglVLymjqvguFTJBEfqzBJflxx0mMjQ6dX2HXA8iUTI3vZ6Q3Ti2vask90qgK5fREckkx6IhDgVeoeD/IOJNPYWkm7CgSgvrzkBxKLNRl7MZx85b524LhjVjVOEyfEIg9jDsEejtwVanOY7gdQ/vKmxXxAGvQJKEXBwqBSFI4zZnfJQ6D5f95Isxz8PCIqtnJScMmIKmTo7vSFWGhEWT8vfon67Y5nzNuNLrnW9cQIIZ60jeG2NesPYnsTCMRCS0lxRhb1nJdW1xtoeS7mnc8SHAyFDpKKarE8WXsi1XAfUTaSTgZ9BuAxMynvxUgqN7Ru9DHJ05QNu/ZNAgtvLCy5lvGjk9+6wuXy2gU27fvt6XIdWC064y/fURpq/qSd+irSNJ3Nk37X2rOJuRN9HlscxUtfbJTnvtIo026MXCn65xOh+Vj4Dv1ZugJs0mLyIK2BBi5SSHMBkiUOi4MOqEfIND604pXnG/yWsK9c33Fw== bryan@init.me + + anand: + name: Anandkumar Patel + groups: runnadmin + pub_key: > + ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCv3cxgdayF7qF8vuGUMt5rWIfaTd3sqOhaK82CIe/aTSlyIuXqGKhTcHWzttAvTlfLFC5qXBJBScZPhXYE7t0YCxaDrmQvuhJ40fu8ZpROEmY9T7WzgBx1uB0mmqPXCrX1oyuUF2n4YVcG5dF1Y3p9d/1ZFCcwKEh8Pi3H4LMRHaph72yNFJUit0Bp8b3sTvQQZ62g9ztCzL5tgaY0jc8vnXGssOGm/TuNMPaXydDVGSK1JDqw0xUhbofZ4/I3JFEhZ9mlPlhu4f1Ts+uX6DnXExgfOM/uN7XuaK6VJBggJRielRlGWxztkzT2owtJsJE9Ave5cQfifjCwYB3TRLQx anandkumarpatel@gmail.com + + kahn: + name: Ryan Kahn + groups: runnadmin + pub_key: > + ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCtaCZTriyt5xsjIfKMqwimK+IgtSe8AokOcMnaAMMZ6LB9zeaq42/oWe0J6GqpqBAJ2EPFQkmdT5qVKbf5ragGhpNkkFFLzziMibeDXauc9aIryox2KoZBv8E8g3zPmV7caOrrLqz7NR9Gf4P3xYv5drUNUWsrDaUD0E022PEJHwmTa86PCcHmguI56KyicNi2MRFa4J+E4yIHcIZ3fHMXtvh7DxQnmRyDuZFjGb+Xl00HbyPzSBychq92cN49S3XupEPH7xtEkmar2DlOZaW3wDXgQ2VD+2ax11hefV/XgfI/ToXCNnR9gf+F5sLOpCrqszM57HoA0GWAkiNwrjmD kahn@runnable.com + +delete: + casey: diff --git a/ansible/roles/swarm-cloudwatch-reporter/tasks/main.yml b/ansible/roles/swarm-cloudwatch-reporter/tasks/main.yml new file mode 100644 index 00000000..fc79a19c --- /dev/null +++ b/ansible/roles/swarm-cloudwatch-reporter/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: create cron folder + file: + 
state: directory + path: "{{ cron_jobs_path }}" + +- name: create swarm cron + template: + dest: "{{ cron_jobs_path }}/{{ name }}.yml" + src: swarm-cron.yml diff --git a/ansible/roles/swarm-cloudwatch-reporter/templates/swarm-cron.yml b/ansible/roles/swarm-cloudwatch-reporter/templates/swarm-cron.yml new file mode 100644 index 00000000..06e12227 --- /dev/null +++ b/ansible/roles/swarm-cloudwatch-reporter/templates/swarm-cron.yml @@ -0,0 +1,44 @@ +apiVersion: batch/v2alpha1 +kind: CronJob +metadata: + name: {{ name }} +spec: + successfulJobsHistoryLimit: 0 + failedJobsHistoryLimit: 1 + schedule: "{{ cron_scedule }}" + concurrencyPolicy: Replace + jobTemplate: + spec: + template: + spec: + volumes: + - name: {{ name }}-docker-ssl-certs + configMap: + name: {{ name }}-docker-ssl-certs + imagePullSecrets: + - name: {{ image_pull_secret_name }} + restartPolicy: Never + containers: + - name: {{ name }} + volumeMounts: + - name: {{ name }}-docker-ssl-certs + mountPath: /etc/ssl/docker + image: {{ container_image }}:{{ container_tag }} + env: + - name: DATADOG_HOST + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: DATADOG_PORT + value: "{{ datadog_port }}" +{% if container_envs is defined %} +{% for env in container_envs %} +{% if env.value != 'ansible_undefined' %} + - name: {{ env.name }} + value: "{{ env.value }}" +{% endif %} +{% endfor %} +{% endif %} + args: + - npm + - start diff --git a/ansible/roles/tls-client-cert/meta/main.yml b/ansible/roles/tls-client-cert/meta/main.yml new file mode 100644 index 00000000..397baf22 --- /dev/null +++ b/ansible/roles/tls-client-cert/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { role: local-vault } + - { role: tls-server-ca } diff --git a/ansible/roles/tls-client-cert/tasks/main.yml b/ansible/roles/tls-client-cert/tasks/main.yml new file mode 100644 index 00000000..283dc01c --- /dev/null +++ b/ansible/roles/tls-client-cert/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: check for role for client + 
local_action: + command vault read --format=json {{ node_env }}/roles/{{ tls_service }}-client + ignore_errors: True + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_role + changed_when: raw_role.rc != 0 + +- name: create role for client + when: raw_role.rc != 0 + local_action: + command vault write --format=json {{ node_env }}/roles/{{ tls_service }}-client allow_any_name=true ttl=87600h + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + +- name: create certificate for client + when: save_on_certs is undefined + local_action: + command vault write --format=json {{ node_env }}/issue/{{ tls_service }}-client common_name="{{ name }}.client.{{ tls_service }}.runnable" ttl=8760h + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_new_certs + +- when: raw_new_certs is defined and save_on_certs is undefined + set_fact: + new_client_certs: "{{ raw_new_certs.stdout | from_json }}" + +- when: save_on_certs is defined + set_fact: + new_client_certs: + data: + certificate: fake certificate + issuing_ca: fake CA + private_key: fake private key diff --git a/ansible/roles/tls-client/meta/main.yml b/ansible/roles/tls-client/meta/main.yml new file mode 100644 index 00000000..c162aab8 --- /dev/null +++ b/ansible/roles/tls-client/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: tls-client-cert } diff --git a/ansible/roles/tls-client/tasks/main.yml b/ansible/roles/tls-client/tasks/main.yml new file mode 100644 index 00000000..4de959a0 --- /dev/null +++ b/ansible/roles/tls-client/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: make directory for client certificates + tags: [ tls_client ] + become: yes + file: + dest: /opt/ssl/{{ tls_service }}-client + state: directory + +- name: put client CA in place for service + tags: [ tls_client ] + become: yes + copy: + dest: /opt/ssl/{{ tls_service }}-client/ca.pem + content: "{{ 
new_client_certs.data.issuing_ca }}" + mode: 0400 + +- name: put client certificate in place for service + tags: [ tls_client ] + become: yes + copy: + dest: /opt/ssl/{{ tls_service }}-client/cert.pem + content: "{{ new_client_certs.data.certificate }}" + mode: 0400 + +- name: put client private key in place for service + tags: [ tls_client ] + become: yes + copy: + dest: /opt/ssl/{{ tls_service }}-client/key.pem + content: "{{ new_client_certs.data.private_key }}" + mode: 0400 diff --git a/ansible/roles/tls-server-ca/meta/main.yml b/ansible/roles/tls-server-ca/meta/main.yml new file mode 100644 index 00000000..cb0b2731 --- /dev/null +++ b/ansible/roles/tls-server-ca/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: local-vault } diff --git a/ansible/roles/tls-server-ca/tasks/main.yml b/ansible/roles/tls-server-ca/tasks/main.yml new file mode 100644 index 00000000..28f6c7ea --- /dev/null +++ b/ansible/roles/tls-server-ca/tasks/main.yml @@ -0,0 +1,75 @@ +--- +- name: check for pki endpoint for environment + local_action: + command vault mounts + register: mounts + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + changed_when: "'{{ node_env }}' not in mounts.stdout" + +- name: mount endpoint for environment + when: mounts.changed + local_action: + command vault mount --path={{ node_env }} pki + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + +- name: tune endpoint for environment + when: mounts.changed + local_action: + command vault mount-tune --max-lease-ttl=87600h {{ node_env }} + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + +- name: read root CA certificate + local_action: + command vault read --format=json {{ node_env }}/cert/ca + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_ca + +- set_fact: + ca_data: "{{ raw_ca.stdout | from_json }}" + +- name: set ca variable + set_fact: 
+ ca: "{{ ca_data.data.certificate }}" + changed_when: "ca_data.data.certificate == ''" + +- name: generate root CA certificate + when: ca == "" + local_action: + command vault write --format=json {{ node_env }}/root/generate/internal common_name="runnable" ttl="87600h" + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_new_ca + +- when: ca == "" + set_fact: + ca_data: "{{ raw_new_ca.stdout | from_json }}" + +- when: ca == "" + set_fact: + ca: "{{ ca_data.data.certificate }}" + +- name: make sure CA directory is in place + when: ca_dest is defined + become: true + file: + dest: "{{ ca_dest | dirname }}" + state: directory + +- name: put CA in place + when: ca_dest is defined + become: true + copy: + content: "{{ ca }}" + dest: "{{ ca_dest }}" + mode: 0400 + owner: root + group: root diff --git a/ansible/roles/tls-server-cert/meta/main.yml b/ansible/roles/tls-server-cert/meta/main.yml new file mode 100644 index 00000000..397baf22 --- /dev/null +++ b/ansible/roles/tls-server-cert/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { role: local-vault } + - { role: tls-server-ca } diff --git a/ansible/roles/tls-server-cert/tasks/main.yml b/ansible/roles/tls-server-cert/tasks/main.yml new file mode 100644 index 00000000..dd6e4c70 --- /dev/null +++ b/ansible/roles/tls-server-cert/tasks/main.yml @@ -0,0 +1,44 @@ +--- +- name: check for role for server + run_once: true + local_action: + command vault read --format=json {{ node_env }}/roles/server-{{ name }} + ignore_errors: True + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_role + changed_when: raw_role.rc != 0 + +- name: create role + when: raw_role.rc != 0 + run_once: true + local_action: + command vault write --format=json {{ node_env }}/roles/server-{{ name }} allow_any_name=true ttl=87600h + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + +- name: create certificate 
for server + when: save_on_certs is undefined + run_once: true + local_action: + command vault write --format=json {{ node_env }}/issue/server-{{ name }} common_name="server.{{ name }}.runnable" alt_names="{{ ansible_fqdn }}" ip_sans="{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" ttl=8760h + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_new_certs + +- when: raw_new_certs is defined and save_on_certs is undefined + run_once: true + set_fact: + new_certs: "{{ raw_new_certs.stdout | from_json }}" + +- when: save_on_certs is defined + run_once: true + set_fact: + new_certs: + data: + certificate: fake certificate + issuing_ca: fake CA + private_key: fake private key diff --git a/ansible/roles/ulimits/files/limits.conf b/ansible/roles/ulimits/files/limits.conf new file mode 100644 index 00000000..a51d3dbb --- /dev/null +++ b/ansible/roles/ulimits/files/limits.conf @@ -0,0 +1,56 @@ +# /etc/security/limits.conf +# +#Each line describes a limit for a user in the form: +# +# +# +#Where: +# can be: +# - a user name +# - a group name, with @group syntax +# - the wildcard *, for default entry +# - the wildcard %, can be also used with %group syntax, +# for maxlogin limit +# - NOTE: group and wildcard limits are not applied to root. +# To apply a limit to the root user, must be +# the literal username root. 
+# +# can have the two values: +# - "soft" for enforcing the soft limits +# - "hard" for enforcing hard limits +# +# can be one of the following: +# - core - limits the core file size (KB) +# - data - max data size (KB) +# - fsize - maximum filesize (KB) +# - memlock - max locked-in-memory address space (KB) +# - nofile - max number of open files +# - rss - max resident set size (KB) +# - stack - max stack size (KB) +# - cpu - max CPU time (MIN) +# - nproc - max number of processes +# - as - address space limit (KB) +# - maxlogins - max number of logins for this user +# - maxsyslogins - max number of logins on the system +# - priority - the priority to run user process with +# - locks - max number of file locks the user can hold +# - sigpending - max number of pending signals +# - msgqueue - max memory used by POSIX message queues (bytes) +# - nice - max nice priority allowed to raise to values: [-20, 19] +# - rtprio - max realtime priority +# - chroot - change root to directory (Debian-specific) +# +# +# + +root soft nofile 1048576 +root hard nofile 1048576 +* soft nofile 1048576 +* hard nofile 1048576 +root soft nproc 1048576 +root hard nproc 1048576 +* soft nproc 1048576 +* hard nproc 1048576 + +# End of file + diff --git a/ansible/roles/ulimits/files/sysctl.conf b/ansible/roles/ulimits/files/sysctl.conf new file mode 100644 index 00000000..65541618 --- /dev/null +++ b/ansible/roles/ulimits/files/sysctl.conf @@ -0,0 +1,5 @@ +fs.file-max = 100000000 +fs.inotify.max_user_watches = 524288 +fs.inotify.max_queued_events = 65536 +fs.inotify.max_user_instances = 8192 +net.ipv4.ip_local_port_range = 64535 65535 diff --git a/ansible/roles/ulimits/handlers/main.yml b/ansible/roles/ulimits/handlers/main.yml new file mode 100644 index 00000000..d024f257 --- /dev/null +++ b/ansible/roles/ulimits/handlers/main.yml @@ -0,0 +1,9 @@ +- name: reboot server + become: true + shell: + reboot + +- name: load kernel parameters + become: true + shell: + sysctl -p diff --git 
a/ansible/roles/ulimits/tasks/main.yml b/ansible/roles/ulimits/tasks/main.yml new file mode 100644 index 00000000..22307ca4 --- /dev/null +++ b/ansible/roles/ulimits/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: update sysctl.conf + become: true + copy: + src=sysctl.conf + dest=/etc/sysctl.conf + notify: load kernel parameters + +- name: update limits.conf + become: true + copy: + src=limits.conf + dest=/etc/security/limits.conf + +- name: force reboot server + become: true + command: echo rebooting + when: force_restart is defined + notify: reboot server diff --git a/ansible/roles/vault/additional-files/user-vault/README.md b/ansible/roles/vault/additional-files/user-vault/README.md new file mode 100644 index 00000000..944efa0e --- /dev/null +++ b/ansible/roles/vault/additional-files/user-vault/README.md @@ -0,0 +1,54 @@ +# Configuring Vault + +Vault is specifically designed to be manually setup. This is not automated for a reason. + +``` +kubectl port-forward INSTERT_VAULT_ID 8300:8200 +export VAULT_ADDR=http://localhost:8300 +``` + +The first time you setup vault we need to manually configure a bunch +of things so we don't pass around the root token. 
+ +`vault init` + +Grab the keys, put them in 1password + +`vault unseal $key1` + +`vault unseal $key2` + +`vault unseal $key3` + +Verify the vault unsealed + +`vault auth` +Paste in the $rootToken + + +Now to setup the policies: + +``` +vault policy-write organizations-writeonly roles/vault/additional-files/user-vault/policies/organizations-writeonly.hcl +vault policy-write organizations-readonly roles/vault/additional-files/user-vault/policies/organizations-readonly.hcl +vault policy-write dock-user-creator roles/vault/additional-files/user-vault/policies/dock-user-creator.hcl +``` + +Now to setup the roles + +`vault write auth/token/roles/organizations-readonly allowed_policies="organizations-readonly"` + +Now to setup new token for starlord: + +`vault token-create -policy="organizations-writeonly" -ttl="8760h"` + +Take the response of this and save it in the configuration for the environment you want as the `starlord_vault_token` + +Create a new token for the docks, so they can create readonly tokens. 
+ +`vault token-create -policy="dock-user-creator" -ttl="8760h"` + +Save that token as the `dock_vault_user_creation_access_token` + +This allows the vault user to create a new user using: +vault write -f auth/token/create/organizations-readonly diff --git a/ansible/roles/vault/additional-files/user-vault/policies/dock-user-creator.hcl b/ansible/roles/vault/additional-files/user-vault/policies/dock-user-creator.hcl new file mode 100644 index 00000000..27183b84 --- /dev/null +++ b/ansible/roles/vault/additional-files/user-vault/policies/dock-user-creator.hcl @@ -0,0 +1,6 @@ +path "auth/token/create/organizations-readonly" { + capabilities = ["create", "update"] +} +path "sys/policy" { + capabilities = ["create", "update"] +} diff --git a/ansible/roles/vault/additional-files/user-vault/policies/organizations-readonly.hcl b/ansible/roles/vault/additional-files/user-vault/policies/organizations-readonly.hcl new file mode 100644 index 00000000..90f54488 --- /dev/null +++ b/ansible/roles/vault/additional-files/user-vault/policies/organizations-readonly.hcl @@ -0,0 +1,3 @@ +path "secret/organization/*" { + capabilities = ["read"] +} diff --git a/ansible/roles/vault/additional-files/user-vault/policies/organizations-writeonly.hcl b/ansible/roles/vault/additional-files/user-vault/policies/organizations-writeonly.hcl new file mode 100644 index 00000000..8b2b56e9 --- /dev/null +++ b/ansible/roles/vault/additional-files/user-vault/policies/organizations-writeonly.hcl @@ -0,0 +1,3 @@ +path "secret/organization/*" { + capabilities = ["create","update"] +} diff --git a/ansible/roles/vault/tasks/main.yml b/ansible/roles/vault/tasks/main.yml new file mode 100644 index 00000000..0d59c388 --- /dev/null +++ b/ansible/roles/vault/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: create configMap folder + file: + state: directory + path: "{{ config_maps_path }}" + +- name: create vault config + tags: [ deploy ] + template: + dest: "{{ config_maps_path }}/{{ name }}.yml" + src: "{{ 
vault_config_file | default('vault.yml') }}" diff --git a/ansible/roles/vault/templates/user-vault.yml b/ansible/roles/vault/templates/user-vault.yml new file mode 100644 index 00000000..b3904171 --- /dev/null +++ b/ansible/roles/vault/templates/user-vault.yml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }} +data: + vault.hcl: | + storage "s3" { + access_key = "{{ user_vault_s3_access_key }}" + secret_key = "{{ user_vault_s3_secret_key }}" + bucket = "{{ user_vault_s3_bucket }}" + region = "{{ aws_region }}" + } + + listener "tcp" { + address = "0.0.0.0:{{ user_vault_port }}" + tls_disable = 1 + } + + max_lease_ttl = "8760h" diff --git a/ansible/roles/vault/templates/vault.yml b/ansible/roles/vault/templates/vault.yml new file mode 100644 index 00000000..665b8b99 --- /dev/null +++ b/ansible/roles/vault/templates/vault.yml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ name }} +data: + vault.hcl: | + backend "consul" { + advertise_addr = "http://vault:{{ vault_api_port }}" + address = "{{ vault_consul_address }}" + scheme = "http" + path = "vault" + } + + listener "tcp" { + address = "0.0.0.0:{{ vault_api_port }}" + tls_disable = 1 + } + + max_lease_ttl = "8760h" diff --git a/ansible/roles/wait_for_container_exit/files/findTagRunning.sh b/ansible/roles/wait_for_container_exit/files/findTagRunning.sh new file mode 100755 index 00000000..5f7f98a0 --- /dev/null +++ b/ansible/roles/wait_for_container_exit/files/findTagRunning.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +IMAGE_NAME="$1" +CONTAINERS=`docker ps | grep -v '^CONTAINER' | awk '{print $1}'` + +if [ "" = "${CONTAINERS}" ] ; then + exit 0 +else + for container in ${CONTAINERS} ; do + docker inspect "${container}" | grep -q '"Image": "'"${IMAGE_NAME}": + if [ ${?} -eq 0 ] ; then + if [ -z "${RUNNING_CONTAINERS}" ] ; then + RUNNING_CONTAINERS="${container}" + else + RUNNING_CONTAINERS="${RUNNING_CONTAINERS} ${container}" + fi + fi + done +fi + +if [ ! 
-z "${RUNNING_CONTAINERS}" ] ; then + echo "${RUNNING_CONTAINERS}" +fi diff --git a/ansible/roles/wait_for_container_exit/tasks/main.yml b/ansible/roles/wait_for_container_exit/tasks/main.yml new file mode 100644 index 00000000..db78998b --- /dev/null +++ b/ansible/roles/wait_for_container_exit/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: get containers + delegate_to: "{{ builder }}" + tags: deploy + become: yes + script: findTagRunning.sh {{ container_image }} + register: container_ids + +- name: wait for container to stop + delegate_to: "{{ builder }}" + become: yes + tags: deploy + command: docker wait {{ container_ids.stdout | trim }} + register: container_exit_code + +- name: get logs + delegate_to: "{{ builder }}" + tags: deploy + shell: source /home/ubuntu/.bash_aliases && loglast {{ name }} "n 200" + args: + executable: /bin/bash + when: container_exit_code.stdout != "0" + register: last_200_lines_of_logs + +- name: display logs for debugging purposes + delegate_to: "{{ builder }}" + tags: deploy + when: container_exit_code.stdout != "0" + debug: + var: last_200_lines_of_logs + +- name: assert container exited with no errors + delegate_to: "{{ builder }}" + tags: deploy + when: container_exit_code.stdout != "0" + fail: + msg: "Container did not exit with code 0 (Code: {{ container_exit_code.stdout }})" diff --git a/ansible/sauron.yml b/ansible/sauron.yml new file mode 100644 index 00000000..06526c1a --- /dev/null +++ b/ansible/sauron.yml @@ -0,0 +1,12 @@ +--- +- hosts: sauron + vars_files: + - group_vars/alpha-sauron.yml + roles: + - role: notify + rollbar_token: "{{ sauron_rollbar_token }}" + + - role: builder + + - role: docker_client + - role: k8-deployment diff --git a/ansible/shiva.yml b/ansible/shiva.yml new file mode 100644 index 00000000..7077b4c7 --- /dev/null +++ b/ansible/shiva.yml @@ -0,0 +1,11 @@ +--- +- hosts: shiva + vars_files: + - group_vars/alpha-shiva.yml + roles: + - role: notify + rollbar_token: "{{ shiva_rollbar_token }}" + + - role: 
builder + + - role: k8-deployment diff --git a/ansible/single-host-base.yml b/ansible/single-host-base.yml new file mode 100644 index 00000000..d0bea099 --- /dev/null +++ b/ansible/single-host-base.yml @@ -0,0 +1,196 @@ +--- +- hosts: localhost + connection: local + tasks: + - fail: msg="`host` (target host) needs to be defined to run this role" + when: host is not defined + - add_host: + name={{ host }} + groups=dock + +- hosts: "{{ host }}" + roles: + - { role: git } + +# Create an image for each service +# +# Each image (unfortunately) needs its own play in order to get variable scope. +# This is necessary because +# - Variables cannot be unset in same play +# - Variables are set globally and cannot be limited to roles: http://stackoverflow.com/questions/43033926/is-it-possible-to-limit-include-vars-scope-to-current-role + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ detention_branch }}" + vars_files: + - "group_vars/alpha-detention.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ palantiri_branch }}" + vars_files: + - "group_vars/alpha-palantiri.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ khronos_branch }}" + vars_files: + - "group_vars/alpha-khronos.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ docker_listener_branch }}" + vars_files: + - "group_vars/alpha-docker-listener.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ drake_branch }}" + vars_files: + - "group_vars/alpha-drake-http.yml" + roles: + - { role: builder } + - { role: squash_image } + - { role: add_image_tags, additional_tags: ['drake-worker']} + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ cream_branch }}" + vars_files: + - "group_vars/alpha-cream-http.yml" + roles: + - { role: builder } + - { role: squash_image } 
+ - { role: add_image_tags, additional_tags: ['cream-worker']} + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ link_branch }}" + vars_files: + - "group_vars/alpha-link.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ navi_branch }}" + vars_files: + - "group_vars/alpha-navi.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ pheidi_branch }}" + vars_files: + - "group_vars/alpha-pheidi.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ big_poppa_branch }}" + vars_files: + - "group_vars/alpha-big-poppa-http.yml" + roles: + - { role: builder } + - { role: squash_image } + - { role: add_image_tags, additional_tags: ['big-poppa-worker']} + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ api_branch }}" + vars_files: + - "group_vars/alpha-api-base.yml" + - "group_vars/alpha-api.yml" + roles: + - { role: builder } + - { role: squash_image } + - { role: add_image_tags, additional_tags: ['api-worker', 'api-socket-server']} + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ sauron_branch }}" + vars_files: + - "group_vars/alpha-sauron.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ optimus_branch }}" + vars_files: + - "group_vars/alpha-optimus.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ clio_branch }}" + vars_files: + - "group_vars/alpha-clio.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ egret_branch }}" + vars_files: + - "group_vars/alpha-agreeable-egret.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ angular_branch }}" + vars_files: + - "group_vars/alpha-web.yml" + roles: + - { role: builder } + - { 
role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ astral_branch }}" + vars_files: + - "group_vars/alpha-shiva.yml" + roles: + - { role: builder } + - { role: squash_image } + +- hosts: "{{ host }}" + vars: + - git_branch: "{{ enterprise_sign_in_branch }}" + vars_files: + - "group_vars/alpha-enterprise-sign-in.yml" + roles: + - { role: builder } + - { role: squash_image } + +# End building of images + +- hosts: "{{ host }}" + tasks: + - name: remove all dangling images + become: true + ignore_errors: true + shell: docker rmi $(docker images --quiet --filter "dangling=true") diff --git a/ansible/single-host-part-1-setup.yml b/ansible/single-host-part-1-setup.yml new file mode 100644 index 00000000..43d00a3f --- /dev/null +++ b/ansible/single-host-part-1-setup.yml @@ -0,0 +1,4 @@ +- include: consul-values.yml -e write_values="true" -e read_values="true" # Only run the first time +# TODO: Do we still need this? +# - include: consul-services.yml # Only run the first time +- include: vault-values.yml -e write_values="true" -e write_root_creds="true" diff --git a/ansible/single-host-part-1.yml b/ansible/single-host-part-1.yml new file mode 100644 index 00000000..861886fc --- /dev/null +++ b/ansible/single-host-part-1.yml @@ -0,0 +1,3 @@ +## Service Discovery: +- include: consul-single.yml +- include: vault-single.yml vault_config_file=vault.yml diff --git a/ansible/single-host-part-2.yml b/ansible/single-host-part-2.yml new file mode 100644 index 00000000..543eff23 --- /dev/null +++ b/ansible/single-host-part-2.yml @@ -0,0 +1,7 @@ +# Databases +- include: mongo.yml +- include: rabbitmq.yml +- include: redis.yml +- include: mongo-create-users.yml +- include: mongo-seed-db.yml +- include: rabbit-create-exchanges.yml diff --git a/ansible/single-host-part-3.yml b/ansible/single-host-part-3.yml new file mode 100644 index 00000000..0a1a2e7d --- /dev/null +++ b/ansible/single-host-part-3.yml @@ -0,0 +1,34 @@ +# Docks Services +- include: swarm-manager.yml 
+- include: palantiri.yml git_branch="{{ palantiri_branch }}" +- include: sauron.yml git_branch="{{ sauron_branch }}" +- include: khronos.yml git_branch="{{ khronos_branch }}" +- include: docker-listener.yml git_branch="{{ docker_listener_branch }}" + +## TODO: Add keymaker +## TODO: Add starloard +## TODO: Add furry-cactus for ASG auto-scaling + +# Proxies +- include: ingress-proxy.yml +- include: navi-proxy.yml +- include: github-varnish.yml git_branch="{{ github_varnish_branch }}" + +# Main +- include: big-poppa.yml git_branch="{{ big_poppa_branch }}" +- include: api.yml git_branch="{{ api_branch }}" +- include: drake.yml git_branch="{{ drake_branch }}" + +# Networking services +- include: detention.yml git_branch="{{ detention_branch }}" +- include: link.yml git_branch="{{ link_branch }}" +- include: navi.yml git_branch="{{ navi_branch }}" + +# Other +- include: optimus.yml git_branch="{{ optimus_branch }}" +- include: pheidi.yml git_branch="{{ pheidi_branch }}" +- include: clio.yml git_branch="{{ clio_branch }}" + +# Frontend +- include: web.yml git_branch="{{ runnable_angular_branch }}" +- include: enterprise-sign-in.yml git_branch="{{ enterprise_signin_branch }}" diff --git a/ansible/socket-server.yml b/ansible/socket-server.yml new file mode 100644 index 00000000..4ec60a02 --- /dev/null +++ b/ansible/socket-server.yml @@ -0,0 +1,12 @@ +--- +- hosts: socket-server + vars_files: + - group_vars/alpha-api-base.yml + - group_vars/alpha-socket-server.yml + roles: + - role: notify + rollbar_token: "{{ api_socket_server_rollbar_key }}" + + - role: docker_client + - role: k8-deployment + - role: k8-service diff --git a/ansible/stack-create.sh b/ansible/stack-create.sh new file mode 100755 index 00000000..466d26f6 --- /dev/null +++ b/ansible/stack-create.sh @@ -0,0 +1,18 @@ +TEST_FN=`util::get_latest_tag api 2>/dev/null` +if [ -z "$TEST_FN" ]; then + echo 'missing util function, run this like so ". 
./stack-create.sh"' + exit 1 +fi + +for APP in agreeable-egret api arithmancy big-poppa clio cream datadog detention docker-listener drake eru github-varnish ingress-proxy khronos link mongo navi-proxy navi optimus palantiri pheidi prometheus-alerts prometheus sauron swarm-manager; do + GIT_BRANCH=`util::get_latest_tag $APP 2>/dev/null` + if [ -z "$GIT_BRANCH" ]; then + echo - include: $APP.yml + else + echo - include: $APP.yml -e git_branch="$GIT_BRANCH" + fi +done + +# add speical cases +echo - include: shiva.yml -e git_branch=`util::get_latest_tag astral 2>/dev/null` +echo - include: swarm-cloudwatch-reporter.yml -e git_branch=v2.0.0 diff --git a/ansible/stack.yml b/ansible/stack.yml new file mode 100644 index 00000000..15b215e0 --- /dev/null +++ b/ansible/stack.yml @@ -0,0 +1,27 @@ +- include: agreeable-egret.yml -e git_branch=v0.4.1 +- include: api.yml -e git_branch=v11.39.2 +- include: arithmancy.yml -e git_branch=v1.12.0 +- include: big-poppa.yml -e git_branch=v0.18.7 +- include: clio.yml -e git_branch=v1.1.1 +- include: cream.yml -e git_branch=v1.14.0 +- include: datadog.yml +- include: detention.yml -e git_branch=v1.3.4 +- include: docker-listener.yml -e git_branch=v10.8.3 +- include: drake.yml -e git_branch=v2.5.0 +- include: eru.yml -e git_branch=v6.2.4 +- include: github-varnish.yml -e git_branch=v2.0.2 +- include: ingress-proxy.yml +- include: khronos.yml -e git_branch=v6.5.12 +- include: link.yml -e git_branch=v2.2.2 +- include: mongo.yml +- include: navi-proxy.yml +- include: navi.yml -e git_branch=v10.5.4 +- include: optimus.yml -e git_branch=v5.1.2 +- include: palantiri.yml -e git_branch=v2.4.6 +- include: pheidi.yml -e git_branch=v1.38.7 +- include: prometheus-alerts.yml +- include: prometheus.yml +- include: sauron.yml -e git_branch=v7.5.1 +- include: swarm-manager.yml +- include: shiva.yml -e git_branch=v7.9.0 +- include: swarm-cloudwatch-reporter.yml -e git_branch=v2.0.0 diff --git a/ansible/starlord.yml b/ansible/starlord.yml new file mode 
100644 index 00000000..4eb59bc8 --- /dev/null +++ b/ansible/starlord.yml @@ -0,0 +1,8 @@ +--- +- hosts: starlord + vars_files: + - group_vars/alpha-starlord.yml + roles: + - role: notify + - role: builder + - role: k8-deployment diff --git a/ansible/swarm-cloudwatch-reporter.yml b/ansible/swarm-cloudwatch-reporter.yml new file mode 100644 index 00000000..edac866a --- /dev/null +++ b/ansible/swarm-cloudwatch-reporter.yml @@ -0,0 +1,9 @@ +--- +- hosts: swarm-manager + vars_files: + - group_vars/alpha-swarm-manager-metrics.yml + roles: + - role: builder + + - role: docker_client + - role: swarm-cloudwatch-reporter diff --git a/ansible/swarm-daemon.yml b/ansible/swarm-daemon.yml new file mode 100644 index 00000000..367fc943 --- /dev/null +++ b/ansible/swarm-daemon.yml @@ -0,0 +1,8 @@ +--- +- hosts: "{{ dock | default('docks') }}" + vars_files: + - "group_vars/alpha-swarm-daemon.yml" + roles: + - { role: notify, tags: "notify" } + - { role: container_kill_start } + - { role: consul_value, tags: [consul_value] } diff --git a/ansible/swarm-manager.yml b/ansible/swarm-manager.yml new file mode 100644 index 00000000..a5cda7f3 --- /dev/null +++ b/ansible/swarm-manager.yml @@ -0,0 +1,11 @@ +--- +- hosts: swarm-manager + vars_files: + - group_vars/alpha-swarm-manager.yml + roles: + - role: notify + rollbar_token: "{{ docker_listener_rollbar_key }}" + + - role: docker_client + - role: k8-deployment + - role: k8-service diff --git a/ansible/user-vault.yml b/ansible/user-vault.yml new file mode 100644 index 00000000..b6a546de --- /dev/null +++ b/ansible/user-vault.yml @@ -0,0 +1,9 @@ +--- +- hosts: user-vault + vars_files: + - group_vars/alpha-user-vault.yml + roles: + - role: notify + - role: vault + - role: k8-deployment + - role: k8-service diff --git a/ansible/users.yml b/ansible/users.yml new file mode 100644 index 00000000..de825365 --- /dev/null +++ b/ansible/users.yml @@ -0,0 +1,4 @@ +--- +- hosts: all + roles: + - { role: ssh-keys, tags: ["ssh-keys"] } diff --git 
a/ansible/vault-single.yml b/ansible/vault-single.yml new file mode 100644 index 00000000..993f6434 --- /dev/null +++ b/ansible/vault-single.yml @@ -0,0 +1,11 @@ +--- +- hosts: vault + vars_files: + - group_vars/alpha-vault-single.yml + roles: + - role: notify + + - role: vault + + - role: k8-deployment + - role: k8-service diff --git a/ansible/vault-values.yml b/ansible/vault-values.yml new file mode 100644 index 00000000..ef660b4c --- /dev/null +++ b/ansible/vault-values.yml @@ -0,0 +1,120 @@ +--- +- hosts: vault + vars_files: + - group_vars/alpha-vault.yml + tasks: + - name: put values into vault + run_once: true + when: write_values is defined + uri: + method=PUT + url={{ vault_url }}/v1/{{ item.key }} + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item.data | to_json }}' + status_code=200,204 + with_items: "{{ vault_seed_values }}" + + - name: check for aws backend in vault + run_once: true + when: write_values is defined + uri: + method=GET + url={{ vault_url }}/v1/sys/mounts + HEADER_X-Vault-Token="{{ vault_auth_token }}" + return_content=yes + register: mounts + + - name: mount 1h aws backend in vault + run_once: true + when: write_values is defined and mounts.json['aws_1h/'] is not defined + uri: + method=POST + follow_redirects=all + url={{ vault_url }}/v1/sys/mounts/aws_1h + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + status_code=204 + with_items: + - type: "aws" + config: + default_lease_ttl: "3600s" # 1 hour, in seconds + max_lease_ttl: "3600s" # 1 hour, in seconds + + - name: mount 1yr aws backend in vault + run_once: true + when: write_values is defined and mounts.json['aws_1yr/'] is not defined + uri: + method=POST + follow_redirects=all + url={{ vault_url }}/v1/sys/mounts/aws_1yr + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + status_code=204 + with_items: + - type: "aws" + config: + default_lease_ttl: "8760h" # 1 
year, in hours + max_lease_ttl: "8760h" # 1 year, in hours + + - name: configure 1h aws root credentials + run_once: true + when: (write_values is defined and write_root_creds is defined) or (write_values is defined and mounts.json['aws_1h/'] is not defined) + uri: + method=POST + follow_redirects=all + url={{ vault_url }}/v1/aws_1h/config/root + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + status_code=204 + register: creds + with_items: + - access_key: "{{ vault_aws_access_key_id }}" + secret_key: "{{ vault_aws_secret_key }}" + region: "{{ vault_aws_region }}" + + - name: configure 1yr aws root credentials + run_once: true + when: (write_values is defined and write_root_creds is defined) or (write_values is defined and mounts.json['aws_1yr/'] is not defined) + uri: + method=POST + follow_redirects=all + url={{ vault_url }}/v1/aws_1yr/config/root + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + status_code=204 + register: creds + with_items: + - access_key: "{{ vault_aws_access_key_id }}" + secret_key: "{{ vault_aws_secret_key }}" + region: "{{ vault_aws_region }}" + + - name: check for the dock-init role + run_once: true + when: write_values is defined + uri: + method=GET + follow_redirects=all + url={{ vault_url }}/v1/aws_1h/roles/dock-init + HEADER_X-Vault-Token="{{ vault_auth_token }}" + status_code=200,404 + register: role + + - name: write the dock-init role + run_once: true + when: write_values is defined and role.status == 404 + uri: + method=POST + follow_redirects=all + url={{ vault_url }}/v1/aws_1h/roles/dock-init + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json | replace("\\\\", "") }}' + status_code=204 + register: creds + with_items: + - policy: "{{ vault_seed_policy }}" diff --git a/ansible/vault.yml b/ansible/vault.yml new file mode 100644 index 00000000..c0c5a4f2 --- /dev/null +++ b/ansible/vault.yml @@ 
-0,0 +1,33 @@ +--- +- hosts: vault + vars_files: + - group_vars/alpha-vault.yml + roles: + - { role: notify, tags: notify } + - { role: database } + - { role: vault } + - { role: container_kill_start } + + tasks: + - name: get seal status + tags: [ deploy ] + uri: + method=GET + url=http://{{ ansible_default_ipv4.address }}:{{ vault_port }}/v1/sys/seal-status + HEADER_X-Vault-Token="{{ vault_auth_token }}" + return_content=yes + register: seal_status + + - name: unseal vault + tags: [ deploy ] + when: seal_status.json.sealed + uri: + method=PUT + url=http://{{ ansible_default_ipv4.address }}:{{ vault_port }}/v1/sys/unseal + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + with_items: + - key: "{{ vault_token_01 }}" + - key: "{{ vault_token_02 }}" + - key: "{{ vault_token_03 }}" diff --git a/ansible/web.yml b/ansible/web.yml new file mode 100644 index 00000000..dacb4529 --- /dev/null +++ b/ansible/web.yml @@ -0,0 +1,11 @@ +--- +- hosts: web + vars_files: + - group_vars/alpha-web.yml + roles: + - role: notify + rollbar_token: "{{ rollbar_web_token }}" + + - role: builder + + - role: k8-job diff --git a/ansible/workers.yml b/ansible/workers.yml new file mode 100644 index 00000000..8dd4f8bb --- /dev/null +++ b/ansible/workers.yml @@ -0,0 +1,11 @@ +--- +- hosts: worker + vars_files: + - group_vars/alpha-api-base.yml + - group_vars/alpha-workers.yml + roles: + - role: notify + rollbar_token: "{{ api_workers_rollbar_key }}" + + - role: docker_client + - role: k8-deployment diff --git a/deployer/README.md b/deployer/README.md new file mode 100644 index 00000000..14945be0 --- /dev/null +++ b/deployer/README.md @@ -0,0 +1,10 @@ +# Deployer +![Deployer](https://cloud.githubusercontent.com/assets/2194285/21335997/6f51847c-c617-11e6-999d-4db7794d6be0.jpg) + +## Purpose +Deployer is the application that is in charge of deploying code here at runnable. + + +## How it works +Deployer is just a runnable wrapper around ansible. 
It takes jobs from `deploy.requested` exchange and converts them into ansible playbook commands. + diff --git a/deployer/configs/.env b/deployer/configs/.env new file mode 100644 index 00000000..10217616 --- /dev/null +++ b/deployer/configs/.env @@ -0,0 +1,8 @@ +ANSIBLE_BIN=ansible-playbook +ANSIBLE_DIR=/ansible +APP_NAME=deployer +LOG_LEVEL=trace +SECRET_DIR=/root/.ssh + +# ponos vars +WORKER_PREFETCH=1 diff --git a/deployer/configs/.env.test b/deployer/configs/.env.test new file mode 100644 index 00000000..cbfecf86 --- /dev/null +++ b/deployer/configs/.env.test @@ -0,0 +1,2 @@ +ANSIBLE_BIN=./ansible-playbook-mock +ANSIBLE_DIR=./test/fixtures/ diff --git a/deployer/index.js b/deployer/index.js new file mode 100644 index 00000000..81ff7370 --- /dev/null +++ b/deployer/index.js @@ -0,0 +1,14 @@ +'use strict' +require('loadenv')() + +const log = require('./logger') +const server = require('./worker-server') + +server.start() +.then(() => { + log.trace('server started') +}) +.catch((err) => { + log.error({ err }, 'server error:') + throw err +}) diff --git a/deployer/logger.js b/deployer/logger.js new file mode 100644 index 00000000..4d1a998d --- /dev/null +++ b/deployer/logger.js @@ -0,0 +1,37 @@ +'use strict' +require('loadenv')() +const bunyan = require('bunyan') +const cls = require('continuation-local-storage') + +const serializers = { + tx: () => { + let out + try { + out = { + tid: cls.getNamespace('ponos').get('tid') + } + } catch (e) { + // cant do anything here + } + return out + } +} + +const logger = bunyan.createLogger({ + name: process.env.APP_NAME, + streams: [ + { + level: process.env.LOG_LEVEL, + stream: process.stdout + } + ], + serializers: module.exports.serializers, + src: true, + branch: process.env._VERSION_GIT_COMMIT, + commit: process.env._VERSION_GIT_BRANCH, + environment: process.env.NODE_ENV +}) + +module.exports = logger.child({ tx: true }) + +module.exports.serializers = serializers diff --git a/deployer/package.json 
b/deployer/package.json new file mode 100644 index 00000000..589c8f29 --- /dev/null +++ b/deployer/package.json @@ -0,0 +1,34 @@ +{ + "name": "deployer", + "version": "2.1.0", + "description": "deploy things", + "main": "index.js", + "scripts": { + "start": "NODE_PATH=./lib node index.js", + "test": "NODE_PATH=./lib lab -v -c ./test" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/CodeNow/devops-scripts.git" + }, + "author": "Anandkumar Patel", + "license": "MIT", + "bugs": { + "url": "https://github.com/CodeNow/devops-scripts/issues" + }, + "homepage": "https://github.com/CodeNow/devops-scripts#readme", + "dependencies": { + "101": "^1.6.2", + "bluebird": "^3.4.6", + "bunyan": "^1.8.5", + "continuation-local-storage": "^3.2.0", + "error-cat": "^3.0.0", + "joi": "^10.0.5", + "loadenv": "^2.2.0", + "ponos": "^5.7.1" + }, + "devDependencies": { + "code": "^4.0.0", + "lab": "^11.2.1" + } +} diff --git a/deployer/test/fixtures/ansible-playbook-mock b/deployer/test/fixtures/ansible-playbook-mock new file mode 100755 index 00000000..18054649 --- /dev/null +++ b/deployer/test/fixtures/ansible-playbook-mock @@ -0,0 +1,2 @@ +echo $* +echo $* > ./ansibleMockArgs diff --git a/deployer/test/integration/deploy.js b/deployer/test/integration/deploy.js new file mode 100644 index 00000000..9e15af44 --- /dev/null +++ b/deployer/test/integration/deploy.js @@ -0,0 +1,53 @@ +'use strict' +const Code = require('code') +const Lab = require('lab') +const Promise = require('bluebird') +const Publisher = require('ponos/lib/rabbitmq') + +const lab = exports.lab = Lab.script() +const app = require('../../index') +const workerServer = require('../../worker-server') + +const after = lab.after +const afterEach = lab.afterEach +const before = lab.before +const beforeEach = lab.beforeEach +const describe = lab.describe +const expect = Code.expect +const it = lab.it + +const publisher = new Publisher({ + name: process.env.APP_NAME, + hostname: 
process.env.RABBITMQ_HOSTNAME, + port: process.env.RABBITMQ_PORT, + username: process.env.RABBITMQ_USERNAME, + password: process.env.RABBITMQ_PASSWORD, + events: ['deploy.requested'] +}) + +describe('deploy test', () => { + beforeEach((done) => { + publisher.connect() + .then(() => { + return app.start() + }) + .asCallback(done) + }) + + afterEach((done) => { + publisher.disconnect() + .then(() => { + return workerServer.disconnect() + }).asCallback(done) + done() + }) + + it('should run deploy', (done) => { + publisher.publishEvent('deploy.requested', { + version: 'master', + env: 'gamma', + service: 'deployer' + }) + done() + }) +}) diff --git a/deployer/worker-server.js b/deployer/worker-server.js new file mode 100644 index 00000000..92bbc3a2 --- /dev/null +++ b/deployer/worker-server.js @@ -0,0 +1,22 @@ +'use strict' +require('loadenv')() +const Ponos = require('ponos') +const log = require('./logger') + +module.exports = new Ponos.Server({ + name: process.env.APP_NAME, + enableErrorEvents: true, + log: log, + rabbitmq: { + channel: { + prefetch: process.env.WORKER_PREFETCH + }, + hostname: process.env.RABBITMQ_HOSTNAME, + port: process.env.RABBITMQ_PORT, + username: process.env.RABBITMQ_USERNAME, + password: process.env.RABBITMQ_PASSWORD + }, + events: { + 'deploy.requested': require('./workers/deploy.requested') + } +}) diff --git a/deployer/workers/deploy.requested.js b/deployer/workers/deploy.requested.js new file mode 100644 index 00000000..0fc38948 --- /dev/null +++ b/deployer/workers/deploy.requested.js @@ -0,0 +1,62 @@ +'use strict' +require('loadenv')() +const joi = require('joi') +const Promise = require('bluebird') +const spawn = require('child_process').spawn + +const logger = require('../logger') +const ansibleRoot = process.env.ANSIBLE_DIR +const secretRoot = process.env.SECRET_DIR + +module.exports.jobSchema = joi.object({ + version: joi.string().required(), + env: joi.string().required(), + service: joi.string().required() +}).required() + 
+module.exports.task = (job) => { + const log = logger.child({ job, worker: 'deploy.requested' }) + + return Promise.fromCallback((cb) => { + const version = job.version + const env = job.env + const service = job.service + + const commandArgs = [ + '-i', `${env}-hosts`, + `--vault-password-file=${secretRoot}/vault-pass`, + '-e', `git_branch=${version}`, + '-t', 'deploy', + `${service}.yml` + ] + + const spawnOpts = { + cwd: ansibleRoot + } + + log.trace({ + commandArgs, + spawnOpts + }, `about to call ${process.env.ANSIBLE_BIN}`) + + const cmd = spawn(process.env.ANSIBLE_BIN, commandArgs, spawnOpts) + + cmd.stdout.on('data', (data) => { + log.trace({ type: 'stdout' }, data.toString()) + }) + + cmd.stderr.on('data', (data) => { + log.error({ type: 'stderr' }, data.toString()) + }) + + cmd.on('close', (code) => { + log.trace(`ansible-playbook exited with code ${code}`) + cb() + }) + + cmd.on('error', (err) => { + log.error({ err }, 'Failed to start ansible-playbook process.') + cb(err) + }) + }) +} diff --git a/environments/README.md b/environments/README.md new file mode 100644 index 00000000..8b0070f6 --- /dev/null +++ b/environments/README.md @@ -0,0 +1,39 @@ +# Environments + +Environments should have the following structure: + +``` +main.yml (main variable file) +inventory + hosts +k8 (directory, automatically populated) +secrets (directory, see below) +``` + +### Secrets + +This directory should have the following files: + +``` +/docker-client + id_rsa + known_hosts + ca.pem + ${SERVICE_NAME} (api, khronos, etc.) 
+ cert.pem + key.pem +/certs + ca-key.pem + ca.pem + ca.srl + cert.pem + key.pem + pass +/domains + /${DOMAIN} + ca.pem + cert.pem + key.pem + chained.pem + dhparam.pem +``` diff --git a/environments/delta/inventory/docks.js b/environments/delta/inventory/docks.js new file mode 100755 index 00000000..3a7e1552 --- /dev/null +++ b/environments/delta/inventory/docks.js @@ -0,0 +1,75 @@ +#!/usr/bin/env node + +'use strict'; + +var aws = require('aws-sdk'); +var ec2 = new aws.EC2({ + accessKeyId: 'AKIAJ3RCYU6FCULAJP2Q', + secretAccessKey: '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', + region: 'us-west-2' +}); + +var params = { + Filters: [ + // Only search for docks in the cluster security group + { + Name: 'instance.group-id', + Values: ['sg-6cd7fb08'] + }, + // Only fetch instances that are tagged as docks + { + Name: 'tag:role', + Values: ['dock'] + }, + // Only fetch running instances + { + Name: 'instance-state-name', + Values: ['running'] + } + ] +}; + +ec2.describeInstances(params, function (err, data) { + if (err) { + console.error("An error occurred: ", err); + process.exit(1); + } + + // Get a set of instances from the describe response + var instances = []; + data.Reservations.forEach(function (res) { + res.Instances.forEach(function (instance) { + instances.push(instance); + }); + }); + + // Map the instances to their private ip addresses + // NOTE This will work locally because of the wilcard ssh proxy in the config + var hosts = instances.map(function (instance) { + return instance.PrivateIpAddress; + }); + + var hostVars = {}; + instances.forEach(function (instance) { + for (var i = 0; i < instance.Tags.length; i++) { + if (instance.Tags[i].Key === 'org') { + hostVars[instance.PrivateIpAddress] = { + host_tags: instance.Tags[i].Value + ',build,run' + }; + } + } + }); + + // Output the resulting JSON + // NOTE http://docs.ansible.com/ansible/developing_inventory.html + console.log(JSON.stringify( + { + docks: { + hosts: hosts + }, + _meta : { + hostvars : 
hostVars + } + } + )); +}); diff --git a/environments/delta/inventory/hosts b/environments/delta/inventory/hosts new file mode 100644 index 00000000..d55fa32b --- /dev/null +++ b/environments/delta/inventory/hosts @@ -0,0 +1,193 @@ +[bastion] +delta-bastion + +[navi-port-router] +delta-navi-port-router + +[navi-proxy] +localhost + +[userland] +delta-userland + +[ingress] +localhost + +[api_group:children] +worker +api +socket-server + +[api] +localhost + +[consul] +delta-consul-a +delta-consul-b +delta-consul-c + +[docker-listener] +localhost + +[vault] +delta-consul-a +delta-consul-b +delta-consul-c + +[user-vault] +localhost + +[user-local] +localhost + +[worker] +localhost + +[socket-server] +localhost + +[socket-server-proxy] +localhost + +[deployer] +localhost + +[docks] + +[dock] + +[agreeable-egret] +localhost + +[eru] +localhost + +[navi] +localhost + +[mongo-navi] +localhost + +[mongo] +localhost + +[clio] +localhost + +[link] +localhost + +[keymaker] +localhost + +[khronos] +localhost + +[optimus] +localhost + +[detention] +localhost + +[palantiri] +localhost + +[rabbitmq] +delta-rabbit + +[web] +localhost + +[marketing] +localhost + +[metabase] +localhost + +[redis] +delta-redis + +[shiva] +localhost + +[sauron] +localhost + +[starlord] +localhost + +[swarm-manager] +localhost + +[drake] +localhost + +[pheidi] +localhost + +[github-varnish] +localhost + +[big-poppa] +localhost + +[cream] +localhost + +[customerbot] +localhost + +[arithmancy] +localhost + +[prometheus] +localhost + +[datadog] +localhost + +[delta:children] +agreeable-egret +api +arithmancy +bastion +big-poppa +consul +cream +customerbot +dock +docker-listener +docks +drake +eru +github-varnish +ingress +khronos +metabase +mongo-navi +mongo +navi +navi-port-router +navi-proxy +optimus +pheidi +prometheus +rabbitmq +redis +sauron +shiva +socket-server +socket-server-proxy +starlord +swarm-manager +user-vault +userland +web +worker +datadog + +[local] +127.0.0.1 + +[targets] +localhost 
ansible_connection=local bastion_name=gamma-bastion gather_facts=False diff --git a/environments/delta/k8/.gitkeep b/environments/delta/k8/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/environments/delta/main.yml b/environments/delta/main.yml new file mode 100644 index 00000000..6b3d414e --- /dev/null +++ b/environments/delta/main.yml @@ -0,0 +1,203 @@ +egret_pg_database: egret +egret_pg_host: delta-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com +egret_pg_pass: wwHQ5B4RfY9iKS3m +egret_pg_user: egret +egret_port: 5432 + +api_aws_access_key_id: AKIAJWSSSJYUXKNW2ZDA +api_aws_alias_host: us-west-2.compute.internal +api_aws_secret_access_key: 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 +api_github_client_id: d42d6634d4070c9d9bf9 +api_github_client_secret: d6cfde38fef5723e25e52629e3d25825c8a704c9 +api_github_deploy_keys_bucket: runnable.deploykeys.production +api_intercom_api_key: 46e75ada5d21f248787689b35fe80e11efe9303a +api_intercom_app_id: wqzm3rju +api_mixpanel_app_id: 57260a5b6fc972e9c69184882efd009e +api_mongo_password: 72192e5a-a5e1-11e5-add9-0270db32f7ad +api_mongo_database: delta +api_mongo_replset_name: delta-rs0 +api_new_relic_app_name: delta-api-production +api_rollbar_key: a90d9c262c7c48cfabbd32fd0a1bc61c +api_s3_context_bucket: runnable.context.resources.production +api_s3_log_bucket: production-delta.container-logs +api_workers_rollbar_key: 3edfe8fe4fd640ae9fdbbe08fcb9f121 + +arithmancy_pg_database: arithmancy +arithmancy_pg_host: arithmancy-delta.cnksgdqarobf.us-west-2.rds.amazonaws.com +arithmancy_pg_pass: y7A7k7vNf7nkVv4d73YRLYybg3JcA92 +arithmancy_pg_port: 5432 +arithmancy_pg_user: arithmancy_delta + +big_poppa_github_token: e11a1264130fb62ce045bf03118bf123f980c205 +big_poppa_http_rollbar_token: 1f1eeea0b1334aaeb50fb7bc4a43241a +big_poppa_intercom_id: wqzm3rju +big_poppa_intercom_key: 0df3322fda46a34e93ba6a43603a8fc3ef740d84 +big_poppa_mongo_auth: api:72192e5a-a5e1-11e5-add9-0270db32f7ad +big_poppa_mongo_database: delta 
+big_poppa_mongo_replset_name: delta-rs0 +big_poppa_new_relic_app_name: delta-big-poppa +big_poppa_pg_host: delta-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com:{{ pg_port }} +big_poppa_pg_pass: 27eed36a27a54ffd +big_poppa_pg_user: cwallace +big_poppa_worker_rollbar_token: 98cabb8440024e3a8242cf3220b802c9:5432 + +clio_mongo_password: ">hjp8XL4GAwBGXPb" +clio_mongo_replset_name: delta-rs0 +clio_mongo_database: clio + +cream_hello_runnable_github_token: 5ff90dacd14fb1ff202c6b48c1393ba713adf483 +cream_http_rollbar_token: baa03dbd9f814d14ab0c99863ed6a4fb +cream_worker_rollbar_token: 87924b881c3143968cdb059fe41acbc3 +cream_intercom_key: 173c1b366d11a3ef0f641c6b3327914368e67095 +cream_intercom_id: wqzm3rju +cream_stripe_secret_key: sk_live_ZWLZtu5rxJ0ylSoF8xrHtNOw + +customerbot_app_name: customerbot +customerbot_bot_api_key: xoxb-32654916690-Sn4Ez4u9sG5EXHWB3ouRBHa8 +customerbot_datadog_api_key: d3ab5d85bca924f9d4e33d307beacb4a +customerbot_datadog_app_key: 54a74a9c827bd7792f238f55cf3e1979d32065e6 +customerbot_github_access_token: c0be376ab39b9a363308d0928f9f80ee549c5af9 +customerbot_intercom_api_key: ef28806417aef605ca74573ff080c9d5eb0d6384 +customerbot_intercom_app_id: wqzm3rju +customerbot_jira_password: ?hzcwzfGcg98FNuG)%ebWyFcTaCyFN#zoEm +customerbot_jira_username: sohail +customerbot_runnable_api_url: https://api.runnable.io/ +customerbot_runnable_user_content_domain: runnable.io +customerbot_slack_whitelist: sohail,praful,keno,yash,anand,jorgito,nathan +customerbot_stripe_secret_key: sk_live_ZWLZtu5rxJ0ylSoF8xrHtNOw + +docks_rollbar_key: d1af6567ed0f464fb1d676f38fd31751 + +drake_port: 80 +drake_http_rollbar_token: 52ad749ddb8e47b2a8e15312b6b300fb +drake_worker_rollbar_token: 14152b8572034943b714da27ca607698 + +eru_aws_access_key_id: AKIAIFCVEISSC5JMPWDA +eru_aws_environment: delta +eru_aws_secret_access_key: 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 +eru_github_id: 46a23f5f99f0aa9460f8 +eru_github_secret: a0336d72e3d540fb9fbbed2c123a81e1cb329dab 
+eru_intercom_id: wqzm3rju +eru_intercom_key: ro-f3ce0069697542d73bacd5cea9fba1a48d069e39 +eru_mongodb_database: delta +eru_mongodb_password: tilde-rawboned-lotus-hideaway-diastole +eru_mongodb_replset: delta-rs0 +eru_mongodb_username: eru +eru_subdomain: eru + +keymaker_pg_host: delta-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com:{{ pg_port }} +keymaker_pg_pass: 8MsajH7RQuJ+fTCh +keymaker_pg_user: keymaker +keymaker_new_relic_app_name: delta-keymaker +keymaker_log_level: info + +khronos_intercom_api_key: 14771f14efb617900724a16345e57beb55ba9beb +khronos_intercom_app_id: wqzm3rju +khronos_canary_github_branches_instance_id: 5929a06b4190dc0e0066b781 +khronos_canary_logs_instance_id: 56f07f5c1e089a200077f2a3 +khronos_canary_rebuild_instance_id: 571b39b9d35173300021667d +khronos_canary_rebuild_navi_url: http://canary-build-staging-runnabletest.runnableapp.com:8000/ +# @runnable-khronos github token +khronos_canary_token: 4e2e050653b3927e53bb80bfd23d8aa17019426c +khronos_canary_failover_token: 84549e76545306de61d47f23b1d1831e1c95a400 +khronos_mongo_auth: api:72192e5a-a5e1-11e5-add9-0270db32f7ad +khronos_mongo_database: delta +khronos_mongo_replset_name: delta + +mongo_volume_id: vol-01c239d637839ba95 + +metabase_pg_database: metabase +metabase_pg_host: delta-metabase.cnksgdqarobf.us-west-2.rds.amazonaws.com +metabase_pg_pass: nifty-prowl-sought-muscles +metabase_pg_port: 5432 +metabase_pg_user: metabase + +navi_new_relic_app_name: delta-navi +navi_cookie_secret: e80173940e7bdd84734e868b6ea054a0 +navi_intercom_api_key: 6f6400402170e78fa1ad4418608aacc63512122b +navi_intercom_app_id: wqzm3rju +navi_mongo_password: "365bbf0e5a944d3fb94598dd5f69d789" + +optimus_aws_access_id: AKIAJWSSSJYUXKNW2ZDA +optimus_aws_secret_id: tyvGiCbj5jWCiQnMLvfrfD64dFo8i6prkdcga86y +optimus_github_deploy_keys_bucket: runnable.deploykeys.production + +pager_duty_key: eb80ed5844d34a90bd0fc09995b0e346 + +palantiri_rollbar_key: f675e9090d6f483ca4e742af2c7f2f83 + +pheidi_intercom_admin_id: 22382 
+pheidi_intercom_id: wqzm3rju +pheidi_intercom_key: 852895329aa77696b65491876eefe0eb386482c5 +pheidi_mongo_auth: pheidi:septa-mauve-enquiry-clipper-history +pheidi_mongo_database: delta +pheidi_mongo_replset_name: delta +pheidi_runnabot_tokens: d22a5ec75f66a0f472f3d856eb01df2321115e0b,33d8accaa7afeace8f44e3a6de409097ea1e67b8,82a7f2838852c04b75bee2b1a8ed236e2f9fc1ef,cab912709f7eb5bb8c21f9177f845561222d3933 + +prometheus_volume_id: vol-0bb9206a0899b637d + +sauron_rollbar_key: 83157ae2d50d4b6398e404c0b9978d26 + +aws_access_key_id: AKIAJ3RCYU6FCULAJP2Q +aws_secret_access_key: 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 + +starlord_vault_token: 319ff979-b066-87c7-1172-6f3b5305d749 + +user_vault_s3_access_key: AKIAJRB2ERCOLHGNYAFQ +user_vault_s3_secret_key: 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 +user_vault_s3_bucket: delta-user-vault +vault_config_file: user-vault.yml + +vault_hello_runnable_github_token: 88ddc423c2312d02a8bbcaad76dd4c374a30e4af +vault_aws_access_key_id: AKIAJ7R4UIM45KH2WGWQ +vault_aws_secret_key: 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 +vault_aws_region: us-east-1 + +marketing_bucket: runnable.com +marketing_aws_access_key: AKIAIPPPY2JIOHX7QVCA +marketing_aws_secret_key: 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 + +ansible_ssh_private_key_file: ~/.ssh/delta.pem +api_hello_runnable_github_token: 88ddc423c2312d02a8bbcaad76dd4c374a30e4af +bastion_sshd_port: 60506 +builder: delta-builder +consul_url: "{{ ansible_default_ipv4.address }}" +consul_host_address: 10.8.6.122 +cream_stripe_publishable_key: pk_live_5yYYZlYIwY3LwvKFaXY0jNlm +datadog_mongodb_pwd: sqa3WBgkCgZsFZuex0kBNahZ +datadog_mongodb_user: datadog +datadog_tags: env:delta +dock_vault_user_creation_access_token: ddf20c34-019c-5b24-9c0d-1b44e3edf29a +domain: runnable.io +env: delta +github_domain: api.github.com +github_protocol: https +is_github_enterprise: false +mongo_hosts: 10.8.4.216:27017,10.8.10.254:27017,10.8.13.89:27017 +mongo_port: 27017 +navi_mongo_hosts: "10.8.6.41" 
+new_relic_license_key: 338516e0826451c297d44dc60aeaf0a0ca4bfead +node_env: production-delta +pg_host: delta-infradb.cnksgdqarobf.us-west-2.rds.amazonaws.com +pg_pass: 59a5524e-a772-11e5-bedc-1bdc0db458b3 +pg_port: 5432 +rabbit_host_address: 10.8.4.195 +rabbit_password: wKK7g7NWKpQXEeSzyWB7mIpxZIL8H2mDSf3Q6czR3Vk +rabbit_port: 54321 +rabbit_username: o2mdLh9N9Ke2GzhoK8xsruYPhIQFN7iEL44dQJoq7OM +redis_host_address: 10.8.4.9 +registry_token: 4PX2AU9QIJSCDLZEXILYX6ZP2RCXY1HR10WVZKWVR0JW8DS5IIY87D96V0RACMK5 +registry_username: runnable+deltapush +rollbar_web_token: "162a053bebd34e9eb6e2860543c7ae79" +secrets_path: delta-hosts/secrets +super_user: HelloRunnable +user_content_domain: runnableapp.com +user_vault_load_balancer: ab5d35e6f344211e797770ac76ff2aaa-2031464484.us-west-2.elb.amazonaws.com +vault_auth_token: 578c9767-5af8-8490-0954-5d330f27b088 +vault_token_01: 0d324dc7d4cbd94790fd08809d06fb1e28e21e185910081c7646e3e49924f6ed01 +vault_token_02: 42dc8a69df174e77eb47a63b6ef4709bec57101cb1bff11a71c91b73b8bc046102 +vault_token_03: 47f3cb74f5374fa3c51c90fd25e3d4cc851034de97584995fce5fc5382342f1f03 + +web_intercom_id: wqzm3rju +web_sift_public_key: 27e9da5c97 +web_aws_bucket_region: us-west-2 diff --git a/environments/delta/secrets/.gitkeep b/environments/delta/secrets/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/environments/gamma/inventory/docks.js b/environments/gamma/inventory/docks.js new file mode 100755 index 00000000..34f14a3b --- /dev/null +++ b/environments/gamma/inventory/docks.js @@ -0,0 +1,75 @@ +#!/usr/bin/env node + +'use strict'; + +var aws = require('aws-sdk'); +var ec2 = new aws.EC2({ + accessKeyId: 'AKIAJ3RCYU6FCULAJP2Q', + secretAccessKey: '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', + region: 'us-west-2' +}); + +var params = { + Filters: [ + // Only search for docks in the cluster security group + { + Name: 'instance.group-id', + Values: ['sg-577a0d33'] + }, + // Only fetch instances that are tagged as docks + { + Name: 
'tag:role', + Values: ['dock'] + }, + // Only fetch running instances + { + Name: 'instance-state-name', + Values: ['running'] + } + ] +}; + +ec2.describeInstances(params, function (err, data) { + if (err) { + console.error("An error occurred: ", err); + process.exit(1); + } + + // Get a set of instances from the describe response + var instances = []; + data.Reservations.forEach(function (res) { + res.Instances.forEach(function (instance) { + instances.push(instance); + }); + }); + + // Map the instances to their private ip addresses + // NOTE This will work locally because of the wilcard ssh proxy in the config + var hosts = instances.map(function (instance) { + return instance.PrivateIpAddress; + }); + + var hostVars = {}; + instances.forEach(function (instance) { + for (var i = 0; i < instance.Tags.length; i++) { + if (instance.Tags[i].Key === 'org') { + hostVars[instance.PrivateIpAddress] = { + host_tags: instance.Tags[i].Value + ',build,run' + }; + } + } + }); + + // Output the resulting JSON + // NOTE http://docs.ansible.com/ansible/developing_inventory.html + console.log(JSON.stringify( + { + docks: { + hosts: hosts + }, + _meta : { + hostvars : hostVars + } + } + )); +}); diff --git a/environments/gamma/inventory/hosts b/environments/gamma/inventory/hosts new file mode 100644 index 00000000..47aeb992 --- /dev/null +++ b/environments/gamma/inventory/hosts @@ -0,0 +1,194 @@ +[bastion] +gamma-bastion + +[userland] +gamma-userland + +[navi-proxy] +localhost + +[mongo] +localhost + +[api_group:children] +worker +api +socket-server + +[api] +localhost + +[big-poppa] +localhost + +[cream] +localhost + +[customerbot] +localhost + +[consul] +localhost + +[datadog] +localhost + +[docker-listener] +localhost + +[vault] +gamma-consul-a +gamma-consul-b +gamma-consul-c + +[user-vault] +localhost + +[user-local] +localhost + +[worker] +localhost + +[agreeable-egret] +localhost + +[eru] +localhost + +[navi] +localhost + +[ingress] +localhost + +[link] +localhost + 
+[mongo-navi] +localhost + +[clio] +localhost + +[charon] +localhost + +[keymaker] +localhost + +[khronos] +localhost + +[optimus] +localhost + +[detention] +localhost + +[palantiri] +localhost + +[rabbitmq] +localhost + +[web] +localhost + +[marketing] +localhost + +[redis] +localhost + +[sauron] +localhost + +[shiva] +localhost + +[starlord] +localhost + +[socket-server] +localhost + +[socket-server-proxy] +localhost + +[swarm-manager] +localhost + +[drake] +localhost + +[pheidi] +localhost + +[github-varnish] +localhost + +[arithmancy] +localhost + +[docks] + +[dock] + +[prometheus] +localhost + +[kartographer] +localhost + +[user-local] +localhost + +[navi-port-router] +gamma-userland + +[gamma:children] +agreeable-egret +api +arithmancy +bastion +big-poppa +charon +consul +cream +customerbot +datadog +dock +docker-listener +docks +drake +eru +github-varnish +ingress +kartographer +khronos +mongo +mongo-navi +navi +navi-proxy +optimus +pheidi +prometheus +rabbitmq +redis +sauron +shiva +socket-server +socket-server-proxy +starlord +swarm-manager +user-vault +userland +web +worker + +[local] +127.0.0.1 + +[targets] +localhost ansible_connection=local bastion_name=gamma-bastion gather_facts=False diff --git a/environments/gamma/k8/.gitkeep b/environments/gamma/k8/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/environments/gamma/main.yml b/environments/gamma/main.yml new file mode 100644 index 00000000..60585123 --- /dev/null +++ b/environments/gamma/main.yml @@ -0,0 +1,185 @@ +egret_pg_database: egret +egret_pg_host: gamma-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com:32659 +egret_pg_pass: b3UKjxbGblKZtG6c +egret_pg_user: egret +egret_port: 65520 + +api_aws_access_key_id: AKIAIDC4WVMTCGV7KRVQ +api_aws_alias_host: us-west-2.compute.internal +api_aws_secret_access_key: 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 +api_github_client_id: b6072dc57062faca7fcb +api_github_client_secret: ba73a9294dc4bfaa7ed02ba187f73918506e4293 
+api_github_deploy_keys_bucket: runnable.deploykeys.production-beta +api_mixpanel_app_id: c41affa4b08818443365c526cbb51606 +api_mongo_password: 3f5210b8-8fe3-11e5-8e62-07b6eff19ecb +api_mongo_database: gamma +api_mongo_replset_name: gamma-rs0 +api_rollbar_key: a90d9c262c7c48cfabbd32fd0a1bc61c +api_s3_context_bucket: runnable.context.resources.production-beta +api_s3_log_bucket: production-gamma.container-logs +api_workers_rollbar_key: 3edfe8fe4fd640ae9fdbbe08fcb9f121 + +arithmancy_pg_database: arithmancy +arithmancy_pg_host: arithmancy-gamma.cnksgdqarobf.us-west-2.rds.amazonaws.com +arithmancy_pg_pass: arithmancy-gamma +arithmancy_pg_port: 5432 +arithmancy_pg_user: arithmancy_gamma + +big_poppa_github_token: e11a1264130fb62ce045bf03118bf123f980c205 +big_poppa_http_rollbar_token: 1f1eeea0b1334aaeb50fb7bc4a43241a +big_poppa_intercom_id: xs5g95pd +big_poppa_intercom_key: 92d281df5653eb72f8f4b322cf0689be893d4965 +big_poppa_mongo_auth: api:3f5210b8-8fe3-11e5-8e62-07b6eff19ecb +big_poppa_mongo_database: gamma +big_poppa_mongo_replset_name: gamma-rs0 +big_poppa_pg_host: gamma-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com:32659 +big_poppa_pg_pass: JFmZDuVYPt9vUxk4DBjj +big_poppa_pg_user: big_poppa +big_poppa_worker_rollbar_token: 98cabb8440024e3a8242cf3220b802c9 + +clio_mongo_password: "Fkj8CBoMNpuhe7p*" +clio_mongo_replset_name: gamma-rs0 +clio_mongo_database: clio + +cream_hello_runnable_github_token: e1b68a2cbfefcfee87d72df6d1c07bce886454b9 +cream_http_rollbar_token: baa03dbd9f814d14ab0c99863ed6a4fb +cream_worker_rollbar_token: 87924b881c3143968cdb059fe41acbc3 +cream_intercom_key: 2548f3affd4923602dbf7bbab66eac377b5d3aac +cream_intercom_id: xs5g95pd +cream_stripe_secret_key: sk_test_4De8Zdkfcyb29swkMmjZUMRh + +customerbot_app_name: customerbot-dev +customerbot_bot_api_key: xoxb-124083371828-QVryLuIIJL8YL1eiMmGNKdih +customerbot_datadog_api_key: d3ab5d85bca924f9d4e33d307beacb4a +customerbot_datadog_app_key: 54a74a9c827bd7792f238f55cf3e1979d32065e6 
+customerbot_github_access_token: c0be376ab39b9a363308d0928f9f80ee549c5af9 +customerbot_intercom_api_key: 9b4ee7b4f9818b36e368985fda0befa611928876 +customerbot_intercom_app_id: xs5g95pd +customerbot_jira_password: ?hzcwzfGcg98FNuG)%ebWyFcTaCyFN#zoEm +customerbot_jira_username: sohail +customerbot_runnable_api_url: https://api.runnable-gamma.com/ +customerbot_runnable_user_content_domain: runnable.ninja +customerbot_slack_whitelist: sohail,praful,keno,yash,anand,jorgito,nathan +customerbot_stripe_secret_key: sk_test_4De8Zdkfcyb29swkMmjZUMRh + +docks_rollbar_key: d1af6567ed0f464fb1d676f38fd31751 + +drake_port: 80 +drake_http_rollbar_token: 52ad749ddb8e47b2a8e15312b6b300fb +drake_worker_rollbar_token: 14152b8572034943b714da27ca607698 + +eru_aws_access_key_id: AKIAIFCVEISSC5JMPWDA +eru_aws_environment: gamma +eru_aws_secret_access_key: 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 +eru_github_id: 8abb08f83f6d1c52bd1a +eru_github_secret: 74a23ee56486d57b14f292283cb04625f600917c +eru_intercom_id: xs5g95pd +eru_intercom_key: ro-9367eb0eb11542323371dcf25b8e260891f89b36 +eru_mongodb_database: gamma +eru_mongodb_password: success-nan-europium-rerun-sheep +eru_mongodb_replset: gamma-rs0 +eru_mongodb_username: eru +eru_subdomain: eru + +kartographer_hello_runnable_github_token: 000cb6b65858ad0a7590bc2370b5118ff1bc95db + +keymaker_pg_host: gamma-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com:32659 +keymaker_pg_pass: n4CsuCiWgbx.2BVJ +keymaker_pg_user: keymaker +keymaker_log_level: trace + +khronos_intercom_api_key: 9b4ee7b4f9818b36e368985fda0befa611928876 +khronos_intercom_app_id: xs5g95pd +khronos_mongo_auth: api:3f5210b8-8fe3-11e5-8e62-07b6eff19ecb +khronos_mongo_database: gamma +khronos_mongo_replset_name: gamma + +mongo_volume_id: vol-0daf05c3686a7e0a8 + +navi_mongo_hosts: mongo +navi_mongo_password: 3f5210b8-8fe3-11e5-8e62-07b6eff19ecb +navi_cookie_secret: c90e5fb4e7e511e597309a79f06e9478 + +optimus_aws_access_id: AKIAJPA2ZYSVVA5V7XXQ +optimus_aws_secret_id: 
5V70AUxfIyHeLvlYZe0xaYevDAdgTOWOn5G7nHlt +optimus_github_deploy_keys_bucket: runnable.deploykeys.production-beta + +pager_duty_key: testkey + +palantiri_rollbar_key: f675e9090d6f483ca4e742af2c7f2f83 + +pheidi_intercom_admin_id: 22382 +pheidi_intercom_id: xs5g95pd +pheidi_intercom_key: ac207e26721127c7db60544b78988c75c2e20e12 +pheidi_mongo_auth: api:3f5210b8-8fe3-11e5-8e62-07b6eff19ecb +pheidi_mongo_database: gamma +pheidi_mongo_replset_name: gamma +pheidi_runnabot_tokens: ff3d259c5d988badbb692cc400998e46cdd5f1fc + +prometheus_volume_id: vol-098bf1c076a182eb2 + +sauron_rollbar_key: 83157ae2d50d4b6398e404c0b9978d26 + +aws_access_key_id: AKIAJ3RCYU6FCULAJP2Q +aws_secret_access_key: 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 + +starlord_vault_token: 8d6b414a-2e6d-65fb-f0b8-c6200ae688ad + +user_vault_s3_access_key: AKIAIOTM4MKOJJVUL7IQ +user_vault_s3_secret_key: 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 +user_vault_s3_bucket: gamma-user-vault +vault_config_file: user-vault.yml + +vault_hello_runnable_github_token: 88ddc423c2312d02a8bbcaad76dd4c374a30e4af +vault_aws_access_key_id: AKIAJ7R4UIM45KH2WGWQ +vault_aws_secret_key: 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 +vault_aws_region: us-east-1 + +marketing_bucket: runnable-gamma.com +marketing_aws_access_key: AKIAICIWKIZEQCMDXLEA +marketing_aws_secret_key: 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 + +rabbit_volume_id: vol-08cb7a2d1218ed9a0 + +redis_volume_id: vol-040eb379856609100 + +ansible_ssh_private_key_file: ~/.ssh/gamma.pem +api_hello_runnable_github_token: 88ddc423c2312d02a8bbcaad76dd4c374a30e4af +bastion_sshd_port: 60709 +builder: gamma-builder +consul_url: "{{ ansible_default_ipv4.address }}" +consul_host_address: 10.4.5.144 +cream_stripe_publishable_key: pk_test_sHr5tQaPtgwiE2cpW6dQkzi8 +datadog_mongodb_pwd: sqa3WBgkCgZsFZuex0kBNahZ +datadog_mongodb_user: datadog +datadog_tags: env:gamma +dock_vault_user_creation_access_token: 137f441f-db71-40a2-8448-10a565323b1e +domain: runnable-gamma.com +env: gamma 
+github_domain: api.github.com +github_protocol: https +is_github_enterprise: false +mongo_hosts: 10.4.7.56:27017,10.4.8.241:27017,10.4.13.116:27017 +mongo_port: 27017 +node_env: production-gamma +pg_host: gamma-infrastructure-db.cnksgdqarobf.us-west-2.rds.amazonaws.com +pg_pass: e9G7zYRCxYmxG9HQ8J9x2BDB +pg_port: 32659 +rabbit_password: wKK7g7NWKpQXEeSzyWB7mIpxZIL8H2mDSf3Q6czR3Vk +rabbit_username: o2mdLh9N9Ke2GzhoK8xsruYPhIQFN7iEL44dQJoq7OM +redis_host_address: 10.4.6.45 +registry_token: 8G0NT1HZQZHYXU7OB1QAI8HA1560V6R68DE6R6B8YJWQAED82JAFCD057ZWIDT76 +registry_username: runnable+gamma +rollbar_web_token: "162a053bebd34e9eb6e2860543c7ae79" +secrets_path: gamma-hosts/secrets +super_user: HelloRunnable +user_content_domain: runnablecloud.com +user_vault_load_balancer: aa8364af12f5a11e7bb9e02301ded9ea-905956029.us-west-2.elb.amazonaws.com +vault_auth_token: f9a39e92-99f3-66a0-a27d-a6e07717d30d +vault_token_01: 2c0be2adf99931bc9ed443000e87bbcd0ef096dddc79f6add97ebe8fa7e93d2c05 +vault_token_02: 3489b87c913058740537bbbd4503f3720d74f7cb0f4e0c30a9436e1e52a18d7003 +vault_token_03: ac4e1e9800cbf77283298d08172a2f0e46d0b7cbc457c47788d04768af12584a02 + +web_intercom_id: xs5g95pd +web_sift_public_key: eea9746dff +web_aws_bucket_region: us-west-2 diff --git a/environments/gamma/secrets/.gitkeep b/environments/gamma/secrets/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/environments/runnable-self-hosted-2/inventory/hosts b/environments/runnable-self-hosted-2/inventory/hosts new file mode 100644 index 00000000..65dccdd9 --- /dev/null +++ b/environments/runnable-self-hosted-2/inventory/hosts @@ -0,0 +1,189 @@ +[bastion] +10.10.56.78 + +[userland] +USERLAND + +[navi-proxy] +localhost + +[mongo] +localhost + +[api_group:children] +worker +api +socket-server + +[api] +localhost + +[big-poppa] +localhost + +[cream] +localhost + +[consul] +localhost + +[datadog] +localhost + +[docker-listener] +localhost + +[vault] +# Should there be anything here? 
+localhost + +[user-vault] +localhost + +[user-local] +localhost + +[worker] +localhost + +[agreeable-egret] +localhost + +[eru] +localhost + +[navi] +localhost + +[ingress] +localhost + +[link] +localhost + +[mongo-navi] +localhost + +[clio] +localhost + +[charon] +localhost + +[keymaker] +localhost + +[khronos] +localhost + +[optimus] +localhost + +[detention] +localhost + +[palantiri] +localhost + +[rabbitmq] +localhost + +[web] +localhost + +[marketing] +localhost + +[redis] +localhost + +[sauron] +localhost + +[shiva] +localhost + +[starlord] +localhost + +[socket-server] +localhost + +[socket-server-proxy] +localhost + +[swarm-manager] +localhost + +[drake] +localhost + +[pheidi] +localhost + +[github-varnish] +localhost + +[arithmancy] +localhost + +[docks] + +[dock] + +[prometheus] +localhost + +[kartographer] +localhost + +[user-local] +localhost + +[navi-port-router] +USERLAND + +[gamma:children] +agreeable-egret +api +arithmancy +bastion +big-poppa +charon +consul +cream +datadog +dock +docker-listener +docks +drake +eru +github-varnish +ingress +kartographer +khronos +mongo +mongo-navi +navi +navi-proxy +optimus +pheidi +prometheus +rabbitmq +redis +sauron +shiva +socket-server +socket-server-proxy +starlord +swarm-manager +user-vault +userland +web +worker + +[local] +127.0.0.1 + +[targets] +localhost ansible_connection=local bastion_name=10.10.56.78 gather_facts=False diff --git a/environments/runnable-self-hosted-2/main.yml b/environments/runnable-self-hosted-2/main.yml new file mode 100644 index 00000000..9064a8ca --- /dev/null +++ b/environments/runnable-self-hosted-2/main.yml @@ -0,0 +1,129 @@ +# Generated By Terraform +aws_region: us-west-2 +aws_access_key_id: AKIAIDCQ7YKWQPHXXVPA # TODO, add to terraform +aws_secret_access_key: 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 # TODO, Add to terraform +postgres_host: tf-00764cdb5ae0cde9ec08f55623.cnksgdqarobf.us-west-2.rds.amazonaws.com +postgres_password: 8pl2ZwYWGAp0AWmFxrDPWhOsd40 +postgres_user: 
runnable +# User vault is accesible from cluster +user_vault_load_balancer: "vault.{{ domain }}" +mongo_hosts: 'mongo' +# Accessible from dock +consul_host_address: 'consul.{{ domain }}' # TODO: Make private +# Accessible from dock +redis_host_address: +mongo_volume_id: vol-0b364425ec3f190ce # TODO, add to terraform +rabbit_volume_id: vol-03b21a05ad0d1030e # TODO, add to terraform +redis_volume_id: vol-0b364425ec3f190ce # TODO, add to terraform +environment: runnable-self-hosted-2 +docks_ip_range: "34.223.211.0/24" # NOTE: Not usable because of loss of IP address from docks to k8s cluster + +# User Provided +domain: runnable-test-main-2.info +user_content_domain: runnable-test-navi-2.info +super_user: runnable-self-hosted-2-hello-runnable +vault_auth_token: 079094d8-9852-c091-f50e-9db200ca6053 +vault_token_01: 4912f3702fd174662020a3bcab4ae05ce0c432c2b78aee7183e8ec51c3ec3f7501 +vault_token_02: 1997da00dc760190288eb21ee8a5e9bb115d65daef3c232eb66e580a9a3cfe4f02 +vault_token_03: aae1fa2d57d2a7ba3a697655f41f02c65afd87ada1a5b3bac02b1a0b5d2d79a003 +vault_token_04: 69e5fefb9b2849f9fe13d5d98eb2689574851259cb5547a5f90f8a9f0fc5636b04 +vault_token_05: da93ded6108cefd3ecf41192920883e83f25f02e85ccd7318f4ac89ec8d4e48405 +rabbit_password: 047a6b3ce2c74be694afb2ac3a960c44 +rabbit_username: b56cbb97644743809559fa0297c8e51d +mongo_user: 93ce22876b554eba8c5d392a5d57ee71 +mongo_password: 1c1d692441314b0cb060d6e70f78a5a4 +github_token: c9a3140944a239a337a92ab1a1e022b000131c3b # thejsj +github_client_id: 41aa03dc2948c3bc9b8c +github_client_secret: 4e7d070cd472945e2837fbca0748be15edf77edc +hello_runnable_github_token: d9980e93ce53e6312457c72528ee76640e0dbbe7 # runnable-self-hosted-2-hello-runnable +hello_runnable_github_id: 29638669 # runnable-self-hosted-2-hello-runnable +navi_cookie_secret: fb4acc7a69f04c10b84e943eb8538768 +dock_vault_user_creation_access_token: + +# Provided by Runnable +registry_username: "runnable+zendeskpull" +registry_token: 
FJQPM3VZKWCIMPO6TWQ9BD3TPWJD008HHH0RCDC3RKYO4AHMQ89RN4H4E94QUUCY +intercom_id: wqzm3rju +intercom_key: dG9rOjcyYzAwMzMzX2MxMGVfNDBiYl9iY2M3XzcxYzExMzFkYTI1ODoxOjA + +api_aws_access_key_id: "{{ aws_access_key_id }}" +api_aws_alias_host: "{{ aws_region }}.compute.internal" +api_aws_secret_access_key: "{{ aws_secret_access_key }}" +api_github_client_id: "{{ github_client_id }}" +api_github_client_secret: "{{ github_client_secret }}" +api_github_deploy_keys_bucket: "runnable.deploykeys.{{ environment }}" +api_mongo_user: "{{ mongo_user }}" +api_mongo_password: "{{ mongo_password }}" +api_mongo_database: "api" +api_s3_context_bucket: "runnable.context.resources.{{ environment }}" +api_s3_log_bucket: "{{ environment }}.container-logs" + +redis_host_address: "redis" +redis_external_host_address: "redis.{{ domain }}" + +big_poppa_github_token: "{{ github_token }}" +big_poppa_intercom_id: "{{ intercom_id }}" +big_poppa_intercom_key: "{{ intercom_key }}" +big_poppa_mongo_auth: "{{ mongo_user }}:{{ mongo_password }}" +big_poppa_mongo_database: "{{ api_mongo_database }}" +big_poppa_pg_host: "{{ postgres_host }}" +big_poppa_pg_pass: "{{ postgres_password}}" +big_poppa_pg_user: "{{ postgres_user }}" + +clio_mongo_user: "{{ mongo_user }}" +clio_mongo_password: "{{ mongo_password }}" +clio_mongo_database: "clio" + +kartographer_hello_runnable_github_token: "{{ hello_runnable_github_token }}" + +khronos_intercom_api_key: "{{ intercom_key }}" +khronos_intercom_app_id: "{{ intercom_id }}" +khronos_mongo_auth: "{{ mongo_user }}:{{ mongo_password }}" +khronos_mongo_database: "{{ api_mongo_database }}" + +optimus_aws_access_id: "{{ aws_access_key_id }}" +optimus_aws_secret_id: "{{ aws_secret_access_key }}" +optimus_github_deploy_keys_bucket: "runnable.deploykeys.{{ environment }}" + +pheidi_intercom_admin_id: 22382 +pheidi_intercom_id: "{{ intercom_id }}" +pheidi_intercom_key: "{{ intercom_key }}" +pheidi_mongo_auth: "{{ mongo_user }}:{{ mongo_password }}" +pheidi_mongo_database: "{{ 
api_mongo_database }}" +pheidi_runnabot_tokens: "{{ github_token }}" + +vault_hello_runnable_github_token: "{{ hello_runnable_github_id }}" +vault_aws_access_key_id: "{{ aws_access_key_id }}" +vault_aws_secret_key: "{{ aws_secret_access_key }}" +vault_aws_region: "{{ aws_region }}" +vault_consul_address: "consul:8500" + +marketing_bucket: "{{ domain }}" +marketing_aws_access_key: "{{ aws_access_key_id }}" +marketing_aws_secret_key: "{{ aws_secret_access_key }}" + +link_hello_runnable_github_token: "{{ hello_runnable_github_token }}" + +api_hello_runnable_github_token: "{{ hello_runnable_github_token }}" +bastion_sshd_port: 22 +env: "{{ environment }}" +is_github_enterprise: false +mongo_port: 27017 +navi_mongo_user: "{{ mongo_user }}" +navi_mongo_password: "{{ mongo_password }}" +node_env: "{{ environment }}" +pg_host: "{{ postgres_host }}" +pg_pass: "{{ postgres_password }}" +pg_port: 5432 +include_eru_proxy: false +include_ergre_proxy: false +is_on_prem: true + +web_intercom_id: "{{ intercom_id }}" +web_aws_bucket_region: "{{ aws_region }}" + +# Docks +ec2_describe_aws_access_key: "{{ aws_access_key_id }}" +ec2_describe_aws_secret_key: "{{ aws_secret_access_key }}" +dock_registry_s3_access_key: "{{ aws_access_key_id }}" +dock_registry_s3_secret_key: "{{ aws_secret_access_key }}" diff --git a/package.json b/package.json new file mode 100644 index 00000000..39adf67a --- /dev/null +++ b/package.json @@ -0,0 +1,25 @@ +{ + "name": "ansible", + "version": "0.0.1", + "description": "ansible", + "main": "index.js", + "directories": { + "test": "test" + }, + "scripts": { + "test": "mocha test" + }, + "repository": { + "type": "git", + "url": "http://github.com/CodeNow/devops-scripts" + }, + "author": "", + "license": "BSD-2-Clause", + "bugs": { + "url": "https://github.com/CodeNow/devops-scripts/issues" + }, + "homepage": "https://github.com/CodeNow/devops-scripts", + "dependencies": { + "aws-sdk": "^2.1.48" + } +} diff --git a/ssh/config b/ssh/config new file mode 100644 
index 00000000..2b852777 --- /dev/null +++ b/ssh/config @@ -0,0 +1,155 @@ +Host gamma* + User ubuntu + ForwardAgent yes + StrictHostKeyChecking no + Identityfile ~/.ssh/gamma.pem + +Host delta* + User ubuntu + ForwardAgent yes + StrictHostKeyChecking no + Identityfile ~/.ssh/delta.pem + + +################################################################################ +# utility +################################################################################ +Host migration-router + HostName 52.24.117.16 + User ubuntu + ForwardAgent yes + IdentityFile ~/.ssh/oregon.pem + +################################################################################ +# DELTA staging runnable.io +################################################################################ + +Host delta-staging-data + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.6.59 22 + +################################################################################ +# Gamma +################################################################################ + +Host gamma-bastion + HostName 54.69.34.161 + Port 60709 + +Host 10.4.*.* + User ubuntu + ForwardAgent yes + StrictHostKeyChecking no + ProxyCommand ssh -o StrictHostKeyChecking=no -q ubuntu@gamma-bastion nc %h %p + IdentityFile ~/.ssh/gamma.pem + +Host gamma-builder + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.8.95 22 + +Host gamma-consul-a + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.5.144 22 + +Host gamma-consul-b + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.11.220 22 + +Host gamma-consul-c + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.13.196 22 + +Host gamma-mongo-a + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.7.56 22 + +Host gamma-mongo-b + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.8.241 22 + +Host gamma-mongo-c + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.13.116 22 + +Host gamma-redis + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.6.45 22 + + 
+################################################################################ +# Delta +################################################################################ + +Host delta-bastion + HostName 52.37.51.230 + Port 60506 + +Host 10.8.*.* + User ubuntu + ForwardAgent yes + StrictHostKeyChecking no + ProxyCommand ssh -o StrictHostKeyChecking=no -q ubuntu@delta-bastion nc %h %p + IdentityFile ~/.ssh/delta.pem + +Host delta-builder + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.12.242 22 + +Host delta-consul-a + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.6.122 22 + +Host delta-consul-b + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.8.130 22 + +Host delta-consul-c + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.14.86 22 + +Host delta-metabase + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.14.193 22 + +Host delta-redis + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.4.9 22 + +Host delta-rabbit + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.4.195 22 + +Host delta-navi-port-router + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.0.111 22 + +Host delta-mongo-a + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.4.216 22 + +Host delta-mongo-b + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.10.254 22 + +Host delta-mongo-c + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.13.89 22 + +Host delta-prometheus + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.5.172 22 + +Host delta-navi + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.6.41 22 + +Host delta-swarm-manager + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.4.40 22 + +################################################################################ +# other +################################################################################ +Host 127.0.0.1 + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + User core + LogLevel QUIET + +################################################################################ +# Self Hosted 
+################################################################################ + +Host self-hosted-2* + User ubuntu + ForwardAgent yes + StrictHostKeyChecking no + Identityfile ~/.ssh/gamma.pem + +Host self-hosted-2-bastion + HostName 34.212.166.255 + Port 22 + +Host 10.10.*.* + User admin + ForwardAgent yes + StrictHostKeyChecking no + ProxyCommand ssh -o StrictHostKeyChecking=no -q ubuntu@self-hosted-2-bastion nc %h %p + IdentityFile ~/.ssh/gamma.pem diff --git a/wiki/dailynews.png b/wiki/dailynews.png new file mode 100644 index 00000000..dba25023 Binary files /dev/null and b/wiki/dailynews.png differ diff --git a/wiki/rollbar-01-overview.png b/wiki/rollbar-01-overview.png new file mode 100644 index 00000000..e2d4fc2f Binary files /dev/null and b/wiki/rollbar-01-overview.png differ diff --git a/wiki/rollbar-02-projects-dropdown.png b/wiki/rollbar-02-projects-dropdown.png new file mode 100644 index 00000000..b324fb18 Binary files /dev/null and b/wiki/rollbar-02-projects-dropdown.png differ diff --git a/wiki/rollbar-03-error-vs-warning.png b/wiki/rollbar-03-error-vs-warning.png new file mode 100644 index 00000000..7a3f68ec Binary files /dev/null and b/wiki/rollbar-03-error-vs-warning.png differ diff --git a/wiki/rollbar-04-occurrences.png b/wiki/rollbar-04-occurrences.png new file mode 100644 index 00000000..1af3cda3 Binary files /dev/null and b/wiki/rollbar-04-occurrences.png differ diff --git a/wiki/rollbar-05-environment-dropdown.png b/wiki/rollbar-05-environment-dropdown.png new file mode 100644 index 00000000..ba9e4bbc Binary files /dev/null and b/wiki/rollbar-05-environment-dropdown.png differ diff --git a/wiki/rollbar-06-error-details.png b/wiki/rollbar-06-error-details.png new file mode 100644 index 00000000..e7d4282e Binary files /dev/null and b/wiki/rollbar-06-error-details.png differ diff --git a/wiki/rollbar-07-error-controls.png b/wiki/rollbar-07-error-controls.png new file mode 100644 index 00000000..3352428c Binary files /dev/null and 
b/wiki/rollbar-07-error-controls.png differ diff --git a/wiki/rollbar-08-items-view.png b/wiki/rollbar-08-items-view.png new file mode 100644 index 00000000..82eaa2ef Binary files /dev/null and b/wiki/rollbar-08-items-view.png differ diff --git a/wiki/scurry.jpg b/wiki/scurry.jpg new file mode 100644 index 00000000..b839f7ef Binary files /dev/null and b/wiki/scurry.jpg differ diff --git a/wiki/staging-environment-arch.png b/wiki/staging-environment-arch.png new file mode 100644 index 00000000..edd87087 Binary files /dev/null and b/wiki/staging-environment-arch.png differ diff --git a/wiki/swarm-setup.gif b/wiki/swarm-setup.gif new file mode 100644 index 00000000..b41e6eb1 Binary files /dev/null and b/wiki/swarm-setup.gif differ