diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..8ba103f0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,18 @@
+node_modules
+*.pem
+pass
+.pass
+hellorunnable
+dump.rdb
+erl_crash.dump
+npm-debug.log
+ca.srl
+.DS_Store
+ansible/roles/hipache/templates/runnable*
+ansible/certs/*
+*.retry
+*.tfstate*
+terraform/credentials.tfvars
+terraform/.build
+ansible/secrets/*
+.idea
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 00000000..3b7ed08f
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,38 @@
+FROM ubuntu:14.04
+
+RUN apt-get update -y
+RUN DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends -y -q unzip build-essential python-pip python-dev python-yaml libxml2-dev libxslt1-dev zlib1g-dev git curl sshpass openssh-client
+RUN pip install --upgrade pyyaml jinja2 pycrypto
+
+RUN curl -sL https://deb.nodesource.com/setup_7.x | sudo -E bash - && \
+    apt-get install -y nodejs
+
+RUN curl -O https://releases.hashicorp.com/vault/0.6.3/vault_0.6.3_linux_amd64.zip && \
+    unzip ./vault_0.6.3_linux_amd64.zip -d /bin && \
+    chmod +x /bin/vault
+
+RUN git clone git://github.com/ansible/ansible.git --recursive /opt/ansible
+
+RUN cd /opt/ansible && \
+    git checkout v2.1.3.0-1 && \
+    git submodule update --init --recursive && \
+    bash -c 'source ./hacking/env-setup'
+
+ENV PATH /opt/ansible/bin:$PATH
+ENV PYTHONPATH /opt/ansible/lib:$PYTHONPATH
+ENV MANPATH /opt/ansible/docs/man:$MANPATH
+
+ADD ./ssh /root/.ssh
+RUN echo 'eval `ssh-agent`' >> /root/start.sh
+RUN echo 'ssh-add /root/.ssh/id_rsa' >> /root/start.sh
+RUN echo 'npm start' >> /root/start.sh
+RUN chmod +x /root/start.sh
+
+ADD ./ansible/ /ansible
+RUN cd /ansible && npm install
+
+ADD ./deployer/ /deployer
+RUN cd /deployer && npm install
+
+WORKDIR /deployer
+CMD /root/start.sh
diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..4cda39ab
--- /dev/null
+++ b/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,21 @@
+[//]: # (Let's get your best description here about what's happened! Here's a list as well, if you like:)
+
+* I removed this function
+* I fixed all these things, etc.
+
+#### Dependencies
+
+- [ ] list dependencies (e.g., PR from another branch or repo; tags or versions required prior to deployment)
+
+#### Tests
+
+> Test any modifications on one of our environments.
+
+- [ ] tested on _environment_ by _someone_
+
+#### Deployment (post-merge)
+
+> Ensure that all environments have the given changes.
+
+- [ ] deployed to gamma
+- [ ] deployed to delta
diff --git a/README.md b/README.md
index 9404c0e5..9ee6b133 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,145 @@
 devops-scripts
 ==============
-devops-scripts
+Scripts for managing our deployments.
+
+# How to Deploy at Runnable
+## Setup
+
+Before you can deploy you'll need to install the appropriate tools, scripts, and keys on your local machine.
+To do so, execute the following steps:
+
+1. Install Ansible v2.1.0.0 (the deploy automation tool we use to deploy projects to production)
+http://docs.ansible.com/intro_installation.html
+
+2. Get the latest devops-scripts (the recipes that we use to deploy various projects)
+https://github.com/CodeNow/devops-scripts
+
+3. Change to the devops-scripts repo directory and run the following command:
+`ln -s //ssh/config ~/.ssh/config`
+
+4. Obtain the “Keys of Power” from someone who can already deploy (ask Anand if you don’t know). Depending on what you want to deploy, you'll receive `*.pem` files.
+
+5. Move the “Keys of Power” .pem files to your `~/.ssh` directory
+
+6. Install two required tools onto your machine:
+```bash
+brew update && brew install vault daemon
+```
+
+At this point you should be capable of deploying; keep reading to find out how to actually perform a deploy!
+
+## Deploying Services
+- **IMPORTANT:** Always pull the latest devops-scripts (`git pull origin master`).
+- **IMPORTANT:** Before you deploy a new version of any project, make sure to determine which version of the project is currently deployed. This way you can quickly revert to the last stable release if something goes wrong after pushing a new version.
+
+### Step 1: Determine the Current Deploy Version
+To determine the latest deploy tag for a project, check the project's repository on
+GitHub and look for the latest release tag (should be in the form `vX.Y.Z`). Once you've located the tag,
+copy it down somewhere that is easily and quickly accessible (you may need to use it quickly if something goes wrong).
+
+### Step 2: Deploy the Project via `ansible-playbook`
+
+- **WARNING:** If you were unable to determine the last deploy tag for a project and cannot revert, **STOP**.
+  Ask someone on the team for help before continuing.
+- **IMPORTANT:** All commands should be run from the `devops-scripts/ansible` directory.
+
+#### Ansible Vault
+
+Please note that there are playbooks that require encrypted [ansible vault](http://docs.ansible.com/ansible/playbooks_vault.html) files. If you see the following error:
+
+```bash
+ERROR: A vault password must be specified to decrypt # snip
+```
+
+you will need to re-run the playbook with:
+
+```bash
+--ask-vault-pass
+```
+
+#### Latest Tag
+Build and deploy a service to the latest tag of its repository. This will build
+the docker image needed to run the container on our infrastructure.
+
+#### Branch or Tag
+Build and deploy a service to a specific branch or tag on its repository. This performs a build
+of the docker image needed to run the service on our infrastructure.
+
+##### Command
+```
+ansible-playbook -i ./[inventory_dir] [service-playbook] -e git_branch=[branch-or-tag] -t deploy
+```
+
+##### Arguments
+- `[inventory_dir]` - The environment inventory files (servers and variables). Should be one of the following:
+  - `stage-hosts` - Runnable sandbox staging environment services
+  - `gamma-hosts` - Gamma services (internal use only; production mirror)
+  - `delta-hosts` - Delta services (real production)
+- `[service-playbook]` - The playbook for the service you wish to deploy, e.g.:
+  - `api.yml` - Deploys both the api and the api-workers services
+  - `shiva.yml` - Deploys the shiva micro-service
+  - `charon.yml` - Deploys a specific version of charon DNS to all docks
+- `[branch-or-tag]` - The branch or tag you wish to deploy, e.g.:
+  - `-e git_branch=v1.9.9` (version tag)
+  - `-e git_branch=my-feature-branch` (branch)
+  - `-e git_branch=3928745892364578623` (specific commit)
+
+##### Rebuild and Deploy Tag or Branch (No Cache)
+Forces a rebuild of the docker image for the given service at the given branch or tag and then deploys the
+newly created image. This is useful when a previously deployed branch has new changes that need to
+be deployed to an environment.
+
+Generally this command is only used with `gamma-hosts/`, as it is often used to update code
+being tested in the production mirror.
+
+##### Command
+```
+ansible-playbook -i ./[inventory_dir] [service-playbook] -e git_branch=[branch-or-tag] -e build_args=--no-cache
+```
+
+##### Arguments
+- `[inventory_dir]` - The environment inventory files (servers and variables).
+- `[service-playbook]` - The playbook for the service you wish to deploy.
+- `[branch-or-tag]` - The branch or tag you wish to deploy.
+
+
+## Reverting
+If, for some reason, the new deploy is not operating as expected, you can quickly revert by referencing the tag you collected in Step 1.
+Simply run the appropriate deploy command from the previous section with the last release tag and the new deploy will be reverted.
+
+## Deploy Songs
+
+- **IMPORTANT:** Make sure to play the song loud and proud when deploying!
+
+It is the custom at Runnable to play a song to the entire team when deploying. For each of the repositories, here are the respective songs:
+
+| Service | Deploy Song Link |
+| ------- | ---------------- |
+| api / api-workers | [Push it - Rick Ross](https://www.youtube.com/watch?v=qk2jeE1LOn8) |
+| arithmancy | [OneRepublic - Counting Stars](https://www.youtube.com/watch?v=hT_nvWreIhg) |
+| big poppa | [Big Poppa - The Notorious B.I.G.](https://www.youtube.com/watch?v=phaJXp_zMYM) |
+| charon | [Enter Sandman - Metallica](https://www.youtube.com/watch?v=CD-E-LDc384) |
+| cream | [C.R.E.A.M. - Wu-Tang Clan](https://www.youtube.com/watch?v=PBwAxmrE194) |
+| deployer | [Roll Out](https://www.youtube.com/watch?v=t21DFnu00Dc) |
+| detention | [Unbreakable Kimmy Schmidt](https://youtu.be/CV9xF8CjhJk?t=21s) |
+| docker-listener | [Call Me Maybe - Carly Rae Jepsen](https://www.youtube.com/watch?v=fWNaR-rxAic) |
+| drake | [Drake - Hotline Bling](https://www.youtube.com/watch?v=uxpDa-c-4Mc) |
+| filibuster | [He's a Pirate - Pirates Of The Caribbean](https://www.youtube.com/watch?v=yRh-dzrI4Z4) |
+| Full Stack Deploy (`all.yml`) | [The Cleveland Orchestra (George Szell conducting) Ludwig van Beethoven Symphony No. 9 "Chorale (Ode To Joy)" Opus 125 IV.](https://www.youtube.com/watch?v=4g5770gaais) |
+| github-proxy | [Proxy - Martin Garrix](https://www.youtube.com/watch?v=NWB6-PJw4Mk) |
+| khronos | [Time After Time - Cyndi Lauper](https://www.youtube.com/watch?v=VdQY7BusJNU) |
+| krain | [Men at Work - Down Under](https://www.youtube.com/watch?v=XfR9iY5y94s) |
+| link | [Zelda Main Theme Song](https://www.youtube.com/watch?v=cGufy1PAeTU) |
+| mavis | [Fairy Tail theme song](https://www.youtube.com/watch?v=R4UFCTMrV-o) |
+| navi | [Ocarina of Time: Lost Woods The Legend of Zelda](https://www.youtube.com/watch?v=iOGpdGEEcJM) |
+| optimus | [Original Transformers Opening Theme](https://www.youtube.com/watch?v=nLS2N9mHWaw) |
+| pheidi | [Chariots of Fire Theme](https://www.youtube.com/watch?v=CSav51fVlKU) |
+| runnable-angular | [Push it to the limit - Scarface](https://www.youtube.com/watch?v=9D-QD_HIfjA) |
+| sauron | [Sauron theme song from LOTR](https://www.youtube.com/watch?v=V_rk9VBrXMY) |
+| Security Groups | [Out of the Woods - Taylor Swift](https://www.youtube.com/watch?v=JLf9q36UsBk) |
+| shiva | [FFXIV Shiva Theme](https://www.youtube.com/watch?v=noJiH8HLZw4) |
+| swarm-deamon | [Pink Floyd - Another Brick In The Wall](https://www.youtube.com/watch?v=5IpYOF4Hi6Q) |
+| swarm-manager | [Eric Prydz VS Pink Floyd - 'Proper Education'](https://www.youtube.com/watch?v=IttkDYE33aU) |
+| varnish | [Karate Kid Theme Song](https://www.youtube.com/watch?v=VIYqtkdMxQg) |
+| vault / vault-values | [Seal - Kiss From A Rose](https://www.youtube.com/watch?v=zP3so2hY4CM) |
diff --git a/ansible/.gitignore b/ansible/.gitignore
new file mode 100644
index 00000000..61053df0
--- /dev/null
+++ b/ansible/.gitignore
@@ -0,0 +1,2 @@
+_cache
+*.pyc
diff --git a/ansible/README.md b/ansible/README.md
new file mode 100644
index 00000000..78aec1d0
--- /dev/null
+++ b/ansible/README.md
@@ -0,0 +1,15 @@
+
+Ansible provides a framework for our administration and deployment. It requires a consistent organization for scripts and variables. By design it uses SSH to connect to all hosts before it executes the actions, so it can be run from any machine. All Ansible-provided functionality is idempotent, and custom scripts are strongly encouraged to match that standard.
+
+Here is the organization of the files in `devops-scripts/ansible`:
+
+* `*-hosts` - Files naming all the servers
+* `*.yml` - The top-level Ansible actions. These files describe which vars and roles are executed on a host.
+* `/group_vars` - YAML files that define variables and values for the Ansible scripts. These mostly map one-to-one with machine types in AWS and are simple key-value maps.
+* `/library` - Third-party libraries and scripts.
+* `/roles` - A set of folders containing the Ansible roles. A role defines the actions Ansible executes; the centerpiece is `/tasks/main.yml`, which defines named tasks and their requirements.
+The role can have several sub-folders:
+  * `/handlers` - handlers that tasks can trigger via `notify` (for example, to restart a service)
+  * `/defaults` - default values for the role's variables
+  * `/meta` - contains dependencies
+  * `/template` - templates for any files that need to be generated and delivered.
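+
+For illustration only, here is a minimal sketch of what a role's `/tasks/main.yml` can look like (the `example-service` role, its variables, and its template file are hypothetical and not actual roles in this repo):
+
+```yaml
+---
+# roles/example-service/tasks/main.yml
+- name: render the service configuration from a template
+  template:
+    src: example-service.conf.j2   # looked up in the role's template folder
+    dest: /etc/example-service.conf
+  notify: restart example-service  # runs the matching handler from /handlers
+
+- name: ensure the service is running
+  service:
+    name: example-service
+    state: started
+```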
diff --git a/ansible/agreeable-egret.yml b/ansible/agreeable-egret.yml new file mode 100644 index 00000000..db0f5215 --- /dev/null +++ b/ansible/agreeable-egret.yml @@ -0,0 +1,11 @@ +--- +- hosts: redis +- hosts: agreeable-egret + vars_files: + - group_vars/alpha-agreeable-egret.yml + roles: + - role: notify + tags: [ notify ] + - role: builder + tags: [ build ] + - role: container_kill_start diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 00000000..8083aaca --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,3 @@ +[defaults] +# Required so `sudo: yes` does not lose the environment variables, which hold the ssh-agent socket +sudo_flags = -HE diff --git a/ansible/api-core.yml b/ansible/api-core.yml new file mode 100644 index 00000000..3643aac1 --- /dev/null +++ b/ansible/api-core.yml @@ -0,0 +1,34 @@ +--- +- hosts: mongodb +- hosts: navi +- hosts: charon +- hosts: rabbitmq +- hosts: redis +- hosts: big-poppa +- hosts: cream +- hosts: swarm-manager +- hosts: consul + +- hosts: api + vars_files: + - group_vars/alpha-api-base.yml + - group_vars/alpha-api.yml + roles: + - role: notify + rollbar_token: "{{ api_rollbar_key }}" + tags: [ notify ] + + - role: builder + tags: [ build ] + + - role: docker_client + + - role: tls-client + tags: [ tls ] + tls_service: mongodb + + - role: datadog + tags: [ datadog ] + + - role: container_start + number_of_containers: "{{ ansible_processor_cores }}" diff --git a/ansible/api.yml b/ansible/api.yml new file mode 100644 index 00000000..d6520458 --- /dev/null +++ b/ansible/api.yml @@ -0,0 +1,3 @@ +- include: api-core.yml +- include: socket-server.yml +- include: workers.yml diff --git a/ansible/app-services.yml b/ansible/app-services.yml new file mode 100644 index 00000000..46333f10 --- /dev/null +++ b/ansible/app-services.yml @@ -0,0 +1,6 @@ +- include: detention.yml git_branch="{{ detention_branch }}" +- include: drake.yml git_branch="{{ drake_branch }}" +- include: eru.yml git_branch="{{ eru_branch }}" +- include: metis.yml git_branch="{{ astral_branch }}" +- include: shiva.yml git_branch="{{ astral_branch }}" +# run with: `--extra-vars "@current_versions.yml"` diff --git a/ansible/arithmancy.yml b/ansible/arithmancy.yml new file mode 100644 index 00000000..1054d4d4 --- /dev/null +++ b/ansible/arithmancy.yml @@ -0,0 +1,13 @@ +--- +- hosts: rabbitmq +- hosts: consul + +- hosts: arithmancy + vars_files: + - group_vars/alpha-arithmancy.yml + roles: + - role: notify + rollbar_token: "{{ arithmancy_rollbar_token }}" + tags: [ notify ] + - { role: builder, tags: [build] } + - { role: container_kill_start } diff --git a/ansible/base.yml b/ansible/base.yml new file mode 100644 index 00000000..6ee8aead --- /dev/null +++ b/ansible/base.yml @@ -0,0 +1,23 @@ +--- +- hosts: localhost + connection: local + tasks: + - fail: msg="`host` (target host) needs to be defined to run this role" + when: host is not defined + + - add_host: + name={{ host }} + groups=dock + +- hosts: "{{ host }}" + roles: + - { role: apt_update } + - { role: package-dock, tags: [ dock, package ] } + - { role: package-aws, tags: [ dock, package ] } + - { role: package_ntp } + - { role: build_essential } + - { role: docker, tags: [ docker ] } + - { role: datadog, tags: [ datadog ] } + - { role: ulimits, tags: [ ulimits ] } + - { role: loggly, tags: [ loggly, clean ] } + - { role: node } diff --git a/ansible/bastion.yml b/ansible/bastion.yml new file mode 100644 index 00000000..d2216eef --- /dev/null +++ b/ansible/bastion.yml @@ -0,0 +1,11 @@ +--- +- hosts: consul + +- 
hosts: bastion + roles: + - role: notify + tags: [ notify ], + app_name: bastion_sshd, + git_branch: latest, + name: bastion_sshd + - { role: bastion_sshd, tags: bastion-sshd } diff --git a/ansible/big-poppa-http.yml b/ansible/big-poppa-http.yml new file mode 100644 index 00000000..f556f39c --- /dev/null +++ b/ansible/big-poppa-http.yml @@ -0,0 +1,15 @@ +--- +- hosts: rabbitmq +- hosts: consul +- hosts: mongodb +- hosts: github-varnish + +- hosts: big-poppa + vars_files: + - group_vars/alpha-big-poppa-http.yml + roles: + - role: notify + rollbar_token: "{{ big_poppa_http_rollbar_token }}" + tags: [ notify ] + - { role: builder, tags: [ build ] } + - { role: container_kill_start } diff --git a/ansible/big-poppa-worker.yml b/ansible/big-poppa-worker.yml new file mode 100644 index 00000000..b5c207d6 --- /dev/null +++ b/ansible/big-poppa-worker.yml @@ -0,0 +1,15 @@ +--- +- hosts: rabbitmq +- hosts: consul +- hosts: mongodb +- hosts: github-varnish + +- hosts: big-poppa + vars_files: + - group_vars/alpha-big-poppa-worker.yml + roles: + - role: notify + rollbar_token: "{{ big_poppa_worker_rollbar_token }}" + tags: [ notify ] + - { role: builder, tags: [ build ] } + - { role: container_kill_start } diff --git a/ansible/big-poppa.yml b/ansible/big-poppa.yml new file mode 100644 index 00000000..06e52daa --- /dev/null +++ b/ansible/big-poppa.yml @@ -0,0 +1,2 @@ +- include: big-poppa-http.yml +- include: big-poppa-worker.yml diff --git a/ansible/cadvisor.yml b/ansible/cadvisor.yml new file mode 100644 index 00000000..ad1d0629 --- /dev/null +++ b/ansible/cadvisor.yml @@ -0,0 +1,7 @@ +--- +- hosts: docks + vars_files: + - "group_vars/alpha-cadvisor.yml" + roles: + - { role: notify, tags: "notify" } + - { role: container_kill_start } diff --git a/ansible/charon.yml b/ansible/charon.yml new file mode 100644 index 00000000..774bfc5d --- /dev/null +++ b/ansible/charon.yml @@ -0,0 +1,13 @@ +--- +- hosts: redis +- hosts: consul + +- hosts: "{{ dock | default('docks') }}" + vars_files: + - group_vars/alpha-charon.yml + roles: + - { role: notify, tags: [notify] } + - { role: git_repo } + - { role: node_service } + - { role: loggly } + - { role: consul_value, tags: [consul_value] } diff --git a/ansible/consul-services.yml b/ansible/consul-services.yml new file mode 100644 index 00000000..3c43c909 --- /dev/null +++ b/ansible/consul-services.yml @@ -0,0 +1,7 @@ +--- +- hosts: rabbitmq +- hosts: redis + +- hosts: consul + roles: + - { role: consul-services } diff --git a/ansible/consul-template-updater.yml b/ansible/consul-template-updater.yml new file mode 100644 index 00000000..9731cdcb --- /dev/null +++ b/ansible/consul-template-updater.yml @@ -0,0 +1,12 @@ +--- +- hosts: consul + +- hosts: "{{ host }}" + vars_files: + - group_vars/{{ var_file }} + - group_vars/alpha-consul-template-updater.yml + roles: + - role: notify + tags: notify + + - role: container_kill_start diff --git a/ansible/consul-values.yml b/ansible/consul-values.yml new file mode 100644 index 00000000..5f226ffd --- /dev/null +++ b/ansible/consul-values.yml @@ -0,0 +1,32 @@ +--- +- hosts: consul + vars_files: + - "group_vars/alpha-consul.yml" + tasks: + - name: make sure httplib2 is installed + become: true + apt: package=python-httplib2 state=present + + - name: put values into consul + run_once: true + when: write_values is defined + uri: + method=PUT + url=http://{{ ansible_default_ipv4.address }}:8500/v1/kv/{{ item.key }} + body="{{ item.value }}" + with_items: "{{ consul_seed }}" + + - name: get values from consul + run_once: true + when: 
read_values is defined + uri: + method=GET + url=http://{{ ansible_default_ipv4.address }}:8500/v1/kv/{{ item.key }} + with_items: "{{ consul_seed }}" + register: values + + - name: print values to screen + run_once: true + when: read_values is defined + debug: msg="{{ item.item.key }}" -> "{{ item.json[0].Value | b64decode }}" + with_items: "{{ values.results }}" diff --git a/ansible/consul.yml b/ansible/consul.yml new file mode 100644 index 00000000..a218a600 --- /dev/null +++ b/ansible/consul.yml @@ -0,0 +1,11 @@ +--- +- hosts: consul + serial: 1 + vars_files: + - group_vars/alpha-consul.yml + roles: + - { role: notify, tags: notify } + - { role: database } + - { role: datadog, tags: [ datadog ] } + - { role: consul } + - { role: container_kill_start } diff --git a/ansible/cream-http.yml b/ansible/cream-http.yml new file mode 100644 index 00000000..9c30d1fa --- /dev/null +++ b/ansible/cream-http.yml @@ -0,0 +1,13 @@ +--- +- hosts: rabbitmq +- hosts: consul + +- hosts: cream + vars_files: + - group_vars/alpha-cream-http.yml + roles: + - role: notify + rollbar_token: "{{ cream_http_rollbar_token }}" + tags: [ notify ] + - { role: builder, tags: [ build ] } + - { role: container_kill_start } diff --git a/ansible/cream-worker.yml b/ansible/cream-worker.yml new file mode 100644 index 00000000..b05c05f2 --- /dev/null +++ b/ansible/cream-worker.yml @@ -0,0 +1,13 @@ +--- +- hosts: rabbitmq +- hosts: consul + +- hosts: cream + vars_files: + - group_vars/alpha-cream-worker.yml + roles: + - role: notify + rollbar_token: "{{ cream_http_rollbar_token }}" + tags: [ notify ] + - { role: builder, tags: [ build ] } + - { role: container_kill_start } diff --git a/ansible/cream.yml b/ansible/cream.yml new file mode 100644 index 00000000..d9b74b1d --- /dev/null +++ b/ansible/cream.yml @@ -0,0 +1,2 @@ +- include: cream-worker.yml +- include: cream-http.yml diff --git a/ansible/datadog.yml b/ansible/datadog.yml new file mode 100644 index 00000000..0a3cc17e --- /dev/null +++ b/ansible/datadog.yml @@ -0,0 +1,5 @@ +--- +- hosts: all + roles: + - { role: apt_update } + - { role: datadog, tags: ["datadog"] } diff --git a/ansible/delta-hosts/docks.js b/ansible/delta-hosts/docks.js new file mode 100755 index 00000000..3a7e1552 --- /dev/null +++ b/ansible/delta-hosts/docks.js @@ -0,0 +1,75 @@ +#!/usr/bin/env node + +'use strict'; + +var aws = require('aws-sdk'); +var ec2 = new aws.EC2({ + accessKeyId: 'AKIAJ3RCYU6FCULAJP2Q', + secretAccessKey: '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', + region: 'us-west-2' +}); + +var params = { + Filters: [ + // Only search for docks in the cluster security group + { + Name: 'instance.group-id', + Values: ['sg-6cd7fb08'] + }, + // Only fetch instances that are tagged as docks + { + Name: 'tag:role', + Values: ['dock'] + }, + // Only fetch running instances + { + Name: 'instance-state-name', + Values: ['running'] + } + ] +}; + +ec2.describeInstances(params, function (err, data) { + if (err) { + console.error("An error occurred: ", err); + process.exit(1); + } + + // Get a set of instances from the describe response + var instances = []; + data.Reservations.forEach(function (res) { + res.Instances.forEach(function (instance) { + instances.push(instance); + }); + }); + + // Map the instances to their private ip addresses + // NOTE This will work locally because of the wilcard ssh proxy in the config + var hosts = instances.map(function (instance) { + return instance.PrivateIpAddress; + }); + + var hostVars = {}; + instances.forEach(function (instance) { + for (var i = 0; i < 
instance.Tags.length; i++) { + if (instance.Tags[i].Key === 'org') { + hostVars[instance.PrivateIpAddress] = { + host_tags: instance.Tags[i].Value + ',build,run' + }; + } + } + }); + + // Output the resulting JSON + // NOTE http://docs.ansible.com/ansible/developing_inventory.html + console.log(JSON.stringify( + { + docks: { + hosts: hosts + }, + _meta : { + hostvars : hostVars + } + } + )); +}); diff --git a/ansible/delta-hosts/hosts b/ansible/delta-hosts/hosts new file mode 100644 index 00000000..92536b47 --- /dev/null +++ b/ansible/delta-hosts/hosts @@ -0,0 +1,191 @@ +[bastion] +delta-bastion + +[hipache] +delta-hipache httpsCheckForBackend80=false prependIncomingPort=true subDomainDepth=4 + +[userland] +delta-userland + +[mongodb] +delta-mongo-a +delta-mongo-b +delta-mongo-c + +[arithmancy:vars] +arithmancy_pg_database=arithmancy +arithmancy_pg_host=arithmancy-delta.cnksgdqarobf.us-west-2.rds.amazonaws.com +arithmancy_pg_pass=y7A7k7vNf7nkVv4d73YRLYybg3JcA92 +arithmancy_pg_port=5432 +arithmancy_pg_user=arithmancy_delta + +[ingress] +delta-ingress + +[api_group:children] +worker +api +socket-server + +[api] +delta-api + +[consul] +delta-consul-a +delta-consul-b +delta-consul-c + +[docker-listener] +delta-dock-services + +[vault] +delta-consul-a +delta-consul-b +delta-consul-c + +[worker] +delta-api-worker + +[socket-server] +delta-api-socket + +[socket-server-proxy] +delta-api-socket-proxy + +[deployer] +delta-app-services + +[docks] + +[dock] + +[agreeable-egret] +delta-app-services + +[eru] +delta-app-services + +[navi] +delta-navi + +[mongo-navi] +delta-navi + +[link] +delta-navi + +[charon] +delta-app-services + +[khronos] +delta-dock-services + +[optimus] +delta-dock-services + +[detention] +delta-app-services + +[palantiri] +delta-dock-services + +[rabbitmq] +delta-rabbit + +[web] +delta-app-services + +[marketing] +delta-app-services + +[metabase] +delta-metabase + +[redis] +delta-redis + +[redis-slave] +delta-redis-slave + +[shiva] +delta-app-services + +[registry] +delta-registry + +[sauron] +delta-dock-services + +[swarm-manager] +delta-swarm-manager + +[metis] +delta-app-services + +[drake] +delta-app-services + +[pheidi] +delta-app-services + +[github-varnish] +delta-app-services + +[big-poppa] +delta-app-services + +[cream] +delta-app-services + +[arithmancy] +delta-app-services + +[prometheus] +delta-prometheus + +[delta:children] +agreeable-egret +api +arithmancy +bastion +big-poppa +charon +consul +cream +dock +docker-listener +docks +drake +eru +github-varnish +hipache +ingress +khronos +metabase +metis +mongodb +navi +optimus +pheidi +prometheus +rabbitmq +redis +redis-slave +registry +sauron +shiva +socket-server +socket-server-proxy +swarm-manager +userland +web +worker + +[local] +127.0.0.1 + +[ec2] +local + +[targets] +localhost ansible_connection=local bastion_name=delta-bastion diff --git a/ansible/delta-hosts/variables b/ansible/delta-hosts/variables new file mode 100644 index 00000000..cc6d2ebe --- /dev/null +++ b/ansible/delta-hosts/variables @@ -0,0 +1,210 @@ +[agreeable-egret:vars] +agreeable_egret_port=65520 +egret_pg_host=delta-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com +egret_pg_port=5432 +egret_pg_user=egret +egret_pg_pass=wwHQ5B4RfY9iKS3m +egret_pg_database=egret + +[api_group:vars] +api_aws_access_key_id=AKIAJWSSSJYUXKNW2ZDA +api_aws_secret_access_key=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 +api_github_client_id=d42d6634d4070c9d9bf9 +api_github_client_secret=d6cfde38fef5723e25e52629e3d25825c8a704c9 
+api_github_deploy_keys_bucket=runnable.deploykeys.production +api_mixpanel_app_id=57260a5b6fc972e9c69184882efd009e +api_mongo_auth=api:72192e5a-a5e1-11e5-add9-0270db32f7ad +api_mongo_database=delta +api_mongo_replset_name=delta-rs0 +api_new_relic_app_name=delta-api-production +api_rollbar_key=a90d9c262c7c48cfabbd32fd0a1bc61c +api_workers_rollbar_key=3edfe8fe4fd640ae9fdbbe08fcb9f121 +api_s3_context_bucket=runnable.context.resources.production +api_intercom_app_id=wqzm3rju +api_intercom_api_key=46e75ada5d21f248787689b35fe80e11efe9303a + +[big-poppa:vars] +big_poppa_new_relic_app_name=delta-big-poppa +big_poppa_pg_pass=189a4a90-60d9-11e6-92c9-7b1a5b7c8417 +big_poppa_pg_host=delta-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com +big_poppa_pg_port=5432 +big_poppa_pg_user=cwallace +big_poppa_github_token=e11a1264130fb62ce045bf03118bf123f980c205 +big_poppa_http_rollbar_token=1f1eeea0b1334aaeb50fb7bc4a43241a +big_poppa_worker_rollbar_token=98cabb8440024e3a8242cf3220b802c9 +big_poppa_intercom_key=0df3322fda46a34e93ba6a43603a8fc3ef740d84 +big_poppa_intercom_id=wqzm3rju +big_poppa_mongo_auth=api:72192e5a-a5e1-11e5-add9-0270db32f7ad +big_poppa_mongo_database=delta +big_poppa_mongo_replset_name=delta-rs0 + +[cream:vars] +cream_hello_runnable_github_token=5ff90dacd14fb1ff202c6b48c1393ba713adf483 +cream_http_rollbar_token=baa03dbd9f814d14ab0c99863ed6a4fb +cream_worker_rollbar_token=87924b881c3143968cdb059fe41acbc3 +cream_intercom_key=173c1b366d11a3ef0f641c6b3327914368e67095 +cream_intercom_id=wqzm3rju +cream_stripe_secret_key=sk_live_ZWLZtu5rxJ0ylSoF8xrHtNOw +cream_stripe_publishable_key=pk_live_5yYYZlYIwY3LwvKFaXY0jNlm + +[docks:vars] +docker_config=docks +docks_rollbar_key=d1af6567ed0f464fb1d676f38fd31751 + +[dock:vars] +docks_rollbar_key=d1af6567ed0f464fb1d676f38fd31751 + +[drake:vars] +drake_port=80 +drake_http_rollbar_token=52ad749ddb8e47b2a8e15312b6b300fb +drake_worker_rollbar_token=14152b8572034943b714da27ca607698 + +[eru:vars] +eru_aws_access_key_id=AKIAIFCVEISSC5JMPWDA +eru_aws_environment=delta +eru_aws_secret_access_key=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 +eru_github_id=46a23f5f99f0aa9460f8 +eru_github_secret=a0336d72e3d540fb9fbbed2c123a81e1cb329dab +eru_intercom_id=wqzm3rju +eru_intercom_key=ro-f3ce0069697542d73bacd5cea9fba1a48d069e39 +eru_mongodb_database=delta +eru_mongodb_password=tilde-rawboned-lotus-hideaway-diastole +eru_mongodb_replset=delta-rs0 +eru_mongodb_username=eru +eru_subdomain=eru + +[khronos:vars] +khronos_intercom_api_key=14771f14efb617900724a16345e57beb55ba9beb +khronos_intercom_app_id=wqzm3rju +khronos_canary_github_branches_instance_id=56f05754a443842400b66a53 +khronos_canary_logs_instance_id=56f07f5c1e089a200077f2a3 +khronos_canary_rebuild_instance_id=571b39b9d35173300021667d +khronos_canary_rebuild_navi_url=http://canary-build-staging-runnabletest.runnableapp.com:8000/ +# @runnable-khronos github token +khronos_canary_token=4e2e050653b3927e53bb80bfd23d8aa17019426c +khronos_canary_failover_token=84549e76545306de61d47f23b1d1831e1c95a400 +khronos_mongo_auth=api:72192e5a-a5e1-11e5-add9-0270db32f7ad +khronos_mongo_database=delta +khronos_mongo_replset_name=delta + +[metabase:vars] +metabase_pg_database=metabase +metabase_pg_host=delta-metabase.cnksgdqarobf.us-west-2.rds.amazonaws.com +metabase_pg_pass=nifty-prowl-sought-muscles +metabase_pg_port=5432 +metabase_pg_user=metabase + +[metis:vars] +metis_rollbar_key=fdc8565a7ce64c6d9432c34be425937c + +[navi:vars] +navi_new_relic_app_name=delta-navi +navi_cookie_secret=e80173940e7bdd84734e868b6ea054a0 
+navi_intercom_api_key=6f6400402170e78fa1ad4418608aacc63512122b +navi_intercom_app_id=wqzm3rju + + +[optimus:vars] +optimus_aws_access_id=AKIAJWSSSJYUXKNW2ZDA +optimus_aws_secret_id=tyvGiCbj5jWCiQnMLvfrfD64dFo8i6prkdcga86y +optimus_github_deploy_keys_bucket=runnable.deploykeys.production + +[palantiri:vars] +palantiri_rollbar_key=f675e9090d6f483ca4e742af2c7f2f83 + +[pheidi:vars] +pheidi_intercom_admin_id=22382 +pheidi_intercom_id=wqzm3rju +pheidi_intercom_key=852895329aa77696b65491876eefe0eb386482c5 +pheidi_mongo_auth=pheidi:septa-mauve-enquiry-clipper-history +pheidi_mongo_database=delta +pheidi_mongo_replset_name=delta +pheidi_runnabot_tokens=d22a5ec75f66a0f472f3d856eb01df2321115e0b,33d8accaa7afeace8f44e3a6de409097ea1e67b8,82a7f2838852c04b75bee2b1a8ed236e2f9fc1ef,cab912709f7eb5bb8c21f9177f845561222d3933 + +[prometheus:vars] +pager_duty_key=ky6EFGL7Zh-xH8x1qEJV + +[sauron:vars] +sauron_rollbar_key=83157ae2d50d4b6398e404c0b9978d26 + +[registry:vars] +registry_s3_access_key=AKIAJKCSFJCHFDITLBUQ +registry_s3_bucket=runnableimages.alpha +registry_s3_secret_key=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 +registry_s3_region=us-west-2 + +[shiva:vars] +aws_access_key_id=AKIAJ3RCYU6FCULAJP2Q +aws_secret_access_key=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 + +[swarm-manager:vars] +aws_access_key=AKIAIB5W3E6HR6Q52HEQ +aws_secret_key=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 +environment_name=delta + +[vault:vars] +vault_hello_runnable_github_token=88ddc423c2312d02a8bbcaad76dd4c374a30e4af +vault_aws_access_key_id=AKIAJ7R4UIM45KH2WGWQ +vault_aws_secret_key=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 +vault_aws_region=us-east-1 + +[marketing:vars] +marketing_bucket=runnable.com +marketing_aws_access_key=AKIAIPPPY2JIOHX7QVCA +marketing_aws_secret_key=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 + +[delta:vars] +ansible_ssh_private_key_file=~/.ssh/delta.pem +api_hello_runnable_github_token=88ddc423c2312d02a8bbcaad76dd4c374a30e4af +bastion_sshd_port=60506 +datadog_mongodb_pwd=sqa3WBgkCgZsFZuex0kBNahZ +datadog_mongodb_user=datadog +datadog_tags=env:delta +domain=runnable.io +env=delta +github_domain=api.github.com +github_protocol=https +is_github_enterprise=false +mongo_port=27017 +new_relic_license_key=338516e0826451c297d44dc60aeaf0a0ca4bfead +node_env=production-delta +pg_host=delta-infradb.cnksgdqarobf.us-west-2.rds.amazonaws.com +pg_pass=59a5524e-a772-11e5-bedc-1bdc0db458b3 +pg_user=astral +rabbit_password=wKK7g7NWKpQXEeSzyWB7mIpxZIL8H2mDSf3Q6czR3Vk +rabbit_username=o2mdLh9N9Ke2GzhoK8xsruYPhIQFN7iEL44dQJoq7OM +registry_host=10.8.4.126 +user_content_domain=runnableapp.com +vault_auth_token=578c9767-5af8-8490-0954-5d330f27b088 +vault_token_01=0d324dc7d4cbd94790fd08809d06fb1e28e21e185910081c7646e3e49924f6ed01 +vault_token_02=42dc8a69df174e77eb47a63b6ef4709bec57101cb1bff11a71c91b73b8bc046102 +vault_token_03=47f3cb74f5374fa3c51c90fd25e3d4cc851034de97584995fce5fc5382342f1f03 + +[ec2:vars] +aws_custid=437258487404 +bastion_sshd_port=60506 +region=us-west-2 +sg_hipache=sg-7fd7fb1b +sg_api=sg-0bf7db6f +sg_app_services=sg-37198950 +sg_bastion=sg-99d6fafd +sg_consul=sg-0f981868 +sg_dock=sg-6cd7fb08 +sg_dock_services=sg-2c18884b +sg_hipache=sg-7fd7fb1b +sg_mongo=sg-00d7fb64 +sg_nat=sg-3082cc54 +sg_navi=sg-e5d7fb81 +sg_rabbit=sg-92d1fdf6 +sg_rds=sg-65d2fe01 +sg_redis=sg-6ed2fe0a +sg_registry=sg-ec1f8f8b +sg_services=sg-71d2fe15 +sg_userland=sg-41d2fe25 +sg_web=sg-51d2fe35 +vpc_id=vpc-864c6be3 + +[web:vars] +web_intercom_id=wqzm3rju +web_sift_public_key=27e9da5c97 +web_aws_bucket_region=us-west-2 diff --git 
a/ansible/deployer.yml b/ansible/deployer.yml new file mode 100644 index 00000000..38ca6e94 --- /dev/null +++ b/ansible/deployer.yml @@ -0,0 +1,16 @@ +--- +- hosts: rabbitmq + +- hosts: deployer + vars_files: + - group_vars/alpha-deployer.yml + roles: + - role: copy_secret_file + file_names: + - "{{ env }}.pem" + - id_rsa + - vault-pass + + + - { role: build_with_dockerfile } + - { role: container_kill_start } diff --git a/ansible/detention.yml b/ansible/detention.yml new file mode 100644 index 00000000..c4f5a82f --- /dev/null +++ b/ansible/detention.yml @@ -0,0 +1,12 @@ +--- +- hosts: redis +- hosts: consul + +- hosts: detention + vars_files: + - "group_vars/alpha-detention.yml" + roles: + - { role: notify, tags: "notify" } + - { role: redis_key, tags: ["setup", "redis_key"] } + - { role: builder, tags: "build" } + - { role: container_kill_start } diff --git a/ansible/dock-init.yml b/ansible/dock-init.yml new file mode 100644 index 00000000..0e7d7040 --- /dev/null +++ b/ansible/dock-init.yml @@ -0,0 +1,16 @@ +--- +- hosts: consul + +- hosts: "{{ dock }}" + vars_files: + - group_vars/alpha-dock-init.yml + roles: + - { role: notify, tags: [notify] } + - { role: package-dock, tags: [dock, package] } + - { role: package-aws, tags: [dock, package] } + - { role: docker, tags: [docker] } + - { role: datadog, tags: [datadog] } + - { role: git_repo } + - { role: dock-init } + - { role: consul_value, tags: [consul_value] } + - { role: ulimits, tags: [ulimits] } diff --git a/ansible/dock-services.yml b/ansible/dock-services.yml new file mode 100644 index 00000000..0e2ec173 --- /dev/null +++ b/ansible/dock-services.yml @@ -0,0 +1,6 @@ +- include: khronos.yml git_branch="{{ khronos_branch }}" +- include: optimus.yml git_branch="{{ optimus_branch }}" +- include: palantiri.yml git_branch="{{ palantiri_branch }}" +- include: sauron.yml git_branch="{{ sauron_branch }}" +- include: swarm-manager.yml +# run with: `--extra-vars "@current_versions.yml"` diff --git a/ansible/dock.yml b/ansible/dock.yml new file mode 100644 index 00000000..9eea4fb6 --- /dev/null +++ b/ansible/dock.yml @@ -0,0 +1,31 @@ +--- +- hosts: registry + +- hosts: localhost + connection: local + tasks: + - fail: msg="`dock` (target dock) needs to be defined to run this role" + when: dock is not defined + + - add_host: + name={{ dock }} + groups=dock + +- include: charon.yml git_branch="v4.0.0" +- include: dock-init.yml git_branch="v10.1.0" +- include: krain.yml git_branch="v0.3.0" + +- hosts: "{{ dock }}" + tasks: + - name: remove datadog agent + become: true + apt: + name=datadog-agent + state=absent + purge=yes + force=yes + + roles: + - { role: install-ssm } + - { role: dock-images } + - { role: docks-psad } diff --git a/ansible/docker-files/base/Dockerfile b/ansible/docker-files/base/Dockerfile new file mode 100644 index 00000000..ce30b6ad --- /dev/null +++ b/ansible/docker-files/base/Dockerfile @@ -0,0 +1,21 @@ +FROM dockerfile/ansible + +# Update aptitude with new repo +RUN apt-get update + +# Install software +RUN apt-get install -y build-essential wget make git + +# Make ssh dir +RUN mkdir /root/.ssh/ +ADD ./Test-runnable.pem /root/.ssh/Test-runnable.pem +ADD ./runnablevpc.pem /root/.ssh/runnablevpc.pem + +# add .ssh config file +ADD config /root/.ssh/config + +# Copy over private key, +ADD id_rsa /root/.ssh/id_rsa + +# set correct permissions +RUN chmod 600 /root/.ssh/id_rsa diff --git a/ansible/docker-files/base/README.md b/ansible/docker-files/base/README.md new file mode 100644 index 00000000..ba5dcbab --- /dev/null +++ 
b/ansible/docker-files/base/README.md @@ -0,0 +1 @@ +must add pem files before building diff --git a/ansible/docker-files/base/config b/ansible/docker-files/base/config new file mode 100644 index 00000000..32e89389 --- /dev/null +++ b/ansible/docker-files/base/config @@ -0,0 +1,2 @@ +Host github.com + StrictHostKeyChecking no \ No newline at end of file diff --git a/ansible/docker-files/base/id_rsa b/ansible/docker-files/base/id_rsa new file mode 100644 index 00000000..571e51ba --- /dev/null +++ b/ansible/docker-files/base/id_rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEAyvlXpOTIWmW9o8M7SsI5PRGg87Jj4qgBdS7/EHFhOAxkq5dF +2nIIrAg7h/QDgD6DpXiFa4W/a60gRjzSvyaunaf/cUdY3xKgldQFBpW+HQtKbj6r +L4gFMd6yFyyvBpr10wU5sXjOx2ZFnatEavdESyXor5c6ZdDRn2asEc38GfyQ6q2F +sZI1rHrbqlWncF8NBzhHcgcoD1trr+EP2A6/SY91xujMnjdOKAFKyAhv3eiaAWAR +I8wAwr5YmsxCCQqXidjMOFTE8vj+h5grmJIaYcNxbt/zheZMWa8JT6A2r6aKqHAp +FB9gA4Uid2yJd00aMVghXyPNxsXmENm9VURAXwIDAQABAoIBAQCkl31S4ZL2evpF +CdPS05zGwLzb4RTOf55vACUlmEtVv/cCjjC+6ynNFv/vRxhvrkHitNC8WEDZeBJr +4najOyLuBYVGvqx6eAhwdvV8WYFI3BcHPxph+NL9lbCkyj9EA/TDocEfAO/tEinx +qShZ9RkUNzFw8hkQ5zyVHhT6eyariohafy+WGovdis7y+iCrpu+QAbdMEUgzILzP +2wBTmAfraSkqMfvWqbBOpV+cOtKoHzLMDUWtNbXv2DAdNaS1By1q0ibGGIIApwAH +fiV2WdInB9mSFhOTkjSxwC36Q8ObfNz3JoZEF4Oi24ol0V6UtukUIvw1zvFFmLnM +yzNat61BAoGBAPnMIsE2ZHrG3PU+WMd8/TYvQI2yDy8afKXedGIPF8shmdJSk2Em +fAIQmfRyUh5MUc3HBaeOG1zbhU98E3PbHpKJugWhIiyY/94hV3vk4m1Ea4otAhrh +8jrut4UrsOi5ycg0L+M4pd+2nIslLGh0Ok1qwAVLlYzDbdlvNkWvmLfbAoGBANAD +kYo6AKvKQxXyyMtEY3E6Jz6sixXgXTZXuefd42/rc5fgkYIV7+eps1cZCyS8eLmZ +WJfHRUL01deS/4y7JABxiT7JJRF3kIjNikSMRDuyWwWdboZRYTP3Xxq6/Zyn+ik2 +YfdsHSsViP7m1NweD7Bl4sD4uAhD2E2AM/rMRPLNAoGBAM7bRCCaKWUezxVrlttD +3vAC+0Mbo8VO+s/kwp39f6egYKAx74fw9ZpEIymxjd03ODreyAjz/ibMRr4Gspkr +aErTMK++uu0RmjlZhPe9F+CCW0emc+rNFunEi0cFYk+morlethVE8N+sekAj/X1W +Wn2HnBIGgIZ7w7jehqP4gNAnAoGBAIp+db4kVCIMrpHEba/4PoU6hLqTwp60HRt7 +WQlpkG4Jw5KRMA/+bxY8xGzu/Z5d5bscwAF96T1lKaM3+fFG//dToF8vKQDyLEcJ +Ik/1W9CrNTMN2HKRaDhjMOzpJsXQiRw7WwqS7vCqi+W65moPkb6Od2SntE+8bB1d +iD4pFpbZAoGBAMiOuF0yG2u+DAMlQlVgTJwYEe1bt0AjFg2zkp7RdwNWN0WtjXZH +bDrW6UJXhk9RD+8pJNWGPfpMWDqro+1CSbLE//4qtPeKLNF6ho2sfJismPltrY+3 +GXTX4R/+mgksV0YYo3UJqQHNN9t36wtqsUTLIhCZiKf0sn4qiw/TmxhF +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/ansible/docker-files/base/id_rsa.pub b/ansible/docker-files/base/id_rsa.pub new file mode 100644 index 00000000..9b34dfd8 --- /dev/null +++ b/ansible/docker-files/base/id_rsa.pub @@ -0,0 +1,2 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9u533YdQnQwk97oMWgOohNnYfLOWbbu3HMM9cmIShQ8TGWpJLm4nnk0CcuwoZq3lfTEbfJcvFEGQtLfnw2UezB9JaoDlXYLGZYLeXYSSzN6xR5FSm2dENnYxyG9S9EgGhD/B12+RyaSEFQwQmerzlS04PGhkwkoFwFENC3fY2eme4fLQ9p6AWrdJ977kUWqGwAnpQNtgmIj+vUJJLwvHlfmCxMnCtru4rKyqSgmZBpaIxSwfHGQo+GgvE6e9LzF2bFHd/3895C0t2inxY7h7I6DaG5NTEKvoTPwJZXkZnQhx+e1RZtPoNJJ6iS7zqY7faXlFOQMqQnpwjQzgyd163 root@workstation.example.com +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9u533YdQnQwk97oMWgOohNnYfLOWbbu3HMM9cmIShQ8TGWpJLm4nnk0CcuwoZq3lfTEbfJcvFEGQtLfnw2UezB9JaoDlXYLGZYLeXYSSzN6xR5FSm2dENnYxyG9S9EgGhD/B12+RyaSEFQwQmerzlS04PGhkwkoFwFENC3fY2eme4fLQ9p6AWrdJ977kUWqGwAnpQNtgmIj+vUJJLwvHlfmCxMnCtru4rKyqSgmZBpaIxSwfHGQo+GgvE6e9LzF2bFHd/3895C0t2inxY7h7I6DaG5NTEKvoTPwJZXkZnQhx+e1RZtPoNJJ6iS7zqY7faXlFOQMqQnpwjQzgyd163 root@workstation.example.com diff --git a/ansible/docker-files/base/known_hosts b/ansible/docker-files/base/known_hosts new file mode 100644 index 00000000..def8373d --- /dev/null +++ b/ansible/docker-files/base/known_hosts @@ -0,0 +1,2 @@ 
+|1|x8MyY8C/3BFUfVhHsOmrMlMWRo8=|F/YA3KjVZBWe821D63JaVndqfIM= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== +|1|x8MyY8C/3BFUfVhHsOmrMlMWRo8=|F/YA3KjVZBWe821D63JaVndqfIM= ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ== diff --git a/ansible/docker-files/main/Dockerfile b/ansible/docker-files/main/Dockerfile new file mode 100644 index 00000000..02176171 --- /dev/null +++ b/ansible/docker-files/main/Dockerfile @@ -0,0 +1,19 @@ +FROM registry.runnable.com/runnable/ansible_base + +ENV REPO_BASE /repos + +RUN mkdir $REPO_BASE +WORKDIR /repos + +# clone repos for dockerfiles +RUN git clone git@github.com:CodeNow/api.git +RUN git clone git@github.com:CodeNow/runnable-angular.git +RUN git clone git@github.com:CodeNow/devops-scripts.git +# copy things into places +RUN mv $REPO_BASE/devops-scripts/ssh/config /root/.ssh/config +ENV ANSIBLE_HOSTS /repos/devops-scripts/ansible/hosts + +WORKDIR /repos/devops-scripts/ansible + +CMD ansible-playbook + diff --git a/ansible/docker-listener.yml b/ansible/docker-listener.yml new file mode 100644 index 00000000..b1816d4b --- /dev/null +++ b/ansible/docker-listener.yml @@ -0,0 +1,15 @@ +--- +- hosts: rabbitmq +- hosts: consul +- hosts: swarm-manager + +- hosts: docker-listener + vars_files: + - group_vars/alpha-docker-listener.yml + roles: + - role: notify + rollbar_token: "{{ docker_listener_rollbar_key }}" + tags: [ notify ] + - { role: docker_client } + - { role: builder, tags: "build" } + - { role: container_kill_start } diff --git a/ansible/drake-http.yml b/ansible/drake-http.yml new file mode 100644 index 00000000..59076bd5 --- /dev/null +++ b/ansible/drake-http.yml @@ -0,0 +1,14 @@ +--- +- hosts: redis +- hosts: rabbitmq +- hosts: consul + +- hosts: drake + vars_files: + - group_vars/alpha-drake-http.yml + roles: + - role: notify + rollbar_token: "{{ drake_http_rollbar_token }}" + tags: [ notify ] + - { role: builder, tags: [ build ] } + - { role: container_start } diff --git a/ansible/drake-worker.yml b/ansible/drake-worker.yml new file mode 100644 index 00000000..05b2c862 --- /dev/null +++ b/ansible/drake-worker.yml @@ -0,0 +1,13 @@ +--- +- hosts: rabbitmq +- hosts: consul + +- hosts: drake + vars_files: + - group_vars/alpha-drake-worker.yml + roles: + - role: notify + rollbar_token: "{{ drake_worker_rollbar_token }}" + tags: [ notify ] + - { role: builder, tags: [ build ] } + - { role: container_start } diff --git a/ansible/drake.yml b/ansible/drake.yml new file mode 100644 index 00000000..f1b7e442 --- /dev/null +++ b/ansible/drake.yml @@ -0,0 +1,2 @@ +- include: drake-http.yml +- include: drake-worker.yml diff --git a/ansible/eru.yml b/ansible/eru.yml new file mode 100644 index 00000000..cd81e69f --- /dev/null +++ b/ansible/eru.yml @@ -0,0 +1,20 @@ +--- +- hosts: consul +- hosts: mongodb +- hosts: redis +- hosts: rabbitmq +- hosts: big-poppa +- hosts: socket-server-proxy +- hosts: github-varnish + 
+- hosts: eru + vars_files: + - group_vars/alpha-eru.yml + roles: + - { role: notify, tags: [ notify ] } + - { role: builder, tags: [ build ] } + - role: container_start + - role: nginx-proxied-service + nginx_host: "{{ groups['socket-server-proxy'][0] }}" + target_ip_address: "{{ hostvars[groups['eru'][0]]['ansible_default_ipv4']['address'] }}" + templates: [ 11-eru-server.conf ] diff --git a/ansible/filter_plugins/split.py b/ansible/filter_plugins/split.py new file mode 100755 index 00000000..b4698d11 --- /dev/null +++ b/ansible/filter_plugins/split.py @@ -0,0 +1,29 @@ +from ansible import errors +import re + +def split_string(string, seperator=' '): + try: + return string.split(seperator) + except Exception, e: + raise errors.AnsibleFilterError('split plugin error: %s, string=%s' % str(e),str(string) ) + +def split_regex(string, seperator_pattern): + try: + return re.split(seperator_pattern, string) + except Exception, e: + raise errors.AnsibleFilterError('split plugin error: %s' % str(e)) + +def split_get_index(array, index): + try: + return array[index] + except Exception, e: + raise errors.AnsibleFilterError('split plugin error: %s, index=%s' % str(e),str(index)) + +class FilterModule(object): + ''' A filter to split a string into a list. ''' + def filters(self): + return { + 'split' : split_string, + 'split_regex' : split_regex, + 'split_get_index': split_get_index + } diff --git a/ansible/fluffy.yml b/ansible/fluffy.yml new file mode 100644 index 00000000..5962d48d --- /dev/null +++ b/ansible/fluffy.yml @@ -0,0 +1,14 @@ +--- +- hosts: redis +- hosts: consul + +- hosts: fluffy + vars_files: + - group_vars/alpha-fluffy.yml + roles: + - role: notify + rollbar_token: "{{ fluffy_rollbar_token }}" + tags: [ notify ] + - { role: redis_key, tags: [ setup, redis_key ] } + - { role: builder, tags: [ build ] } + - { role: container_start } diff --git a/ansible/gamma-hosts/docks.js b/ansible/gamma-hosts/docks.js new file mode 100755 index 00000000..34f14a3b --- /dev/null +++ b/ansible/gamma-hosts/docks.js @@ -0,0 +1,75 @@ +#!/usr/bin/env node + +'use strict'; + +var aws = require('aws-sdk'); +var ec2 = new aws.EC2({ + accessKeyId: 'AKIAJ3RCYU6FCULAJP2Q', + secretAccessKey: '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', + region: 'us-west-2' +}); + +var params = { + Filters: [ + // Only search for docks in the cluster security group + { + Name: 'instance.group-id', + Values: ['sg-577a0d33'] + }, + // Only fetch instances that are tagged as docks + { + Name: 'tag:role', + Values: ['dock'] + }, + // Only fetch running instances + { + Name: 'instance-state-name', + Values: ['running'] + } + ] +}; + +ec2.describeInstances(params, function (err, data) { + if (err) { + console.error("An error occurred: ", err); + process.exit(1); + } + + // Get a set of instances from the describe response + var instances = []; + data.Reservations.forEach(function (res) { + res.Instances.forEach(function (instance) { + instances.push(instance); + }); + }); + + // Map the instances to their private ip addresses + // NOTE This will work locally because of the wilcard ssh proxy in the config + var hosts = instances.map(function (instance) { + return instance.PrivateIpAddress; + }); + + var hostVars = {}; + instances.forEach(function (instance) { + for (var i = 0; i < instance.Tags.length; i++) { + if (instance.Tags[i].Key === 'org') { + hostVars[instance.PrivateIpAddress] = { + host_tags: instance.Tags[i].Value + ',build,run' + }; + } + } + }); + + // Output the resulting JSON + // NOTE 
http://docs.ansible.com/ansible/developing_inventory.html + console.log(JSON.stringify( + { + docks: { + hosts: hosts + }, + _meta : { + hostvars : hostVars + } + } + )); +}); diff --git a/ansible/gamma-hosts/hosts b/ansible/gamma-hosts/hosts new file mode 100644 index 00000000..7feb4a76 --- /dev/null +++ b/ansible/gamma-hosts/hosts @@ -0,0 +1,181 @@ +[bastion] +gamma-bastion + +[hipache] +gamma-hipache httpsCheckForBackend80=false prependIncomingPort=true subDomainDepth=4 + +[userland] +gamma-userland + +[mongodb] +gamma-mongo-a +gamma-mongo-b +gamma-mongo-c + +[api_group:children] +worker +api +socket-server + +[api] +gamma-api + +[big-poppa] +gamma-app-services + +[cream] +gamma-app-services + +[consul] +gamma-consul-a +gamma-consul-b +gamma-consul-c + +[deployer] +gamma-app-services + +[docker-listener] +gamma-dock-services + +[vault] +gamma-consul-a +gamma-consul-b +gamma-consul-c + +[worker] +gamma-api-worker + +[agreeable-egret] +gamma-app-services + +[eru] +gamma-app-services + +[navi] +gamma-navi + +[ingress] +gamma-ingress + +[link] +gamma-navi + +[mongo-navi] +gamma-navi + +[charon] +gamma-app-services + +[khronos] +gamma-dock-services + +[optimus] +gamma-dock-services + +[detention] +gamma-app-services + +[palantiri] +gamma-dock-services + +[rabbitmq] +gamma-rabbit + +[web] +gamma-app-services + +[marketing] +gamma-app-services + +[redis] +gamma-redis + +[redis-slave] +gamma-redis-slave + +[sauron] +gamma-dock-services + +[shiva] +gamma-app-services + +[socket-server] +gamma-api-socket + +[socket-server-proxy] +gamma-api-socket-proxy + +[registry] +gamma-registry + +[swarm-manager] +gamma-dock-services + +[metis] +gamma-app-services + +[drake] +gamma-app-services + +[pheidi] +gamma-app-services + +[github-varnish] +gamma-app-services + +[arithmancy] +gamma-app-services + +[docks] + +[dock] + +[prometheus] +gamma-dock-services + +[gamma:children] +agreeable-egret +api +arithmancy +bastion +big-poppa +charon +consul +cream +deployer +dock +docker-listener +docks +drake +eru +github-varnish +hipache +ingress +khronos +metis +mongodb +navi +optimus +pheidi +prometheus +rabbitmq +redis +redis-slave +registry +sauron +shiva +socket-server +socket-server-proxy +swarm-manager +userland +web +worker + +[local] +127.0.0.1 + +[ec2] +local + +[targets] +localhost ansible_connection=local bastion_name=gamma-bastion diff --git a/ansible/gamma-hosts/variables b/ansible/gamma-hosts/variables new file mode 100644 index 00000000..92692374 --- /dev/null +++ b/ansible/gamma-hosts/variables @@ -0,0 +1,188 @@ +[agreeable-egret:vars] +agreeable_egret_port=65520 +egret_pg_host=gamma-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com:32659 +egret_pg_port=32659 +egret_pg_user=egret +egret_pg_pass=b3UKjxbGblKZtG6c +egret_pg_database=egret + +[api_group:vars] +api_aws_access_key_id=AKIAIDC4WVMTCGV7KRVQ +api_aws_secret_access_key=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 +api_github_client_id=b6072dc57062faca7fcb +api_github_client_secret=ba73a9294dc4bfaa7ed02ba187f73918506e4293 +api_github_deploy_keys_bucket=runnable.deploykeys.production-beta +api_mixpanel_app_id=c41affa4b08818443365c526cbb51606 +api_mongo_auth=api:3f5210b8-8fe3-11e5-8e62-07b6eff19ecb +api_mongo_database=gamma +api_mongo_replset_name=gamma-rs0 +api_rollbar_key=a90d9c262c7c48cfabbd32fd0a1bc61c +api_workers_rollbar_key=3edfe8fe4fd640ae9fdbbe08fcb9f121 +api_s3_context_bucket=runnable.context.resources.production-beta + +[arithmancy:vars] +arithmancy_pg_database=arithmancy 
+arithmancy_pg_host=arithmancy-gamma.cnksgdqarobf.us-west-2.rds.amazonaws.com +arithmancy_pg_pass=arithmancy-gamma +arithmancy_pg_port=5432 +arithmancy_pg_user=arithmancy_gamma + +[big-poppa:vars] +big_poppa_pg_pass=JFmZDuVYPt9vUxk4DBjj +big_poppa_pg_host=gamma-big-poppa.cnksgdqarobf.us-west-2.rds.amazonaws.com:32659 +big_poppa_pg_port=32659 +big_poppa_pg_user=big_poppa +big_poppa_github_token=e11a1264130fb62ce045bf03118bf123f980c205 +big_poppa_http_rollbar_token=1f1eeea0b1334aaeb50fb7bc4a43241a +big_poppa_worker_rollbar_token=98cabb8440024e3a8242cf3220b802c9 +big_poppa_intercom_key=92d281df5653eb72f8f4b322cf0689be893d4965 +big_poppa_intercom_id=xs5g95pd +big_poppa_mongo_auth=api:3f5210b8-8fe3-11e5-8e62-07b6eff19ecb +big_poppa_mongo_database=gamma +big_poppa_mongo_replset_name=gamma-rs0 + +[cream:vars] +cream_hello_runnable_github_token=e1b68a2cbfefcfee87d72df6d1c07bce886454b9 +cream_http_rollbar_token=baa03dbd9f814d14ab0c99863ed6a4fb +cream_worker_rollbar_token=87924b881c3143968cdb059fe41acbc3 +cream_intercom_key=2548f3affd4923602dbf7bbab66eac377b5d3aac +cream_intercom_id=xs5g95pd +cream_stripe_secret_key=sk_test_4De8Zdkfcyb29swkMmjZUMRh +cream_stripe_publishable_key=pk_test_sHr5tQaPtgwiE2cpW6dQkzi8 + +[docks:vars] +docker_config=docks +docks_rollbar_key=d1af6567ed0f464fb1d676f38fd31751 + +[dock:vars] +docks_rollbar_key=d1af6567ed0f464fb1d676f38fd31751 + +[drake:vars] +drake_port=80 +drake_http_rollbar_token=52ad749ddb8e47b2a8e15312b6b300fb +drake_worker_rollbar_token=14152b8572034943b714da27ca607698 + +[eru:vars] +eru_aws_access_key_id=AKIAIFCVEISSC5JMPWDA +eru_aws_environment=gamma +eru_aws_secret_access_key=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 +eru_github_id=8abb08f83f6d1c52bd1a +eru_github_secret=74a23ee56486d57b14f292283cb04625f600917c +eru_intercom_id=xs5g95pd +eru_intercom_key=ro-9367eb0eb11542323371dcf25b8e260891f89b36 +eru_mongodb_database=gamma +eru_mongodb_password=success-nan-europium-rerun-sheep +eru_mongodb_replset=gamma-rs0 +eru_mongodb_username=eru +eru_subdomain=eru + +[khronos:vars] +khronos_intercom_api_key=9b4ee7b4f9818b36e368985fda0befa611928876 +khronos_intercom_app_id=xs5g95pd +khronos_mongo_auth=api:3f5210b8-8fe3-11e5-8e62-07b6eff19ecb +khronos_mongo_database=gamma +khronos_mongo_replset_name=gamma + +[metis:vars] +metis_rollbar_key=fdc8565a7ce64c6d9432c34be425937c + +[navi:vars] +navi_cookie_secret=c90e5fb4e7e511e597309a79f06e9478 + +[optimus:vars] +optimus_aws_access_id=AKIAJPA2ZYSVVA5V7XXQ +optimus_aws_secret_id=5V70AUxfIyHeLvlYZe0xaYevDAdgTOWOn5G7nHlt +optimus_github_deploy_keys_bucket=runnable.deploykeys.production-beta + +[palantiri:vars] +palantiri_rollbar_key=f675e9090d6f483ca4e742af2c7f2f83 + +[pheidi:vars] +pheidi_intercom_admin_id=22382 +pheidi_intercom_id=xs5g95pd +pheidi_intercom_key=ac207e26721127c7db60544b78988c75c2e20e12 +pheidi_mongo_auth=api:3f5210b8-8fe3-11e5-8e62-07b6eff19ecb +pheidi_mongo_database=gamma +pheidi_mongo_replset_name=gamma +pheidi_runnabot_tokens=ff3d259c5d988badbb692cc400998e46cdd5f1fc + +[sauron:vars] +sauron_rollbar_key=83157ae2d50d4b6398e404c0b9978d26 + +[registry:vars] +registry_s3_access_key=AKIAJK5EN7W6E62A3C3Q +registry_s3_bucket=runnableimages.beta +registry_s3_secret_key=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 +registry_s3_region=us-east-1 + +[shiva:vars] +aws_access_key_id=AKIAJ3RCYU6FCULAJP2Q +aws_secret_access_key=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 + +[swarm-manager:vars] +aws_access_key=AKIAIB5W3E6HR6Q52HEQ +aws_secret_key=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 +environment_name=gamma + +[vault:vars] 
+vault_hello_runnable_github_token=88ddc423c2312d02a8bbcaad76dd4c374a30e4af +vault_aws_access_key_id=AKIAJ7R4UIM45KH2WGWQ +vault_aws_secret_key=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 +vault_aws_region=us-east-1 + +[marketing:vars] +marketing_bucket=runnable-gamma.com +marketing_aws_access_key=AKIAICIWKIZEQCMDXLEA +marketing_aws_secret_key=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 + +[gamma:vars] +ansible_ssh_private_key_file=~/.ssh/gamma.pem +api_hello_runnable_github_token=88ddc423c2312d02a8bbcaad76dd4c374a30e4af +bastion_sshd_port=60709 +datadog_mongodb_pwd=sqa3WBgkCgZsFZuex0kBNahZ +datadog_mongodb_user=datadog +datadog_tags=env:gamma +domain=runnable-gamma.com +secondary_domain=runnable.com +env=gamma +github_domain=api.github.com +github_protocol=https +is_github_enterprise=false +mongo_port=27017 +node_env=production-gamma +pg_host=gamma-infrastructure-db.cnksgdqarobf.us-west-2.rds.amazonaws.com +pg_pass=e9G7zYRCxYmxG9HQ8J9x2BDB +rabbit_password=wKK7g7NWKpQXEeSzyWB7mIpxZIL8H2mDSf3Q6czR3Vk +rabbit_username=o2mdLh9N9Ke2GzhoK8xsruYPhIQFN7iEL44dQJoq7OM +registry_host=10.4.4.82 +user_content_domain=runnable.ninja +vault_auth_token=f9a39e92-99f3-66a0-a27d-a6e07717d30d +vault_token_01=2c0be2adf99931bc9ed443000e87bbcd0ef096dddc79f6add97ebe8fa7e93d2c05 +vault_token_02=3489b87c913058740537bbbd4503f3720d74f7cb0f4e0c30a9436e1e52a18d7003 +vault_token_03=ac4e1e9800cbf77283298d08172a2f0e46d0b7cbc457c47788d04768af12584a02 + +[ec2:vars] +aws_custid=437258487404 +bastion_sshd_port=60709 +region=us-west-2 +vpc_id=vpc-c53464a0 +sg_api=sg-3b0c7b5f +sg_app_services=sg-35d14052 +sg_bastion=sg-91eb81f5 +sg_consul=sg-899616ee +sg_dock=sg-577a0d33 +sg_dock_services=sg-12d14075 +sg_hipache=sg-e70c7883 +sg_mongo=sg-977a0df3 +sg_nat=sg-b595ffd1 +sg_navi=sg-45633421 +sg_rabbit=sg-44b7cb20 +sg_rds=sg-081e596c +sg_redis=sg-477b0c23 +sg_registry=sg-c8d140af +sg_userland=sg-12ce9876 +sg_web=sg-fe8bf49a + +[web:vars] +web_intercom_id=xs5g95pd +web_sift_public_key=eea9746dff diff --git a/ansible/getVersions.sh b/ansible/getVersions.sh new file mode 100755 index 00000000..d720c07c --- /dev/null +++ b/ansible/getVersions.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +consul=localhost:8500/v1 + +if [[ $1 != "" ]]; then + consul=$1:8500/v1 +fi + +kv=$consul/kv + +echo NODE_ENV: $(curl -s $kv/node/env | jq -r '.[0].Value' | base64 -d) + +echo image-builder: $(curl -s $kv/image-builder/version | jq -r '.[0].Value' | base64 -d) +echo docker-listener: $(curl -s $kv/docker-listener/version | jq -r '.[0].Value' | base64 -d) +echo krain: $(curl -s $kv/krain/version | jq -r '.[0].Value' | base64 -d) +echo sauron: $(curl -s $kv/sauron/version | jq -r '.[0].Value' | base64 -d) +echo charon: $(curl -s $kv/charon/version | jq -r '.[0].Value' | base64 -d) diff --git a/ansible/github-varnish.yml b/ansible/github-varnish.yml new file mode 100644 index 00000000..177f39c9 --- /dev/null +++ b/ansible/github-varnish.yml @@ -0,0 +1,8 @@ +--- + +- hosts: github-varnish + vars_files: + - group_vars/alpha-github-varnish.yml + roles: + - { role: build_with_dockerfile, tags: [ build ] } + - { role: container_kill_start } diff --git a/ansible/group_vars/all.yml b/ansible/group_vars/all.yml new file mode 100644 index 00000000..e91ff748 --- /dev/null +++ b/ansible/group_vars/all.yml @@ -0,0 +1,212 @@ +--- +# ops +ops_slack_channel_url: https://hooks.slack.com/services/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 +pager_duty_key: testkey + +# registry settings +registry_env: prod +registry_port: 80 + +# for docker stop old container +stop_time: 5 + +# For docker 
built containers +do_not_push: true + +# default for container args (which are optional) +container_run_args: "" + +# REPO_BASE for getting dockerfiles +repo_base: "{{ lookup('env','REPO_BASE') }}" + +dockerfile: basic_node/Dockerfile +docker_network: 172.17.0.0/16 +base_dockerfile: node_base + +docker_config: runnable + +# slack rooms to send notifications +slack_token: T029DEC10/B1RSX8LNS/qLLSYEEqkGddohOdE44eDf3j +slack_channels: [ '#ops' ] + +loggly_token: f673760d-e0b3-4a93-a15e-2862ea074f91 +loggly_password: TebejAcutHeH_Ch0tR9ru4anaT6CRu*3 +loggly_username: ops + +## +## cores and logs +## + +app_log_dir: /docker/app-logs +core_file_dir: /docker/app-cores + +## +## shared application configs +## + +# agreeable-egret +agreeable_egret_hostname: egret.{{ domain }} + +#angular +angular_url: https://app.{{ domain }} +mixpanel_proxy_url: https://mixpanel.{{ domain }} + +# api +api_github_hook_secret: 3V3RYTHINGisAW3S0ME! +api_hostname: api.{{ domain }} +api_port: 80 +api_socket_server_hostname: apisock.{{ domain }} +api_socket_server_rollbar_key: cad27f265e8e473c9b293615e7ed0f3a +api_url: https://{{ api_hostname }} + +# arithmancy +arithmancy_rollbar_key: 1fe145fdeb1a4526b48f5dd82b0d6eb5 + +# astral +pg_database: astral + +# big-poppa +big_poppa_pg_database: big_poppa +big_poppa_port: 7788 +big_poppa_host_address: "{{ hostvars[groups['big-poppa'][0]]['ansible_default_ipv4']['address'] }}" + +# cadvisor +cadvisor_port: 29007 + +# charon +charon_port: 53 + +# consul +consul_host_address: "{{ hostvars[groups['consul'][0]]['ansible_default_ipv4']['address'] }}" +consul_api_port: 8500 +consul_https_port: 8501 + +# cream +cream_port: 8899 +cream_host_address: "{{ hostvars[groups['cream'][0]]['ansible_default_ipv4']['address'] }}" + +# datadog +datadog_api_key: d3ab5d85bca924f9d4e33d307beacb4a +datadog_host_address: "{{ ansible_default_ipv4.address }}" +datadog_port: 8125 + +# detention +detention_host_address: "{{ hostvars[groups['detention'][0]]['ansible_default_ipv4']['address'] }}" +detention_port: 9123 +detention_hostname: detention.{{ domain }} + +# docker log driver +log_driver: syslog +log_facility: local7 + +# docker +docker_ca_path: /etc/ssl/docker/ca.pem +docker_cert_path: /etc/ssl/docker/cert.pem +docker_key_path: /etc/ssl/docker/key.pem +docker_port: 4242 + +# docker-listener +docker_listener_rollbar_key: 7b5924eccfff415d9fc00c051811e9d7 + +# drake +drake_hostname: drake.{{ domain }} + +# ec2 +aws_access_key: "AKIAIWRXWZ4P3MIMY3LA" +aws_secret_key: "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" + +# eru + +# fluffy +fluffy_port: 80 +fluffy_hostname: "fluffy.{{ domain }}" +fluffy_rollbar_token: 6eb108bab1f54687b9b023e5b485967a + +# github-varnish +github_varnish_host: "{{ hostvars[groups['github-varnish'][0]]['ansible_default_ipv4']['address'] }}" +github_varnish_port: 8765 + +# khronos +rollbar_khronos_token: 5a140828cce14410812d34a3ef936f25 + +# krain +krain_port: 3100 + +# marketing +marketing_url: https://{{ domain }} + +# metis +metis_port: 3000 + +# mongo +mongo_hosts: "{% for host in groups['mongodb'] %}{{ hostvars[host]['ansible_default_ipv4']['address'] }}:{{ mongo_port }}{% if not loop.last %},{% endif %}{% endfor %}" + +# navi +navi_host_address: "{{ hostvars[groups['navi'][0]]['ansible_default_ipv4']['address'] }}" +navi_http_port: 3567 +navi_rollbar_token: 719269e87b9b42848472542a8b2059ae + +# node_exporter +node_exporter_port: 29006 + +# optimus +optimus_port: 80 +optimus_hostname: optimus.{{ domain }} +optimus_rollbar_token: a49f9cce09ee46f09df3f603178fba75 + +# palantiri 
+palantiri_rollbar_token: ed971bbca9ea44a29268afc606ab8c7d + +# pheidi +pheidi_email_github_token: 115b4d854e34e8a5ba99ab73eefe4bf7a8944d6d +pheidi_rollbar_token: 6fc422ac645441bea7f6f14853eb01ab + +# prometheus +prometheus_port: 9090 +prometheus_aws_access_key: AKIAIFG37NSI6O2QMRRQ +prometheus_aws_secret_key: 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 + +# prometheus_alart +prometheus_alert_port: 9093 +prometheus_alert_url: http://{{ hostvars[groups['prometheus'][0]]['ansible_default_ipv4']['address'] }}:{{ prometheus_alert_port }} + +# rabbit +rabbit_host_address: "{{ hostvars[groups['rabbitmq'][0]]['ansible_default_ipv4']['address'] }}" +rabbit_port: 54321 + +# redis +redis_host_address: "{{ hostvars[groups['redis'][0]]['ansible_default_ipv4']['address'] }}" +redis_port: 6379 +redis_tls_port: 6380 + +registry_address: "{{ hostvars[groups['registry'][0]]['ansible_default_ipv4']['address'] }}" + +# sauron +sauron_rollbar_token: 83157ae2d50d4b6398e404c0b9978d26 + +# secrets +secret_root: /opt/runnable/secrets + +# shiva +pg_user: shiva +shiva_port: 3000 +shiva_rollbar_token: 0526a90faec845d796e1ef5361a00526 + +# swarm +swarm_master_port: 2375 +swarm_host_address: "{{ hostvars[groups['swarm-manager'][0]]['ansible_default_ipv4']['address'] }}" +swarm_container_name: swarm + +# navi/link +navi_mongo_database: navi +navi_mongo_host_address: "{{ hostvars[groups['mongo-navi'][0]]['ansible_default_ipv4']['address'] }}" +navi_mongo_port: 27017 + +npm_token: c76363e9-78e0-4667-82ac-e2ac01efcfe2 + +# local-vault +vault_local_port: 31836 +vault_addr: http://127.0.0.1:{{ vault_local_port }} + +# userland +userland_host_address: "{{ hostvars[groups['userland'][0]]['ansible_default_ipv4']['address'] }}" diff --git a/ansible/group_vars/alpha-agreeable-egret.yml b/ansible/group_vars/alpha-agreeable-egret.yml new file mode 100644 index 00000000..5cf08169 --- /dev/null +++ b/ansible/group_vars/alpha-agreeable-egret.yml @@ -0,0 +1,27 @@ +name: "agreeable-egret" + +container_image: "registry.runnable.com/runnable/{{ name }}" +container_tag: "{{ git_branch }}" +hosted_ports: ["{{ agreeable_egret_port }}"] +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.3.1" +npm_version: "3.7.5" + +# Exposes egret +redis_key: "frontend:{{ agreeable_egret_hostname }}" +is_redis_update_required: 'yes' + +# container settings +container_envs: > + -e HELLO_RUNNABLE_GITHUB_TOKEN={{ api_hello_runnable_github_token }} + -e NODE_ENV={{ node_env }} + -e RUNNABLE_API_URL={{ api_url }} + -e PORT={{ hosted_ports[0] }} + -e RUNNABLE_USER_CONTENT_DOMAIN={{ user_content_domain }} + -e POSTGRES_CONNECT_STRING=postgres://{{ egret_pg_user }}:{{ egret_pg_pass }}@{{ egret_pg_host }}/{{ egret_pg_database }} + +container_run_opts: > + -h {{ name }} + -d + -p {{ hosted_ports[0] }}:{{ hosted_ports[0] }} + {{ container_envs }} diff --git a/ansible/group_vars/alpha-api-base.yml b/ansible/group_vars/alpha-api-base.yml new file mode 100644 index 00000000..cc110628 --- /dev/null +++ b/ansible/group_vars/alpha-api-base.yml @@ -0,0 +1,64 @@ +container_tag: "{{ git_branch }}" + +node_version: 4.2.2 +npm_version: 4.0.3 + +repo: git@github.com:CodeNow/api.git +restart_policy: always +has_shrinkwrap: true + +# for sendGrid +sendgrid_key: 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 + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] +dockerfile_pre_install_commands: [ + 'echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc' +] + +# shared ENV's between api services +api_base_container_envs: >- + -e 
AWS_ACCESS_KEY_ID={{ api_aws_access_key_id }} + -e AWS_SECRET_ACCESS_KEY={{ api_aws_secret_access_key }} + -e BIG_POPPA_HOST=http://{{ big_poppa_host_address }}:{{ big_poppa_port }} + -e COOKIE_DOMAIN={{ domain }} + -e CREAM_HOST=http://{{ cream_host_address }}:{{ cream_port }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e DOCKER_BUILD_LINE_TIMEOUT_MS=3600000 + -e DOMAIN={{ domain }} + -e GITHUB_CALLBACK_URL={{ api_url }}/auth/github/callback + -e GITHUB_CLIENT_ID={{ api_github_client_id }} + -e GITHUB_CLIENT_SECRET={{ api_github_client_secret }} + -e GITHUB_DEPLOY_KEYS_BUCKET={{ api_github_deploy_keys_bucket }} + -e GITHUB_HOOK_URL={{ api_url }}/actions/github + -e GITHUB_WEBHOOK_URL=https://{{ drake_hostname }}/github + -e GITHUB_VARNISH_HOST={{ github_varnish_host }} + -e GITHUB_VARNISH_PORT={{ github_varnish_port }} + -e GITHUB_PROTOCOL=http + -e HELLO_RUNNABLE_GITHUB_TOKEN={{ api_hello_runnable_github_token }} + -e KRAIN_PORT={{ krain_port }} + -e MIXPANEL_APP_ID={{ api_mixpanel_app_id }} + -e MONGO_REPLSET_NAME={{ api_mongo_replset_name }} + -e MONGO=mongodb://{{ api_mongo_auth }}@{{ mongo_hosts }}/{{ api_mongo_database }} + -e NAVI_HOST=http://{{ navi_host_address }}:{{ navi_http_port }} + {% if api_new_relic_app_name is defined %} -e NEW_RELIC_APP_NAME={{ api_new_relic_app_name }} {% endif %} + {% if api_new_relic_app_name is defined %} -e NEW_RELIC_LICENSE_KEY={{ new_relic_license_key }} {% endif %} + {% if api_new_relic_app_name is defined %} -e NEW_RELIC_LOG_LEVEL=fatal {% endif %} + {% if api_new_relic_app_name is defined %} -e NEW_RELIC_NO_CONFIG_FILE=true {% endif %} + -e NODE_ENV={{ node_env }} + -e NUM_WORKERS=1 + -e OPTIMUS_HOST=http://{{ optimus_hostname }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e REDIS_IPADDRESS={{ redis_host_address }} + -e REDIS_PORT={{ redis_port }} + -e S3_CONTEXT_RESOURCE_BUCKET={{ api_s3_context_bucket }} + -e SENDGRID_KEY={{ sendgrid_key }} + -e SWARM_HOST=http://{{ swarm_host_address }}:{{ swarm_master_port }} + -e USER_CONTENT_DOMAIN={{ user_content_domain }} + {% if api_intercom_app_id is defined %} -e INTERCOM_APP_ID={{ api_intercom_app_id }} {% endif %} + {% if api_intercom_api_key is defined %} -e INTERCOM_API_KEY={{ api_intercom_api_key }} {% endif %} diff --git a/ansible/group_vars/alpha-api.yml b/ansible/group_vars/alpha-api.yml new file mode 100644 index 00000000..69e3a5c5 --- /dev/null +++ b/ansible/group_vars/alpha-api.yml @@ -0,0 +1,24 @@ +name: api + +container_image: registry.runnable.com/runnable/{{ name }} + +hosted_ports: [ "{{ api_port }}" ] + +# for redis +redis_key: frontend:{{ api_hostname }} +is_redis_update_required: yes + +memory_hard_limit: 1g + +# for container settings +container_envs: > + {{ api_base_container_envs }} + -e ROLLBAR_KEY={{ api_rollbar_key }} + +container_run_opts: > + -h {{ name }} + -d + -P + -v /opt/ssl/docker/{{ name }}:/etc/ssl/docker:ro + -v /opt/ssl/mongodb-client:/opt/ssl/mongodb-client:ro + {{ container_envs }} diff --git a/ansible/group_vars/alpha-arithmancy.yml b/ansible/group_vars/alpha-arithmancy.yml new file mode 100644 index 00000000..e8de6183 --- /dev/null +++ b/ansible/group_vars/alpha-arithmancy.yml @@ -0,0 +1,25 @@ +name: "arithmancy" + +container_image: "registry.runnable.com/runnable/{{ name }}" +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" 
+npm_version: "2.14.7" + +# container settings +container_envs: > + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e NODE_ENV={{ node_env }} + -e POSTGRES_CONNECT_STRING=postgres://{{ arithmancy_pg_user }}:{{ arithmancy_pg_pass }}@{{ arithmancy_pg_host }}/{{ arithmancy_pg_database }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e ROLLBAR_KEY={{ arithmancy_rollbar_key }} + +container_run_opts: > + -h {{ name }} + -d + -v /opt/ssl/docker/{{ name }}:/etc/ssl/docker:ro + {{ container_envs }} diff --git a/ansible/group_vars/alpha-big-poppa-http.yml b/ansible/group_vars/alpha-big-poppa-http.yml new file mode 100644 index 00000000..d3e06796 --- /dev/null +++ b/ansible/group_vars/alpha-big-poppa-http.yml @@ -0,0 +1,58 @@ +name: big-poppa-http + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +hosted_ports: ["{{ big_poppa_port }}"] +repo: "git@github.com:CodeNow/big-poppa.git" +node_version: 4.4.7 +npm_version: 2 + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "apt-get update", + "apt-get install postgresql-client=9.3+154ubuntu1 -y", + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +# for container settings +container_envs: > + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e GITHUB_TOKEN={{ big_poppa_github_token }} + -e PORT={{ hosted_ports[0] }} + -e INTERCOM_API_KEY={{ big_poppa_intercom_key | default('undefined') }} + -e INTERCOM_APP_ID={{ big_poppa_intercom_id | default('undefined') }} + -e NODE_ENV={{ node_env }} + -e PGUSER={{ big_poppa_pg_user }} + -e PGPASSWORD={{ big_poppa_pg_pass }} + -e PGHOST={{ big_poppa_pg_host }} + -e PGPORT={{ big_poppa_pg_port }} + -e PGDATABASE={{ big_poppa_pg_database }} + -e POSTGRES_CONNECT_STRING=postgres://{{ big_poppa_pg_user }}:{{ big_poppa_pg_pass }}@{{ big_poppa_pg_host }}/{{ big_poppa_pg_database }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e ROLLBAR_KEY={{ big_poppa_http_rollbar_token }} + -e MONGO=mongodb://{{ big_poppa_mongo_auth }}@{{ mongo_hosts }}/{{ big_poppa_mongo_database }} + -e MONGO_REPLSET_NAME={{ big_poppa_mongo_replset_name }} + -e GITHUB_VARNISH_HOST={{ github_varnish_host }} + -e GITHUB_VARNISH_PORT={{ github_varnish_port }} + -e GITHUB_PROTOCOL=http + {% if big_poppa_new_relic_app_name is defined %} -e NEW_RELIC_APP_NAME={{ big_poppa_new_relic_app_name }} {% endif %} + {% if big_poppa_new_relic_app_name is defined %} -e NEW_RELIC_LICENSE_KEY={{ new_relic_license_key }} {% endif %} + {% if big_poppa_new_relic_app_name is defined %} -e NEW_RELIC_LOG_LEVEL=fatal {% endif %} + {% if big_poppa_new_relic_app_name is defined %} -e NEW_RELIC_NO_CONFIG_FILE=true {% endif %} + -e STRIPE_API_KEY={{ cream_stripe_secret_key }} + -e STRIPE_PUBLISHABLE_KEY={{ cream_stripe_publishable_key }} + +container_run_opts: > + -h {{ name }} + -d + -p {{ hosted_ports[0] }}:{{ hosted_ports[0] }} + {{container_envs}} + +npm_start_command: run start-http diff --git a/ansible/group_vars/alpha-big-poppa-worker.yml b/ansible/group_vars/alpha-big-poppa-worker.yml new file mode 100644 index 00000000..653834e6 --- /dev/null +++ b/ansible/group_vars/alpha-big-poppa-worker.yml @@ -0,0 +1,46 @@ +name: 
big-poppa-worker + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/big-poppa.git" +node_version: 4.4.7 +npm_version: 2 + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "apt-get update", + "apt-get install postgresql-client=9.3+154ubuntu1 -y", + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +# for container settings +container_envs: > + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e GITHUB_TOKEN={{ big_poppa_github_token }} + -e NODE_ENV={{ node_env }} + -e INTERCOM_API_KEY={{ big_poppa_intercom_key | default('undefined') }} + -e INTERCOM_APP_ID={{ big_poppa_intercom_id | default('undefined') }} + -e POSTGRES_CONNECT_STRING=postgres://{{ big_poppa_pg_user }}:{{ big_poppa_pg_pass }}@{{ big_poppa_pg_host }}/{{ big_poppa_pg_database }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e ROLLBAR_KEY={{ big_poppa_worker_rollbar_token }} + -e MONGO=mongodb://{{ big_poppa_mongo_auth }}@{{ mongo_hosts }}/{{ big_poppa_mongo_database }} + -e MONGO_REPLSET_NAME={{ big_poppa_mongo_replset_name }} + -e GITHUB_VARNISH_HOST={{ github_varnish_host }} + -e GITHUB_VARNISH_PORT={{ github_varnish_port }} + -e GITHUB_PROTOCOL=http + -e STRIPE_API_KEY={{ cream_stripe_secret_key }} + -e STRIPE_PUBLISHABLE_KEY={{ cream_stripe_publishable_key }} + +container_run_opts: > + -h {{ name }} + -d + {{container_envs}} + +npm_start_command: run start-worker diff --git a/ansible/group_vars/alpha-cadvisor.yml b/ansible/group_vars/alpha-cadvisor.yml new file mode 100644 index 00000000..00ad3649 --- /dev/null +++ b/ansible/group_vars/alpha-cadvisor.yml @@ -0,0 +1,15 @@ +name: cadvisor + +container_image: google/{{ name }} +container_tag: "v0.24.1" + +memory_hard_limit: 100mb + +container_run_opts: > + --name={{ name }} + --volume=/:/rootfs:ro + --volume=/var/run:/var/run:rw + --volume=/sys:/sys:ro + --volume=/var/lib/docker/:/var/lib/docker:ro + --publish={{ cadvisor_port }}:8080 + --memory-reservation=50mb diff --git a/ansible/group_vars/alpha-charon.yml b/ansible/group_vars/alpha-charon.yml new file mode 100644 index 00000000..32f68fec --- /dev/null +++ b/ansible/group_vars/alpha-charon.yml @@ -0,0 +1,21 @@ +name: charon +app_name: "{{ name }}" +app_repo: git@github.com:CodeNow/{{ name }}.git +hosted_ports: ["{{ charon_port }}"] + +enviroment_vars: + API_URL: "{{ api_url }}" + API_TOKEN: 51c61b779f3de616a9639cfc44a22c79fbd8e328 + DATADOG_HOST: "{{ datadog_host_address }}" + DATADOG_PORT: "{{ datadog_port }}" + DOMAIN_FILTER: "{{ user_content_domain }}" + PORT: "{{ hosted_ports[0] }}" + REDIS_HOST: "{{ redis_host_address }}" + REDIS_PORT: "{{ redis_port }}" + +# consul values +consul_values: + - key: "{{ name }}/version" + value: "{{ git_branch }}" + - key: api/url + value: "{{ api_url }}" diff --git a/ansible/group_vars/alpha-consul-template-updater.yml b/ansible/group_vars/alpha-consul-template-updater.yml new file mode 100644 index 00000000..5888f8af --- /dev/null +++ b/ansible/group_vars/alpha-consul-template-updater.yml @@ -0,0 +1,17 @@ +name: consul-template-updater + +# container_kill_start settings +container_image: avthart/consul-template +container_tag: latest + +container_run_opts: > + -d + --name {{ name }} + -v /var/run/docker.sock:/tmp/docker.sock + -v {{ template_path }}:/tmp/template + 
--volumes-from {{ target_container_name }} + +container_run_args: > + -consul={{ consul_host_address }}:{{ consul_api_port }} + -wait=1s + -template="/tmp/template:{{ target_updater_file_path }}/{{ proxy_service_name }}.conf:{{ template_command }}" diff --git a/ansible/group_vars/alpha-consul.yml b/ansible/group_vars/alpha-consul.yml new file mode 100644 index 00000000..6f89f542 --- /dev/null +++ b/ansible/group_vars/alpha-consul.yml @@ -0,0 +1,42 @@ +name: consul + +# for database role +db_path: /etc/consul.d + +# for container_kill_start +pause_length_minutes: 3 + +container_image: runnable/consul +container_tag: v0.6.3 + +container_run_opts: > + -d + -h {{ inventory_hostname }} + -v /consul:/data + -v /etc/consul.d:/etc/consul.d:ro + -v /opt/runnable/consul/consul.json:/consul.json:ro + -v /opt/consul/server:/opt/consul/server:ro + -p {{ ansible_default_ipv4.address }}:8300:8300 + -p {{ ansible_default_ipv4.address }}:8301:8301 + -p {{ ansible_default_ipv4.address }}:8301:8301/udp + -p {{ ansible_default_ipv4.address }}:8302:8302 + -p {{ ansible_default_ipv4.address }}:8302:8302/udp + -p {{ ansible_default_ipv4.address }}:8400:8400 + -p {{ ansible_default_ipv4.address }}:{{ consul_api_port }}:8500 + -p {{ ansible_default_ipv4.address }}:{{ consul_https_port }}:{{ consul_https_port }} + --restart=always + +container_run_args: > + consul agent + --config-file /consul.json + --config-dir /etc/consul.d + +# some seed values +# pulled 2015/16/11 - Bryan +consul_seed: + - key: node/env + value: "{{ node_env }}" + - key: api/hostname + value: "{{ api_hostname }}" + - key: api/url + value: "{{ api_url }}" diff --git a/ansible/group_vars/alpha-cream-http.yml b/ansible/group_vars/alpha-cream-http.yml new file mode 100644 index 00000000..9528bafd --- /dev/null +++ b/ansible/group_vars/alpha-cream-http.yml @@ -0,0 +1,45 @@ +name: cream-http + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +hosted_ports: ["{{ cream_port }}"] +repo: "git@github.com:CodeNow/cream.git" +node_version: 4.5.0 +npm_version: 2 + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +# for container settings +container_envs: > + -e BIG_POPPA_HOST=http://{{ big_poppa_host_address }}:{{ big_poppa_port }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e GIT_BRANCH={{ git_branch }} + -e HELLO_RUNNABLE_GITHUB_TOKEN={{ cream_hello_runnable_github_token }} + -e PORT={{ hosted_ports[0] }} + {% if cream_intercom_key is defined %} -e INTERCOM_API_KEY={{ cream_intercom_key }} {% endif %} + {% if cream_intercom_id is defined %} -e INTERCOM_APP_ID={{ cream_intercom_id }} {% endif %} + -e NODE_ENV={{ node_env }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e RUNNABLE_API_URL={{ api_url }} + -e RUNNABLE_USER_CONTENT_DOMAIN={{ user_content_domain }} + -e ROLLBAR_KEY={{ cream_http_rollbar_token }} + -e STRIPE_API_KEY={{ cream_stripe_secret_key }} + -e STRIPE_PUBLISHABLE_KEY={{ cream_stripe_publishable_key }} + +container_run_opts: > + -h {{ name }} + -d + -p {{ hosted_ports[0] }}:{{ hosted_ports[0] }} + {{container_envs}} + +npm_start_command: run start-http diff --git a/ansible/group_vars/alpha-cream-worker.yml b/ansible/group_vars/alpha-cream-worker.yml new file mode 100644 index 00000000..f98956ff --- /dev/null 
+++ b/ansible/group_vars/alpha-cream-worker.yml @@ -0,0 +1,42 @@ +name: cream-worker + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/cream.git" +node_version: 4.5.0 +npm_version: 2 + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +# for container settings +container_envs: > + -e BIG_POPPA_HOST=http://{{ big_poppa_host_address }}:{{ big_poppa_port }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e GIT_BRANCH={{ git_branch }} + -e HELLO_RUNNABLE_GITHUB_TOKEN={{ cream_hello_runnable_github_token }} + -e NODE_ENV={{ node_env }} + {% if cream_intercom_key is defined %} -e INTERCOM_API_KEY={{ cream_intercom_key }} {% endif %} + {% if cream_intercom_id is defined %} -e INTERCOM_APP_ID={{ cream_intercom_id }} {% endif %} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e RUNNABLE_API_URL={{ api_url }} + -e RUNNABLE_USER_CONTENT_DOMAIN={{ user_content_domain }} + -e ROLLBAR_KEY={{ cream_worker_rollbar_token }} + -e STRIPE_API_KEY={{ cream_stripe_secret_key }} + -e STRIPE_PUBLISHABLE_KEY={{ cream_stripe_publishable_key }} + +container_run_opts: > + -h {{ name }} + -d + {{container_envs}} + +npm_start_command: run start-worker diff --git a/ansible/group_vars/alpha-deployer.yml b/ansible/group_vars/alpha-deployer.yml new file mode 100644 index 00000000..ad36be33 --- /dev/null +++ b/ansible/group_vars/alpha-deployer.yml @@ -0,0 +1,20 @@ +--- +name: deployer + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: git@github.com:CodeNow/devops-scripts.git + +container_envs: >- + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + +container_run_opts: > + -h {{ name }} + -d + -v {{ secret_root }}/{{ env }}.pem:/root/.ssh/{{ env }}.pem + -v {{ secret_root }}/vault-pass:/root/.ssh/vault-pass + -v {{ secret_root }}/id_rsa:/root/.ssh/id_rsa + {{ container_envs }} diff --git a/ansible/group_vars/alpha-detention.yml b/ansible/group_vars/alpha-detention.yml new file mode 100644 index 00000000..cb1cbf12 --- /dev/null +++ b/ansible/group_vars/alpha-detention.yml @@ -0,0 +1,29 @@ +name: "detention" + +container_image: "registry.runnable.com/runnable/{{ name }}" +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +hosted_ports: ["{{ detention_port }}"] +node_version: "0.12.0" +npm_version: "2.1.18" + +# for redis +redis_key: "frontend:{{ detention_hostname }}" +is_redis_update_required: 'yes' + +dockerfile_post_install_commands: [ + "npm run grunt" +] + +container_envs: > + -e ABSOLUTE_URL={{ detention_hostname }} + -e API_URL={{ api_url }} + -e HELLO_RUNNABLE_GITHUB_TOKEN={{ api_hello_runnable_github_token }} + -e NODE_ENV={{ node_env }} + -e PORT={{ hosted_ports[0] }} + +container_run_opts: > + -h {{ name }} + -d + -p {{ hosted_ports[0] }}:{{ hosted_ports[0] }} + {{ container_envs }} diff --git a/ansible/group_vars/alpha-dock-init.yml b/ansible/group_vars/alpha-dock-init.yml new file mode 100644 index 00000000..35153872 --- /dev/null +++ b/ansible/group_vars/alpha-dock-init.yml @@ -0,0 +1,11 @@ +name: dock-init +app_name: "{{ name }}" +app_repo: 
git@github.com:CodeNow/{{ name }}.git + +# for docker role +docker_config: dock + +# consul values +consul_values: + - key: "{{ name }}/version" + value: "{{ git_branch }}" diff --git a/ansible/group_vars/alpha-docker-listener.yml b/ansible/group_vars/alpha-docker-listener.yml new file mode 100644 index 00000000..758c06e6 --- /dev/null +++ b/ansible/group_vars/alpha-docker-listener.yml @@ -0,0 +1,29 @@ +name: docker-listener + +container_image: "registry.runnable.com/runnable/{{ name }}" +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" +npm_version: "2.14.7" + +restart_policy: "always" + +container_envs: > + -e CONSUL_HOST={{ consul_host_address }}:{{ consul_api_port }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e DOCKER_CERT_PATH=/etc/ssl/docker + -e LOGGLY_TOKEN={{ loggly_token }} + -e NODE_ENV={{ node_env }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e ROLLBAR_KEY={{ docker_listener_rollbar_key }} + -e SWARM_HOST={{ swarm_host_address }}:{{ swarm_master_port }} + +container_run_opts: > + -h {{ name }} + -d + -v /opt/ssl/docker/{{ name }}:/etc/ssl/docker:ro + {{ container_envs }} diff --git a/ansible/group_vars/alpha-drake-http.yml b/ansible/group_vars/alpha-drake-http.yml new file mode 100644 index 00000000..5f0d9429 --- /dev/null +++ b/ansible/group_vars/alpha-drake-http.yml @@ -0,0 +1,34 @@ +name: drake-http + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +hosted_ports: ["{{ drake_port }}"] +repo: "git@github.com:CodeNow/drake.git" +node_version: "4.4.7" +npm_version: 2 + +# Exposes drake (heh) +redis_key: "frontend:{{ drake_hostname }}" +is_redis_update_required: 'yes' + +# for container settings +container_envs: > + -e API_URL={{ api_url }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e DATADOG_SECRET=I_solemnly_swear_that_I_am_up_to_no_good + -e NODE_ENV={{ node_env }} + -e PORT={{ hosted_ports[0] }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e ROLLBAR_KEY={{ drake_http_rollbar_token }} + +container_run_opts: > + -h {{ name }} + -d + -P + {{container_envs}} + +npm_start_command: run start-http diff --git a/ansible/group_vars/alpha-drake-worker.yml b/ansible/group_vars/alpha-drake-worker.yml new file mode 100644 index 00000000..24ad29ae --- /dev/null +++ b/ansible/group_vars/alpha-drake-worker.yml @@ -0,0 +1,27 @@ +name: drake-worker + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/drake.git" +node_version: "4.4.7" +npm_version: 2 + +# for container settings +container_envs: > + -e API_URL={{ api_url }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e LOG_LEVEL=info + -e NODE_ENV={{ node_env }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e ROLLBAR_KEY={{ drake_worker_rollbar_token }} + +container_run_opts: > + -h {{ name }} + -d + {{container_envs}} + +npm_start_command: run start-worker diff --git a/ansible/group_vars/alpha-eru.yml b/ansible/group_vars/alpha-eru.yml new file mode 
100644 index 00000000..1d427309 --- /dev/null +++ b/ansible/group_vars/alpha-eru.yml @@ -0,0 +1,64 @@ +--- +name: eru + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: git@github.com:CodeNow/{{ name }}.git +hosted_ports: [ "5501", "5502" ] +node_version: 4.4.4 +npm_version: 3.8 +has_shrinkwrap: true + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}", + "BIG_POPPA_HOST {{ big_poppa_host_address }}:{{ big_poppa_port }}", + "RABBITMQ_HOSTNAME {{ rabbit_host_address }}", + "RABBITMQ_PASSWORD {{ rabbit_password }}", + "RABBITMQ_PORT {{ rabbit_port }}" +] +dockerfile_pre_install_commands: [ + 'echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc' +] + +dockerfile_post_install_commands: +- apt-get update && apt-get install -y supervisor +- npm run build + +container_envs: >- + -e AWS_ACCESS_KEY={{ eru_aws_access_key_id }} + -e AWS_ENVIRONMENT={{ eru_aws_environment }} + -e AWS_SECRET_KEY={{ eru_aws_secret_access_key }} + -e BIG_POPPA_HOST={{ big_poppa_host_address }}:{{ big_poppa_port }} + -e CONSUL_HOST={{ consul_host_address }}:{{ consul_api_port }} + -e DOMAIN={{ eru_subdomain }}.{{ domain }} + -e GITHUB_CLIENT_ID={{ eru_github_id }} + -e GITHUB_CLIENT_SECRET={{ eru_github_secret }} + -e GITHUB_VARNISH_HOST={{ github_varnish_host }} + -e GITHUB_VARNISH_PORT={{ github_varnish_port }} + -e INTERCOM_API_KEY={{ eru_intercom_key }} + -e INTERCOM_APP_ID={{ eru_intercom_id }} + -e LOG_ENVIRONMENT={{ node_env }} + -e LOG_LEVEL=trace + -e MONGODB_DATABASE={{ eru_mongodb_database }} + -e MONGODB_HOSTS={{ mongo_hosts }} + -e MONGODB_PASSWORD={{ eru_mongodb_password }} + -e MONGODB_REPLSET={{ eru_mongodb_replset }} + -e MONGODB_USERNAME={{ eru_mongodb_username }} + -e NODE_ENV=production + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e REDIS_HOSTNAME={{ redis_host_address }} + -e REDIS_PORT={{ redis_port }} + -e RUNNABLE_DOMAIN={{ domain }} + -e USER_CONTENT_DOMAIN={{ user_content_domain }} + +container_run_opts: > + -h {{ name }} + -d + -P + -v /var/log:/var/log:rw + {{ container_envs }} + +container_run_args: bash -c "supervisord --configuration supervisord.conf && sleep 10 && tail -n 100 -qf /tmp/*std*.log /tmp/supervisord.log" diff --git a/ansible/group_vars/alpha-fluffy.yml b/ansible/group_vars/alpha-fluffy.yml new file mode 100644 index 00000000..8d1efa9e --- /dev/null +++ b/ansible/group_vars/alpha-fluffy.yml @@ -0,0 +1,30 @@ +name: "fluffy" + +container_image: "registry.runnable.com/runnable/{{ name }}" +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +hosted_ports: ["{{ fluffy_port }}"] +node_version: "4.2.2" +npm_version: "2.1.18" + +# for redis +redis_key: "frontend:{{ fluffy_hostname }}" +is_redis_update_required: 'yes' + +dockerfile_post_install_commands: [ + "git clone -b master --single-branch git@github.com:CodeNow/devops-scripts /opt/devops-scripts", + "git clone -b bin-bash --single-branch git@github.com:CodeNow/docks-cli /opt/docks-cli", + "cd /opt/docks-cli && npm install && npm link ." 
+] + +# container settings +container_envs: > + -e PORT={{ hosted_ports[0] }} + -e NODE_ENV={{ node_env }} + -e DEVOPS_SCRIPTS_PATH="opt/devops-scripts" + +container_run_opts: > + -h {{ name }} + -d + -P + {{container_envs}} diff --git a/ansible/group_vars/alpha-github-varnish.yml b/ansible/group_vars/alpha-github-varnish.yml new file mode 100644 index 00000000..1ca67f82 --- /dev/null +++ b/ansible/group_vars/alpha-github-varnish.yml @@ -0,0 +1,19 @@ +--- +name: github-varnish + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: git@github.com:CodeNow/{{ name }}.git + +container_envs: >- + -e VARNISH_PORT=80 + -e VARNISH_MALLOC=100M + -e GITHUB_DOMAIN={{ github_domain }} + -e IS_GITHUB_ENTERPRISE={{ is_github_enterprise }} + -e GITHUB_PROTOCOL={{ github_protocol }} + +container_run_opts: > + -h {{ name }} + -d + -p {{ github_varnish_port }}:80 + {{ container_envs }} diff --git a/ansible/group_vars/alpha-hipache.yml b/ansible/group_vars/alpha-hipache.yml new file mode 100644 index 00000000..9b293020 --- /dev/null +++ b/ansible/group_vars/alpha-hipache.yml @@ -0,0 +1,19 @@ +# container_kill_start settings +name: hipache + +container_image: registry.runnable.com/runnable/hipache +container_tag: latest +hosted_ports: [ 80, 443 ] + +container_run_opts: > + -d + -p 80:80 + -p 443:443 + -v /opt/hipache-conf:/host:rw + -v /etc/ssl/certs/{{ domain }}:/etc/ssl/certs/{{ domain }} + -v /etc/ssl/private/{{ domain }}.key:/etc/ssl/private/{{ domain }}.key + +container_run_args: /usr/local/bin/hipache --config /host/config.json + +# hipache settings +retry_on_error: 0 diff --git a/ansible/group_vars/alpha-image-builder.yml b/ansible/group_vars/alpha-image-builder.yml new file mode 100644 index 00000000..58c593cc --- /dev/null +++ b/ansible/group_vars/alpha-image-builder.yml @@ -0,0 +1,10 @@ +app_name: image-builder +name: "{{ app_name }}" +# namespace (can include "registry.runnable.com:") +image_builder_docker_namespace: runnable/image-builder +app_repo: git@github.com:CodeNow/{{ app_name }}.git + +# consul values +consul_values: + - key: "{{ name }}/version" + value: "{{ git_branch }}" diff --git a/ansible/group_vars/alpha-ingress-proxy.yml b/ansible/group_vars/alpha-ingress-proxy.yml new file mode 100644 index 00000000..449263cc --- /dev/null +++ b/ansible/group_vars/alpha-ingress-proxy.yml @@ -0,0 +1,26 @@ +--- +name: nginx + +# used by consul template updater +proxy_service_name: api +target_container_name: nginx +target_updater_file_path: /etc/nginx/sites-enabled +template_command: /bin/docker kill -s HUP {{ target_container_name }} +template_path: /etc/nginx/template/api-{{ domain }}.tmpl + +# used by container_kill_start +container_image: "{{ name }}" +container_tag: "1.10" + +restart_policy: always + +container_run_opts: > + -d + --name {{ name }} + -p 0.0.0.0:443:443 + -p 0.0.0.0:80:80 + -v /etc/nginx/nginx.conf:/etc/nginx/nginx.conf:ro + -v /etc/nginx/other-sites-enabled:/etc/nginx/other-sites-enabled + -v {{ target_updater_file_path }} + -v /etc/ssl/certs/{{ domain }}:/etc/ssl/certs/{{ domain }}:ro + -v /var/log/nginx:/var/log/nginx diff --git a/ansible/group_vars/alpha-khronos.yml b/ansible/group_vars/alpha-khronos.yml new file mode 100644 index 00000000..6bace92d --- /dev/null +++ b/ansible/group_vars/alpha-khronos.yml @@ -0,0 +1,80 @@ +name: khronos + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.4.7" +npm_version: 2 + 
+dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] +dockerfile_pre_install_commands: [ + 'echo "//registry.npmjs.org/:_authToken=${NPM_TOKEN}" > .npmrc' +] + +# for cron job +# this is the list of queues we want to enqueue a job into +main_cron_queues: "\ + containers.image-builder.prune \ + containers.orphan.prune \ + context-versions.prune-expired \ + images.prune \ + instances.cleanup \ + weave.prune" + +canary_cron_queues: "\ + canary.build.run \ + canary.github-branch.run \ + canary.log.run \ + canary.network.run \ + metrics.container-status" + +hourly_canary_cron_queues: "\ + canary.failover.run" + +# a nice version of the rabbitmq host +cron_rabbit_host_address: "{{ rabbit_host_address }}:{{ rabbit_port }}" +# a quick version of authentication for rabbit for cron +cron_rabbit_auth: --username {{ rabbit_username }} --password {{ rabbit_password }} + +# for container settings +container_envs: > + -e API_SOCKET_SERVER=https://{{ api_socket_server_hostname }} + -e API_URL={{ api_url }} + -e BIG_POPPA_HOST=http://{{ big_poppa_host_address }}:{{ big_poppa_port }} + -e CONSUL_HOST={{ consul_host_address }}:{{ consul_api_port }} + -e CANARY_API_TOKEN={{ khronos_canary_token | default('undefined') }} + {% if khronos_canary_failover_token is defined %} + -e CANARY_API_FAILOVER_TOKEN={{ khronos_canary_failover_token }} + {% endif %} + -e CANARY_GITHUB_BRANCHES_INSTANCE_ID={{ khronos_canary_github_branches_instance_id | default('undefined') }} + -e CANARY_LOG_INSTANCE_ID={{ khronos_canary_logs_instance_id | default('undefined') }} + -e CANARY_LOG_TERMINAL_SLEEP=10 + -e CANARY_REBUILD_INSTANCE_ID={{ khronos_canary_rebuild_instance_id | default('undefined') }} + -e CANARY_REBUILD_NAVI_URL={{ khronos_canary_rebuild_navi_url | default('undefined') }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e INTERCOM_API_KEY={{ khronos_intercom_api_key | default('undefined') }} + -e INTERCOM_APP_ID={{ khronos_intercom_app_id | default('undefined') }} + -e KHRONOS_MONGO=mongodb://{{ khronos_mongo_auth }}@{{ mongo_hosts }}/{{ khronos_mongo_database }} + -e MONGO_CACERT=/opt/ssl/mongo-client/ca.pem + -e MONGO_CERT=/opt/ssl/mongo-client/cert.pem + -e MONGO_KEY=/opt/ssl/mongo-client/key.pem + -e NODE_ENV={{ node_env }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e SWARM_HOST=http://{{ swarm_host_address }}:{{ swarm_master_port }} + -e USER_CONTENT_DOMAIN={{ user_content_domain }} + -e WORKER_MAX_RETRY_DELAY=3600000 + +container_run_opts: > + -h {{ name }} + -d + -v /opt/ssl/docker/{{ name }}:/etc/ssl/docker:ro + -v /opt/ssl/mongo-client:/opt/ssl/mongo-client:ro + {{container_envs}} + +container_run_args: timeout 1h npm start diff --git a/ansible/group_vars/alpha-krain.yml b/ansible/group_vars/alpha-krain.yml new file mode 100644 index 00000000..285fb62d --- /dev/null +++ b/ansible/group_vars/alpha-krain.yml @@ -0,0 +1,16 @@ +# krain options +name: krain + +krain_env: default + +# upstart template variables +app_name: krain +app_repo: git@github.com:CodeNow/krain.git +node_env: "{{ krain_env }}" + +enviroment_vars: {} + +# consul values +consul_values: + - key: "{{ name }}/version" + value: "{{ git_branch }}" diff --git a/ansible/group_vars/alpha-link.yml b/ansible/group_vars/alpha-link.yml new file mode 100644 index 00000000..1d8e2d22 --- /dev/null +++ b/ansible/group_vars/alpha-link.yml @@ -0,0 +1,25 @@ +name: link + +container_image: 
registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: git@github.com:CodeNow/{{ name }}.git +node_version: "4.2.1" +npm_version: "2.14.7" + +container_envs: > + -e API_URL={{ api_url }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e HELLO_RUNNABLE_GITHUB_TOKEN=5d8f7029d3d6941b0fc62a7eb8c605d8e0bc7c29 + -e MONGO=mongodb://{{ navi_mongo_host_address }}:{{ navi_mongo_port }}/{{ navi_mongo_database }} + -e NODE_ENV={{ node_env }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e USER_CONTENT_DOMAIN={{ user_content_domain }} + +container_run_opts: > + -h {{ name }} + -d + {{ container_envs }} diff --git a/ansible/group_vars/alpha-marketing.yml b/ansible/group_vars/alpha-marketing.yml new file mode 100644 index 00000000..a97aadca --- /dev/null +++ b/ansible/group_vars/alpha-marketing.yml @@ -0,0 +1,22 @@ +name: "runnable.com" + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.3.1" +npm_version: "3.7.5" +do_not_push: yes + +dockerfile_enviroment: [ + "API_URL https://{{ api_hostname }}", + "AWS_ACCESS_KEY {{ marketing_aws_access_key }}", + "AWS_SECRET_KEY {{ marketing_aws_secret_key }}", + "ANGULAR_URL {{ angular_url }}", + "AWS_BUCKET {{ marketing_bucket }}", + "AWS_REGION {{ web_aws_bucket_region | default('us-standard') }}", + "NODE_ENV {{ node_env }}" +] + +dockerfile_post_install_commands: [ + "npm run deploy" +] diff --git a/ansible/group_vars/alpha-metabase.yml b/ansible/group_vars/alpha-metabase.yml new file mode 100644 index 00000000..54e3a49e --- /dev/null +++ b/ansible/group_vars/alpha-metabase.yml @@ -0,0 +1,20 @@ +name: "metabase" + +container_image: metabase/{{ name }} +container_tag: v0.19.3 + +memory_hard_limit: 20g + +container_envs: > + -e "MB_DB_TYPE=postgres" + -e "MB_DB_DBNAME={{ metabase_pg_database }}" + -e "MB_DB_PORT={{ metabase_pg_port }}" + -e "MB_DB_USER={{ metabase_pg_user }}" + -e "MB_DB_PASS={{ metabase_pg_pass }}" + -e "MB_DB_HOST={{ metabase_pg_host }}" + +container_run_opts: > + -h {{ name }} + -d + -p 4444:3000 + {{ container_envs }} diff --git a/ansible/group_vars/alpha-metis.yml b/ansible/group_vars/alpha-metis.yml new file mode 100644 index 00000000..fe371c36 --- /dev/null +++ b/ansible/group_vars/alpha-metis.yml @@ -0,0 +1,39 @@ +name: metis +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: git@github.com:CodeNow/astral.git +node_version: "4.2.1" +npm_version: "2.8.3" + +# Overrides the start command in the builder role +npm_start_command: run metis-start + +# Not actually needed, just allows container-kill-start to work +hosted_ports: [ "{{ metis_port }}" ] + +# container settings +redis_ca_cert_path: /opt/ssl/metis/redis/ca.pem +container_envs: > + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e POSTGRES_CONNECT_STRING=postgres://{{ pg_user }}:{{ pg_pass }}@{{ pg_host }}/{{ pg_database }} + -e AWS_ACCESS_KEY_ID={{ aws_access_key_id }} + -e AWS_SECRET_ACCESS_KEY={{ aws_secret_access_key }} + -e NODE_ENV={{ node_env }} + -e REDIS_CACERT={{ redis_ca_cert_path }} + -e REDIS_HOST={{ 
redis_host_address }} + -e REDIS_PORT={{ redis_tls_port }} + -e REGISTRY_HOST={{ registry_host }} + -e ROLLBAR_KEY={{ metis_rollbar_key }} + -e DOCKER_PORT={{ docker_port }} + +container_run_opts: > + -h {{ name }} + -d + -P + -v {{ redis_ca_cert_path }}:{{ redis_ca_cert_path }} + {{ container_envs }} diff --git a/ansible/group_vars/alpha-mongo-navi.yml b/ansible/group_vars/alpha-mongo-navi.yml new file mode 100644 index 00000000..9d9cf42d --- /dev/null +++ b/ansible/group_vars/alpha-mongo-navi.yml @@ -0,0 +1,13 @@ +# database vars +name: mongo-navi + +db_path: /opt/mongodb/db + +# container_kill_start settings +container_image: mongo +container_tag: latest +container_run_opts: > + -h {{ name }} + -d + -p 27017:27017 + -v {{ db_path }}:/data/db:rw diff --git a/ansible/group_vars/alpha-mongo.yml b/ansible/group_vars/alpha-mongo.yml new file mode 100644 index 00000000..06227dc4 --- /dev/null +++ b/ansible/group_vars/alpha-mongo.yml @@ -0,0 +1,13 @@ +# database vars +name: mongodb + +db_path: /opt/mongodb/db + +# container_kill_start settings +container_image: mongo +container_tag: latest +container_run_opts: -d --name mongodb -p 27017:27017 -v {{ db_path }}:/data/db:rw + +# do not monitor docker-daemon for mongo, as there is none +no_datadog_docker_monitoring: true +has_dd_integration: true diff --git a/ansible/group_vars/alpha-navi-proxy.yml b/ansible/group_vars/alpha-navi-proxy.yml new file mode 100644 index 00000000..f37a0700 --- /dev/null +++ b/ansible/group_vars/alpha-navi-proxy.yml @@ -0,0 +1,26 @@ +--- +name: nginx + +# used by consul template updater +proxy_service_name: navi +target_container_name: nginx +target_updater_file_path: /etc/nginx/sites-enabled +template_command: /bin/docker kill -s HUP {{ target_container_name }} +template_path: /etc/nginx/template/navi.tmpl + +# used by container_kill_start +container_image: "{{ name }}" +container_tag: "1.10" + +restart_policy: always + +container_run_opts: > + -d + --net=host + --name {{ name }} + -p 0.0.0.0:443:443 + -p 0.0.0.0:80:80 + -v /etc/nginx/nginx.conf:/etc/nginx/nginx.conf:ro + -v {{ target_updater_file_path }} + -v /etc/ssl/certs/{{ user_content_domain }}:/etc/ssl/certs/{{ user_content_domain }}:ro + -v /var/log/nginx:/var/log/nginx diff --git a/ansible/group_vars/alpha-navi.yml b/ansible/group_vars/alpha-navi.yml new file mode 100644 index 00000000..b79d2c8a --- /dev/null +++ b/ansible/group_vars/alpha-navi.yml @@ -0,0 +1,47 @@ +name: navi + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: git@github.com:CodeNow/{{ name }}.git +hosted_ports: [ "{{ navi_http_port }}" ] +node_version: "4.2.4" +npm_version: "2.8.3" + +restart_policy: always + +redis_ca_cert_path: /opt/ssl/{{ name }}/redis/ca.pem +content_domain_certs: /opt/ssl/{{ user_content_domain }} + +container_envs: > + -e API_URL={{ api_url }} + -e CERT_PATH={{ content_domain_certs }} + -e COOKIE_DOMAIN=.{{ user_content_domain }} + -e COOKIE_SECRET={{ navi_cookie_secret }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e ENABLE_LRU_CACHE=1 + -e ERROR_URL=http://{{ detention_host_address }}:{{ detention_port }} + -e HTTP_PORT={{ hosted_ports[0] }} + -e LOG_LEVEL_STDOUT=trace + -e MONGO=mongodb://{{ navi_mongo_host_address }}:{{ navi_mongo_port }}/{{ navi_mongo_database }} + -e NODE_ENV={{ node_env }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e 
REDIS_CACERT={{ redis_ca_cert_path }} + -e REDIS_IPADDRESS={{ redis_host_address }} + -e REDIS_PORT={{ redis_tls_port }} + {% if navi_intercom_api_key is defined %} -e INTERCOM_API_KEY={{ navi_intercom_api_key }} {% endif %} + {% if navi_intercom_app_id is defined %} -e INTERCOM_APP_ID={{ navi_intercom_app_id }} {% endif %} + {% if navi_new_relic_app_name is defined %} -e NEW_RELIC_APP_NAME={{ navi_new_relic_app_name }} {% endif %} + {% if navi_new_relic_app_name is defined %} -e NEW_RELIC_LICENSE_KEY={{ new_relic_license_key }} {% endif %} + {% if navi_new_relic_app_name is defined %} -e NEW_RELIC_LOG_LEVEL=fatal {% endif %} + {% if navi_new_relic_app_name is defined %} -e NEW_RELIC_NO_CONFIG_FILE=true {% endif %} + +container_run_opts: > + -h {{ name }} + -d + -P + -v {{ redis_ca_cert_path }}:{{ redis_ca_cert_path }} + {{ container_envs }} diff --git a/ansible/group_vars/alpha-node-exporter.yml b/ansible/group_vars/alpha-node-exporter.yml new file mode 100644 index 00000000..ecfd0f7b --- /dev/null +++ b/ansible/group_vars/alpha-node-exporter.yml @@ -0,0 +1,15 @@ +name: node-exporter + +container_image: prom/{{ name }} +container_tag: "0.12.0" + +memory_hard_limit: 100mb + +container_run_opts: > + --name={{ name }} + --net=host + --memory-reservation=50mb + +container_run_args: > + --web.listen-address=:{{ node_exporter_port }} + --collectors.enabled=conntrack,diskstats,filefd,filesystem,loadavg,meminfo,netdev,netstat,stat,time \ diff --git a/ansible/group_vars/alpha-optimus.yml b/ansible/group_vars/alpha-optimus.yml new file mode 100644 index 00000000..58cb61d2 --- /dev/null +++ b/ansible/group_vars/alpha-optimus.yml @@ -0,0 +1,29 @@ +name: optimus + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: git@github.com:CodeNow/{{ name }}.git +hosted_ports: ["{{ optimus_port }}"] +node_version: "4.3.2" +npm_version: "2.8.3" + +# for redis +redis_key: frontend:{{ optimus_hostname }} +is_redis_update_required: yes + +container_envs: > + -e NODE_ENV={{ node_env }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e PORT={{ hosted_ports[0] }} + -e AWS_ACCESS_KEY_ID={{ optimus_aws_access_id }} + -e AWS_SECRET_ACCESS_KEY={{ optimus_aws_secret_id }} + -e S3_DEPLOY_KEY_BUCKET={{ optimus_github_deploy_keys_bucket }} + -e ROLLBAR_KEY=a49f9cce09ee46f09df3f603178fba75 + -e LOG_LEVEL=trace + +container_run_opts: > + -h {{ name }} + -d + -P + {{ container_envs }} diff --git a/ansible/group_vars/alpha-palantiri.yml b/ansible/group_vars/alpha-palantiri.yml new file mode 100644 index 00000000..ea2670e2 --- /dev/null +++ b/ansible/group_vars/alpha-palantiri.yml @@ -0,0 +1,29 @@ +name: "palantiri" + +container_image: "registry.runnable.com/runnable/{{ name }}" +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" +npm_version: "2.14.7" + +# container settings +container_envs: > + -e CONSUL_HOST={{ consul_host_address }}:{{ consul_api_port }} + -e NODE_ENV={{ node_env }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e ROLLBAR_KEY={{ palantiri_rollbar_key }} + -e RSS_LIMIT=2000000 + -e LOG_LEVEL_STDOUT=trace + -e SWARM_HOSTNAME={{ swarm_host_address }} + -e SWARM_PORT={{ swarm_master_port }} + +container_run_opts: > + -h {{ name }} + -d + -v /opt/ssl/docker/{{ name 
}}:/etc/ssl/docker:ro + {{ container_envs }} diff --git a/ansible/group_vars/alpha-pheidi.yml b/ansible/group_vars/alpha-pheidi.yml new file mode 100644 index 00000000..8b8f95e7 --- /dev/null +++ b/ansible/group_vars/alpha-pheidi.yml @@ -0,0 +1,51 @@ +name: pheidi + +container_image: "registry.runnable.com/runnable/{{ name }}" +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" +npm_version: "2.14.7" + +# for sendGrid +sendgrid_key: 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 + +dockerfile_enviroment: [ + "NPM_TOKEN {{ npm_token }}" +] + +dockerfile_pre_install_commands: [ + "echo '//registry.npmjs.org/:_authToken=${NPM_TOKEN}' > .npmrc" +] + +container_envs: > + -e BIG_POPPA_HOST=http://{{ big_poppa_host_address }}:{{ big_poppa_port }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e DOMAIN={{ domain }} + -e EMAIL_GITHUB_ACCESS_TOKEN={{ pheidi_email_github_token }} + -e FULL_API_DOMAIN=https://api.{{ domain }} + -e INTERCOM_ADMIN_ID={{ pheidi_intercom_admin_id }} + -e INTERCOM_API_KEY={{ pheidi_intercom_key }} + -e INTERCOM_APP_ID={{ pheidi_intercom_id }} + -e LOGGLY_TOKEN="{{ loggly_token }}" + -e MONGO=mongodb://{{ pheidi_mongo_auth }}@{{ mongo_hosts }}/{{ pheidi_mongo_database }} + -e MONGO_REPLSET_NAME={{ pheidi_mongo_replset_name }} + -e MONGO_CACERT=/opt/ssl/mongo-client/ca.pem + -e MONGO_CERT=/opt/ssl/mongo-client/cert.pem + -e MONGO_KEY=/opt/ssl/mongo-client/key.pem + -e NODE_ENV="{{ node_env }}" + -e RABBITMQ_HOSTNAME="{{ rabbit_host_address }}" + -e RABBITMQ_PASSWORD="{{ rabbit_password }}" + -e RABBITMQ_PORT="{{ rabbit_port }}" + -e RABBITMQ_USERNAME="{{ rabbit_username }}" + -e ROLLBAR_KEY={{ pheidi_rollbar_token }} + -e RUNNABOT_GITHUB_ACCESS_TOKENS={{ pheidi_runnabot_tokens }} + -e SENDGRID_KEY={{ sendgrid_key }} + -e USER_CONTENT_DOMAIN={{ user_content_domain }} + -e WEB_URL=https://app.{{ domain }} + +container_run_opts: > + -h {{ name }} + -d + -v /opt/ssl/mongodb-client:/opt/ssl/mongo-client:ro + {{ container_envs }} diff --git a/ansible/group_vars/alpha-prometheus-alerts.yml b/ansible/group_vars/alpha-prometheus-alerts.yml new file mode 100644 index 00000000..1af392ae --- /dev/null +++ b/ansible/group_vars/alpha-prometheus-alerts.yml @@ -0,0 +1,21 @@ +name: prometheus-alerts + +# container_kill_start settings +container_image: prom/alertmanager +container_tag: v0.5.0 + +prometheus_alert_root: /opt/runnable/prometheus-alerts + +memory_hard_limit: 10g + +container_run_opts: > + -d + -p {{ prometheus_alert_port }}:{{ prometheus_alert_port }} + -v {{ prometheus_alert_root }}:/prometheus-alerts + +container_run_args: > + -config.file=/prometheus-alerts/prometheus-alerts.yml + -data.retention 720h0m0s + -storage.path "data" + -web.external-url http://localhost + -web.listen-address ":{{ prometheus_alert_port }}" diff --git a/ansible/group_vars/alpha-prometheus.yml b/ansible/group_vars/alpha-prometheus.yml new file mode 100644 index 00000000..c72a526b --- /dev/null +++ b/ansible/group_vars/alpha-prometheus.yml @@ -0,0 +1,24 @@ +name: prometheus + +# container_kill_start settings +container_image: prom/prometheus +container_tag: v1.4.1 + +prometheus_root: /opt/runnable/prometheus + +memory_hard_limit: 15g + +stop_time: 60 + +container_run_opts: > + -d + -p {{ prometheus_port }}:{{ prometheus_port }} + -v {{ prometheus_root }}:/prometheus + +container_run_args: > + -alertmanager.url "{{ prometheus_alert_url }}" + -config.file=/prometheus/prometheus.yml + 
-storage.local.path "data" + -storage.local.retention 168h0m0s + -web.external-url http://localhost + -web.listen-address ":{{ prometheus_port }}" diff --git a/ansible/group_vars/alpha-proxy-socket-server.yml b/ansible/group_vars/alpha-proxy-socket-server.yml new file mode 100644 index 00000000..6090603d --- /dev/null +++ b/ansible/group_vars/alpha-proxy-socket-server.yml @@ -0,0 +1,20 @@ +--- +name: nginx + +docker_image: runnable/sticky-nginx +docker_image_version: v1.8.1 + +docker_restart_command: kill -s SIGHUP + +docker_container_run_opts: > + -d + -h {{ name }} + -p 0.0.0.0:443:443 + -p 0.0.0.0:80:80 + -v /etc/nginx/nginx.conf:/etc/nginx/nginx.conf:ro + -v /etc/nginx/sites-available/:/etc/nginx/sites-enabled/:ro + -v /etc/nginx/ssl/dhparam.pem:/etc/nginx/ssl/dhparam.pem:ro + -v /etc/ssl/certs/{{ domain }}:/etc/ssl/certs/{{ domain }}:ro + -v /etc/ssl/certs/{{ user_content_domain }}:/etc/ssl/certs/{{ user_content_domain }}:ro + -v /etc/ssl/private:/etc/ssl/private:ro + -v /var/log/nginx:/var/log/nginx diff --git a/ansible/group_vars/alpha-rabbitmq.yml b/ansible/group_vars/alpha-rabbitmq.yml new file mode 100644 index 00000000..7712c509 --- /dev/null +++ b/ansible/group_vars/alpha-rabbitmq.yml @@ -0,0 +1,14 @@ +# database vars +name: rabbitmq + +db_path: "/opt/rabbitmq/db" +config_path: "/opt/rabbitmq/config" + +# container_kill_start settings +container_image: rabbitmq +container_tag: "3.5.3-management" +container_run_opts: "-d --name prod-rabbit -v {{ db_path }}:/var/lib/rabbitmq:rw -v {{ config_path }}/rabbitmq.config:/etc/rabbitmq/rabbitmq.config:ro -p 54320:15672 -p 54321:5672" + +hosted_ports: [ 5672, 15672 ] +# DD integration for RabbitMQ +has_dd_integration: true diff --git a/ansible/group_vars/alpha-redis-tls.yml b/ansible/group_vars/alpha-redis-tls.yml new file mode 100644 index 00000000..2481eb74 --- /dev/null +++ b/ansible/group_vars/alpha-redis-tls.yml @@ -0,0 +1,10 @@ +name: redis + +# container_kill_start settings +container_image: runnable/redis-stunnel +container_tag: v0.1.0 +container_run_opts: > + --link redis:redis + -d + -p {{ ansible_default_ipv4.address }}:6380:6380 + -v /opt/redis-tls/stunnel.pem:/stunnel/private.pem diff --git a/ansible/group_vars/alpha-redis.yml b/ansible/group_vars/alpha-redis.yml new file mode 100644 index 00000000..807f32a1 --- /dev/null +++ b/ansible/group_vars/alpha-redis.yml @@ -0,0 +1,10 @@ +# database vars +name: redis +db_path: "/opt/redis/db" + +# container_kill_start settings +container_image: redis +container_tag: "latest" +container_run_opts: "-d --name redis -v {{ db_path }}:/data -p {{ redis_port }}:{{ redis_port }}" + +hosted_ports: [ "{{ redis_port }}" ] diff --git a/ansible/group_vars/alpha-registrator.yml b/ansible/group_vars/alpha-registrator.yml new file mode 100644 index 00000000..400ba645 --- /dev/null +++ b/ansible/group_vars/alpha-registrator.yml @@ -0,0 +1,17 @@ +name: registrator + +# container_kill_start settings +container_image: gliderlabs/registrator +container_tag: v7 + +restart_policy: always + +container_run_opts: > + -d + --hostname={{ ansible_hostname }} + --name={{ name }} + --volume=/var/run/docker.sock:/tmp/docker.sock + +container_run_args: > + -ip {{ ansible_default_ipv4.address }} + consul://{{ consul_host_address }}:{{ consul_api_port }} diff --git a/ansible/group_vars/alpha-registry.yml b/ansible/group_vars/alpha-registry.yml new file mode 100644 index 00000000..376674a7 --- /dev/null +++ b/ansible/group_vars/alpha-registry.yml @@ -0,0 +1,16 @@ +# container start +name: "registry" + +container_image: 
"registry" +container_tag: "2.1.1" +hosted_ports: ["80"] + +container_envs: > + -e REGISTRY_STORAGE_S3_ACCESSKEY={{ registry_s3_access_key }} + -e REGISTRY_STORAGE_S3_SECRETKEY={{ registry_s3_secret_key }} + -e REGISTRY_STORAGE_S3_REGION={{ registry_s3_region }} + -e REGISTRY_STORAGE_S3_BUCKET={{ registry_s3_bucket }} + -e REGISTRY_ENVIRONMENT=production + -e REGISTRY_STORAGE=s3 + +container_run_opts: "-d -p {{ hosted_ports[0] }}:5000 {{container_envs}}" diff --git a/ansible/group_vars/alpha-sauron.yml b/ansible/group_vars/alpha-sauron.yml new file mode 100644 index 00000000..a0d9bdb1 --- /dev/null +++ b/ansible/group_vars/alpha-sauron.yml @@ -0,0 +1,38 @@ +name: sauron + +container_image: "registry.runnable.com/runnable/{{ name }}" +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "4.2.2" +npm_version: "2.14.7" + +weave_path: "/bin/weave" +docker_path: "/bin/docker" + +dockerfile_pre_install_commands: [ + "curl -L https://get.docker.com/builds/Linux/x86_64/docker-1.6.2 -o {{ docker_path }}", + "chmod a+x {{ docker_path }}", + "curl -L https://github.com/weaveworks/weave/releases/download/v1.5.0/weave -o {{ weave_path }}", + "chmod a+x {{ weave_path }}" +] + +container_envs: > + -e CONSUL_HOST={{ consul_host_address }}:{{ consul_api_port }} + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e LOGGLY_TOKEN="{{ loggly_token }}" + -e NODE_ENV="{{ node_env }}" + -e RABBITMQ_HOSTNAME="{{ rabbit_host_address }}" + -e RABBITMQ_PASSWORD="{{ rabbit_password }}" + -e RABBITMQ_PORT="{{ rabbit_port }}" + -e RABBITMQ_USERNAME="{{ rabbit_username }}" + -e ROLLBAR_KEY={{ sauron_rollbar_key }} + -e SWARM_HOSTNAME={{ swarm_host_address }} + -e SWARM_PORT={{ swarm_master_port }} + -e WEAVE_PATH={{ weave_path }} + +container_run_opts: > + -h {{ name }} + -d + -v /opt/ssl/docker/{{ name }}:/etc/ssl/docker:ro + {{ container_envs }} diff --git a/ansible/group_vars/alpha-shiva.yml b/ansible/group_vars/alpha-shiva.yml new file mode 100644 index 00000000..9aaa672f --- /dev/null +++ b/ansible/group_vars/alpha-shiva.yml @@ -0,0 +1,36 @@ +name: shiva +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: git@github.com:CodeNow/astral.git +node_version: "4.2.1" +npm_version: "2.8.3" + +# Overrides the start command in the builder role +npm_start_command: run shiva-start + +# container settings +redis_ca_cert_path: /opt/ssl/{{ name }}/redis/ca.pem +container_envs: > + -e DATADOG_HOST={{ datadog_host_address }} + -e DATADOG_PORT={{ datadog_port }} + -e RABBITMQ_HOSTNAME={{ rabbit_host_address }} + -e RABBITMQ_PASSWORD={{ rabbit_password }} + -e RABBITMQ_PORT={{ rabbit_port }} + -e RABBITMQ_USERNAME={{ rabbit_username }} + -e POSTGRES_CONNECT_STRING=postgres://{{ pg_user }}:{{ pg_pass }}@{{ pg_host }}/{{ pg_database }} + -e AWS_ACCESS_KEY_ID={{ aws_access_key_id }} + -e AWS_SECRET_ACCESS_KEY={{ aws_secret_access_key }} + -e NODE_ENV={{ node_env }} + -e REDIS_CACERT={{ redis_ca_cert_path }} + -e REDIS_PORT={{ redis_tls_port }} + -e REDIS_IPADDRESS={{ redis_host_address }} + -e REGISTRY_HOST={{ registry_host }} + -e ROLLBAR_KEY={{ shiva_rollbar_token }} + -e DOCKER_PORT={{ docker_port }} + +container_run_opts: > + -h {{ name }} + -d + -P + -v {{ redis_ca_cert_path }}:{{ redis_ca_cert_path }} + {{ container_envs }} diff --git a/ansible/group_vars/alpha-socket-server.yml b/ansible/group_vars/alpha-socket-server.yml new file mode 100644 index 00000000..f7f0b40c --- /dev/null +++ 
b/ansible/group_vars/alpha-socket-server.yml @@ -0,0 +1,23 @@ +name: api-socket-server + +container_image: registry.runnable.com/runnable/{{ name }} + +hosted_ports: [ "{{ api_port }}" ] + +# for redis +redis_key: frontend:{{ api_socket_server_hostname }} +is_redis_update_required: yes + +# for container settings +container_envs: > + {{ api_base_container_envs }} + -e NUM_WORKERS=1 + -e ROLLBAR_KEY={{ api_socket_server_rollbar_key }} + +container_run_opts: > + -h {{ name }} + -d + -P + -v /opt/ssl/docker/{{ name }}:/etc/ssl/docker:ro + -v /opt/ssl/mongodb-client:/opt/ssl/mongodb-client:ro + {{ container_envs }} diff --git a/ansible/group_vars/alpha-swarm-daemon.yml b/ansible/group_vars/alpha-swarm-daemon.yml new file mode 100644 index 00000000..643d73f3 --- /dev/null +++ b/ansible/group_vars/alpha-swarm-daemon.yml @@ -0,0 +1,20 @@ +name: "swarm-deamon" + +# container_kill_start settings +container_image: swarm +container_tag: 1.2.3 + +container_run_opts: > + --name {{ swarm_container_name }} + -d + +container_run_args: > + join + --heartbeat 15s --ttl 30s + --addr={{ ansible_default_ipv4.address }}:{{ docker_port }} + consul://{{ consul_host_address }}:{{ consul_api_port }}/swarm + +# consul values +consul_values: + - key: "swarm/version" + value: "{{ container_tag }}" diff --git a/ansible/group_vars/alpha-swarm-manager-metrics.yml b/ansible/group_vars/alpha-swarm-manager-metrics.yml new file mode 100644 index 00000000..3a489f2b --- /dev/null +++ b/ansible/group_vars/alpha-swarm-manager-metrics.yml @@ -0,0 +1,28 @@ +--- +name: swarm-cloudwatch-reporter + +repo: git@github.com:CodeNow/furry-cactus.git +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +node_version: "4.4.7" +npm_version: 3 +do_not_push: true + +log_driver: json-file + +dockerfile_pre_install_commands: +- npm config set progress false + +container_run_opts: >- + --log-driver={{ log_driver }} + -e AWS_ACCESS_KEY={{ aws_access_key }} + -e AWS_SECRET_KEY={{ aws_secret_key }} + -e DOCKER_CERT_PATH=/opt/ssl/docker/swarm-manager + -e ENVIRONMENT={{ environment_name }} + -e SWARM_HOSTNAME={{ ansible_default_ipv4.address }} + -e SWARM_PORT={{ swarm_master_port }} + -v /opt/runnable/get-info.js:/get-info.js:ro + -v /opt/ssl/docker/swarm-manager:/opt/ssl/docker/swarm-manager:ro + -v /var/log:/var/log + +container_run_args: npm start >> /var/log/{{ name }}.log 2>&1 diff --git a/ansible/group_vars/alpha-swarm-manager.yml b/ansible/group_vars/alpha-swarm-manager.yml new file mode 100644 index 00000000..84e0ef8b --- /dev/null +++ b/ansible/group_vars/alpha-swarm-manager.yml @@ -0,0 +1,20 @@ +name: "swarm-manager" + +# container_kill_start settings +container_image: swarm +container_tag: 1.2.3-0 + +memory_hard_limit: 10g + +container_run_opts: > + -d + -p {{ swarm_master_port }}:2375 + -v /opt/ssl/docker/{{ name }}:/etc/ssl/docker:ro + +container_run_args: > + manage + --tlsverify + --tlscacert={{ docker_ca_path }} + --tlscert={{ docker_cert_path }} + --tlskey={{ docker_key_path }} + consul://{{ consul_host_address }}:{{ consul_api_port }}/swarm diff --git a/ansible/group_vars/alpha-vault.yml b/ansible/group_vars/alpha-vault.yml new file mode 100644 index 00000000..3d1f8c45 --- /dev/null +++ b/ansible/group_vars/alpha-vault.yml @@ -0,0 +1,41 @@ +name: vault + +# for database role +db_path: /opt/runnable/vault + +container_image: runnable/vault +container_tag: v0.5.1 + +container_run_opts: > + -d + -h {{ inventory_hostname }} + -v /opt/runnable/vault/vault.hcl:/vault.hcl:ro + -v 
/opt/vault/client-consul:/opt/vault/client-consul:ro + -v /opt/vault/server:/opt/vault/server:ro + -v {{ app_log_dir }}:{{ app_log_dir }}:rw + -p {{ ansible_default_ipv4.address }}:8200:8200 + -p {{ ansible_default_ipv4.address }}:8201:8201 + --cap-add IPC_LOCK + --restart=always + +container_run_args: > + vault server + -log-level=warn + -config=/vault.hcl + +# vault seed data +# pulled 2015/16/12 - Bryan +vault_seed_values: + - key: secret/loggly + data: + token: "{{ loggly_token }}" + - key: secret/rabbitmq + data: + username: "{{ rabbit_username }}" + password: "{{ rabbit_password }}" + - key: secret/github/hellorunnable + data: + token: "{{ vault_hello_runnable_github_token }}" + +# for the love of all that you find holy, don't change the following unless you _KNOW WHAT YOU ARE DOING_. +vault_seed_policy: "{\\\"Version\\\": \\\"2012-10-17\\\", \\\"Statement\\\": [{\\\"Action\\\": [\\\"ec2:DescribeInstances\\\", \\\"ec2:DescribeTags\\\"], \\\"Resource\\\": [\\\"*\\\"], \\\"Effect\\\": \\\"Allow\\\", \\\"Sid\\\": \\\"Stmt1445655064000\\\"}]}" diff --git a/ansible/group_vars/alpha-web.yml b/ansible/group_vars/alpha-web.yml new file mode 100644 index 00000000..d076d944 --- /dev/null +++ b/ansible/group_vars/alpha-web.yml @@ -0,0 +1,29 @@ +name: "runnable-angular" + +container_image: registry.runnable.com/runnable/{{ name }} +container_tag: "{{ git_branch }}" +repo: "git@github.com:CodeNow/{{ name }}.git" +node_version: "0.10.38" +npm_version: "2.1.18" +rollbar_web_token: "162a053bebd34e9eb6e2860543c7ae79" +do_not_push: yes + +dockerfile_enviroment: [ + "API_SOCK_URL https://{{ api_socket_server_hostname }}", + "API_URL https://{{ api_hostname }}", + "MIXPANEL_PROXY_URL {{ mixpanel_proxy_url }}", + "AWS_ACCESS_KEY {{ aws_access_key }}", + "AWS_BUCKET app.{{ domain }}", + "AWS_REGION {{ web_aws_bucket_region | default('us-standard') }}", + "AWS_SECRET_KEY {{ aws_secret_key }}", + "INTERCOM_APP_ID {{ web_intercom_id }}", + "MARKETING_URL {{ marketing_url }}", + "NODE_ENV production", + "SIFT_API_KEY {{ web_sift_public_key }}", + "STRIPE_TOKEN {{ cream_stripe_publishable_key }}", + "USER_CONTENT_DOMAIN {{ user_content_domain }}", +] + +dockerfile_post_install_commands: [ + "npm run gruntDeploy" +] diff --git a/ansible/group_vars/alpha-workers.yml b/ansible/group_vars/alpha-workers.yml new file mode 100644 index 00000000..a2085c3f --- /dev/null +++ b/ansible/group_vars/alpha-workers.yml @@ -0,0 +1,18 @@ +name: api-worker + +container_image: registry.runnable.com/runnable/{{ name }} + +memory_hard_limit: 1g + +# for container settings +container_envs: > + {{ api_base_container_envs }} + -e IS_QUEUE_WORKER=true + -e ROLLBAR_KEY={{ api_workers_rollbar_key }} + +container_run_opts: > + -h {{ name }} + -d + -v /opt/ssl/docker/{{ name }}:/etc/ssl/docker:ro + -v /opt/ssl/mongodb-client:/opt/ssl/mongodb-client:ro + {{ container_envs }} diff --git a/ansible/group_vars/current_versions.yml b/ansible/group_vars/current_versions.yml new file mode 100644 index 00000000..5f5fe7e2 --- /dev/null +++ b/ansible/group_vars/current_versions.yml @@ -0,0 +1,13 @@ +detention_branch=v1.3.2 +eru_branch=v1.1.0 +khronos_branch=v2.5.2 +link_branch=v1.3.1 +metis_branch=v7.0.6 +optimus_branch=v4.0.3 +palantiri_branch=v0.1.10 +sauron_branch=v4.0.2 +shiva_branch=v7.0.6 +angular_branch=v4.2.0 +api_branch=v6.32.4 +navi_branch=v5.0.2 + diff --git a/ansible/group_vars/ec2_sg.yml b/ansible/group_vars/ec2_sg.yml new file mode 100644 index 00000000..c1323c34 --- /dev/null +++ b/ansible/group_vars/ec2_sg.yml @@ -0,0 +1,26 @@ +--- 
+app_name: "ec2" +consul_api_port: 8500 +consul_api_ssl_port: 8501 +consul_cli_port: 8400 +consul_rpc_min_port: 8300 +consul_rpc_max_port: 8302 +container_min_port: 32768 +container_max_port: 65535 +detention_port: 9123 +docker_port: 4242 +git_branch: "null" +http_port: 80 +https_port: 443 +ip_all: "0.0.0.0/0" +krain_port: 3100 +named_port: 53 +navi_http_port: 3567 +redis_port: 6379 +redis_tls_port: 6380 +region: us-west-2 +sshd_port: 22 +swarm_port: 2375 +vault_api_port: 8200 +vault_api_ssl_port: 8201 +weave_port: 6783 diff --git a/ansible/group_vars/migration-router.yml b/ansible/group_vars/migration-router.yml new file mode 100644 index 00000000..e4b95db5 --- /dev/null +++ b/ansible/group_vars/migration-router.yml @@ -0,0 +1 @@ +name: migration-router diff --git a/ansible/group_vars/node-base.yml b/ansible/group_vars/node-base.yml new file mode 100644 index 00000000..266e8301 --- /dev/null +++ b/ansible/group_vars/node-base.yml @@ -0,0 +1,4 @@ +name: "node_base" +container_image: "registry.runnable.com/runnable/{{ name }}" +dockerfile: "node_base/Dockerfile" +container_tag: "latest" diff --git a/ansible/hipache.yml b/ansible/hipache.yml new file mode 100644 index 00000000..a4930226 --- /dev/null +++ b/ansible/hipache.yml @@ -0,0 +1,10 @@ +--- +- hosts: redis + +- hosts: hipache + vars_files: + - "group_vars/alpha-hipache.yml" + roles: + - { role: notify, tags: "notify" } + - { role: hipache } + - { role: container_kill_start } diff --git a/ansible/image-builder.yml b/ansible/image-builder.yml new file mode 100644 index 00000000..0fef89f5 --- /dev/null +++ b/ansible/image-builder.yml @@ -0,0 +1,12 @@ +--- +- hosts: consul + +- hosts: "{{ dock | default('docks') }}" + vars_files: + - group_vars/alpha-image-builder.yml + roles: + - { role: notify, tags: [notify] } + - { role: node, tags: [setup] } + - { role: git_repo } + - { role: image-builder } + - { role: consul_value, tags: [consul_value] } diff --git a/ansible/ingress-proxy.yml b/ansible/ingress-proxy.yml new file mode 100644 index 00000000..223a8001 --- /dev/null +++ b/ansible/ingress-proxy.yml @@ -0,0 +1,20 @@ +--- +- hosts: ingress + vars_files: + - group_vars/alpha-ingress-proxy.yml + roles: + - role: datadog + has_dd_integration: yes + + - role: runnable-domain-proxy + + - role: runnable-domain-proxy + domain: runnable.com + when: secondary_domain is defined + + - role: container_kill_start + +- include: consul-template-updater.yml + vars: + host: ingress + var_file: alpha-ingress-proxy.yml diff --git a/ansible/khronos.yml b/ansible/khronos.yml new file mode 100644 index 00000000..be7c78fe --- /dev/null +++ b/ansible/khronos.yml @@ -0,0 +1,17 @@ +--- +- hosts: rabbitmq +- hosts: mongodb +- hosts: consul +- hosts: swarm-manager +- hosts: big-poppa +- hosts: khronos + vars_files: + - group_vars/alpha-khronos.yml + roles: + - role: notify + rollbar_token: "{{ rollbar_khronos_token }}" + tags: [ notify ] + - { role: docker_client } + - { role: builder, tags: [ build ] } + - { role: khronos, tags: [ deploy ] } + - { role: container_kill_start } diff --git a/ansible/krain.yml b/ansible/krain.yml new file mode 100644 index 00000000..c865517f --- /dev/null +++ b/ansible/krain.yml @@ -0,0 +1,13 @@ +--- +- hosts: consul + +- hosts: "{{ dock | default('docks') }}" + vars_files: + - group_vars/alpha-krain.yml + roles: + - { role: notify, tags: [notify] } + - { role: build_essential } + - { role: git_repo } + - { role: node_service } + - { role: loggly } + - { role: consul_value, tags: [consul_value] } diff --git a/ansible/library/nodejs 
b/ansible/library/nodejs new file mode 100644 index 00000000..506c0cb5 --- /dev/null +++ b/ansible/library/nodejs @@ -0,0 +1,82 @@ +#!/bin/bash + +# From http://jpmens.net/2012/07/05/shell-scripts-as-ansible-modules/ +eval $(sed -e "s/\s?\([^=]+\)\s?=\s?\(\x22\([^\x22]+\)\x22|\x27\([^\x27]+\)\x27|\(\S+\)\)\s?/\1='\2'/p" $1) + +if [ -z $version ] +then + echo "{\"failed\": true, \"msg\": \"version is required. E.g. 0.10.8\"}" + exit 1 +fi + +have=$(node -v 2>/dev/null) + +# check if node is already installed +if [ ! $have ] +then + # NO NODE + cd /tmp + file="node-v$version-linux-x64" + wget http://nodejs.org/dist/v$version/$file.tar.gz &>/dev/null + if [ ! -f $file.tar.gz ] + then + echo "{\"failed\": true, \"msg\": \"Failed to download node.js binary\"}" + exit + fi + tar xf $file.tar.gz + cd $file + ./bin/npm install n &>/dev/null + if [ ! $? -eq 0 ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install 'n' locally\"}" + exit 1 + fi + ./node_modules/.bin/n $version &>/dev/null + if [ ! $? -eq 0 ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install node.js to system\"}" + exit 1 + fi + installed=$(node -v 2>/dev/null) + if [ $installed != "v$version" ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install node.js version $version\"}" + exit 1 + else + echo "{\"changed\": true, \"version\": \"$version\"}" + exit + fi +else + # WRONG VERSION + if [ $have != "v$version" ] + then + # do we have n? + nversion=$(n -V 2> /dev/null) + if [ ! $nversion ] + then + npm install -g n &> /dev/null + if [ ! $? -eq 0 ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install 'n' globally\"}" + exit 1 + fi + fi + n $version &> /dev/null + if [ ! $? -eq 0 ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install node.js version $version\"}" + exit 1 + fi + installed=$(node -v 2>/dev/null) + if [ $installed != "v$version" ] + then + echo "{\"failed\": true, \"msg\": \"Failed to install node.js version $version\"}" + exit 1 + else + echo "{\"changed\": true, \"version\": \"$version\"}" + exit + fi + else + echo "{\"changed\": false, \"version\": \"$version\"}" + fi +fi \ No newline at end of file diff --git a/ansible/library/rollbar/rollbar.py b/ansible/library/rollbar/rollbar.py new file mode 100644 index 00000000..bbc2aa5d --- /dev/null +++ b/ansible/library/rollbar/rollbar.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014, Max Riveiro, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: rollbar_deployment +version_added: 1.6 +author: "Max Riveiro (@kavu)" +short_description: Notify Rollbar about app deployments +description: + - Notify Rollbar about app deployments + (see https://rollbar.com/docs/deploys_other/) +options: + token: + description: + - Your project access token. + required: true + environment: + description: + - Name of the environment being deployed, e.g. 'production'. 
+ required: true + revision: + description: + - Revision number/sha being deployed. + required: true + user: + description: + - User who deployed. + required: false + rollbar_user: + description: + - Rollbar username of the user who deployed. + required: false + comment: + description: + - Deploy comment (e.g. what is being deployed). + required: false + url: + description: + - Optional URL to submit the notification to. + required: false + default: 'https://api.rollbar.com/api/1/deploy/' + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. + This should only be used on personally controlled sites using + self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] +''' + +EXAMPLES = ''' +- rollbar_deployment: token=AAAAAA + environment='staging' + user='ansible' + revision=4.2, + rollbar_user='admin', + comment='Test Deploy' +''' + +import urllib + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True), + environment=dict(required=True), + revision=dict(required=True), + user=dict(required=False), + rollbar_user=dict(required=False), + comment=dict(required=False), + url=dict( + required=False, + default='https://api.rollbar.com/api/1/deploy/' + ), + validate_certs=dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + if module.check_mode: + module.exit_json(changed=True) + + params = dict( + access_token=module.params['token'], + environment=module.params['environment'], + revision=module.params['revision'] + ) + + if module.params['user']: + params['local_username'] = module.params['user'] + + if module.params['rollbar_user']: + params['rollbar_username'] = module.params['rollbar_user'] + + if module.params['comment']: + params['comment'] = module.params['comment'] + + url = module.params.get('url') + + try: + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + except Exception, e: + module.fail_json(msg='Unable to notify Rollbar: %s' % e) + else: + if info['status'] == 200: + module.exit_json(changed=True) + else: + module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() \ No newline at end of file diff --git a/ansible/link.yml b/ansible/link.yml new file mode 100644 index 00000000..1f4a5e37 --- /dev/null +++ b/ansible/link.yml @@ -0,0 +1,12 @@ +--- +- hosts: mongo-navi +- hosts: rabbitmq +- hosts: consul + +- hosts: link + vars_files: + - "group_vars/alpha-link.yml" + roles: + - { role: notify, tags: "notify" } + - { role: builder, tags: "build" } + - { role: container_kill_start } diff --git a/ansible/local/hosts b/ansible/local/hosts new file mode 100644 index 00000000..8bb7ba6b --- /dev/null +++ b/ansible/local/hosts @@ -0,0 +1,2 @@ +[local] +localhost diff --git a/ansible/marketing.yml b/ansible/marketing.yml new file mode 100644 index 00000000..64a8a07b --- /dev/null +++ b/ansible/marketing.yml @@ -0,0 +1,7 @@ +--- +- hosts: marketing + vars_files: + - "group_vars/alpha-marketing.yml" + roles: + - { role: notify, tags: "notify" } + - { role: builder, tags: "build" } diff --git a/ansible/metabase.yml b/ansible/metabase.yml new file mode 100644 index 00000000..badef968 --- /dev/null +++ b/ansible/metabase.yml @@ -0,0 +1,10 @@ +--- +- hosts: consul + +- hosts: metabase + vars_files: + - group_vars/alpha-metabase.yml + roles: + - role: notify + tags: [ notify ] + - { role: container_kill_start } 
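The service playbooks added in this changeset (hipache.yml, image-builder.yml, khronos.yml, link.yml, metabase.yml above, and the ones that follow) share one shape: one or more bare `- hosts: <group>` plays for the service's dependencies (consul, rabbitmq, mongodb, and so on), which run no tasks but still gather facts for those hosts, presumably so later plays can reference them, followed by the service play itself with its `group_vars/alpha-*.yml` var file and role list. A minimal sketch of that shape, assuming a hypothetical service called `example` (the group name, var file, and role list below are placeholders, not part of this changeset):

```yaml
---
# Hypothetical playbook illustrating the pattern above; "example" is a placeholder service.
- hosts: consul      # bare dependency play: no tasks, just fact gathering for the consul group
- hosts: rabbitmq    # likewise for rabbitmq

- hosts: example
  vars_files:
    - group_vars/alpha-example.yml
  roles:
    - { role: notify, tags: [ notify ] }
    - { role: builder, tags: [ build ] }
    - { role: container_kill_start }
```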
diff --git a/ansible/metis.yml b/ansible/metis.yml new file mode 100644 index 00000000..4b09fef4 --- /dev/null +++ b/ansible/metis.yml @@ -0,0 +1,12 @@ +--- +- hosts: redis +- hosts: rabbitmq + +- hosts: metis + vars_files: + - group_vars/alpha-metis.yml + roles: + - { role: notify, tags: [ notify ] } + - { role: builder, tags: [ build ] } + - { role: tls-server-ca, ca_dest: "{{ redis_ca_cert_path }}" } + - { role: container_kill_start } diff --git a/ansible/metrics.yml b/ansible/metrics.yml new file mode 100644 index 00000000..bd5771cc --- /dev/null +++ b/ansible/metrics.yml @@ -0,0 +1,4 @@ +--- +- hosts: [docks, api, eru] + roles: + - { role: custom_metrics } diff --git a/ansible/migration-router.yml b/ansible/migration-router.yml new file mode 100644 index 00000000..a62438e7 --- /dev/null +++ b/ansible/migration-router.yml @@ -0,0 +1,4 @@ +--- +- hosts: migration-router + roles: + - { role: nginx } diff --git a/ansible/mongo-navi.yml b/ansible/mongo-navi.yml new file mode 100644 index 00000000..ec9fac7d --- /dev/null +++ b/ansible/mongo-navi.yml @@ -0,0 +1,11 @@ +--- +- hosts: consul + +- hosts: mongo-navi + vars_files: + - group_vars/alpha-mongo-navi.yml + roles: + - role: notify + tags: [ notify ] + - { role: database } + - { role: container_kill_start } diff --git a/ansible/mongo.yml b/ansible/mongo.yml new file mode 100644 index 00000000..36ec141b --- /dev/null +++ b/ansible/mongo.yml @@ -0,0 +1,10 @@ +--- +- hosts: mongodb + vars_files: + - group_vars/alpha-mongo.yml + roles: + - { role: notify, tags: [ notify ] } + - { role: database } + - { role: mongo-server, tags: [ tls ] } + - { role: container_kill_start } + - { role: datadog, tags: [ datadog ] } diff --git a/ansible/navi-proxy.yml b/ansible/navi-proxy.yml new file mode 100644 index 00000000..57bf55c5 --- /dev/null +++ b/ansible/navi-proxy.yml @@ -0,0 +1,16 @@ +--- +- hosts: userland + vars_files: + - group_vars/alpha-navi-proxy.yml + roles: + - role: datadog + has_dd_integration: yes + + - role: content-domain-proxy + + - role: container_kill_start + +- include: consul-template-updater.yml + vars: + host: userland + var_file: alpha-navi-proxy.yml diff --git a/ansible/navi.yml b/ansible/navi.yml new file mode 100644 index 00000000..4525e90f --- /dev/null +++ b/ansible/navi.yml @@ -0,0 +1,23 @@ +--- +- hosts: redis +- hosts: detention +- hosts: mongo-navi +- hosts: rabbitmq +- hosts: consul + +- hosts: navi + vars_files: + - group_vars/alpha-navi.yml + roles: + - role: notify + rollbar_token: "{{ navi_rollbar_token }}" + tags: [ notify ] + + - role: builder + tags: [ build ] + + - role: tls-server-ca + ca_dest: "{{ redis_ca_cert_path }}" + + - role: container_start + number_of_containers: "{{ ansible_processor_cores }}" diff --git a/ansible/node-base.yml b/ansible/node-base.yml new file mode 100644 index 00000000..4c638deb --- /dev/null +++ b/ansible/node-base.yml @@ -0,0 +1,6 @@ +--- +- hosts: "{{ target_host }}" + vars_files: + - "group_vars/node-base.yml" + roles: + - { role: builder, tags: "build" } diff --git a/ansible/node-exporter.yml b/ansible/node-exporter.yml new file mode 100644 index 00000000..20a3768f --- /dev/null +++ b/ansible/node-exporter.yml @@ -0,0 +1,7 @@ +--- +- hosts: docks + vars_files: + - "group_vars/alpha-node-exporter.yml" + roles: + - { role: notify, tags: "notify" } + - { role: container_kill_start } diff --git a/ansible/oneOffScripts/add-new-ssh-key.yml b/ansible/oneOffScripts/add-new-ssh-key.yml new file mode 100644 index 00000000..b52024ea --- /dev/null +++ 
b/ansible/oneOffScripts/add-new-ssh-key.yml @@ -0,0 +1,16 @@ +--- +- name: Rotate SSH Keys + hosts: all + gather_facts: no + vars: + - new_pub_key: "{{ pub_key_path }}.pub" + - new_priv_key: "{{ pub_key_path }}.pem" + + tasks: + - name: Add new key to authorized_keys + authorized_key: key="{{ lookup('file', new_pub_key) }}" + user=ubuntu state=present + + - name: Make use of new private key when connecting + set_fact: ansible_ssh_private_key_file={{ new_priv_key }} + diff --git a/ansible/oneOffScripts/changeCharonLogLevel.yml b/ansible/oneOffScripts/changeCharonLogLevel.yml new file mode 100644 index 00000000..d6fcdc49 --- /dev/null +++ b/ansible/oneOffScripts/changeCharonLogLevel.yml @@ -0,0 +1,21 @@ +- hosts: docks + gather_facts: no + tasks: + - fail: msg="`log_level` needs to be defined for this role" + when: log_level is not defined + + - name: put log level in charon upstart file + become: yes + lineinfile: + dest: /etc/init/charon.conf + state: present + backup: yes + regexp: ^env LOG_LEVEL=.+$ + insertafter: ^env DATADOG_PORT=[0-9]+$ + line: env LOG_LEVEL={{ log_level }} + + - name: restart charon + become: yes + service: + name: charon + state: restarted diff --git a/ansible/oneOffScripts/killWeaveContainers.yml b/ansible/oneOffScripts/killWeaveContainers.yml new file mode 100644 index 00000000..b43c1826 --- /dev/null +++ b/ansible/oneOffScripts/killWeaveContainers.yml @@ -0,0 +1,7 @@ +- hosts: docks + gather_facts: no + serial: 1 + tasks: + - name: kill weave containers + become: true + shell: 'sudo docker ps | grep weaveworks/weave | cut -d" " -f 1 | xargs sudo docker kill' diff --git a/ansible/oneOffScripts/ntp.yml b/ansible/oneOffScripts/ntp.yml new file mode 100644 index 00000000..74f80749 --- /dev/null +++ b/ansible/oneOffScripts/ntp.yml @@ -0,0 +1,6 @@ +--- +- hosts: all + gather_facts: no + + roles: + - { role: package_ntp, tags: "ntp" } diff --git a/ansible/oneOffScripts/ntpForceUpdate.yml b/ansible/oneOffScripts/ntpForceUpdate.yml new file mode 100644 index 00000000..45a92f8f --- /dev/null +++ b/ansible/oneOffScripts/ntpForceUpdate.yml @@ -0,0 +1,10 @@ +--- +- hosts: all + gather_facts: no + tasks: + +# pl test on test box + - name: Force ntp update + become: true + shell: + "service ntp stop && /usr/sbin/ntpdate pool.ntp.org && service ntp start" diff --git a/ansible/oneOffScripts/pullWeaveImage.yml b/ansible/oneOffScripts/pullWeaveImage.yml new file mode 100644 index 00000000..7633bf89 --- /dev/null +++ b/ansible/oneOffScripts/pullWeaveImage.yml @@ -0,0 +1,6 @@ +- hosts: docks + gather_facts: no + tasks: + - name: pull weave image + become: true + shell: 'sudo docker pull weaveworks/weave:1.4.6' diff --git a/ansible/oneOffScripts/removeDockService.yml b/ansible/oneOffScripts/removeDockService.yml new file mode 100644 index 00000000..5ebd99ab --- /dev/null +++ b/ansible/oneOffScripts/removeDockService.yml @@ -0,0 +1,22 @@ +- hosts: docks + gather_facts: no + tasks: + - fail: msg="`name` needs to be defined for this role" + when: name is not defined + + - name: stop service + become: true + service: + name={{ name }} + state=stopped + + - name: remove files + become: true + file: + path={{ item }} + state=absent + with_items: + - /etc/init/{{ name }}.conf + - /etc/init/{{ name }}.conf.bak + - /etc/init/{{ name }}.override + - /opt/runnable/{{ name }} diff --git a/ansible/oneOffScripts/removeOldRsyslogConfigs.yml b/ansible/oneOffScripts/removeOldRsyslogConfigs.yml new file mode 100644 index 00000000..cdd42858 --- /dev/null +++ b/ansible/oneOffScripts/removeOldRsyslogConfigs.yml 
@@ -0,0 +1,23 @@ +- hosts: "{{ host }}" + gather_facts: no + tasks: + - fail: msg="`host` needs to be defined for this role" + when: host is not defined + - fail: msg="`name` needs to be defined for this role" + when: name is not defined + + - name: stop service + become: true + service: + name=rsyslog + state=stopped + + - name: remove files + become: true + file: + path="{{ item }}" + state=absent + with_items: + - /etc/rsyslog.d/21-filemonitoring-{{ name }}.conf + - /etc/rsyslog.d/21-rotated-{{ name }}.conf + - /etc/rsyslog.d/15-loggly-{{ name }}.conf diff --git a/ansible/oneOffScripts/reset-registry.sh.j2 b/ansible/oneOffScripts/reset-registry.sh.j2 new file mode 100644 index 00000000..49a2c12a --- /dev/null +++ b/ansible/oneOffScripts/reset-registry.sh.j2 @@ -0,0 +1,29 @@ +#!/bin/bash +# PLAYBOOK DOCUMENTATION: https://github.com/CodeNow/devops-scripts/wiki/Reset-Customer-Local-Registry +set -e + +export DOCK_INIT_BASE="/opt/runnable/dock-init" +export CONSUL_HOSTNAME="{{ consul_host_address }}" +export CONSUL_PORT="8500" +export ORG_ID="$(cat /etc/default/docker | perl -n -e'/org=(\d+)/ && print $1')" +export VAULT_TOKEN="{{ vault_token }}" + +REGISTRY_STATUS=$(curl localhost/debug/health) +if [[ "$REGISTRY_STATUS" == "404 page not found" ]]; then + echo "do not need to fix registry" + exit 0 +fi + +CURRENT_REGISTRY=$(docker ps | awk '/bin\/registry/{ print $1 }') +docker stop -t 5 "$CURRENT_REGISTRY" +docker rm "$CURRENT_REGISTRY" + +source "$DOCK_INIT_BASE/lib/container.sh" + +echo "starting container" +container::_start_registry_container + +sleep 5 + +echo "checking container" +curl localhost/debug/health diff --git a/ansible/oneOffScripts/resetRegistry.yml b/ansible/oneOffScripts/resetRegistry.yml new file mode 100644 index 00000000..44d810bc --- /dev/null +++ b/ansible/oneOffScripts/resetRegistry.yml @@ -0,0 +1,27 @@ +--- +# DOCUMENTATION https://github.com/CodeNow/devops-scripts/wiki/Reset-Customer-Local-Registry +- hosts: consul + +- hosts: docks + gather_facts: no + tasks: + - fail: msg="`vault_token` needs to be defined for this role" + when: vault_token is not defined + + - fail: msg="`consul_host_address` needs to be defined for this role" + when: consul_host_address is not defined + + - name: put script on machine + become: yes + template: + dest: /tmp/reset-registry.sh + src: reset-registry.sh.j2 + mode: 0550 + + - name: run script + become: yes + command: /tmp/reset-registry.sh + + - name: remove script + become: yes + command: shred -u /tmp/reset-registry.sh diff --git a/ansible/oneOffScripts/resetWeave.yml b/ansible/oneOffScripts/resetWeave.yml new file mode 100644 index 00000000..ef8ebe32 --- /dev/null +++ b/ansible/oneOffScripts/resetWeave.yml @@ -0,0 +1,6 @@ +- hosts: docks + gather_facts: no + tasks: + - name: reset weave + become: true + shell: 'sudo weave reset' diff --git a/ansible/oneOffScripts/restartServiceOnAllHosts.yml b/ansible/oneOffScripts/restartServiceOnAllHosts.yml new file mode 100644 index 00000000..f14b9d43 --- /dev/null +++ b/ansible/oneOffScripts/restartServiceOnAllHosts.yml @@ -0,0 +1,12 @@ +--- +- hosts: all + gather_facts: no + tasks: + - fail: msg="`name` needs to be defined for this role" + when: name is not defined + + - name: restart {{ name }} + become: true + service: + name={{ name }} + state=restarted diff --git a/ansible/oneOffScripts/runSingleRole.yml b/ansible/oneOffScripts/runSingleRole.yml new file mode 100644 index 00000000..e8536afe --- /dev/null +++ b/ansible/oneOffScripts/runSingleRole.yml @@ -0,0 +1,5 @@ +--- +- hosts: "{{ 
host }}" + gather_facts: no + roles: + - { role: "../roles/{{ role }}" } diff --git a/ansible/oneOffScripts/set-ssh-key.yml b/ansible/oneOffScripts/set-ssh-key.yml new file mode 100644 index 00000000..1fa77239 --- /dev/null +++ b/ansible/oneOffScripts/set-ssh-key.yml @@ -0,0 +1,12 @@ +--- +- name: Rotate SSH Keys + hosts: all + gather_facts: no + vars: + - new_pub_key: "{{ pub_key_path }}.pub" + - new_priv_key: "{{ pub_key_path }}.pem" + + tasks: + - name: Make our new key exclusive + authorized_key: key="{{ lookup('file', new_pub_key) }}" + user=ubuntu state=present exclusive=yes diff --git a/ansible/optimus.yml b/ansible/optimus.yml new file mode 100644 index 00000000..ba2d8e33 --- /dev/null +++ b/ansible/optimus.yml @@ -0,0 +1,14 @@ +--- +- hosts: redis +- hosts: consul + +- hosts: optimus + vars_files: + - group_vars/alpha-optimus.yml + roles: + - role: notify + rollbar_token: "{{ optimus_rollbar_token }}" + tags: [ notify ] + - { role: redis_key, tags: [ setup, redis_key ] } + - { role: builder, tags: [ build ] } + - { role: container_start } diff --git a/ansible/package.json b/ansible/package.json new file mode 100644 index 00000000..39adf67a --- /dev/null +++ b/ansible/package.json @@ -0,0 +1,25 @@ +{ + "name": "ansible", + "version": "0.0.1", + "description": "ansible", + "main": "index.js", + "directories": { + "test": "test" + }, + "scripts": { + "test": "mocha test" + }, + "repository": { + "type": "git", + "url": "http://github.com/CodeNow/devops-scripts" + }, + "author": "", + "license": "BSD-2-Clause", + "bugs": { + "url": "https://github.com/CodeNow/devops-scripts/issues" + }, + "homepage": "https://github.com/CodeNow/devops-scripts", + "dependencies": { + "aws-sdk": "^2.1.48" + } +} diff --git a/ansible/palantiri.yml b/ansible/palantiri.yml new file mode 100644 index 00000000..4ac880a3 --- /dev/null +++ b/ansible/palantiri.yml @@ -0,0 +1,15 @@ +--- +- hosts: rabbitmq +- hosts: consul +- hosts: swarm-manager + +- hosts: palantiri + vars_files: + - group_vars/alpha-palantiri.yml + roles: + - role: notify + rollbar_token: "{{ palantiri_rollbar_token }}" + tags: [ notify ] + - { role: docker_client } + - { role: builder, tags: [ build ] } + - { role: container_kill_start } diff --git a/ansible/pheidi.yml b/ansible/pheidi.yml new file mode 100644 index 00000000..3eaa4ff5 --- /dev/null +++ b/ansible/pheidi.yml @@ -0,0 +1,15 @@ +--- +- hosts: consul +- hosts: mongodb +- hosts: rabbitmq + +- hosts: pheidi + vars_files: + - group_vars/alpha-pheidi.yml + roles: + - role: notify + rollbar_token: "{{ pheidi_rollbar_token }}" + tags: [ notify ] + - { role: builder, tags: [build] } + - { role: tls-client, tls_service: mongodb, tags: [ tls ] } + - { role: container_kill_start } diff --git a/ansible/prometheus-alerts.yml b/ansible/prometheus-alerts.yml new file mode 100644 index 00000000..f325bb0a --- /dev/null +++ b/ansible/prometheus-alerts.yml @@ -0,0 +1,8 @@ +--- +- hosts: prometheus + vars_files: + - "group_vars/alpha-prometheus-alerts.yml" + roles: + - { role: notify, tags: "notify" } + - { role: prometheus-alerts } + - { role: container_kill_start } diff --git a/ansible/prometheus.yml b/ansible/prometheus.yml new file mode 100644 index 00000000..8cc44048 --- /dev/null +++ b/ansible/prometheus.yml @@ -0,0 +1,8 @@ +--- +- hosts: prometheus + vars_files: + - "group_vars/alpha-prometheus.yml" + roles: + - { role: notify, tags: "notify" } + - { role: prometheus } + - { role: container_kill_start } diff --git a/ansible/rabbitmq.yml b/ansible/rabbitmq.yml new file mode 100644 index 
00000000..fa3cec4a --- /dev/null +++ b/ansible/rabbitmq.yml @@ -0,0 +1,10 @@ +--- +- hosts: rabbitmq + vars_files: + - "group_vars/alpha-rabbitmq.yml" + roles: + - { role: notify, tags: "notify" } + - { role: database } + - { role: rabbitmq } + - { role: container_kill_start } + - { role: datadog, tags: "datadog" } diff --git a/ansible/redis-slave.yml b/ansible/redis-slave.yml new file mode 100644 index 00000000..e1acede7 --- /dev/null +++ b/ansible/redis-slave.yml @@ -0,0 +1,10 @@ +--- +- hosts: redis +- hosts: redis-slave + vars_files: + - "group_vars/alpha-redis.yml" + roles: + - { role: notify, tags: "notify" } + - { role: database } + - { role: container_kill_start, + container_run_args: "redis-server --slaveof {{ redis_host_address }} 6379" } diff --git a/ansible/redis-tls.yml b/ansible/redis-tls.yml new file mode 100644 index 00000000..8fc2c6f1 --- /dev/null +++ b/ansible/redis-tls.yml @@ -0,0 +1,8 @@ +--- +- hosts: redis + vars_files: + - group_vars/alpha-redis-tls.yml + roles: + - { role: notify, tags: [ notify ] } + - { role: redis-tls } + - { role: container_kill_start } diff --git a/ansible/redis.yml b/ansible/redis.yml new file mode 100644 index 00000000..41f5af90 --- /dev/null +++ b/ansible/redis.yml @@ -0,0 +1,8 @@ +--- +- hosts: redis + vars_files: + - "group_vars/alpha-redis.yml" + roles: + - { role: notify, tags: "notify" } + - { role: database } + - { role: container_kill_start } diff --git a/ansible/registrator-api.yml b/ansible/registrator-api.yml new file mode 100644 index 00000000..030fce43 --- /dev/null +++ b/ansible/registrator-api.yml @@ -0,0 +1,11 @@ +--- +- hosts: consul + +- hosts: api + vars_files: + - group_vars/alpha-registrator.yml + roles: + - role: notify + tags: notify + + - role: container_kill_start diff --git a/ansible/registrator-navi.yml b/ansible/registrator-navi.yml new file mode 100644 index 00000000..04b58726 --- /dev/null +++ b/ansible/registrator-navi.yml @@ -0,0 +1,11 @@ +--- +- hosts: consul + +- hosts: navi + vars_files: + - group_vars/alpha-registrator.yml + roles: + - role: notify + tags: notify + + - role: container_kill_start diff --git a/ansible/registry.yml b/ansible/registry.yml new file mode 100644 index 00000000..aa2d95e2 --- /dev/null +++ b/ansible/registry.yml @@ -0,0 +1,6 @@ +--- +- hosts: registry + vars_files: + - "group_vars/alpha-registry.yml" + roles: + - { role: container_kill_start } diff --git a/ansible/roles/apt_update/tasks/main.yml b/ansible/roles/apt_update/tasks/main.yml new file mode 100644 index 00000000..36fd4b1f --- /dev/null +++ b/ansible/roles/apt_update/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: update cache for apt repository + become: true + apt: + update_cache=yes diff --git a/ansible/roles/apt_upgrade/tasks/main.yml b/ansible/roles/apt_upgrade/tasks/main.yml new file mode 100644 index 00000000..77cd827e --- /dev/null +++ b/ansible/roles/apt_upgrade/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: upgrade a server + become: true + apt: + upgrade=full diff --git a/ansible/roles/base_ubuntu/README.md b/ansible/roles/base_ubuntu/README.md new file mode 100644 index 00000000..92ca70fa --- /dev/null +++ b/ansible/roles/base_ubuntu/README.md @@ -0,0 +1,10 @@ +Role Name +======== + +Ansible Role to Install ubuntu base deps + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/base_ubuntu/tasks/main.yml b/ansible/roles/base_ubuntu/tasks/main.yml new file mode 100644 index 00000000..ee624a85 --- /dev/null +++ b/ansible/roles/base_ubuntu/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- 
name: ensure registry.runnable in /etc/hosts + become: true + when: dock is not defined + lineinfile: + dest=/etc/hosts + line="{{ registry_host }} registry.runnable.com" + state=present + regexp=".+ registry\.runnable\.com" diff --git a/ansible/roles/bash_aliases/tasks/main.yml b/ansible/roles/bash_aliases/tasks/main.yml new file mode 100644 index 00000000..cbd9b465 --- /dev/null +++ b/ansible/roles/bash_aliases/tasks/main.yml @@ -0,0 +1,9 @@ +--- +- name: copy bash aliases to ubuntu + tags: [ loggly, bash_aliases ] + template: + src=dot_bash_aliases.sh.j2 + dest=/home/ubuntu/.bash_aliases + owner=ubuntu + group=ubuntu + mode=0700 diff --git a/ansible/roles/bash_aliases/templates/dot_bash_aliases.sh.j2 b/ansible/roles/bash_aliases/templates/dot_bash_aliases.sh.j2 new file mode 100644 index 00000000..84eb9c94 --- /dev/null +++ b/ansible/roles/bash_aliases/templates/dot_bash_aliases.sh.j2 @@ -0,0 +1,51 @@ +# Follows the logfile for a given app_name interpolating the datetime string into the logpath (/var/log/runnable/YYYY/MM/DD/HH/<app_name>.log) +# Usage: logtail <app_name> + +logpath() { + local app_name="$1" + local datetime=`date +%Y/%m/%d/%H` + local app_log_dir="{{ app_log_dir }}" + echo "${app_log_dir}/${datetime}/${app_name}.log" +} + +logtail() { + local app_name="$1" + tail -f "$(logpath ${app_name})" | bunyan +} + +# Follows the logfile for a given app_name interpolating the datetime string into the logpath (/var/log/runnable/YYYY/MM/DD/HH/<app_name>.log) +# Usage: lograw <app_name> +lograw() { + local app_name="$1" + tail -f "$(logpath ${app_name})" +} + +# Just display the last few lines of a log (2nd arg specifies # of lines or tail default if left blank) +# Usage: loglast <app_name> [ <#_of_lines> ] +loglast() { + local app_name="$1" + local tailopts="" + # do not check contents of $2, just if exists, escape jinja2 keyword. + if [ 2 -eq "{{ '${#}' }}" ] ; then + tailopts="-${2}" + fi + tail "${tailopts}" "$(logpath ${app_name})" | bunyan +} + +# Follow a log, grep for "${regexp}" +# Usage: greplog <app_name> <regexp> +greplog() { + local app_name="$1" + local regexp="" + # again, not checking arg2, just making sure it exists, and escape jinja2 keyword. + if [ 2 -eq "{{ '${#}' }}" ] ; then + regexp="${2}" + fi + tail -f "$(logpath ${app_name})" | grep "${regexp}" | bunyan +} + +# Outputs contents of an npm start log for <app_name>, if it exists, into a pager for reading. 
+# Usage: npmlog +npmlog() { + less "$(logpath ${app_name})" +} diff --git a/ansible/roles/bastion_sshd/tasks/main.yml b/ansible/roles/bastion_sshd/tasks/main.yml new file mode 100644 index 00000000..5c3c80d8 --- /dev/null +++ b/ansible/roles/bastion_sshd/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Install Bastion SSHd Configuration + tags: bastion-sshd + become: yes + template: + src=sshd_config.j2 + dest=/etc/ssh/sshd_config + owner=root + group=root + mode=444 + +- name: Restart SSHd + tags: bastion-sshd + become: yes + service: + name=ssh + state=restarted diff --git a/ansible/roles/bastion_sshd/templates/sshd_config.j2 b/ansible/roles/bastion_sshd/templates/sshd_config.j2 new file mode 100644 index 00000000..6620a031 --- /dev/null +++ b/ansible/roles/bastion_sshd/templates/sshd_config.j2 @@ -0,0 +1,90 @@ +# Package generated configuration file +# See the sshd_config(5) manpage for details + +# What ports, IPs and protocols we listen for +# - always use 22 as a backup, block via SG +Port 22 +Port {{ bastion_sshd_port }} +# Use these options to restrict which interfaces/protocols sshd will bind to +#ListenAddress :: +#ListenAddress 0.0.0.0 +Protocol 2 +# HostKeys for protocol version 2 +HostKey /etc/ssh/ssh_host_rsa_key +HostKey /etc/ssh/ssh_host_dsa_key +HostKey /etc/ssh/ssh_host_ecdsa_key +HostKey /etc/ssh/ssh_host_ed25519_key +#Privilege Separation is turned on for security +UsePrivilegeSeparation yes + +# Lifetime and size of ephemeral version 1 server key +KeyRegenerationInterval 3600 +ServerKeyBits 1024 + +# Logging +SyslogFacility AUTH +LogLevel INFO + +# Authentication: +LoginGraceTime 120 +PermitRootLogin no +StrictModes yes + +RSAAuthentication yes +PubkeyAuthentication yes +#AuthorizedKeysFile %h/.ssh/authorized_keys + +# Don't read the user's ~/.rhosts and ~/.shosts files +IgnoreRhosts yes +# For this to work you will also need host keys in /etc/ssh_known_hosts +RhostsRSAAuthentication no +# similar for protocol version 2 +HostbasedAuthentication no +# Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication +#IgnoreUserKnownHosts yes + +# To enable empty passwords, change to yes (NOT RECOMMENDED) +PermitEmptyPasswords no + +# Change to yes to enable challenge-response passwords (beware issues with +# some PAM modules and threads) +ChallengeResponseAuthentication no + +# Change to no to disable tunnelled clear text passwords +PasswordAuthentication no + +# Kerberos options +#KerberosAuthentication no +#KerberosGetAFSToken no +#KerberosOrLocalPasswd yes +#KerberosTicketCleanup yes + +# GSSAPI options +#GSSAPIAuthentication no +#GSSAPICleanupCredentials yes + +X11Forwarding yes +X11DisplayOffset 10 +PrintMotd no +PrintLastLog yes +TCPKeepAlive yes +#UseLogin no + +#MaxStartups 10:30:60 +#Banner /etc/issue.net + +# Allow client to pass locale environment variables +AcceptEnv LANG LC_* + +Subsystem sftp /usr/lib/openssh/sftp-server + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the ChallengeResponseAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via ChallengeResponseAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and ChallengeResponseAuthentication to 'no'. 
+UsePAM yes diff --git a/ansible/roles/build_essential/tasks/main.yml b/ansible/roles/build_essential/tasks/main.yml new file mode 100644 index 00000000..852c4429 --- /dev/null +++ b/ansible/roles/build_essential/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: install build essentials + become: yes + action: apt + pkg=build-essential + state=latest + update_cache=yes + cache_valid_time=604800 diff --git a/ansible/roles/build_with_dockerfile/defaults/main.yml b/ansible/roles/build_with_dockerfile/defaults/main.yml new file mode 100644 index 00000000..90b7599d --- /dev/null +++ b/ansible/roles/build_with_dockerfile/defaults/main.yml @@ -0,0 +1,2 @@ +--- +build_dir: /opts/builds/docker_build diff --git a/ansible/roles/build_with_dockerfile/meta/main.yml b/ansible/roles/build_with_dockerfile/meta/main.yml new file mode 100644 index 00000000..4a1f6483 --- /dev/null +++ b/ansible/roles/build_with_dockerfile/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: docker } diff --git a/ansible/roles/build_with_dockerfile/tasks/main.yml b/ansible/roles/build_with_dockerfile/tasks/main.yml new file mode 100644 index 00000000..d61c1d15 --- /dev/null +++ b/ansible/roles/build_with_dockerfile/tasks/main.yml @@ -0,0 +1,33 @@ +--- +# commands to build an image from a repo that contains a Dockerfile +- name: Ensure Tag Deploy For Prod + tags: deploy + when: not git_branch | match("^v([0-9]+)\.([0-9]+)\.([0-9]+)$") and node_env=="production-delta" + fail: msg="only tag can be deployed on prod not {{ git_branch }}" + +- name: create build folder + become: true + file: + path: "{{ build_dir }}/{{ name }}" + state: directory + +- name: pull the git repository + tags: deploy + become: true + git: + repo: "{{ repo }}" + dest: "{{ build_dir }}/{{ name }}/repo" + version: "{{ git_branch }}" + update: yes + accept_hostkey: yes + force: yes + +- name: build docker image and tag + tags: deploy + become: yes + command: docker build {{ build_args | default("") }} --tag="{{ container_image }}:{{ container_tag }}" "{{ build_dir }}/{{ name }}/repo" + +- name: push docker image + become: yes + when: not do_not_push + command: docker push {{ container_image }}:{{ container_tag }} diff --git a/ansible/roles/builder/defaults/main.yml b/ansible/roles/builder/defaults/main.yml new file mode 100644 index 00000000..ad8e029c --- /dev/null +++ b/ansible/roles/builder/defaults/main.yml @@ -0,0 +1,3 @@ +--- +build_dir: /opts/builds/docker_build +npm_start_command: start diff --git a/ansible/roles/builder/meta/main.yml b/ansible/roles/builder/meta/main.yml new file mode 100644 index 00000000..4a1f6483 --- /dev/null +++ b/ansible/roles/builder/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: docker } diff --git a/ansible/roles/builder/tasks/main.yml b/ansible/roles/builder/tasks/main.yml new file mode 100644 index 00000000..b747286c --- /dev/null +++ b/ansible/roles/builder/tasks/main.yml @@ -0,0 +1,61 @@ +--- +# commands to build an image +# +- name: Ensure Tag Deploy For Prod + tags: deploy + when: not git_branch | match("^v([0-9]+)\.([0-9]+)\.([0-9]+)$") and node_env=="production-delta" + fail: msg="only tag can be deployed on prod not {{ git_branch }}" + +- name: create build folder + become: true + file: + path: "{{ build_dir }}/{{ name }}" + state: directory + +- name: pull the git repository + tags: deploy + become: true + git: + repo: "{{ repo }}" + dest: "{{ build_dir }}/{{ name }}/repo" + version: "{{ git_branch }}" + update: yes + accept_hostkey: yes + force: yes + +- name: get new tags from remote + tags: 
deploy + become: true + shell: "git fetch --tags" + args: + chdir: "{{ build_dir }}/{{ name }}/repo" + +- name: get latest tag name + tags: deploy + become: true + shell: "git describe --tags `git rev-list --tags --max-count=1`" + args: + chdir: "{{ build_dir }}/{{ name }}/repo" + register: latest_tag + +- name: ensure latest tag is deployed + tags: deploy + fail: msg="Cannot deploy {{ git_branch }} because latest is {{latest_tag.stdout}}. Bypass with `-t i_am_deploying_an_old_tag`" + when: node_env=="production-delta" and latest_tag.stdout != git_branch and i_am_deploying_an_old_tag is not defined + +- name: copy dockerfile to build folder + tags: deploy + become: true + template: + src: "{{ dockerfile }}" + dest: "{{ build_dir }}/{{ name }}" + +- name: build docker image and tag + tags: deploy + become: yes + command: docker build {{ build_args | default("") }} --tag="{{ container_image }}:{{ container_tag }}" "{{ build_dir }}/{{ name }}" + +- name: push docker image + become: yes + when: not do_not_push + command: docker push {{ container_image }}:{{ container_tag }} diff --git a/ansible/roles/builder/templates/basic_node/Dockerfile b/ansible/roles/builder/templates/basic_node/Dockerfile new file mode 100644 index 00000000..594ddb5f --- /dev/null +++ b/ansible/roles/builder/templates/basic_node/Dockerfile @@ -0,0 +1,48 @@ +FROM registry.runnable.com/runnable/{{ base_dockerfile }}:latest + +{% if hosted_ports is defined %} +# Expose port to Host +EXPOSE {% for hosted_port in hosted_ports %}{{ hosted_port }} {% endfor %} +{% endif %} + +{% if dockerfile_enviroment is defined %} +# Envs +{% for env in dockerfile_enviroment %} +ENV {{ env }} +{% endfor %} +{% endif %} + +# setup node and npm versions +RUN npm install -g n@2.1.0 +RUN n {{ node_version }} && npm install -g npm@{{ npm_version }} + +{% if dockerfile_pre_install_commands is defined %} +{% for command in dockerfile_pre_install_commands %} +RUN {{ command }} +{% endfor %} +{% endif %} + +# Add package.json from the current build context (`.` is the repo) second +ADD ./repo/package.json /{{ name }}/package.json + +# Add shrinkwrap from the current build context (`.` is the repo) first +# If we change a non first-level dependency, this will break cache as expected +{% if has_shrinkwrap is defined %} +ADD ./repo/npm-shrinkwrap.json /{{ name }}/npm-shrinkwrap.json +{% endif %} + +# install, should will skip if no package.json change +WORKDIR /{{ name }} +RUN npm install --production + +# move the current build context (`.` is the repo) to /{{ name }} +ADD ./repo /{{ name }} + +{% if dockerfile_post_install_commands is defined %} +{% for command in dockerfile_post_install_commands %} +RUN {{ command }} +{% endfor %} +{% endif %} + +# Define default command. +CMD ulimit -c unlimited && /usr/local/bin/npm {{ npm_start_command | default('start') }} diff --git a/ansible/roles/builder/templates/node_base/Dockerfile b/ansible/roles/builder/templates/node_base/Dockerfile new file mode 100644 index 00000000..a8501822 --- /dev/null +++ b/ansible/roles/builder/templates/node_base/Dockerfile @@ -0,0 +1,28 @@ +# +# node_base Dockerfile +# +# Pull base image. 
+FROM registry.runnable.com/runnable/base:latest + +# Node.js @ 0.10.28 +# npm @ 2.1.18 +# curl +# lsof +# sass +# ruby + +WORKDIR / + +ENV PATH=./node_modules/.bin:$PATH +# HACK FOR SASS +ENV LC_ALL en_US.UTF-8 + +RUN apt-get update && \ + apt-get install -y curl && \ + curl -sL https://deb.nodesource.com/setup | sudo bash - && \ + apt-get install -y nodejs lsof ruby && \ + locale-gen en_US.UTF-8 && \ + gem install sass && \ + npm install -g n && \ + n 0.10.28 && \ + npm install -g npm@2.1.18 diff --git a/ansible/roles/consul-services/tasks/main.yml b/ansible/roles/consul-services/tasks/main.yml new file mode 100644 index 00000000..26458199 --- /dev/null +++ b/ansible/roles/consul-services/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: make /etc/consul.d folder + become: true + file: + path=/etc/consul.d + state=directory + +- name: remove all current configs + become: true + shell: rm -f /etc/consul.d/*.json + +- name: put service files in place + become: true + run_once: true + template: + dest=/etc/consul.d/{{ item.name }}.json + src=service.json + with_items: + - name: 'datadog' + host_address: '{{ datadog_host_address }}' + tags: ['master'] + port: '{{ datadog_port }}' + - name: 'rabbitmq' + host_address: '{{ rabbit_host_address }}' + tags: ['master'] + port: '{{ rabbit_port }}' + - name: 'redis' + host_address: '{{ redis_host_address }}' + tags: ['master'] + port: '{{ redis_port }}' + - name: 'registry' + host_address: '{{ registry_host }}' + tags: ['master'] + port: '{{ registry_port }}' + +- name: send consul SIGUP to reload services + become: true + shell: pkill --signal SIGHUP consul diff --git a/ansible/roles/consul-services/templates/service.json b/ansible/roles/consul-services/templates/service.json new file mode 100644 index 00000000..c875e3c5 --- /dev/null +++ b/ansible/roles/consul-services/templates/service.json @@ -0,0 +1,8 @@ +{ + "service": { + "name": "{{ item.name }}", + "tags": [ "{{ item.tags | join('","') }}" ], + "address": "{{ item.host_address }}", + "port": {{ item.port }} + } +} diff --git a/ansible/roles/consul/meta/main.yml b/ansible/roles/consul/meta/main.yml new file mode 100644 index 00000000..bb817eb0 --- /dev/null +++ b/ansible/roles/consul/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: tls-server-cert } diff --git a/ansible/roles/consul/tasks/main.yml b/ansible/roles/consul/tasks/main.yml new file mode 100644 index 00000000..d759b59c --- /dev/null +++ b/ansible/roles/consul/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: create configuration directory + become: yes + file: + path: /opt/runnable/consul + state: directory + recurse: yes + +- name: create server tls certificate directory + become: yes + file: + path: /opt/consul/server + state: directory + recurse: yes + +- name: install server certificates (3) + become: yes + copy: + content: "{{ item.content }}" + dest: /opt/consul/server/{{ item.file }} + mode: 0400 + owner: root + group: root + with_items: + - file: ca.pem + content: "{{ new_certs.data.issuing_ca }}" + - file: cert.pem + content: "{{ new_certs.data.certificate }}" + - file: key.pem + content: "{{ new_certs.data.private_key }}" + +- name: copy vault config + become: yes + template: + src: consul.json.j2 + dest: /opt/runnable/consul/consul.json + +- name: add datadog monitoring + become: true + tags: datadog + template: + src: datadog-consul.yaml.j2 + dest: /etc/dd-agent/conf.d/consul.yaml + mode: 0444 + owner: root + group: root + +- name: restart datadog agent + become: true + tags: datadog + service: + name: 
datadog-agent + state: restarted diff --git a/ansible/roles/consul/templates/consul.json.j2 b/ansible/roles/consul/templates/consul.json.j2 new file mode 100644 index 00000000..5516b137 --- /dev/null +++ b/ansible/roles/consul/templates/consul.json.j2 @@ -0,0 +1,32 @@ +{ + "advertise_addr": "{{ ansible_default_ipv4.address }}", + "bind_addr": "0.0.0.0", + {% if consul_host_address == ansible_default_ipv4.address %} + "bootstrap_expect": {{ groups['consul'] | length }}, + {% endif %} + "ca_file": "/opt/consul/server/ca.pem", + "cert_file": "/opt/consul/server/cert.pem", + "client_addr": "0.0.0.0", + "data_dir": "/data", + "key_file": "/opt/consul/server/key.pem", + "log_level": "info", + "node_name": "{{ inventory_hostname }}", + "ports": { + "https": {{ consul_https_port }} + }, + "recursors": [ + "8.8.8.8" + ], + "dogstatsd_addr": "{{ ansible_default_ipv4.address }}:{{ datadog_port }}", + {% if consul_host_address != ansible_default_ipv4.address %} + "retry_join": [ + "{{ consul_host_address }}" + ], + {% endif %} + "server": true, + {% if consul_host_address == ansible_default_ipv4.address %} + "ui_dir": "/ui", + {% endif %} + "verify_incoming": true, + "verify_outgoing": true +} diff --git a/ansible/roles/consul/templates/datadog-consul.yaml.j2 b/ansible/roles/consul/templates/datadog-consul.yaml.j2 new file mode 100644 index 00000000..6ac41458 --- /dev/null +++ b/ansible/roles/consul/templates/datadog-consul.yaml.j2 @@ -0,0 +1,22 @@ +init_config: + +instances: + # Where your Consul HTTP Server Lives + - url: http://{{ ansible_default_ipv4.address }}:8500 + + # Whether to perform checks against the Consul service Catalog + # catalog_checks: yes + + # Whether to enable new leader checks from this agent + # Note: if this is set on multiple agents in the same cluster + # you will receive one event per leader change per agent + new_leader_checks: {% if consul_host_address == ansible_default_ipv4.address %}yes{% else %}no{% endif %} + + # Services to restrict catalog querying to + # The default settings query up to 50 services. 
So if you have more than + # this many in your Consul service catalog, you will want to fill in the + # whitelist + # service_whitelist: + # - zookeeper + # - haproxy + # - redis diff --git a/ansible/roles/consul_value/tasks/main.yml b/ansible/roles/consul_value/tasks/main.yml new file mode 100644 index 00000000..e750812f --- /dev/null +++ b/ansible/roles/consul_value/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Debug role + when: debug_info is defined + debug: + msg="Consul Server {{ consul_host_address }}:{{ consul_api_port }}" + +- name: make sure httplib2 is installed + run_once: true + become: true + apt: + package=python-httplib2 + state=present + update_cache=yes + cache_valid_time=604800 + +- name: put values into consul + tags: deploy + run_once: true + when: consul_host_address is defined and consul_api_port is defined and consul_values is defined and dock is not defined + uri: + method=PUT + url=http://{{ consul_host_address }}:{{ consul_api_port }}/v1/kv/{{ item.key }} + body="{{ item.value }}" + with_items: "{{ consul_values }}" diff --git a/ansible/roles/container_kill_start/README.md b/ansible/roles/container_kill_start/README.md new file mode 100644 index 00000000..85ed4801 --- /dev/null +++ b/ansible/roles/container_kill_start/README.md @@ -0,0 +1,10 @@ +Role Name +======== + +Ansible Role to Install base_centos deps + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/container_kill_start/defaults/main.yml b/ansible/roles/container_kill_start/defaults/main.yml new file mode 100644 index 00000000..9701f0e1 --- /dev/null +++ b/ansible/roles/container_kill_start/defaults/main.yml @@ -0,0 +1,2 @@ +--- +container_tag: latest diff --git a/ansible/roles/container_kill_start/files/findTagRunning.sh b/ansible/roles/container_kill_start/files/findTagRunning.sh new file mode 100755 index 00000000..1cfaee40 --- /dev/null +++ b/ansible/roles/container_kill_start/files/findTagRunning.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +IMAGE_NAME="$1" +CONTAINERS=`docker ps -a | grep -v '^CONTAINER' | awk '{print $1}'` + +if [ "" = "${CONTAINERS}" ] ; then + exit 0 +else + for container in ${CONTAINERS} ; do + docker inspect "${container}" 2>/dev/null| grep -q '"Image": "'"${IMAGE_NAME}": > /dev/null 2>&1 + if [ ${?} -eq 0 ] ; then + if [ -z "${RUNNING_CONTAINERS}" ] ; then + RUNNING_CONTAINERS="${container}" + else + RUNNING_CONTAINERS="${RUNNING_CONTAINERS} ${container}" + fi + fi + done +fi + +if [ ! 
-z "${RUNNING_CONTAINERS}" ] ; then + echo "${RUNNING_CONTAINERS//['\t\r\n']}" +fi diff --git a/ansible/roles/container_kill_start/files/normalize.sh b/ansible/roles/container_kill_start/files/normalize.sh new file mode 100755 index 00000000..d1a68a0f --- /dev/null +++ b/ansible/roles/container_kill_start/files/normalize.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# $1 should be image id to keep $2 = image name +for CONT in `sudo docker ps --no-trunc -q ` +do + IMAGE_NAME=`sudo docker inspect $CONT | grep Image | grep $2` + if [[ $IMAGE_NAME ]]; then + IMAGE_ID=`sudo docker inspect $CONT | grep Image | grep -v $2 | awk -F\" '{print $4}'` + if [[ "$IMAGE_ID" != "$1" ]]; then + echo "kill" + sudo docker kill $CONT + sudo docker rm $CONT + fi + fi +done \ No newline at end of file diff --git a/ansible/roles/container_kill_start/handlers/main.yml b/ansible/roles/container_kill_start/handlers/main.yml new file mode 100644 index 00000000..5572ad47 --- /dev/null +++ b/ansible/roles/container_kill_start/handlers/main.yml @@ -0,0 +1,14 @@ +--- +- name: get new container ports + tags: deploy + when: hosted_ports is defined + become: yes + shell: docker port {{new_container_id.stdout}} {{ hosted_ports[0] }} | awk --field-separator ':' '{print $2}' + register: container_ports + +# this assumes only one container is running, ever +- name: update redis key + tags: deploy + when: container_ports is defined and is_redis_update_required is defined + become: yes + command: docker run --rm redis redis-cli -h {{redis_host_address}} lset {{redis_key}} 1 {{hosted_protocol | default('http') }}://{{ansible_default_ipv4.address}}:{{container_ports.stdout}} diff --git a/ansible/roles/container_kill_start/meta/main.yml b/ansible/roles/container_kill_start/meta/main.yml new file mode 100644 index 00000000..d24b245d --- /dev/null +++ b/ansible/roles/container_kill_start/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { role: docker, tags: docker } + - { role: loggly } diff --git a/ansible/roles/container_kill_start/tasks/main.yml b/ansible/roles/container_kill_start/tasks/main.yml new file mode 100644 index 00000000..aba821e9 --- /dev/null +++ b/ansible/roles/container_kill_start/tasks/main.yml @@ -0,0 +1,85 @@ +- name: look for stopped {{ container_image }} containers + tags: deploy + become: yes + shell: > + docker ps -a | + grep Exited | + grep {{ container_image }} | + awk '{print $1}' + register: stopped_containers + changed_when: stopped_containers.stdout != '' + +- name: removed stopped {{ container_image }} containers + tags: deploy + become: yes + when: stopped_containers.changed + shell: docker rm {{ item }} + with_items: "{{ stopped_containers.stdout_lines }}" + +# get current running container with this image +- name: look for running containers running {{ container_image }} + tags: deploy + become: true + script: findTagRunning.sh {{ container_image }} + register: old_containers_id + changed_when: old_containers_id.stdout | length > 4 + +# get latest image +# tag will default to latest +- name: pull down docker image {{ container_image }}:{{ container_tag }} + tags: deploy + become: yes + command: docker pull {{ container_image }}:{{ container_tag }} + when: not do_not_push + +# get id of new image +- name: get id of the docker image + tags: deploy + become: yes + shell: > + docker images + --no-trunc | + grep {{ container_image }}.*{{ container_tag }} | + awk '{print $3}' + register: new_image_id + +# stop the old containers that were running this previously +- name: stop old containers + tags: deploy + 
when: old_containers_id.changed + become: yes + command: docker stop -t {{ stop_time }} {{ old_containers_id.stdout }} + +# remove the old containers that were running previously +- name: remove old containers + tags: deploy + when: old_containers_id.changed + become: yes + command: docker rm {{ old_containers_id.stdout }} + +- name: pause to allow for any odd conditions in system caching tables + tags: deploy + when: pause_length_minutes is defined and old_containers_id.changed + pause: minutes={{ pause_length_minutes }} + +# start our new container with options and args +- name: start container + tags: deploy + become: yes + command: | + docker run \ + --memory {{ memory_hard_limit | default ( "1g" ) }} \ + --log-driver={{ log_driver }} \ + {% if log_driver == "syslog" %} + --log-opt syslog-facility={{ log_facility }} \ + --log-opt tag="{{ log_tag | default ( name ) }}" \ + {% endif %} + -v {{ app_log_dir }}:{{ app_log_dir }}:rw \ + --restart={{ restart_policy | default('always') }} \ + {{ container_run_opts | trim }} \ + {{ container_image }}:{{ container_tag }} \ + {{ container_run_args | trim }} + register: new_container_id + notify: + - get new container ports + - update redis key diff --git a/ansible/roles/container_restart/handlers/main.yml b/ansible/roles/container_restart/handlers/main.yml new file mode 100644 index 00000000..c576eed5 --- /dev/null +++ b/ansible/roles/container_restart/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: clean up stopped containers + tags: [ deploy ] + become: true + shell: docker ps --filter status=exited -q | xargs docker rm diff --git a/ansible/roles/container_restart/meta/main.yml b/ansible/roles/container_restart/meta/main.yml new file mode 100644 index 00000000..cb7d8e04 --- /dev/null +++ b/ansible/roles/container_restart/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - role: docker diff --git a/ansible/roles/container_restart/tasks/main.yml b/ansible/roles/container_restart/tasks/main.yml new file mode 100644 index 00000000..200e57fe --- /dev/null +++ b/ansible/roles/container_restart/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: look for current container + become: yes + shell: docker ps | grep {{ docker_image }} || true + register: running_containers + changed_when: "running_containers.stdout_lines | length > 0" + tags: [ deploy ] + +- name: get version of running container + when: running_containers.changed + set_fact: + running_container_version: "{{ running_containers.stdout_lines[0] | split_regex('\\s+') | split_get_index(1) | split_regex(':') | split_get_index(1) }}" + running_container_id: "{{ running_containers.stdout_lines[0] | split_regex('\\s+') | split_get_index(0) }}" + tags: [ deploy ] + +- name: restart container if it is the same version + when: running_containers.changed and running_container_version == docker_image_version + become: yes + shell: docker {{ docker_restart_command | default('restart') }} {{ running_container_id }} + register: restarted_container + changed_when: "restarted_container.rc == 0" + tags: [ deploy ] + +- name: stop old container + when: force_stop is defined and running_container_id != '' or (running_containers.changed and running_container_version != docker_image_version) + become: yes + shell: docker stop {{ running_container_id }} + register: stopped_container + tags: [ deploy ] + notify: + - clean up stopped containers + +- name: start new container + when: force_stop is defined or (stopped_container.skipped and not running_containers.changed) + tags: [ deploy ] + become: yes + shell: docker 
run {{ docker_container_run_opts | trim }} {{ docker_image }}:{{ docker_image_version }} diff --git a/ansible/roles/container_start/README.md b/ansible/roles/container_start/README.md new file mode 100644 index 00000000..85ed4801 --- /dev/null +++ b/ansible/roles/container_start/README.md @@ -0,0 +1,10 @@ +Role Name +======== + +Ansible Role to pull a service image and start new containers for it + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/container_start/files/findTagRunning.sh b/ansible/roles/container_start/files/findTagRunning.sh new file mode 100755 index 00000000..5f7f98a0 --- /dev/null +++ b/ansible/roles/container_start/files/findTagRunning.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +IMAGE_NAME="$1" +CONTAINERS=`docker ps | grep -v '^CONTAINER' | awk '{print $1}'` + +if [ "" = "${CONTAINERS}" ] ; then + exit 0 +else + for container in ${CONTAINERS} ; do + docker inspect "${container}" | grep -q '"Image": "'"${IMAGE_NAME}": + if [ ${?} -eq 0 ] ; then + if [ -z "${RUNNING_CONTAINERS}" ] ; then + RUNNING_CONTAINERS="${container}" + else + RUNNING_CONTAINERS="${RUNNING_CONTAINERS} ${container}" + fi + fi + done +fi + +if [ ! -z "${RUNNING_CONTAINERS}" ] ; then + echo "${RUNNING_CONTAINERS}" +fi diff --git a/ansible/roles/container_start/files/normalize.sh b/ansible/roles/container_start/files/normalize.sh new file mode 100755 index 00000000..ac5521c4 --- /dev/null +++ b/ansible/roles/container_start/files/normalize.sh @@ -0,0 +1,15 @@ +#!/bin/bash +# $1 should be image id to keep $2 = image name +echo "ARGS" $1 $2 +for CONT in `sudo docker ps --no-trunc -q ` +do + IMAGE_NAME=`sudo docker inspect $CONT | grep Image | grep $2` + if [[ $IMAGE_NAME ]]; then + echo "image found $IMAGE_NAME" + IMAGE_ID=`sudo docker inspect $CONT | grep Image | grep -v $2 | awk -F\" '{print $4}'` + if [[ "$IMAGE_ID" != "$1" ]]; then + echo "stopping $IMAGE_ID does not match $1" + sudo docker kill $CONT + fi + fi +done \ No newline at end of file diff --git a/ansible/roles/container_start/handlers/main.yml b/ansible/roles/container_start/handlers/main.yml new file mode 100644 index 00000000..604c0d6f --- /dev/null +++ b/ansible/roles/container_start/handlers/main.yml @@ -0,0 +1,27 @@ +--- +- name: get new container ports + tags: deploy + when: hosted_ports is defined + become: yes + shell: docker port {{ item }} {{ hosted_ports[0] }} | awk --field-separator ':' '{ print $2 }' + register: container_ports + with_items: "{{ new_container_ids.stdout_lines }}" + +- name: remove all hosts from redis key + tags: deploy + become: yes + command: docker run --rm redis redis-cli -h {{ redis_host_address }} ltrim {{ redis_key }} 0 0 + when: is_redis_update_required is defined and container_ports is defined + +- name: put new hosts on redis keys + tags: deploy + become: yes + command: docker run --rm redis redis-cli -h {{ redis_host_address }} rpush {{ redis_key }} {{ hosted_protocol | default('http') }}://{{ ansible_default_ipv4.address }}:{{ item.stdout }} + when: is_redis_update_required is defined and container_ports is defined + with_items: "{{ container_ports.results }}" + +- name: stop old containers + tags: deploy + become: yes + command: docker stop --time={{ stop_time }} {{ item }} + with_items: "{{ old_containers_ids.stdout_lines }}" diff --git a/ansible/roles/container_start/meta/main.yml b/ansible/roles/container_start/meta/main.yml new file mode 100644 index 00000000..9bed9e9e --- /dev/null +++ b/ansible/roles/container_start/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { 
role: docker } + - { role: loggly } \ No newline at end of file diff --git a/ansible/roles/container_start/tasks/main.yml b/ansible/roles/container_start/tasks/main.yml new file mode 100644 index 00000000..4559a955 --- /dev/null +++ b/ansible/roles/container_start/tasks/main.yml @@ -0,0 +1,64 @@ +--- +# get current running container with this image +- name: "look for running containers running {{ container_image }}" + tags: deploy + become: true + script: findTagRunning.sh {{ container_image }} + register: old_containers_ids + changed_when: old_containers_ids.stdout != '' + +- name: get latest images + tags: deploy + when: not do_not_push + become: yes + command: docker pull {{ container_image }}:{{ container_tag }} + +- name: get id of latest image + tags: deploy + become: yes + shell: > + docker images --no-trunc | + grep {{ container_image }}.*{{ container_tag }} | + awk '{print $3}' + register: new_image_id + +- name: default number of containers + tags: deploy + when: number_of_containers is undefined + set_fact: + number_of_containers: 1 + +- name: print number of containers + tags: deploy + debug: + msg: starting this many containers -- {{ number_of_containers }} + +- name: start new container + tags: deploy + become: yes + shell: | + for i in {{ '{' }}1..{{ number_of_containers }}{{ '}' }}; do + docker run \ + {% if memory_hard_limit is defined %} + --memory {{ memory_hard_limit }} \ + {% endif %} + --log-driver={{ log_driver }} \ + {% if log_driver == "syslog" %} + --log-opt syslog-facility={{ log_facility }} \ + --log-opt tag="{{ log_tag | default ( name ) }}" \ + {% endif %} + -v {{ app_log_dir }}:{{ app_log_dir }}:rw \ + -v /dev/log:/dev/log:rw \ + --restart={{ restart_policy | default('no') }} \ + {{ container_run_opts | trim }} \ + {{ container_image }}:{{ container_tag }} \ + {{ container_run_args | trim }} + done + args: + executable: /bin/bash + register: new_container_ids + notify: + - get new container ports + - remove all hosts from redis key + - put new hosts on redis keys + - stop old containers diff --git a/ansible/roles/content-domain-proxy/tasks/main.yml b/ansible/roles/content-domain-proxy/tasks/main.yml new file mode 100644 index 00000000..808e65c5 --- /dev/null +++ b/ansible/roles/content-domain-proxy/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- name: make sure cert directory is in place + tags: [ configure_proxy, certs ] + become: true + file: + dest: /etc/ssl/certs/{{ user_content_domain }} + state: directory + +- name: put certs in place + tags: [ configure_proxy, certs ] + become: true + register: add_certs + copy: + src: "{{ user_content_domain }}/{{ item }}" + dest: /etc/ssl/certs/{{ user_content_domain }}/{{ item }} + mode: 0400 + owner: root + group: root + with_items: + - ca.pem + - key.pem + - cert.pem + +- name: create chained cert + tags: [ configure_proxy, certs ] + become: true + when: add_certs.changed + shell: > + cat + /etc/ssl/certs/{{ user_content_domain }}/cert.pem + /etc/ssl/certs/{{ user_content_domain }}/ca.pem + > + /etc/ssl/certs/{{ user_content_domain }}/chained.pem + +- name: create dhparam.pem + tags: [ configure_proxy, certs ] + become: yes + command: openssl dhparam -out /etc/ssl/certs/{{ user_content_domain }}/dhparam.pem 2048 + +- name: make sure nginx directory is in place + tags: [ configure_proxy ] + become: true + file: + dest: /etc/nginx + state: directory + +- name: put nginx configuration in place + tags: [ configure_proxy ] + become: yes + template: + src: proxy-nginx.conf + dest: /etc/nginx/nginx.conf + +- name: assert nginx 
template directory + tags: [ configure_proxy ] + become: yes + file: + state: directory + dest: /etc/nginx/template + +- name: put navi template in place + tags: [ configure_proxy ] + become: yes + template: + src: navi.tmpl + dest: /etc/nginx/template/navi.tmpl + +- name: setup ip table rule to redir all to port 80 + tags: [ configure_proxy, iptables ] + become: yes + iptables: + table=nat + chain=PREROUTING + in_interface=eth0 + protocol=tcp + destination_port={{ item }} + jump=REDIRECT + to_ports=80 + comment="Redirect {{ item }} traffic to port 80" + with_items: + - 81:442 + - 444:65535 diff --git a/ansible/roles/content-domain-proxy/templates/navi.tmpl b/ansible/roles/content-domain-proxy/templates/navi.tmpl new file mode 100644 index 00000000..dd803f29 --- /dev/null +++ b/ansible/roles/content-domain-proxy/templates/navi.tmpl @@ -0,0 +1,77 @@ +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +upstream {{ name }} { + {{ '{{' }}range service "{{ proxy_service_name }}"{{ '}}' }} server {{ '{{' }}.Address{{ '}}' }}:{{ '{{' }}.Port{{ '}}' }} max_fails=0 fail_timeout=1s; + {{ '{{' }}end{{ '}}' }} +} + +server { + listen 80; + client_max_body_size 200m; + server_name {{ user_content_domain }}; + access_log /var/log/nginx/{{ name }}.access.log; + + location / { + proxy_pass http://{{ name }}; + proxy_http_version 1.1; + proxy_set_header upgrade $http_upgrade; + proxy_set_header connection $connection_upgrade; + + proxy_set_header Host $http_host; + proxy_set_header x-forwarded-host $http_host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol $scheme; + proxy_set_header x-forwarded-proto $scheme; + + proxy_buffer_size 128k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + } +} + +server { + listen 443 ssl; + client_max_body_size 200m; + server_name {{ user_content_domain }}; + access_log /var/log/nginx/{{ name }}.ssl.access.log; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ user_content_domain }}/chained.pem; + ssl_certificate_key /etc/ssl/certs/{{ user_content_domain }}/key.pem; + ssl_trusted_certificate /etc/ssl/certs/{{ user_content_domain }}/ca.pem; + ssl_dhparam /etc/ssl/certs/{{ user_content_domain }}/dhparam.pem; + + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + + ssl_stapling on; + ssl_stapling_verify on; + resolver 8.8.8.8 8.8.4.4 valid=300s; + resolver_timeout 5s; + + location / { + proxy_pass http://{{ name }}; + proxy_http_version 1.1; + proxy_set_header upgrade $http_upgrade; + proxy_set_header connection $connection_upgrade; + + proxy_set_header Host $http_host; + proxy_set_header x-forwarded-host $http_host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol $scheme; + proxy_set_header x-forwarded-proto $scheme; + + proxy_buffer_size 128k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + } +} diff --git a/ansible/roles/content-domain-proxy/templates/proxy-nginx.conf b/ansible/roles/content-domain-proxy/templates/proxy-nginx.conf new file mode 100644 index 00000000..dc663d03 --- /dev/null +++ b/ansible/roles/content-domain-proxy/templates/proxy-nginx.conf @@ -0,0 +1,29 @@ +user www-data; +worker_processes 4; +pid /run/nginx.pid; + +events { + worker_connections 5000; +} + +http { + ## + # 
Basic Settings + ## + tcp_nodelay on; + keepalive_timeout 65; + server_tokens off; + + ## + # Logging Settings + ## + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + ## + # Virtual Host Configs + ## + + include /etc/nginx/sites-enabled/*; +} diff --git a/ansible/roles/copy_secret_file/tasks/main.yaml b/ansible/roles/copy_secret_file/tasks/main.yaml new file mode 100644 index 00000000..58b264fc --- /dev/null +++ b/ansible/roles/copy_secret_file/tasks/main.yaml @@ -0,0 +1,18 @@ +--- +- name: create secret folder + tags: [ secrets ] + become: true + file: + path: "{{ secret_root }}" + state: directory + +- name: copy {{ file_names }} into secrets + tags: [ secrets ] + become: true + copy: + src=./secrets/{{ item }} + dest={{ secret_root }} + owner=ubuntu + group=ubuntu + mode=0700 + with_items: "{{ file_names }}" diff --git a/ansible/roles/create_sg/tasks/main.yml b/ansible/roles/create_sg/tasks/main.yml new file mode 100644 index 00000000..6e572bfd --- /dev/null +++ b/ansible/roles/create_sg/tasks/main.yml @@ -0,0 +1,6 @@ +- name: create AWS SG + ec2_group: + name: "{{ sg_name }}" + description: "{{ env }} security policy for {{ descr }}" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" diff --git a/ansible/roles/custom_metrics/files/docker_log_lines.sh b/ansible/roles/custom_metrics/files/docker_log_lines.sh new file mode 100755 index 00000000..d3ee4ae1 --- /dev/null +++ b/ansible/roles/custom_metrics/files/docker_log_lines.sh @@ -0,0 +1,23 @@ +#!/bin/bash +set -e + +echo "don't use this script. change it (and test) with filesize, not length" +# du -d /docker/containers/$id/$id-json.log ?? -- anand +exit 0 + +# don't run this if datadog is not installed +if [ ! -e /usr/bin/dd-agent ]; then exit 0; fi + +# don't run this if docker is not installed +if [ ! -e /usr/bin/docker ]; then exit 0; fi + +all_lines='0' + +for id in $(docker ps -q); do + file=$(docker inspect $id | awk 'match($0, /"LogPath": "(.+)",/, a) { print a[1]; }') + lines=$(wc -l $file | awk '{ print $1 }') + all_lines=$(echo $all_lines + $lines | bc) +done + +data="bryan.docker.logs.lines:$all_lines|g" +echo "$data" | nc -u -w 1 localhost 8125 diff --git a/ansible/roles/custom_metrics/files/docker_proc_status.sh b/ansible/roles/custom_metrics/files/docker_proc_status.sh new file mode 100755 index 00000000..18560a04 --- /dev/null +++ b/ansible/roles/custom_metrics/files/docker_proc_status.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -e + +# don't run this if datadog is not installed +if [ ! -e /usr/bin/dd-agent ]; then exit 0; fi + +docker_pid=$(ps ax | awk '/\/usr\/bin\/docker.+\-d.+/{ print $1 }') + +docker_vmrss_value=$(awk '/VmRSS/{ print $2 }' /proc/$docker_pid/status) +data="bryan.docker.proc.vmrss:$docker_vmrss_value|g" +echo "$data" | nc -u -w 1 localhost 8125 + +docker_fdsize_value=$(awk '/FDSize/{ print $2 }' /proc/$docker_pid/status) +data="bryan.docker.proc.fdsize:$docker_fdsize_value|g" +echo "$data" | nc -u -w 1 localhost 8125 + +docker_vmsize_value=$(awk '/VmSize/{ print $2 }' /proc/$docker_pid/status) +data="bryan.docker.proc.vmsize:$docker_vmsize_value|g" +echo "$data" | nc -u -w 1 localhost 8125 diff --git a/ansible/roles/custom_metrics/files/docker_rss.sh b/ansible/roles/custom_metrics/files/docker_rss.sh new file mode 100755 index 00000000..cdcadd3f --- /dev/null +++ b/ansible/roles/custom_metrics/files/docker_rss.sh @@ -0,0 +1,11 @@ +#!/bin/bash +set -e + +# don't run this if datadog is not installed +if [ ! 
-e /usr/bin/dd-agent ]; then exit 0; fi + +docker_pid=$(ps ax | awk '/\/usr\/bin\/docker.+\-d.+/{ print $1 }') +docker_rss_value=$(echo 0 $(awk '/Rss/ {print "+", $2}' "/proc/$docker_pid/smaps") | bc) +data="bryan.docker.mem.rss:$docker_rss_value|g" + +echo "$data" | nc -u -w 1 localhost 8125 diff --git a/ansible/roles/custom_metrics/tasks/main.yml b/ansible/roles/custom_metrics/tasks/main.yml new file mode 100644 index 00000000..fee6bab4 --- /dev/null +++ b/ansible/roles/custom_metrics/tasks/main.yml @@ -0,0 +1,27 @@ +--- +- name: create scripts directory + become: true + file: path=/opt/scripts state=directory + +- name: put script in place + become: true + copy: + src={{ item }}.sh + dest=/opt/scripts/{{ item }}.sh + mode=0500 + with_items: + - docker_rss + # - docker_log_lines + - docker_proc_status + +- name: put cron in place + become: true + cron: + name="{{ item.name }}" + minute={{ item.minutes }} + state={{ item.state | default("present") }} + job="/opt/scripts/{{ item.name }}.sh > /dev/null" + with_items: + - { name: docker_rss, minutes: '*' } + # - { name: docker_log_lines, minutes: '*/5', state: absent } + - { name: docker_proc_status, minutes: '*' } diff --git a/ansible/roles/database/tasks/main.yml b/ansible/roles/database/tasks/main.yml new file mode 100644 index 00000000..976fa1cc --- /dev/null +++ b/ansible/roles/database/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- name: create database directory + become: true + file: path={{ db_path }} state=directory diff --git a/ansible/roles/datadog/handlers/main.yml b/ansible/roles/datadog/handlers/main.yml new file mode 100644 index 00000000..4c23028e --- /dev/null +++ b/ansible/roles/datadog/handlers/main.yml @@ -0,0 +1,5 @@ +- name: restart datadog-agent + become: true + service: + name=datadog-agent + state=restarted diff --git a/ansible/roles/datadog/tasks/main.yml b/ansible/roles/datadog/tasks/main.yml new file mode 100644 index 00000000..9c717f04 --- /dev/null +++ b/ansible/roles/datadog/tasks/main.yml @@ -0,0 +1,78 @@ +--- +- name: add https transport + become: true + apt: + name=apt-transport-https + state=latest + +- name: add datadog apt key + become: true + apt_key: + id=C7A7DA52 + keyserver=keyserver.ubuntu.com + state=present + +- name: add datadog repository + become: true + apt_repository: + repo='deb http://apt.datadoghq.com/ stable main' + state=present + update_cache=yes + +- name: install dd agent + become: true + apt: + name=datadog-agent + state=latest + +- name: add docker to dd-agent group + become: true + when: no_datadog_docker_monitoring is not defined + user: + name=dd-agent + groups=docker + +- name: create dd configdir + become: true + file: + path=/etc/dd-agent/conf.d + owner=root + mode=0555 + state=directory + +- name: Create main Datadog agent configuration file + become: true + template: + src=datadog.conf.j2 + dest=/etc/dd-agent/datadog.conf + notify: restart datadog-agent + +- name: install network checks for dock services + become: true + when: docker_config == "docks" + template: + src=tcp_check.yaml.j2 + dest=/etc/dd-agent/conf.d/tcp_check.yaml + notify: restart datadog-agent + +- name: install docker integration + become: true + when: no_datadog_docker_monitoring is not defined + template: + src=docker_daemon.yaml.j2 + dest=/etc/dd-agent/conf.d/docker_daemon.yaml + notify: restart datadog-agent + +# assumes a config for {{ name }} exists +- name: install datadog integrations + become: true + when: has_dd_integration is defined + template: + src="{{ name }}.yaml.j2" + dest="/etc/dd-agent/conf.d/{{ 
name }}.yaml" + notify: restart datadog-agent + +- name: force restart + command: echo restart datadog + when: force_restart is defined + notify: restart datadog-agent diff --git a/ansible/roles/datadog/templates/datadog.conf.j2 b/ansible/roles/datadog/templates/datadog.conf.j2 new file mode 100644 index 00000000..a3d0b063 --- /dev/null +++ b/ansible/roles/datadog/templates/datadog.conf.j2 @@ -0,0 +1,10 @@ +# Managed by Ansible +[Main] + +dd_url: https://app.datadoghq.com + +api_key: {{ datadog_api_key }} + +use_mount: no + +bind_host: 0.0.0.0 diff --git a/ansible/roles/datadog/templates/docker_daemon.yaml.j2 b/ansible/roles/datadog/templates/docker_daemon.yaml.j2 new file mode 100644 index 00000000..bdf73b33 --- /dev/null +++ b/ansible/roles/datadog/templates/docker_daemon.yaml.j2 @@ -0,0 +1,12 @@ +init_config: + +instances: + - url: "unix://var/run/docker.sock" + + collect_labels_as_tags: ["instanceName"] + + collect_container_size: false + + performance_tags: ["container_id", "image_name", "image_tag"] + + container_tags: ["image_name", "image_tag"] diff --git a/ansible/roles/datadog/templates/mongo.yaml.j2 b/ansible/roles/datadog/templates/mongo.yaml.j2 new file mode 100644 index 00000000..03a1b1b2 --- /dev/null +++ b/ansible/roles/datadog/templates/mongo.yaml.j2 @@ -0,0 +1,4 @@ +init_config: + +instances: + - server: mongodb://{{ datadog_mongodb_user }}:{{ datadog_mongodb_pwd }}@localhost:27017 diff --git a/ansible/roles/datadog/templates/nginx.yaml.j2 b/ansible/roles/datadog/templates/nginx.yaml.j2 new file mode 100644 index 00000000..2f7b5811 --- /dev/null +++ b/ansible/roles/datadog/templates/nginx.yaml.j2 @@ -0,0 +1,6 @@ +init_config: + +instances: + # For every instance, you have an `nginx_status_url` and (optionally) + # a list of tags. + - nginx_status_url: http://{{ ansible_default_ipv4.address }}/basic_status diff --git a/ansible/roles/datadog/templates/rabbitmq.yaml.j2 b/ansible/roles/datadog/templates/rabbitmq.yaml.j2 new file mode 100644 index 00000000..db5221e0 --- /dev/null +++ b/ansible/roles/datadog/templates/rabbitmq.yaml.j2 @@ -0,0 +1,6 @@ +init_config: + +instances: + - {{ name }}_api_url: http://localhost:54320/api/ + {{ name }}_user: {{ rabbit_username }} + {{ name }}_pass: {{ rabbit_password }} diff --git a/ansible/roles/datadog/templates/tcp_check.yaml.j2 b/ansible/roles/datadog/templates/tcp_check.yaml.j2 new file mode 100644 index 00000000..924c0406 --- /dev/null +++ b/ansible/roles/datadog/templates/tcp_check.yaml.j2 @@ -0,0 +1,14 @@ +init_config: + +instances: + - name: docker-listener + host: 127.0.0.1 + port: 3000 + collect_response_time: true + skip_event: true + + - name: krain + host: 127.0.0.1 + port: 3100 + collect_response_time: true + skip_event: true diff --git a/ansible/roles/dock-images/tasks/main.yml b/ansible/roles/dock-images/tasks/main.yml new file mode 100644 index 00000000..6a75f343 --- /dev/null +++ b/ansible/roles/dock-images/tasks/main.yml @@ -0,0 +1,57 @@ +--- +- name: starting docker + become: true + service: + name=docker + state=started + +- name: add runnable registry to /etc/hosts + become: true + blockinfile: + dest: /etc/hosts + insertafter: EOF + block: | + {{ registry_address }} registry.runnable.com + +- name: pulling docker images + become: true + command: docker pull {{ item }} + with_items: + - "registry.runnable.com/runnable/image-builder:v4.3.0" + - "swarm:1.2.5" + - "registry:2.3.1" + - "google/cadvisor:v0.24.1" + - "prom/node-exporter:v0.13.0" + - "weaveworks/weaveexec:1.5.0" + - "weaveworks/weavedb:latest" + - 
"weaveworks/weave:1.5.0" + - "node:argon" + - "ruby:2.3" + - "python:2.7" + - "php:7.0-apache" + - "runnable/node-starter" + - "runnable/rails-starter" + - "runnable/django-starter" + - "runnable/laravel-starter" + - "runnable/mysql:5.6" + - "runnable/postgres:9.4" + - "runnable/mongo:3.2" + - "runnable/redis:3.2" + +- name: stopping docker + become: true + service: + name=docker + state=stopped + +- name: removing docker key file + become: true + file: + path: /etc/docker/key.json + state: absent + +- name: removing docker pid file + become: true + file: + path: /var/run/docker.pid + state: absent diff --git a/ansible/roles/dock-init/tasks/main.yml b/ansible/roles/dock-init/tasks/main.yml new file mode 100644 index 00000000..fd0bc261 --- /dev/null +++ b/ansible/roles/dock-init/tasks/main.yml @@ -0,0 +1,65 @@ +--- +- fail: msg="value tokens need to be defined for this role" + when: vault_auth_token is not defined or vault_token_01 is not defined or vault_token_02 is not defined or vault_token_03 is not defined + +- name: create vault auth directory + tags: vault_files + become: true + file: + dest="/opt/runnable/dock-init/consul-resources/vault/{{ node_env }}" + state=directory + owner=ubuntu + group=ubuntu + mode="0711" + +- name: copy vault auth files + tags: vault_files + become: true + copy: + dest="/opt/runnable/dock-init/consul-resources/vault/{{ node_env }}/{{ item.file_name }}" + content="{{ item.value }}" + mode="0400" + with_items: + - { file_name: 'auth-token', value: "{{ vault_auth_token }}" } + - { file_name: 'token-01', value: "{{ vault_token_01 }}" } + - { file_name: 'token-02', value: "{{ vault_token_02 }}" } + - { file_name: 'token-03', value: "{{ vault_token_03 }}" } + +- fail: msg="docks_rollbar_key needs to be defined for this role" + when: docks_rollbar_key is not defined + +- name: copy rollbar token + tags: rollbar + become: true + lineinfile: + dest="/opt/runnable/dock-init/key/rollbar.token" + line="{{ docks_rollbar_key }}" + create=yes + +- name: docker upstart override + become: true + lineinfile: + dest="/etc/init/docker.override" + line="manual" + create=yes + +- name: create ssh config for root + become: true + lineinfile: + dest="/root/.ssh/config" + line="StrictHostKeyChecking no" + create=yes + +- name: create ssh config for ubuntu + lineinfile: + dest="/home/ubuntu/.ssh/config" + line="StrictHostKeyChecking no" + create=yes + +- name: enforce sane permissions for dock-init RSA keys + become: true + file: + owner="root" + group="root" + path="/opt/runnable/dock-init/key/id_rsa_runnabledock" + mode="0400" diff --git a/ansible/roles/docker/README.md b/ansible/roles/docker/README.md new file mode 100644 index 00000000..72610499 --- /dev/null +++ b/ansible/roles/docker/README.md @@ -0,0 +1,39 @@ +# Role Name + +Ansible Role to Install Docker on Ubuntu + +## Manual Setup + +*Important: You must set up the following certificates on new boxes manually (for now):* + +For the Docker daemon: +- `/etc/ssl/docker/`: + - `ca.pem`: CA certificate that also signed the client keys + - `cert.pem`: Docker _server_ certificate + - `key.pem`: Key used to sign the Docker server certificate + +For the Docker client: +- `/home/ubuntu/.docker/`: + - `ca.pem`: CA certificate that also signed the client keys (should be the same one as in `/etc/ssl/docker`) + - `cert.pem`: Docker _client_ certificate + - `key.pem`: Key used to sign the Docker client certificate + +To ensure docker verifies the local client, you need to either pass `--tlsverify` to the docker command, or you need to 
set `DOCKER_TLSVERIFY=1` in the environment. + +## Role Variables + +``` +docker_centos_packages: + - { package: "docker" } +``` + +## Example Playbook + + - hosts: docker-servers + roles: + - { role: docker-centos, + tags: ["docker"] } + +## Author Information + +anandkumarpatel diff --git a/ansible/roles/docker/defaults/main.yml b/ansible/roles/docker/defaults/main.yml new file mode 100644 index 00000000..d9c35d9f --- /dev/null +++ b/ansible/roles/docker/defaults/main.yml @@ -0,0 +1,2 @@ +--- +docker_package_name: docker-engine=1.12.0-0~trusty diff --git a/ansible/roles/docker/files/docker.conf b/ansible/roles/docker/files/docker.conf new file mode 100644 index 00000000..3b0e0877 --- /dev/null +++ b/ansible/roles/docker/files/docker.conf @@ -0,0 +1,61 @@ +description "Docker daemon" + +start on (local-filesystems and net-device-up IFACE!=lo) +stop on runlevel [!2345] +limit nofile 524288 1048576 +limit nproc 524288 1048576 + +respawn + +kill timeout 20 + +pre-start script + # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount + if grep -v '^#' /etc/fstab | grep -q cgroup \ + || [ ! -e /proc/cgroups ] \ + || [ ! -d /sys/fs/cgroup ]; then + exit 0 + fi + if ! mountpoint -q /sys/fs/cgroup; then + mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup + fi + ( + cd /sys/fs/cgroup + for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do + mkdir -p $sys + if ! mountpoint -q $sys; then + if ! mount -n -t cgroup -o $sys cgroup $sys; then + rmdir $sys || true + fi + fi + done + ) +end script + +script + # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) + DOCKER=/usr/bin/$UPSTART_JOB + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + # redirect STDOUT/STDERR to logger app for rsyslog/loggly + exec "$DOCKER" daemon $DOCKER_OPTS 2>&1 | logger -t docker_engine -p local7.info +end script + +# Don't emit "started" event until docker.sock is ready. +# See https://github.com/docker/docker/issues/6647 +post-start script + DOCKER_OPTS= + if [ -f /etc/default/$UPSTART_JOB ]; then + . /etc/default/$UPSTART_JOB + fi + if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then + while ! 
[ -e /var/run/docker.sock ]; do + initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 + echo "Waiting for /var/run/docker.sock" + sleep 0.1 + done + echo "/var/run/docker.sock is up" + fi +end script diff --git a/ansible/roles/docker/meta/main.yml b/ansible/roles/docker/meta/main.yml new file mode 100644 index 00000000..36877817 --- /dev/null +++ b/ansible/roles/docker/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: base_ubuntu, when: "ansible_distribution == 'Ubuntu'"} diff --git a/ansible/roles/docker/tasks/main.yml b/ansible/roles/docker/tasks/main.yml new file mode 100644 index 00000000..16ba50f3 --- /dev/null +++ b/ansible/roles/docker/tasks/main.yml @@ -0,0 +1,120 @@ +--- +- name: create docker cert directory + become: true + when: dock is defined + file: + path=/etc/ssl/docker + state=directory + +- name: copy docker upstart config + become: true + copy: + src=docker.conf + dest=/etc/init/docker.conf + mode=0444 + owner=root + group=root + +- name: copy docker certs + become: true + when: dock is defined + copy: + src=certs/ + dest=/etc/ssl/docker + mode=0440 + owner=root + group=root + +- name: create core file dir + become: true + when: docker_config == "runnable" and core_file_dir != "/var/log" + tags: coreDump + file: + path="{{ core_file_dir }}" + state=directory + owner=root + group=root + mode=0755 + +- name: change core dump path + become: true + when: docker_config == "runnable" + tags: coreDump + sysctl: + name=kernel.core_pattern + reload=true + state=present + value="{{ core_file_dir }}/core.%h.%e.%t" + +- name: install aufs with linux-image-extra-{{ ansible_kernel }} + become: true + tags: aufs + apt: + pkg="linux-image-extra-{{ ansible_kernel }}" + state=present + update_cache=true + cache_valid_time=604800 + +- name: install apt-transport-https + become: true + apt: + pkg="apt-transport-https" + state=present + update_cache=yes + cache_valid_time=604800 + +- name: add docker repository key + become: true + apt_key: + id: 2C52609D + url: https://apt.dockerproject.org/gpg + state: present + +- name: add docker repository + become: true + apt_repository: + repo: "deb https://apt.dockerproject.org/repo ubuntu-{{ ansible_distribution_release }} main" + state: present + update_cache: yes + +- name: copy docker config file + become: true + template: + src={{ docker_config }} + dest=/etc/default/docker + register: copied_config + tags: genDockerConfig + +- name: apt-get update + become: true + apt: + update_cache=yes + +- name: install docker + become: true + when: restart is defined or dock is defined + apt: + pkg={{ docker_package_name }} + state=present + force=yes + update_cache=yes + cache_valid_time=604800 + +- name: enforce modes on /docker + become: true + file: + path=/docker + state=directory + owner=root + group=root + mode=0755 + +- name: restart docker + when: (copied_config.changed and restart is defined) or dock is defined + command: sudo service docker restart + +- name: create docker group + become: true + group: + name: docker + state: present \ No newline at end of file diff --git a/ansible/roles/docker/templates/dock b/ansible/roles/docker/templates/dock new file mode 100644 index 00000000..c3f219ba --- /dev/null +++ b/ansible/roles/docker/templates/dock @@ -0,0 +1,6 @@ +DOCKER_OPTS="-H=unix:///var/run/docker.sock -H=0.0.0.0:4242" +DOCKER_OPTS="$DOCKER_OPTS --tlsverify --tlscacert=/etc/ssl/docker/ca.pem" +DOCKER_OPTS="$DOCKER_OPTS --tlscert=/etc/ssl/docker/cert.pem --tlskey=/etc/ssl/docker/key.pem" +DOCKER_OPTS="$DOCKER_OPTS -g 
/docker --insecure-registry registry.runnable.com --icc=false" +DOCKER_OPTS="$DOCKER_OPTS --bip 172.17.42.1/16" +DOCKER_OPTS="$DOCKER_OPTS --dns=172.17.42.1 --dns={{ ansible_dns.nameservers[0] }} --dns=8.8.8.8" diff --git a/ansible/roles/docker/templates/runnable b/ansible/roles/docker/templates/runnable new file mode 100644 index 00000000..2a42c223 --- /dev/null +++ b/ansible/roles/docker/templates/runnable @@ -0,0 +1 @@ +DOCKER_OPTS="-g /docker --insecure-registry registry.runnable.com --icc=false" \ No newline at end of file diff --git a/ansible/roles/docker_client/README.md b/ansible/roles/docker_client/README.md new file mode 100644 index 00000000..3b95b530 --- /dev/null +++ b/ansible/roles/docker_client/README.md @@ -0,0 +1,14 @@ +# Role Name + +Ansible Role to Install Docker Client Certs on Ubuntu + +## Manual Setup + +Creating new docker client certs: +1. cd into this dir ```cd ``` +2. ensure you have ca-key.pem here `roles/docker_client/ca-key.pem` +3. run cert generator ```sudo ./scripts/genClientCert.sh ``` + +## Author Information + +anandkumarpatel diff --git a/ansible/roles/docker_client/ca.pem b/ansible/roles/docker_client/ca.pem new file mode 100644 index 00000000..d6b36004 --- /dev/null +++ b/ansible/roles/docker_client/ca.pem @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEnjCCA4agAwIBAgIJANIFZy8wwSmYMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAwMTQxNFoXDTI2MDUyMDAwMTQxNFowgZAxCzAJBgNVBAYTAlVTMRMwEQYD +VQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMREwDwYDVQQK +EwhSdW5uYWJsZTEMMAoGA1UECxMDT3BzMRIwEAYDVQQDEwlsb2NhbGhvc3QxHzAd +BgkqhkiG9w0BCQEWEG9wc0BydW5uYWJsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCkX4cwQDcimGvnJg0HBl+A9da0zpUjJJVPbba3A2wJ/S7l +gKlYID5TXNYpSAepdmmWO+NEXcNVPUYVhoBe4DWkJFc+lxtLPy0UOseZ+TvMac7i +Zp0k/GSLl3ASloDPfKsBlpOpM+OhWvl5jzAzSJ1l6dGcCEAXE6dhtEUgPMUzfAfl +bUuQ7ri8iMB67Ktix8FJCEpwczlKfebzmxw3VxwGiNQSGbbyIknuCk5eGbMVPtdY +DBl+5R7h0S0enXxYtPtL7CRKs0uHxm8Kmvvo2htSf9bdOSsjnFzQvZdBLrrQipN+ +i8m/ZOL8IOzV/Wfwqd7Zo3w3hUE8rzrBP0Ce0f0BAgMBAAGjgfgwgfUwHQYDVR0O +BBYEFKoY1K08hkkW4dt/bo0153ccq9sMMIHFBgNVHSMEgb0wgbqAFKoY1K08hkkW +4dt/bo0153ccq9sMoYGWpIGTMIGQMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2Fs +aWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzERMA8GA1UEChMIUnVubmFi +bGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJbG9jYWxob3N0MR8wHQYJKoZIhvcN +AQkBFhBvcHNAcnVubmFibGUuY29tggkA0gVnLzDBKZgwDAYDVR0TBAUwAwEB/zAN +BgkqhkiG9w0BAQsFAAOCAQEAg9gyj48wThPw61lxZ/KpQsb5Vhme/EQD0FE8UR74 +uLXFBw0KOvmwd5MXUDJBmS2Qzcy7ja86YETUnYX6AWpyKaS5ahsVHxzsNlG8IwHO +lh9gMR1ZCiwYRiVUEq4d7PvwsgZ/xbzi9i4OeXQSsPGDcD2gSO7fqE+uQI5JSTM3 +RP961DrpabOUYVd8/B+TA0coYke+VgHNPMWzCAfKQv9SUCqzykJ8Gx5cKsofQAcV +Us2OqQgIbJtca4eds2bz6pDxfRux+7A5n/hfj86YqzQvrHUVRtzsL0ukIOM5G31Z +D80lBQLQV7QbTVu8plmZ+is8v76BS5eCLmKC0UnjEve7Jg== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/api-socket-server/cert.pem b/ansible/roles/docker_client/files/certs/api-socket-server/cert.pem new file mode 100644 index 00000000..654f4042 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api-socket-server/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3MMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ 
+bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAxMDMwNloXDTE3MDUyMjAxMDMwNlowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz8LATQMVobZxyD2vjyUV6bH/ +ggrCZK1fge0XgHMThaevYylBrnxhPAZrPxp2ruLJkN2Si9WLvBXW5TauVS66elF0 +G6mKD6Up53XgOpPfQOYdXsrFGgO+zBqQstPi1vWugvFNKNLIRcJm3klxx6BPw4ap +9WZL5BZUowxO0bC8knty8iyDOBcEB2KMZd9YY8W8H3aKjbr4VziSJZ5tbUtiIkQQ +nTMQJoFB61FgJnCJlpnNyKTF0AzcIHtuF7CoRI1hPQ3VexidoXSpFYhjvXRH4pmm +0rlT4AFgQB6zT7f4bPRIDRt1raUSZhcu6LpGPsp65OOrCbTBMubw9QEnGvOvDwID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBAF2FXHVZjNmbViQuXeRBstiD4RBdkc73OAzySsGAYlkLXFR2 +4wSIdh1/gSET+BMReh6mqxfdmpd2tUj1FVMWZyy8gEuc+/NQpZ3Z24SRSsPicl17 +TyksNiZCO67yUXNVDDV6eG/Or3xn0xn/Y6gdkqbDBlkN6Oy3eq1/SP+ySskF0UYw +JP3nctP1F7SD+mbYJs5LTpL7lJmfiDStrjCfeMxC+rrji9hnim5i9AeL+t1iQ7it +WIHNqVeWSwYaWZxZxRKo40Pfpor72ZfDwlHkQ6HZbdM779BPhnyclbK4z9DF3yxW +eVnMLsIA3g+2LkXac8gwsvKckn1Zvdg2cm6etKM= +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/api-socket-server/key.pem b/ansible/roles/docker_client/files/certs/api-socket-server/key.pem new file mode 100644 index 00000000..f1d4eedf --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api-socket-server/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAz8LATQMVobZxyD2vjyUV6bH/ggrCZK1fge0XgHMThaevYylB +rnxhPAZrPxp2ruLJkN2Si9WLvBXW5TauVS66elF0G6mKD6Up53XgOpPfQOYdXsrF +GgO+zBqQstPi1vWugvFNKNLIRcJm3klxx6BPw4ap9WZL5BZUowxO0bC8knty8iyD +OBcEB2KMZd9YY8W8H3aKjbr4VziSJZ5tbUtiIkQQnTMQJoFB61FgJnCJlpnNyKTF +0AzcIHtuF7CoRI1hPQ3VexidoXSpFYhjvXRH4pmm0rlT4AFgQB6zT7f4bPRIDRt1 +raUSZhcu6LpGPsp65OOrCbTBMubw9QEnGvOvDwIDAQABAoIBAAp6Lspzcl7SXZCU +k8rrOtyrLlB3jHAZUiyyMTI4G27tEuo0RWywNxMY28W15vBQ3YthsSc+IKreixTK +GiaPkbXjb7CJbz30vVU/24v4sxSJxkdmgGQcdj3CxTNPfiwK/b76vkJlxtXFxPxG +xeckTYF1xBJtEzywZ5pFX6AxgoD8g37lxlwHzSo/hDPSga4ZSs3s0fvg9i5NkERZ ++tzasMRC2Rnbh30JigwnXP4Zl9JHqgD1MBujQgAbWGcW+Robjg3QdtekJig2ynT2 +rY2ZM66PsUJJBpjVXuMaZh6YGMCLpwFtlFDdih8Tli7wmVk3Lopvs4L65eOObjwO +BW1gZFkCgYEA6nd+oQVx/wt2TmdXhifq4K5LQnP804Dcwepmtphqir7iIWyubrpm +ouon+9KiexpU7HWGMmcrz4nTyEHGeMBeooo9t3dMHO+SYckbhFnHKUDTb70R/jpm +JB916u+Akmu3ul9vVbG9vNcVGDVOwdOCjpQZeo2XZ8urlZ4MVVdBersCgYEA4tdg +QBetNwSzRn0K61xju9J8Yzv7y9EwUKn2yZbeZF3CCDs8aZu9H0ym3Pi+9VI/vvZj +C0BXC9z112ASS1FRqofcYdw34CxFruEijMwb8N9TIDxwMXwXWEkl3FxTpNnvDWas +14akkOvnRJfIwTz/STfAgtkIchPIz+CArALqib0CgYAg74Q0LkWjJAMMZGvhBn5x +7UnZHd3n//QCsWCYSbS9pK6zL0cZesmsHORc9Wupiztes+uT7PY0gxHi0YqFr9Ab +WPvuZ62jauvPHKbm4o2dd+O7kH2UjrOGY3z28YnIB6WZYoqLKRqUCkqGSga0J3fx +e1BQ7M7V38BMlhT5b/ZcvQKBgHD503YYVKySKrdwj3+pBAMHUewac82O4gFKpR7B +hQF3sKto1S3D4u0zv+dN+80yqYzyKWHxKWk/IzGhJ3tbhNDp2Vjj1PBRHGN8vThi +b4gHsd7cOwRqSTYvOCwHz6DK6cRC3tJ4vH1xAD2pWtLjDkHpAngon4wxTlI5/dut +mCIBAoGBANnKH7NnavZZyF7tkt3ZFdsvb2P7ZWsiH5Oisp/afhGGuxa19d5W8Dbx +1dklUDH6ZOjuXVKn6U1t3hzUVGEcQmRlfrVhyRKZTWkF8JfOqCCuzAu4Uejw+nm8 +j+JgOIYjhL0m3OVk8Az3Y7ipyTmjgl5mXFN2byOQ8rxXmW/LzA/P +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/api-worker/cert.pem b/ansible/roles/docker_client/files/certs/api-worker/cert.pem new file mode 100644 index 00000000..f2e54407 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api-worker/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3LMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD 
+VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAxMDI1NVoXDTE3MDUyMjAxMDI1NVowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyThGl0FzHiYXWXX+pGbX6mUR +vX1yHdnXO2+hC9ztYX8dzC2kQoUXY+HxjBrLP0oAWKT2vp3GJ0Kk/slsDIS4bI3j +5w2mq4o4dkelWqKhXfYRi+QEFSwTfRh08DABbDKS+0uQrowxL8BfAq2rvADym8rQ +d7Q6UDOOEN2yeQznHxAY+VTUtCSktiLKtXpJG05PcqLxNs/wuN+rh0/JFpGQgnHr +MrSLmYWr9dRplmxNbBaQat9uCziPlvcp+SZOm5GkxLX+jkm07dIomdpBtSuglj0n +bK0RMkWKRsuSayQ6gqOJFgAPdU8xJqQ2X5uHClr9FzaTASxxhHPtOmtcj7TGbwID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBAExEEUEhRPuxixDA/vOfCkuJ/oICsP5ByEU6uK2fJsa+bN8y +1LARDZUjMXj2QImrEB2We4rAwYn1JyLIMUqtIS+C42LYCIJAt8f72oKZxOTQ7H34 +BCIvf624hdS6vj/hbvb7S/oh2T8BnFHtf3x7JfHHPGZRmEfUF2t47BFN3J0XHc0T +1+Hwcnlcxp7aVsnwUwAK3dZ11bKfGZS58V04M+qnSgSg8+6j8D0JZIx8zQ4dc13F +FrO0s2ZIpO8se4bpM69AcbK0HY95dpUyeU10tw2yzYf1/Fui10OFP5GE2HhdU8mm +tTRLAOYhS5C9tbf3bSmcABLs4PcwRjrg2zwWv3k= +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/api-worker/key.pem b/ansible/roles/docker_client/files/certs/api-worker/key.pem new file mode 100644 index 00000000..0dee661b --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api-worker/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAyThGl0FzHiYXWXX+pGbX6mURvX1yHdnXO2+hC9ztYX8dzC2k +QoUXY+HxjBrLP0oAWKT2vp3GJ0Kk/slsDIS4bI3j5w2mq4o4dkelWqKhXfYRi+QE +FSwTfRh08DABbDKS+0uQrowxL8BfAq2rvADym8rQd7Q6UDOOEN2yeQznHxAY+VTU +tCSktiLKtXpJG05PcqLxNs/wuN+rh0/JFpGQgnHrMrSLmYWr9dRplmxNbBaQat9u +CziPlvcp+SZOm5GkxLX+jkm07dIomdpBtSuglj0nbK0RMkWKRsuSayQ6gqOJFgAP +dU8xJqQ2X5uHClr9FzaTASxxhHPtOmtcj7TGbwIDAQABAoIBAQCsqamJKRRLDoyC +q4lyMP2dmFR8sSQ3ETJrQgKDV3QqcRV0msWUfvfvMXY/rs4w5KMfbY59So06KynB +p70bkNdf7vFc1o3qLJT+icQN3fdx1TCBRYtrT4L5mP7fC5Js8YjGMbVY0dtzCjN8 +7OGsVXPY01PHI9PV9ic1ucbnsHayY2wNfo0HgUbXXARG+tN/sIeeqAmeABHIlZmn +SG+t09ArZgjdecZgcWMRdazCqf4lEEB5hrd6YZR9MU6xMr3fla3oTZ/v7gtY3TcM +/tHR/hB+Qv5cIMUMQHwDYl/wzmjjvFFkjBvdr/qo4u8j6AZUOUCQpqdLsyuuwt9v +tdBnYwcBAoGBAOrb5k/BefDI1e2F+C1yUAx+VPcw7NnHZjrf4iwSbSDJIVwoUiBR +67RTQFsoqeFWS8kjAo6n1oosqTmatkvbyyWm9BaXF7/M1kZHEAJMFlCUou0VOWg6 +dBT+iFUGh2QCM6XC5PR7bei3oiGtESo59bO0zbpGw2q7gTvMP/iLXzxhAoGBANtV +MadH2yoZrkP2LBiESKX6ep+ye3acOQZT6YxhjoNIrqAIIfppDWoaGUNcVtJZ3VON ++JUEi7TmlzsIUh2mwh6eK1XCN1nGWvQaf2C2BEab47LC9iiNvnJek3QUC6G9k8p7 +0HcDqLAX6EdS2ynv4YE0WCrbr17fBhywCzutt3TPAoGBAI2Bhygg29b/Ox5M7PZ8 +/Mw6RflWSi8BXJA2mvCQ/Z2Wcfoz9R+yHEFfM9R5ELpLo4gCByVq7PH97Mxfcl0M +hj8ec800pkOTKLvZT0UDESLmG7CbbCZnqc+IeR45u9O/G91RK2A/Cj8AfbiqrT/z +Ah6P3rDQuCeT6E7eRuD7IJuhAoGAF1i6MFQ1wAV7x0As8W8jTIedBeK5waZfa40D +Dy5TPSY/cEltVHpaMzET/1eU5k8qf/pcu4AAXwEsl9wK4Z0ejKPJeRdzTNsQU2t+ +o4y8Z8ewNRcLnMV5wDLQ/0oi8tR7JtKHHiQFkLj6wJzRN2WRW2928xjvUenrfKJA +j9mt7YECgYA18dmM+0/3VdoI8J56bt7gruYonmzU0YBzD3FHa0BuCkYky12Y/Soc +Mn1ymnjZpcS302ajDWkou51Ero5AterKEj3XzvpINC+wSmqJfuOUMGMJyEMvqmA8 +DxI+Ub3l9juISvcviXa4eQQv3rGKxteNKdv6T/NWz/ImR0WsNV5ZAQ== +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/api/cert.pem b/ansible/roles/docker_client/files/certs/api/cert.pem new file mode 100644 index 00000000..a6e652a4 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- 
+MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3KMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAxMDI0N1oXDTE3MDUyMjAxMDI0N1owETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2k3kbA7xGxxoP28GiT1fctTX +Msax59ADfdFVxG3OnzLo53vGwx6Ge1B2R42fdfUD7yuPwn1s2BhaHKSbh8BvJwAu +JOyMTO5gGmRnqR/r7PiWwY3Ef2EMT6mMNIpXLGdSn0V6f7hgyu/oJgzpJb8O3DT2 ++tZdoKQVFBhrrXMj8qftroZRciiiArddNgfd9OrVw0O4DOyvtyhOVjVpe0mg1n/r +c3zweOKSRpY1/TnpSa/WN3MCLFIIyFJDVQvWe/I1bpdCf0xYaxnOefiq6HFMgu6S +9OStkbimQphRVVy8toa6uLt38oqKX1MC0//8O+eZjGKZAF8CMmx/5qgXIKh+LwID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBAC3aYEO6zq4/pYZMSR+EBRwxr0E64ck5swBhXF/xq9nKqhvS +Xh5Y+rPbzwnNefSsAsEYkgsJnENTgPfvrpbt60txx+MmY0OPf0UHX4eQFdpzfFBG +C1jT+3Vo2Kr9z8x0OgFvfxRfHgKYQWjDgdqyFGZ4jz7iq6bhpR5FiVUfJ0y3nmmz +NXwS+QJzgyKhs7OjiZhyPElGTSUUi5KBBOk2cpzB3FworYTbZ3gIcgr1IFD/tH4M +PnjD5YTZtmZvpPaCVwXucOCOWAXqhvbYYpWypZApFL0NgFYYRbirEJSmUFqondDJ +jUANI0fuk4JqUOZzqyTM2Xtan8hVZtoipIXWjoI= +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/api/key.pem b/ansible/roles/docker_client/files/certs/api/key.pem new file mode 100644 index 00000000..7f33350f --- /dev/null +++ b/ansible/roles/docker_client/files/certs/api/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA2k3kbA7xGxxoP28GiT1fctTXMsax59ADfdFVxG3OnzLo53vG +wx6Ge1B2R42fdfUD7yuPwn1s2BhaHKSbh8BvJwAuJOyMTO5gGmRnqR/r7PiWwY3E +f2EMT6mMNIpXLGdSn0V6f7hgyu/oJgzpJb8O3DT2+tZdoKQVFBhrrXMj8qftroZR +ciiiArddNgfd9OrVw0O4DOyvtyhOVjVpe0mg1n/rc3zweOKSRpY1/TnpSa/WN3MC +LFIIyFJDVQvWe/I1bpdCf0xYaxnOefiq6HFMgu6S9OStkbimQphRVVy8toa6uLt3 +8oqKX1MC0//8O+eZjGKZAF8CMmx/5qgXIKh+LwIDAQABAoIBAQC5KgBpt3LCcCHs +TBvbJHdlSysNIoOcAFA10+1Po2PynJf4YICC181xoxnwLyvmPWCM6QITEZfU4zLr +wKM7Tfn5tqltIEcQqdBSEn/qb+0JHwGgQsM81/A9Ua5ekkjGnQzzk1cIJjfTahIa +1DCdl6TfGv2yXUklDIfkd6I9iMAg/MWVrGB84g3P/39tWM+hmUCtJSHo7v8zoxNV +xKO23kcKCEkYObeFZdEkgPRzKIOzm1wEJeN7BVQ3yMgpr39KA+We4sl2LHWXHelI +8MO5kwQh4nZgJ0gxilWjf+DUizt1pVbNWVgox3ogHIZA2gV2Kt4BP6MokVTxD73A +3IWx1idBAoGBAPhCXpHdrXdLbCSmXH/B5uNsUo8MZNwAbfd/WYfvji0Z5raQUdYG +qmp36Z9qUwTL3a+KZMv0V81rI6vTZO/KEyDq5S7IMYrAr4a8V8jmqaohPqqj64Yp +Ppq0a73CaUZ19L5kWfhzLWsbHUVqG6PFqTqOlwEtwkLn9pyN6SR19NonAoGBAOEc +a0oChJYMD/ZjYjZ0HK0HIgSEvFOxQ8Qzy1BCf6XgvnoOiZZHIDunnIn0a2bbqbyS +aN1LjjaWd6tMRM21aXsIxM9QENtkNs83QSUDPat1iIWMSDKo4lmZBGqok8gvlQqh +avWo9em3wRE5VRKptgjfHclpaATIftikIjCWkWi5AoGACw+p9wXRfMc3gkqHRIn3 +J5rj8suykrrSK3zZGF1sEEZfKTM08jxJy4ScPCYYnNYYgoHAC/ZPEy/63uUEUkrG +n2jX3UW+L1h2eaJapvDgDlx81k2HaOrdhzcStdm7pxAKX5FJEPDQzTzgCP+5S4q7 +B4ji4YKQbTq4mjEYei9nQXECgYEAti6YyfHVudWTSZVPE8DOwF3Icx21iIoqzOwZ +/B+FTQrhPMjtItj3RjA1KRTNoxXdfcaD5azTMGx/9vsnp8/MyQzkst8adWPaU8Nl +Dn5b95qMXYCVoaI6ZcbH40YBK/8OQOc4OA6AzqCvOLBMoGkG8JEZezq6OzzEI7Ja +FTaWAckCgYAGHGGukuxyp9T4awqH6iAHd1LhZHLseGouwiF1MZpHVO+H8XKhYJoi +1Xd4FXDYClHzoLiTbPmiiRee1g3hgvrvOl1JqHmS8zNNJCY0Skx1wIBKGIywJEZb +GgrXeEVtylz0b0ifA8LHJmUiIEnkcuSWwRrPeWpfowsVOIXEbHu+/w== +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/ca.pem b/ansible/roles/docker_client/files/certs/ca.pem new file mode 100644 index 00000000..d6b36004 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/ca.pem @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- 
+MIIEnjCCA4agAwIBAgIJANIFZy8wwSmYMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAwMTQxNFoXDTI2MDUyMDAwMTQxNFowgZAxCzAJBgNVBAYTAlVTMRMwEQYD +VQQIEwpDYWxpZm9ybmlhMRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMREwDwYDVQQK +EwhSdW5uYWJsZTEMMAoGA1UECxMDT3BzMRIwEAYDVQQDEwlsb2NhbGhvc3QxHzAd +BgkqhkiG9w0BCQEWEG9wc0BydW5uYWJsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCkX4cwQDcimGvnJg0HBl+A9da0zpUjJJVPbba3A2wJ/S7l +gKlYID5TXNYpSAepdmmWO+NEXcNVPUYVhoBe4DWkJFc+lxtLPy0UOseZ+TvMac7i +Zp0k/GSLl3ASloDPfKsBlpOpM+OhWvl5jzAzSJ1l6dGcCEAXE6dhtEUgPMUzfAfl +bUuQ7ri8iMB67Ktix8FJCEpwczlKfebzmxw3VxwGiNQSGbbyIknuCk5eGbMVPtdY +DBl+5R7h0S0enXxYtPtL7CRKs0uHxm8Kmvvo2htSf9bdOSsjnFzQvZdBLrrQipN+ +i8m/ZOL8IOzV/Wfwqd7Zo3w3hUE8rzrBP0Ce0f0BAgMBAAGjgfgwgfUwHQYDVR0O +BBYEFKoY1K08hkkW4dt/bo0153ccq9sMMIHFBgNVHSMEgb0wgbqAFKoY1K08hkkW +4dt/bo0153ccq9sMoYGWpIGTMIGQMQswCQYDVQQGEwJVUzETMBEGA1UECBMKQ2Fs +aWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzERMA8GA1UEChMIUnVubmFi +bGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJbG9jYWxob3N0MR8wHQYJKoZIhvcN +AQkBFhBvcHNAcnVubmFibGUuY29tggkA0gVnLzDBKZgwDAYDVR0TBAUwAwEB/zAN +BgkqhkiG9w0BAQsFAAOCAQEAg9gyj48wThPw61lxZ/KpQsb5Vhme/EQD0FE8UR74 +uLXFBw0KOvmwd5MXUDJBmS2Qzcy7ja86YETUnYX6AWpyKaS5ahsVHxzsNlG8IwHO +lh9gMR1ZCiwYRiVUEq4d7PvwsgZ/xbzi9i4OeXQSsPGDcD2gSO7fqE+uQI5JSTM3 +RP961DrpabOUYVd8/B+TA0coYke+VgHNPMWzCAfKQv9SUCqzykJ8Gx5cKsofQAcV +Us2OqQgIbJtca4eds2bz6pDxfRux+7A5n/hfj86YqzQvrHUVRtzsL0ukIOM5G31Z +D80lBQLQV7QbTVu8plmZ+is8v76BS5eCLmKC0UnjEve7Jg== +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/docker-listener/cert.pem b/ansible/roles/docker_client/files/certs/docker-listener/cert.pem new file mode 100644 index 00000000..2d06f70b --- /dev/null +++ b/ansible/roles/docker_client/files/certs/docker-listener/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3NMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAxMTQ0NFoXDTE3MDUyMjAxMTQ0NFowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnH5qoWShLw++mR2eZGh3W//j +qfYvL8ihli92GDu9n0Vo0FPe9Wbe2mJ763LvRS+r3DMNjE28seiy7ReIbmRCcgWy +yymY9O0pVA6ovxID/Q6eMPcoXMI+dvMfFAvbNV7tkbSbnrnfoTjtKsIlYCzrYYOh +GEnF2/IJnSNiFa1+WHP/XliCcD0sHfN4StGsycaFDXs6nqqYhX/+ojR7RlyNVeJ+ +z1pkSBpiQJkv5KxowIl7c3fDBISk+U2yyxd5tuvA4e/8Eg79iRxGAIwI6HaJvD4V +zdg0L2cwHLmzcqkKeiX1Co17EMPBAPWzVoFg/ngF5ds/3k1XB614lqfMw2lqOQID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBABKM+cHuBZdkNitpMj/2hRnK/TOsyQ+BAP0OkKWIuOSoQTM8 +GTTeCdwwTbZJnI4l5HhSq3tu0YAKAFj8qMr50os3dgxuDcla4XxRRjM9tCD1SieB +DB7jNf71V45Z8Iw7L0xCViHpyZJEiz18aAILLExdDqyDpBCLQYGuyAcinuqXa1uY +mjFlJCVWz5LMdZwqskaMs/iXexNRr0t9EgDqOvXSfNnTVMoUOS5Qyts0H5cJX1xp +RL1hKsK6aIT40ro2lfp2Qfsn5fUjmKMFyGUSXhrKW2vR35REBEJMpdA/ycKvTh6i +HpUkjKAYolWteVI6z0/O05ReDJ+r7zCgg1AZmF8= +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/docker-listener/key.pem b/ansible/roles/docker_client/files/certs/docker-listener/key.pem new file mode 100644 index 00000000..372e988c --- /dev/null +++ 
b/ansible/roles/docker_client/files/certs/docker-listener/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAnH5qoWShLw++mR2eZGh3W//jqfYvL8ihli92GDu9n0Vo0FPe +9Wbe2mJ763LvRS+r3DMNjE28seiy7ReIbmRCcgWyyymY9O0pVA6ovxID/Q6eMPco +XMI+dvMfFAvbNV7tkbSbnrnfoTjtKsIlYCzrYYOhGEnF2/IJnSNiFa1+WHP/XliC +cD0sHfN4StGsycaFDXs6nqqYhX/+ojR7RlyNVeJ+z1pkSBpiQJkv5KxowIl7c3fD +BISk+U2yyxd5tuvA4e/8Eg79iRxGAIwI6HaJvD4Vzdg0L2cwHLmzcqkKeiX1Co17 +EMPBAPWzVoFg/ngF5ds/3k1XB614lqfMw2lqOQIDAQABAoIBAH6iLVb8PxH+A5WT +SiCmCDACGQ/njrdz+o21SjLB5OeFGxO5Sqn53EuLdgtIriWmNcOx7DykixTL4D3K +1TO/HaV2xv9vYb6QSukiE5rJ0UBtG8Uzuw2UgUdv5K4sCe5eKo/FPkhq0HN9xSzc +fbIdpzr5cNpemCUq7myhvTUddyfLMdcRrQX2J2Msxq7tjPYmXrgx29/fN7iY6jAw +gu94XD2Tcjd/OExiGAZrEklosiskaoN5o5ksTzAXfmKamYa+kF5C3GELru2Vve3s +QNRCZaKFivN/T+NJZBHwmqk8pdq+/MkXQPACmFRJx6PEjhmJSo4MUsjdfrE9m5mc +2/mcMaECgYEAy4+q1wudK8DfqrXx9uhoCoZP2Y76C3aMTAQ6cfbXX045W3xA4GtC +9aD6rq8bH678rH8qppwAEGG4W86oyG9ivRPtolnfTFUCw+MDLu74620L3A2e1mZY +Tck7CIWu5qr8qgCoDt3v4wMU0Rd79gCnlRWXjTm+/MxcnPCcxFFhVDMCgYEAxM7G ++7h9BrDIEFWirMmGinIUxJJvmOBgFeYuAFnuZX7gZtCj9lkkVJm7941o1bxxPxA9 +oSn+2Alvp+GMo+k6qa9ocbicXSoJT248bp0P0VbAAVMd5HkcwEmWAmz2DP0h15Hj +1Fr5NZVKSt/sRLrkTy4mk2ApcfztPQJC2RF8O+MCgYBPgwOpUgkHcSQsqzsjRa52 +BfaktVtoJNsySiiGIXQI7yYqXQN+ZYdXKJUirJU8r/5UZqvDPoYr3+/CNJvrBs8b +qfAVjWNSjpYw67hvmbqzQvnx74QOnRV69NJRRt6FjV56DR2YCQrNsoNsFO8/rQvI +iRGUgWzfk/fM+ZNSf7MzzwKBgHBPK8BEFJz3zwN7Svzdd/iTh4RxqFuOV2bH1yJ+ +L4wd7kLd92NClXftzxVCzjuCcQGy6+iHhZWVNPGRxedX1hE1Nx4GYrzLm0qHttbM +rFUi8ykOXHWgI6cblw5FTdg1r9VLvL7XN3W7fwzhYfwKxvTkekEJ1DznQB7bpngn +AUVPAoGBAMFiKdaONJnzRUIPM0ObJBGSarHUsGFXacf9j/FRQWpIphR5pqpdA6Qw +ctsSk9Sh5vjp9nBP0vaiA3vq93mvB6dbt/aeZIYGHn2A93Yes8URa1BU8+QPOJgS +ZrLLBn8mYnzRzzjKdcpI5zL3oZfsD25yyqe9kENNAh2FurB2zDkB +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/khronos/cert.pem b/ansible/roles/docker_client/files/certs/khronos/cert.pem new file mode 100644 index 00000000..6b4c5d8a --- /dev/null +++ b/ansible/roles/docker_client/files/certs/khronos/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3RMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAxMzI0MVoXDTE3MDUyMjAxMzI0MVowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt5VkdXxdEcgJNR5r07lsFfsk +WGMZzmXMfJ2CbYthcFsZ6kpXHz9dagQdr7Td4JrtyzluQ492xsdWJNqfZ+X+fdIA +fZRXpZzOgv8GpaFf2TPckFDB4Knm/7DTDdjBZH0uf/Vw0PNc+xFZaDB2JaujsDSj +Q4ncMIEcYMngf9Au5RAJr9QkMM5WuE1NNcihHb/SYs4HNI2ndf/WY8dAjcEtt3dv +0MdAR2gbW+hmWlxKw7UdpcfSaMv/fjtABRyeEmNQkuUjvPFmllYJ7DnSaV8eYsJe +ahp/3IAF/zJIG7PYnng9lIKmULo0SwUh87ZJV2BT9NA+WVgtyCITx5OldctVQQID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBAEAkWi8L5bgZ4awj2dW7oEpaFI3Ypt/ROIbrENPcXvLKNf92 +JfpfttMR7Nz7kAZEDgFKS4k4aLN8pX/RKYmPI/QluYuJsrRykrnB1FDAKO4NwEu0 +yHtMW3ZtSAZ8fQeU6bpgA1u1EdYGGe4JOORkR0rfuvqwRwBhAhMQo7HowhbyvfhT ++IM6XZGlhyPZeE/6fm1qJCr1hH8kEZk/68ljnLoBfCqXWtFvwDlrZU/p+1dLQQOZ +Qssomi0tEMJdPqn0l/vyk29A43iRPSpwJjdRqe4A7sp7UB13Le3/EKvDRFkGcu7b +GmMd5RhD/7AsWBPdHSgDThFANEfL4CU8vj9zsyA= +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/khronos/key.pem b/ansible/roles/docker_client/files/certs/khronos/key.pem new file mode 100644 
index 00000000..7739e6e0 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/khronos/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAt5VkdXxdEcgJNR5r07lsFfskWGMZzmXMfJ2CbYthcFsZ6kpX +Hz9dagQdr7Td4JrtyzluQ492xsdWJNqfZ+X+fdIAfZRXpZzOgv8GpaFf2TPckFDB +4Knm/7DTDdjBZH0uf/Vw0PNc+xFZaDB2JaujsDSjQ4ncMIEcYMngf9Au5RAJr9Qk +MM5WuE1NNcihHb/SYs4HNI2ndf/WY8dAjcEtt3dv0MdAR2gbW+hmWlxKw7UdpcfS +aMv/fjtABRyeEmNQkuUjvPFmllYJ7DnSaV8eYsJeahp/3IAF/zJIG7PYnng9lIKm +ULo0SwUh87ZJV2BT9NA+WVgtyCITx5OldctVQQIDAQABAoIBAGuJOehL6eSBjUst +P7Is2jBiBR81JaeuH8MbLIxoabI72wlPdIscPebc9Fq5EoL/na0+PeWbQtMbbs11 +toxQv+sJuTnWTCT5nkpDgqxAw/liFKAAfiDQxTwfYvGod1gar8NKjUQIWf3DYuDS +ClL0oJSVsNDeDznmVy4m8rdLxjxsUitkwciZnF6FbJZFm/Ki2IOfugGh7zwBu3N2 +ivGwCtwoj/YqmCzgtZMu93bb7ZO2p2kka+tqLi1wGaVZlEdltURm3MBv+I8LAG+r +mrJAP33Wig4c7/OlZ6RYg7KdZ+efDU813atC9cZdjJFVhrN5vBqYcSOravqDIvSJ +PcL40WUCgYEA8O9Wb6DrIv+QZ3/pBX7uJCSFuomF2X6x0RMTTYlht/TaOba/wu8E +ROqicGNm61LDDrDl0JVopyN+BCDKaPFaUucsTwOxw1yIF0tD4pdsw2XKI/NvjYKE +qf1+cKUGbeSb9Jx/kW15iOlYpkmPkVwyitTjMESFreuvunLkxwpF+VsCgYEAwxAH +E6q7iZytifI5CDQEOjoXy3LciHW4oNTykIOVL2DzMfZZQee0hIvglmKX5zPVShMq +fMueVozAbXn2ZaSiBg1t2uj6OXLgiVxcW2YHjMS9cFx+X5y7YRAm+yO9WDvfEPiT +4/rBUcRcII17fv3SyEn3vWXBTAAEHvTNDH+aUpMCgYAKIcZ8w0OgISctcw+Vj5Ts +KC0HfykF0HIh3K9/HzvgxIc4zhqaU/rV+xgBqex2EPDq+T1w3Beej7uI6OgaKawm +uFoAqKMdGBMRB7U/HicQwIMTp/UU+lwh2esrSYVPscFtM/eZYgUuYMG9nHJRJpNp +izJSoU8N1U96g8JjLGds6QKBgQCMvB+e/OvZ2YULpgun83+pUwif7PtYKTdQMtPf +mFAmothUD91OQ5Jmn1Umq3PVh/w3OaTJ7U8MOvkLA5fWtfWAUGgWLUl78Xo3IMAx +4WdJFRlo3BMBgN9gpHq7b4nJA0lpwS9SqTSZfgbKJcoYUaGmiFeJ+sR/xPM0MoNO +pTH+qQKBgGVIPtAAA2/o8QcBv1utAh2LU49dt7+2/cIgM9z1USoCiEdxh0PQXc37 +/GAKFPGWSc9wUjG50Ucz3ADoGMieT3OcGtNx6ZB+FSDWvO9XJ8gs5qX5BB3y4qfs +/tXJUgJ1tYVrUs+zg79UXidQfUX3sXEUG0pXtIZJ/a6Cg/t43it0 +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/mavis/cert.pem b/ansible/roles/docker_client/files/certs/mavis/cert.pem new file mode 100644 index 00000000..b0ed3443 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/mavis/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3PMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAxMzIxNFoXDTE3MDUyMjAxMzIxNFowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA02bY1wlp6NaiJ5yHBXGA3AYJ +dfOUCXDX8oG4xch8CzA436rzAThAQGsj4bW3+cDLJuIrjUIu1OudhC54I2p8J8pY +KjjvlVKJSXF4U65JzgRUYjQisxZPmEc/XWTlGx4IEdh0X7mJ86IyJJ1lu0bR5CfP +dvUy1yCwxE9L71S/DCXy9S3AsZAqpuhPWRhda6P8cEXxslpM2N9PwDG1J4/kTMhe +fNklAkPRo4x5C36EXJ+TutnFsomwV1ubL2A/klUGpxd/cZNbNuL9q7oNlTC6/D2+ +2AWWuCH1eJCeeB6ae598mgdntS+gmDazJXv25/rI5eN304QmV55f+rZSaz0PcwID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBAAn2lInr3jZmQH5kqpNlfZI4g8wqQ9RRHLIl9Mn71xZuGufY +/U8hbI2Gfx7fq08z9wOnu2BRrWuKZ60BFjnclEx4SyC4Pna4OXVVSyxPUzE2vXh9 +vdSb/LfpZAQ6T1lxr2KjAz1U3AAkV1uANPfvBuv3jIqmLhP1mwYe1Ipyx4NyfInv +riA6Q1Yd53upMU9RscogEzgQ8Lg1HHuHAx1VDKqE1YJlHl08sxjuGdKH/Hkg3a2k +wxgjNjvbXG/Nv5WAXxNFxked3B3lo4ER8KdZk9BHBikIt2OeTNLfr/e4touPwB2l +o0M3HEVza684EAp5zvTi0hIYz7y/cwYO6Xxho9Y= +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/mavis/key.pem 
b/ansible/roles/docker_client/files/certs/mavis/key.pem new file mode 100644 index 00000000..4087279d --- /dev/null +++ b/ansible/roles/docker_client/files/certs/mavis/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA02bY1wlp6NaiJ5yHBXGA3AYJdfOUCXDX8oG4xch8CzA436rz +AThAQGsj4bW3+cDLJuIrjUIu1OudhC54I2p8J8pYKjjvlVKJSXF4U65JzgRUYjQi +sxZPmEc/XWTlGx4IEdh0X7mJ86IyJJ1lu0bR5CfPdvUy1yCwxE9L71S/DCXy9S3A +sZAqpuhPWRhda6P8cEXxslpM2N9PwDG1J4/kTMhefNklAkPRo4x5C36EXJ+TutnF +somwV1ubL2A/klUGpxd/cZNbNuL9q7oNlTC6/D2+2AWWuCH1eJCeeB6ae598mgdn +tS+gmDazJXv25/rI5eN304QmV55f+rZSaz0PcwIDAQABAoIBAQC/WqcI7rJ5Wl7p +MBxf5v/XKXfC1rq3WPd/wcA492Kg1gOFIj5Yakxugwy1sEFZxxOcnLkDYlb10FiC +X3XLyQWdFuF7RpJHPbCbVCSwymBwJSckxUBrjtR5aHSI210ukJ6JWL7uLwoaCEUP +M5H0VQSEWyzF5ukgJa0b5RM9Dh7opIPJE8r8Povhves057G22h7DBJ9s2PsZnU3n +Anf0iqG+3uZnurgbGxW/8rAQKU5a/VajMuWbom4VjwFmw0rbNQa3aMVv9Z5YogE1 +7V2PhBUkYpN3hVkRz/BxOEGXbELLVEzmcwrATY8c9Q5o+LTD8DGjAyVqIPEGkqg2 +A5sJTpmxAoGBAPBRxQlZKXTkiE2Q936D5CPXj4nolU+rgRaNr99SNHuTxxtGSnxh +IVBXCUdIPgHADX4MSJGjLqgP5xnpxBZcczZAR/G59fTDYs069dGH2KtX/v/hqLbW +aYayrwOIDJlChEgFusO68VCG35o8zoT1WQISubkDW2dNtNE5n90+OOzHAoGBAOEy +C2Px9HQ/8kC5G1gkAI2mXE5SHjt44uhimvkLAQROA5QoxNgOiNH7/kdwlGZjLT5J +rKeXJRCVAd/VJ89aN/EX6xLhMZuoy3fg4QMX4AvOtcz6fB2fNOfpJxAaZY8ozD/Q +Pa/LxgSxM0guFQQ/KpGruav5nstQW9pOULhawuP1AoGALm1NqImuJaQE5R9NgQaP +65C4bUcemV6juKy0vmsOMpWREKbauv60xeWaqEe/muz0blaHKoOg/tzbo7jypfNE +WsW+YGexqZCZpNZwKQOaizebzulUg8YWauxYPVFqRa2BCrz4oNmbxxfXWN/Icg1I +HctqT+DjJqjvdsAwLzjXHH8CgYEAkllym5ZftrjkF/n0clibX3MRI/ksGpxaYonK +izUCiSimHMBOBNeVRVoOC6F3X/7uJT++DAAnH1qnKC8fuxq3pmBT7iYZlj6hVJv5 +v4fhm+rPrZHP3XRc7qZQM5/kBqCN9J7BqCC25M/AMxV3pRXj9V1zwfbsotbrirlA +rj6Vz40CgYAX3zjBmfThiBBBvwG4xdld/YT882AXbKFBoM5xieCKELTinhYNDYQP +z6hfZRvjJ2/JLbQyzJ+74hr9U3H6ue3edUoefUqB/kWN9T/ADeXqVPQYvu4D4nXt +f4J2fwwiNSo5pAoh7ViLJKgRW88W5MKDWnwCvZxPMn9HKz3ke87vYw== +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/palantiri/cert.pem b/ansible/roles/docker_client/files/certs/palantiri/cert.pem new file mode 100644 index 00000000..766051bc --- /dev/null +++ b/ansible/roles/docker_client/files/certs/palantiri/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3QMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAxMzIyM1oXDTE3MDUyMjAxMzIyM1owETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxEra9ZEnGFAKb7p1g21tud9W +IBQps5/GYU51/+oSukrn6+U6fFNHdiOQ8E677odvWYMMGwaJ3ImHH0KfwcjNnHsQ +ooJfOApL0X/6HbCgrxs/721YU02odjmGsFO8ReavPy1suqbXhUR0UNHgcPRCgp9p +c8FznBF/2c0e9KcHiD26BCiJeapoxiQTHilnh4UCews9DorP4Zenfa7dBRdqbre3 +YYEa7t+oKmv0w302p0Bxv94U4sc5yTxQjwzh1Rf7GsbIZR8xyh3mbIREilOzrsgn +yYbWF+5ofxBNbNQkPssjnJDZlFB8Gqwotlko02mRAeiOSCmfWqCfjBxcT1ByBwID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBAJa8HURp0+0cx6eg1PfYwbpzHJtQPoYtj/Am36sPd73NeSJU +k7ksiWtQtoX3HAu2WK946BXMA9UF/ZB0+icvVgfl45nVYQQinM+nJT/LCcVfHglx +t/rMFcTARMxdu0bDNFAxzWf5CuEC3liMI+/uM3ySlvmddsRfJJnRxs3/rXDjvHu5 +Bqxk2Es1r79IT5V06N6a68hyecZ+CGMEy1fqHz4nsTzni5WEligtxTD8DHY9kI7j +6TLl4dY5hW0KURqNwnTTasuKoippvGA/5gNRhd7gcvBjNGmKLQKP5ltH3cF9jYaU +UKzjY2BQL6HCjYxCB0EtxfwFTMCTaDuYyt8hym0= +-----END CERTIFICATE----- diff --git 
a/ansible/roles/docker_client/files/certs/palantiri/key.pem b/ansible/roles/docker_client/files/certs/palantiri/key.pem new file mode 100644 index 00000000..d43d47b9 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/palantiri/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAxEra9ZEnGFAKb7p1g21tud9WIBQps5/GYU51/+oSukrn6+U6 +fFNHdiOQ8E677odvWYMMGwaJ3ImHH0KfwcjNnHsQooJfOApL0X/6HbCgrxs/721Y +U02odjmGsFO8ReavPy1suqbXhUR0UNHgcPRCgp9pc8FznBF/2c0e9KcHiD26BCiJ +eapoxiQTHilnh4UCews9DorP4Zenfa7dBRdqbre3YYEa7t+oKmv0w302p0Bxv94U +4sc5yTxQjwzh1Rf7GsbIZR8xyh3mbIREilOzrsgnyYbWF+5ofxBNbNQkPssjnJDZ +lFB8Gqwotlko02mRAeiOSCmfWqCfjBxcT1ByBwIDAQABAoIBABp3DTZ2r5Kzl/Sg +ax+hLV4WIoKItWamcBTGg1pRsNy1JLfFXDFO+R+QhAYiV2w9jmG/LW6nXwcA89rf +HLL/49jpLsD6WuQvXzGL06kVG1UAk4owb+wrWD84WlSDtaqnA5G9Ch/QRtaM+DG9 +WklgZWc/HvSgbHnqUhaHdVj+/CD+FPBipYYm5tEDX0066wH1jhK01rH+BPzTgY5Q +LI0n8aE+HQwj1xOP/1xfX8lYr3nC6wyPgB4RB7O7eSF0l+rvkn3dkOzN0TjRO8wM +bCGKcXPVZNIYE54jigbcDVtKbrACbqBS9hJ9TOFVQg+AgRX39bM2x/qZNCQFNLqA +e4Bn2cECgYEA8MJ76VvACZ/pyyZf5k1exQHfoVNohPQvkX0qVmYsK5opV07MtyTu +KpwxxARFPvRTfUaT81MpHQYoA2wnHI5oByxDR8hWfMpPristGpny2wVUy3CukQV2 +NuZEf2/4JuWSoGuoPtJ265x/LdbLZN3sHrARMdG6jvkbpR3oYd0B5gkCgYEA0LfH +G90e9RzkoS93N2FNwe1wZ/ZVMp+/DMtGNc6mrIsW8VVQv1vgBbcOnIDbO2xlOY8e +a80Bfoz7l4KwMJp9f+J1oXTWq8SDzQUv/Oq9s/mQUGzeIW4L8rcZVBdJy61S+Yzh +AJUEzzqM1pPrYx6Zu7OjWaQtCyFkvT/P6FeOG48CgYBf7++2fpQYgbuE8ya1u9Du +yh9VxsnYaQFnhSvQv+gL++NLYiAbPsEVA6alCFqbinqWagBcrp+sbB6XtoojS+/S +I+YvfXpRQcDck2Hmvr/SXN58LsuxCC8iE0HqUjDdNw/lkuC5H965ZmQbXQMxj2ke +lWRUqNEhVcyZUECgdpPiGQKBgQC3fDiQKE4k0ae0g9PcXbBMAf7FhmrTvFfqWXF5 +sBgaq5QZyRTZhV1l6M3xi94vWVWLymPGBU2BAVPUmFFoCQi3OlCLBfoKuxHbVOoh +AASRiZ3y9iI5ZVMfpxOfN3Hqmo7NT6omJts5ipFxukw+sRIZTw0VqxzmIg87yDNw +YDhPxQKBgGDgwTfGJinjEp/c0AKFll+fMZF2Y9LbY4eF8JPsfTtauiirdiMNQtVS +QFWsOAjpcrwNyO1vn66j8F+aTeJ+6dfguuKtjaGGM5CATeuxHhasrME2Kn6JIOsL +BKF22VBdXZ6BgrXY95M/Of+RwHsGEyt5LxDTICxYEAK5iJGTTPiV +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/sauron/cert.pem b/ansible/roles/docker_client/files/certs/sauron/cert.pem new file mode 100644 index 00000000..8097df91 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/sauron/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3OMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAxMTcyNFoXDTE3MDUyMjAxMTcyNFowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1PMfE+VNxIAI234KT1xKncmu ++mwAmKkMg+HU0Ii6Ek2VkIsdVhRenUfbb5u+ijP/Att1QSyA+aynnLOh4lDL8okW +AJl52PFq1yMFSd5VYQ3ag9UYFvD5Vvl/vliyfbTMwlx572z9sqCBQPBHmog+J21S +ZnC4ztBM2NKCO6PKuchY95fHTkgmHwS2+Xbt3PFsiU+pr4Ch2m9fWj9vc+tJtmK+ +r8sosO5qcRuP/OwaLqCo34UEHHriCGyzK4FlhYHqeERkAOm+p6ruawkhooF9tmkw +kkQpyh39BkKl6N3spS8CWeGlBZebYCbr00d0yhHnpvdQW5nuTyLEdYiZqq6AiwID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBAH91usWaOqOUYpNyLnDTTnxC6yyClMBuxLn93qDZcWv2cJhz +MVOAgWm2BZjj4BZyGvAIocNoFXb2MhZZzbLCN1VaI5Rq5TnTo9jfrWHdyobdFYc/ +DzigDM/iV60LomxnDaWs2iBNhIwVKlNu5M/h4VY/r8tMFOtgWbQrYVsKq91GbLoI +iGs/OyNa7m5fBOvMhXSqU2NlipyrbbhYijlg5yEt50ry+rjl+d1JS7Wtn32akRhf +DF/bF9LdZHV6BC+PubUr2tMDAT+qXua2q5aXLTYTsBB2DL6lhLROoIYK5igLAbno +yWzL9xCJBgvLQJwfgxvQ6HrQ85G5ONA756J3iY4= 
+-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/sauron/key.pem b/ansible/roles/docker_client/files/certs/sauron/key.pem new file mode 100644 index 00000000..a17cbcd6 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/sauron/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA1PMfE+VNxIAI234KT1xKncmu+mwAmKkMg+HU0Ii6Ek2VkIsd +VhRenUfbb5u+ijP/Att1QSyA+aynnLOh4lDL8okWAJl52PFq1yMFSd5VYQ3ag9UY +FvD5Vvl/vliyfbTMwlx572z9sqCBQPBHmog+J21SZnC4ztBM2NKCO6PKuchY95fH +TkgmHwS2+Xbt3PFsiU+pr4Ch2m9fWj9vc+tJtmK+r8sosO5qcRuP/OwaLqCo34UE +HHriCGyzK4FlhYHqeERkAOm+p6ruawkhooF9tmkwkkQpyh39BkKl6N3spS8CWeGl +BZebYCbr00d0yhHnpvdQW5nuTyLEdYiZqq6AiwIDAQABAoIBAQCyMsjc41QSjXnc +AzspcffW1oEgeeR922jnq8FkyCW9T0NLc76W2/DY+Nq+KLxn9l79yxvx8AX16O9S +CFf9hNNGIEVTNxtp2Ywof9vbFrynvOoQy++eswRULfEiHiMvYm6+bphzLOeX5QyE +hQ3GWSB56de8dbORDWphPrdQpR3uHBugsebsv24TDDVBD8WBeWir9CLTqTgxxDmP +FGIBPr9v6zGy0gn/wPeOJSNK8/Bx01jMUZ/8TsTEiHqE/jFXdEUbkem8ntgqMM6u +epG2K30n198D+AJpH661O4IgdHFYE5zx3UVV+oouNRcN3w477JZm19Ntc8orii+E +iBvBrMDRAoGBAPz0kU8BjX30+qt4lQwCXbVvNc2b7riozkVSOGIRuTF91QdfqarQ +OB5MvVdWqWtcIyZkCrhAaHJCOPXIcqTuy0BimYEQaiBCRlCxGPs/QCmqXtjdS5HL +3T/EqFAEfCNGTbMi3Nl9Kzd3Q7zD3jx3iMMBgbucz8hVA78IJsj1PFH3AoGBANeD +SMG0d9YAVZu/JekOKblWEjW9TlHRGnZbgAzJrkS0EvXAYxT9YzOjsoItvMpC9gFg +qbsM+FGtsuidW2hsCnhhiSPxYfk6yep/tES+oSVaF9kALSczZHf1+yIbImpWhR8z +6KtwhigZsvB1W9Aqmik22KvO72G4fPHiRPms/qENAoGAUmTLTAPCdcutNtsxGVdP +9MBDszReftGlVpkgenkxbHiDQkKbvmLSVLREXEWDK9A4NtIRV9DK9KSzbwOarDSI +CfHT9GFHYDiWNMRq0bPZj3jKKjNy4QuZhqLk/CNp2AeXWYk6jLEjuY5khwD5y5JX +/sTn4BeE6skeTkQ2dfDDdCMCgYBnOFT3mdUvpTdbgkDJD+SCBSxxmrrRSwKOn6KD +/XC/LTRLihqqqfYCfKv516gtg8bJwB9rHHouvLAM45ktOVoGcEi781fGhBbDfui7 +aEZMWotDfucheGbtYd4nZrGkFs5ptHYC7sECodRngP45hL4TcVZza4iR2EfXnlUE +9It8XQKBgDZYL7xChu23zLI15iqPYrtO0SC3+wRoepva41Anf6nWdE+CZUXRBAYV +QydtW8orDrC3vW5RAT20zunfq7a3ddinrYXgVX8Vs88i1DExmrxUrocJ9muB2jtl +RRl4h7W2T5dfpanuf7uR/MkEHRIaoBhRSTzlLKR1Hb/DclBhY3aV +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/stage-service/cert.pem b/ansible/roles/docker_client/files/certs/stage-service/cert.pem new file mode 100644 index 00000000..2658849a --- /dev/null +++ b/ansible/roles/docker_client/files/certs/stage-service/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3TMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyNjIzMjU0NVoXDTE3MDUyNjIzMjU0NVowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3eSj7FcOf22PXl5OMQCi3tir +NZkka5GZhrPtf4/K24Rq8IuSFUsAZud2ZiNfcOcD4NFeUTgWsMBJ3b63yFmh+k1z +WyCSnskLX4xVZ70l3Z3Fb8QeJlOIZgNpfBzuypOzWRu+XDgyjvNZd8r+SItKZEmA +bndv1O0KBczf1B9NMqiwm+73VtNqO7S2MhZ2R1aOxVIgF0itTe0MRShzo6gXaImm +dyUi69fTjYJIy/P/9d/nSdvHpPgtfBDwIX+cuk/H+CI2+G6Cu1puwmYWIs+IhM/c +HKBX2t7v0W7ZgSxelqVDOeSbC8Q6bnp1rdUht9mgyk4BHo2bOlQKqzdZ5W9/fwID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBAAZcKd/apgiGE6nljkG8t3ep5jAuBmMZatUkSeDdE/MOSMrz +9jXuL3SytPouxroB3174zqJ1yJbi3h1G52PH5dDuUnCGoRZ3QcMRFlWpsNDlBAcc +i3clNJZtjn9Q5/vh88OzdZgC6Nuq5jTbebb0GgbWTKdPZmVc3baRjsphjQY7d++Q +EacJytXXijGNL/NWMOF5w6gXrAIloL3BLxKZBueFwYu4ikibPH4VkAxuTInw8Jlc 
+hWomTYoUbyOwczCWa+GTJa9bgjO1pu9L43c+MF8vOGAPhViIXxG8g61NIL6Lj+MN +DtxcyYXBef/v5tDdCvGJzdV4WXeW/vlNAZ/hGsk= +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/stage-service/key.pem b/ansible/roles/docker_client/files/certs/stage-service/key.pem new file mode 100644 index 00000000..56d17af6 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/stage-service/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA3eSj7FcOf22PXl5OMQCi3tirNZkka5GZhrPtf4/K24Rq8IuS +FUsAZud2ZiNfcOcD4NFeUTgWsMBJ3b63yFmh+k1zWyCSnskLX4xVZ70l3Z3Fb8Qe +JlOIZgNpfBzuypOzWRu+XDgyjvNZd8r+SItKZEmAbndv1O0KBczf1B9NMqiwm+73 +VtNqO7S2MhZ2R1aOxVIgF0itTe0MRShzo6gXaImmdyUi69fTjYJIy/P/9d/nSdvH +pPgtfBDwIX+cuk/H+CI2+G6Cu1puwmYWIs+IhM/cHKBX2t7v0W7ZgSxelqVDOeSb +C8Q6bnp1rdUht9mgyk4BHo2bOlQKqzdZ5W9/fwIDAQABAoIBAQDW6WNHPg5hp0ir +2uUamwYklnBEyl22/d9X0x8dZLTSpk85ScUkcG8kO9y2El6XKHjeH4qdWZjIeoVQ +iy7PktjKaIdo5IR93DTyxyXVAyMQoYWfubCF6HP8ciORvSge9q1HihJ2mUuGnvWx +i997fcWAmpwU4gNjlG4XrhXACs0nWOKY4OjQonHxaqp+NdUDkGDTFK5UfHvZ9wQb +5pG1we7ipX84ThIqo7mrLMWVvS1YzBiE6OVX7FwQgr+zAvjqLiWEmKyzSlDUNDXq +OecZjaHN9mVM7BEiCNnptW7fIv2x/Yhpvd/k1rUWvwArSRDkDtfq4qDDqFYYWw6Y +4cR1ivUpAoGBAP2m3NvXkUynS092ziUnc+sUfD7VXIgR6f7TYsMtfJqI8LPq8OEW +wKB4pte/RJKeC/Oji0T97vh9o/81/C8axWaGEZ1Lc8Rr/NxMdCdm65/sL0vYR5hN +oFTkAiJz70laPmNfPkdnmd9M/7RgeKQyquf4lWm1UcxcjA/oahBsVK1NAoGBAN/y +gv+qqEBF211QmBDxyiQR7g+FNQakRIZ6RGb9VgnLjCWgonJ6APz0L/vsNjqDpD49 +1O12HKBY1Pg4h83tuLz2GFrO4apKrtWQwPIF6iFTwvDD6mkx50C5qq1QRN4TVgjg +pU/Vz282tZT9UuQPl0il7XmDPhmQd2HwiBT6KWn7AoGBALmoReX/3dEubjgmrHKT +cwww/VP8VXMfMZecX6CejaKK8yutnkwSnEGYrf4ieG2ckwDsxsZj22M0izIE8KYF +wVDudrwb+wtdQ0J4D1i7w2FVvbSVBKpcvysDjPBszUtya1HO58XZ9c11DYCYUBVX +JLMPx0UNO2cKolYOdnu43y3NAoGAHFMO8IlEKITtXtsPsKz0NSWzZ3EgqMtkA2I5 +fpEyLaD6A9RSV4pIz7WO0B0rKrF1t42YfcQ/kKRLo21697fGctfpcqrR7fjuLgLV +9IZ8+2QdttgAsPdn2U7iYHNTkob9jbfkELnpN5bC1qpHrqKuLrkV1RWdaBxWjSOg +Ao81Z7sCgYA/t/Rt33AzkKXMncPWFXJZb5neyJL+cZcFpRU3FsOuZVOT2honvqYu +cFl99Qh+RlIKo4R4y4/kDQMw4lvWHS0mdRmHrCuJ50AAmQ74jUK4ml7XmBAgOCMG +JRyO8TIOUumghgjPeTYmDt5JzxMdvod4iK/3d2dINtiWzl1PBgDbrw== +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/staging-swarm-manager/cert.pem b/ansible/roles/docker_client/files/certs/staging-swarm-manager/cert.pem new file mode 100644 index 00000000..8ecbceed --- /dev/null +++ b/ansible/roles/docker_client/files/certs/staging-swarm-manager/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3SMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyNjIyNTc0MVoXDTE3MDUyNjIyNTc0MVowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAt7yf/uDuEMs5qgiOgKUW9K6k +IRE2bRAkGAvd3xTbANm553ZfdGWU6KMAWf80KIDeBxA0wvhKOjQ0D4JXdtlE48d/ +7G5O3k91Cj9WvgsEbLtKfYmIRPUwrAgkuipmzrsU1ge22iZk8Qe971l2xIc37H+6 +9QlTZS5te5X3pvTvrF5y4gpIrIYMsrbFtiZ22rkUrpbmu0MOiDZ7m/yt3VCOZN8x +c4XhrrA0CIQwS6gOMKY60LWl8xOCi0BNv0yILm3waPDEE+r81dAFk47FU+goh8MF +3PcNedJKwCxDEawmBqzI7kAVc4JmGvOdV4CNsPi+0GZ9vGFHjcJzYZJyafPE6QID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI +hvcNAQELBQADggEBAIL+p4w2DxNSKnETolvToVoszuo89QrxRU9HulJ6ki2SbeoW +yt42O75KnIEqVK93OYU0XU4CGxu93ZueEzayFjfdYfzeku3oCy1SGpY25GlP6s+o 
+YQ8mJVNt24banGrUMmd6JJXsmbyO4hHpefw9DTPTTN7JHBhp4MK0CzbaMcUmdRqD +PZzf2YL0fJHevrKZZWy4oLLzSzdrV9xfcGcZ2bLkVnDaXCvTS1aEX5z482DGAz99 +3YfJ3As5EshLdRE1ksWMnittxNAJXTtJGOWMC35ipLTvkC1YQu5Xi6KCnRN8OUr/ +DWOp/FJdWDN2zOa7Cc5nJyp0zHM97+S5cByIMaM= +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/staging-swarm-manager/key.pem b/ansible/roles/docker_client/files/certs/staging-swarm-manager/key.pem new file mode 100644 index 00000000..ad970197 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/staging-swarm-manager/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEoAIBAAKCAQEAt7yf/uDuEMs5qgiOgKUW9K6kIRE2bRAkGAvd3xTbANm553Zf +dGWU6KMAWf80KIDeBxA0wvhKOjQ0D4JXdtlE48d/7G5O3k91Cj9WvgsEbLtKfYmI +RPUwrAgkuipmzrsU1ge22iZk8Qe971l2xIc37H+69QlTZS5te5X3pvTvrF5y4gpI +rIYMsrbFtiZ22rkUrpbmu0MOiDZ7m/yt3VCOZN8xc4XhrrA0CIQwS6gOMKY60LWl +8xOCi0BNv0yILm3waPDEE+r81dAFk47FU+goh8MF3PcNedJKwCxDEawmBqzI7kAV +c4JmGvOdV4CNsPi+0GZ9vGFHjcJzYZJyafPE6QIDAQABAoIBAFLag0ebRJ9cFT/A +pgoK6CQkUKz8pw9dVPlm+IEtVWi3xeZDcd36JMEXKARtWabeHcEPpMs6zbYbblKE +E07rwO36O4xxXa1QJVYeo84FB9TETF350/JMossgzPcKQdv9bvLo/aK355LtS9AQ +BseutiA2pSb0MqAQ5JvzT38BOsb9+HuLXQ9ES2yI3N/PBKxoq+fypYUhZ8H7w1tY +DDfCKLzvrgtV1rIHOnMzkTkBnnEVgIkqCqTOJM9G8NwVr8ltaYyYM2uUDL+TNlgq +lbgrYMFfA+oW6ZHvC7PlxmhXDs0MdNbLlPFmXBMBb+cBSHJJ81wIN//25Rs83jpJ +1Vhez5UCgYEA43sYErcAlwDKQU0Dfe7rSZ9rsf2XyhVwhCtebuZa6qZkihfT7cLy +Mx2zzjBecgE7udNqm1qGo9zcSP1e67FZv+5ogcYQzeDz/SETHo5WPyDq5Gy2mxES +Osw8an74/HR25BLBeTcdiI6jS98+Tq+8YT/OR/SfJVHu8runK2gySncCgYEAzsWV +j6cdZqZf2oSQDcbHHJxEECTofoSJDuoCcqECBcaQ8Tjs3ZTpMud8y8hu2sPWj92c +FqfBL3jH/7UGbiBdClaIkhqlDdfmxDOV/qvZtwyiz6E9OAIzCqJ4tZWoioLbx6vs ++Ll8MqT2Zz5DK0jEOJOWNXMVkcLQCJX6zbbQ458CgYAisIDJSdLzNBGjx6TCrY5/ +2anIqM1e6coQvbRW4RX4Nouaa2vMFDBvDxZ+WO3uILYE7nKRYkqp7mJBVzIEazUu +iaKyN4wUeTt73HFX9Yi9c72eO93U49ON19tWuioqBXCj8yBB5d/BFU9doJuV0EK/ +6cUBQBKM1GPF7Evf2LHinQKBgCQNeXVxuwOh8MamJIPya9dGO+vgs5UTwN4T2Vdp +UXP2WmFk/o/oHf0dNbpGeWW1jdzJ1CRzHZEMazrfsn5TMEXULr3i3XKbeYYuRHIC +PtTW5DB1ubnhUEsDZy6qKJf2FyIqjQgVN12RO9FmnsmWR3hh62foaBNhN2q30+I4 +hRWbAn9TNtSr7uTV9K0pakaEuPeHOjPmhnm5gjN/juMB97A7IMcAGUJ8N1YFJ4Gx +PawnS/JGLoMpzVGD4Gjm7kJYdheXNcEuatwQTTA/b7FrST3so/S6bBZBbuOxGypp +hmMMJa5Hh8l7YKz9Hd8MVjjW1WzwsTruY9Vyxt6EgJZjcrVT +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/files/certs/swarm-manager/cert.pem b/ansible/roles/docker_client/files/certs/swarm-manager/cert.pem new file mode 100644 index 00000000..5c3f9506 --- /dev/null +++ b/ansible/roles/docker_client/files/certs/swarm-manager/cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDqTCCApGgAwIBAgIJAMVvyYYoDm3IMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD +VQQGEwJVUzETMBEGA1UECBMKQ2FsaWZvcm5pYTEWMBQGA1UEBxMNU2FuIEZyYW5j +aXNjbzERMA8GA1UEChMIUnVubmFibGUxDDAKBgNVBAsTA09wczESMBAGA1UEAxMJ +bG9jYWxob3N0MR8wHQYJKoZIhvcNAQkBFhBvcHNAcnVubmFibGUuY29tMB4XDTE2 +MDUyMjAwMTUxMloXDTE3MDUyMjAwMTUxMlowETEPMA0GA1UEAxMGY2xpZW50MIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwUuwLJTfkFxf/hwel4mpQiZI +nHWtB56EceQFO2uwnRPRJ0STkJuigwCOnTX6CCSFwIv6X0wcqf+kf//LnQe6Zq0g +Cn9Vaz0yGJlLmREjvF3ub5hNcCujCVbB1IIet1XVQGwpAVwvBtokoAfnJWD36oec +qJNy8loL2KU9vJa1ISKduNgPJWc9eiUzOvgDAtwa+VQ9BZ52FJsgEVEjWKBtIYgb +cw2UkN+uruFRWJDUDKhq3rNZcpx9r4uxU291T6t2SfTx7eG9x/ciO1UQk9FTSXvf +x+QRCBfu3TqzEA1dps8/p+u+yotuz+L3a70hY5AN2YcfcfE3P1Fnluaz7XVKTwID +AQABo4GDMIGAMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBfBgNVHREE +WDBWhwQKCAQohwQKDAyIhwQKCAU/hwQKCAY7hwQKBAb7hwR/AAABgglsb2NhbGhv +c3SCJXN3YXJtLXN0YWdpbmctY29kZW5vdy5ydW5uYWJsZWFwcC5jb20wDQYJKoZI 
+hvcNAQELBQADggEBAHFsocmzGWjBN4lSau7Rt7kQQV5nux/qJJgYVxptDnzXXQ8t +Vn68RFEYmWmm3E6gLjIxKARxwEISRl3yLStvjP6MFmp7j3mKkSNCt1Gptkd9QR0t +SfTIisKvgVZNXC2HRQ6KSYQJXgVhgc63AOXhTTPNS8u3KMwDWZs2j0wZZiYr7Vln +9t1gZ3koA1gsQfdUc1J6Nbgi9neawNXmjsAihNNCEYjDFSKIkxcOtl0PtqJMwuYF +OQP8oI0E3HEtOlnqfSsqxYvPsrLL696ywg9oa+bahqKrFKL0Nx6AaUTfcpERO5oj +gKgQODbKNn7xZYbJo0VeN6ujfmcvNJGs1AtOJ1c= +-----END CERTIFICATE----- diff --git a/ansible/roles/docker_client/files/certs/swarm-manager/key.pem b/ansible/roles/docker_client/files/certs/swarm-manager/key.pem new file mode 100644 index 00000000..37ca2fdd --- /dev/null +++ b/ansible/roles/docker_client/files/certs/swarm-manager/key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAwUuwLJTfkFxf/hwel4mpQiZInHWtB56EceQFO2uwnRPRJ0ST +kJuigwCOnTX6CCSFwIv6X0wcqf+kf//LnQe6Zq0gCn9Vaz0yGJlLmREjvF3ub5hN +cCujCVbB1IIet1XVQGwpAVwvBtokoAfnJWD36oecqJNy8loL2KU9vJa1ISKduNgP +JWc9eiUzOvgDAtwa+VQ9BZ52FJsgEVEjWKBtIYgbcw2UkN+uruFRWJDUDKhq3rNZ +cpx9r4uxU291T6t2SfTx7eG9x/ciO1UQk9FTSXvfx+QRCBfu3TqzEA1dps8/p+u+ +yotuz+L3a70hY5AN2YcfcfE3P1Fnluaz7XVKTwIDAQABAoIBAQDBPJZR6z6YhhCc +66WVntRrwfQDHg0tucX0SZ0Fa0IztfT4ybCv9zb+udbYyXY9H63q8MUK8f8R3/Sm +1aTLRkeNj/OhKuzM+W+UcePGkQDltg3A2ERKKAWkN2wssyxb4d3/umE/79U/h2Ln +7R6iGo+dkZcGW+mk6GpBWegfGZbYGJMg4UsLffTRLHuD3sWJDFtIHdU21H3+SUoB +L4WnoVTaINaO7je3nyjKc6aiiLZt8hwNlfwiow/KUcJ7lfwa3OeDe5KgX/IKCvTD +LvbJ4MPbM3ByXgTdt89cG5f7y9njvJwGAxjvV11e8PV7iEDzaN/7+jqtIuaKw4UT +JMjE9vXhAoGBAOR5eDiGL3/bbkUCAyonh0haZJ5CSeYFpiykgFi1XHZWkrkZVsH6 +MEOvsvXI/v7B9pc4+QBHIHNxMd03BpvhTpThRDFdx5q+hbdYV62LW9wePHzYGrTU +CbD/KFXR+Qf9ugT2HGPI99fjI+Al/hZUR2BaTK4UGJnY0NKRal8B+zBRAoGBANiV +Pso+g4/wMa2kz5CvJ6bwOInYHWYkETw7vp9y6GODkWU+KQBqPz5CP+TGkZFO/ZtF +2TG876NlViwJlvwqF83n2xqY1jaU58XdmkK0N6jY9/v9VuZ5lLnFWZNvti78n+46 +g2FrnSjsRrnqU4WYtw1LfGObdZxfwwUdJDp3w8ifAoGAMttHA237DL7PYNT911hN +KLHWS9P9lxIS10UWvKhCGKVUq5cUVMFYFPj7IPLIr/SE60xnQMSUWJ4Nt2wUMIaK +c2FhSIvTboKSSSmE+sBWhjzEwhVofG/maFujotbGVB0KVyhhUWTeplONHMvYLyg7 +axsujmEyyKwWb4im4NcUDsECgYEAwrqNVfGXAFeJkwJXzVJuaF4bt3QkGq3bATkG +Tv5ViwPEOi8FVmYwrp2KLPuL7gYDZxQKD/o7y8FVFQsaQWbnOC56hRNdpn2DVhfE +lqzKTnmlnV9OPrKZ0d1qiHEscFqawOrDLVhAWYb2oDnbFocULlPsZvJjcFF5WqO0 +6wy7bykCgYAU+YNnB35TiIsdXLjauZ68DtPslVY7dRRiyu1Ynob3JZImHdP0CQ34 +4tdHwSF2osx3EG0hOCMpVrHqjzzMIeGj9637m+6u/St9J71CL4AZQr83veSYpN8R +Av+YuAPiinJUEGCYC8su1fVQ9KAKvF36mo5PLC9/PHiZXi4v8YXsjw== +-----END RSA PRIVATE KEY----- diff --git a/ansible/roles/docker_client/scripts/genClientCert.sh b/ansible/roles/docker_client/scripts/genClientCert.sh new file mode 100755 index 00000000..69f01776 --- /dev/null +++ b/ansible/roles/docker_client/scripts/genClientCert.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +if [[ $1 = '' ]]; then + echo 'script requires a client name' + exit 1 +fi +CLIENT=./files/certs/$1 + +echo 'WARN: hard coded alpha-api-old gamma-services and beta-services for SWARM' +# if [[ $2 = '' ]]; then +# echo 'script requires a client ip address' +# exit 1 +# fi + +mkdir $CLIENT + +# generate key for client +openssl genrsa -out "$CLIENT/key.pem" 2048 +chmod 400 "$CLIENT/key.pem" + +# generate CSR for client +openssl req \ + -subj '/CN=client' \ + -new \ + -key "$CLIENT/key.pem" \ + -out "$CLIENT/client.csr" + +chmod 400 "$CLIENT/client.csr" + +echo extendedKeyUsage=clientAuth,serverAuth > "$CLIENT/extfile.cnf" +echo subjectAltName=IP:10.8.4.40,IP:10.12.12.136,IP:10.8.5.63,IP:10.8.6.59,IP:10.4.6.251,IP:127.0.0.1,DNS:localhost,DNS:swarm-staging-codenow.runnableapp.com >> "$CLIENT/extfile.cnf" + +# generate cert for client +openssl x509 \ + -req \ + -days 365 \ + -sha256 \ 
+ -in "$CLIENT/client.csr" \ + -CA ca.pem \ + -CAkey ca-key.pem \ + -CAcreateserial \ + -out "$CLIENT/cert.pem" \ + -extfile "$CLIENT/extfile.cnf" + +# set permissions for deploy +chmod 644 "$CLIENT/cert.pem" +chmod 644 "$CLIENT/key.pem" + +# cleanup files we do not need +rm $CLIENT/extfile.cnf +rm $CLIENT/client.csr diff --git a/ansible/roles/docker_client/tasks/main.yml b/ansible/roles/docker_client/tasks/main.yml new file mode 100644 index 00000000..79010a29 --- /dev/null +++ b/ansible/roles/docker_client/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: create docker cert directory + become: true + file: + path=/opt/ssl/docker/{{ name }} + state=directory + +- name: copy docker CA + become: true + copy: + src=certs/ca.pem + dest=/opt/ssl/docker/{{ name }} + mode=0440 + owner=root + group=root + +- name: copy docker client key and cert + become: true + copy: + src=certs/{{ name }}/ + dest=/opt/ssl/docker/{{ name }} + mode=0440 + owner=root + group=root diff --git a/ansible/roles/docks-psad/README.md b/ansible/roles/docks-psad/README.md new file mode 100644 index 00000000..e54c80b2 --- /dev/null +++ b/ansible/roles/docks-psad/README.md @@ -0,0 +1,6 @@ +iptables +=========== + +This role installs and configure psad and syscfg + +This role is to be run on docks to effectivly help limit ratelimiting and stop containers from accessing things they shouldn't diff --git a/ansible/roles/docks-psad/defaults/main.yml b/ansible/roles/docks-psad/defaults/main.yml new file mode 100644 index 00000000..b08ddcc8 --- /dev/null +++ b/ansible/roles/docks-psad/defaults/main.yml @@ -0,0 +1,3 @@ +error_mail: ops@runnable.com + +psad_script_folder: /opt/runnable/psad diff --git a/ansible/roles/docks-psad/handlers/main.yml b/ansible/roles/docks-psad/handlers/main.yml new file mode 100644 index 00000000..2215c843 --- /dev/null +++ b/ansible/roles/docks-psad/handlers/main.yml @@ -0,0 +1,11 @@ +--- +- name: apply sysctl by service procps start + become: true + command: service procps start + +- name: restart psad + become: true + service: + name=psad + state=restarted + enabled=true diff --git a/ansible/roles/docks-psad/tasks/main.yml b/ansible/roles/docks-psad/tasks/main.yml new file mode 100644 index 00000000..b092936b --- /dev/null +++ b/ansible/roles/docks-psad/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- name: create folder + become: true + file: + path={{ psad_script_folder }} + state=directory + recurse=yes + +- name: create sysctl config file + become: true + tags: [ sysctl ] + template: + src=60-runnable_sysctl.conf.j2 + dest=/etc/sysctl.d/60-runnable_sysctl.conf + mode=644 + group=root + owner=root + notify: + - apply sysctl by service procps start + +- name: installing psad to latest version + tags: [ psad ] + become: true + apt: + pkg=psad + state=latest + update_cache=yes + cache_valid_time=3600 + notify: + - restart psad + +- name: psad signature update + become: true + shell: psad --sig-update && psad -H + register: command_result + failed_when: "'FAILED' in command_result.stderr" + + +- name: copy psad detected script + become: true + tags: [ psad, psad_script ] + template: + src=psad-script.sh + dest={{ psad_script_folder }}/psad-script.sh + mode=u+x + +- name: copy pasd config script + become: true + tags: [ psad, psad_config ] + template: + dest=/etc/psad/psad.conf + src=psad.conf.j2 + owner=root + mode=0644 + notify: + - restart psad diff --git a/ansible/roles/docks-psad/templates/60-runnable_sysctl.conf.j2 b/ansible/roles/docks-psad/templates/60-runnable_sysctl.conf.j2 new file mode 100644 index 
00000000..0ba78208 --- /dev/null +++ b/ansible/roles/docks-psad/templates/60-runnable_sysctl.conf.j2 @@ -0,0 +1,37 @@ +# Disable ping +net.ipv4.icmp_echo_ignore_all=1 + +# Turn on Source Address Verification in all interfaces to prevent some +# spoofing attacks +net.ipv4.conf.default.rp_filter=1 +net.ipv4.conf.all.rp_filter=1 + +# Do not accept IP source route packets (we are not a router) +net.ipv4.conf.default.accept_source_route=0 +net.ipv4.conf.all.accept_source_route=0 +net.ipv6.conf.default.accept_source_route=0 +net.ipv6.conf.all.accept_source_route=0 + +# Disable ICMP redirects. ICMP redirects are rarely used but can be used in +# MITM (man-in-the-middle) attacks. Disabling ICMP may disrupt legitimate +# traffic to those sites. +net.ipv4.conf.default.accept_redirects=0 +net.ipv4.conf.all.accept_redirects=0 +net.ipv6.conf.default.accept_redirects=0 +net.ipv6.conf.all.accept_redirects=0 + +# Ignore bogus ICMP errors +net.ipv4.icmp_echo_ignore_broadcasts=1 +net.ipv4.icmp_ignore_bogus_error_responses=1 +net.ipv4.icmp_echo_ignore_all=0 + +# Do not log Martian Packets (impossible packets) +net.ipv4.conf.default.log_martians=0 +net.ipv4.conf.all.log_martians=0 + +# Change to 1 to enable TCP/IP SYN cookies This disables TCP Window Scaling +# (http://lkml.org/lkml/2008/2/5/167) +net.ipv4.tcp_syncookies=0 +net.ipv4.tcp_max_syn_backlog = 2048 +net.ipv4.tcp_synack_retries = 2 +net.ipv4.tcp_syn_retries = 5 diff --git a/ansible/roles/docks-psad/templates/psad-script.sh b/ansible/roles/docks-psad/templates/psad-script.sh new file mode 100644 index 00000000..4292aab9 --- /dev/null +++ b/ansible/roles/docks-psad/templates/psad-script.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +alerted_ipaddress="${1}" +echo "looking for container with ip = ${alerted_ipaddress}" + +for container_id in $(docker ps -qa); do + container_ipaddress="$(docker inspect --format "{{ '{{' }} .NetworkSettings.IPAddress {{ '}}' }}" ${container_id})" + echo "checking ${container_id}: ${container_ipaddress}" + + if [[ "${container_ipaddress}" == "${alerted_ipaddress}" ]]; then + psad_logs="" + psad_logs_files="$(ls /var/log/psad/$alerted_ipaddress/*_email_alert)" + echo "found container_id, getting logs for ${container_id}: ${container_ipaddress} from ${psad_logs_files}" + + for log_file in "${psad_logs_files}"; do + psad_logs="${psad_logs}$(sed '/Whois Information/,$d' ${log_file})" + done + + data_file=`tempfile` + echo "generating data file at ${data_file}" + echo '{' > ${data_file} + echo '"containerId": "'"${container_id}"'",' >> ${data_file} + echo '"hostnames": "'`hostname -I | cut -d' ' -f1`'",' >> ${data_file} + echo '"logs": "'${psad_logs}'"'>> ${data_file} + echo '}' >> ${data_file} + + echo "sending alert" `cat ${data_file}` + curl --header "Content-Type: application/json" \ + -X POST \ + --data "@${data_file}" \ + "http://{{ drake_hostname }}/psad" + + rm "${data_file}" + fi +done diff --git a/ansible/roles/docks-psad/templates/psad.conf.j2 b/ansible/roles/docks-psad/templates/psad.conf.j2 new file mode 100644 index 00000000..c4dbf69b --- /dev/null +++ b/ansible/roles/docks-psad/templates/psad.conf.j2 @@ -0,0 +1,582 @@ +### Supports multiple email addresses (as a comma separated +### list). +EMAIL_ADDRESSES {{ error_mail }}; + +### Machine hostname +HOSTNAME {{ ansible_hostname }}; + +### Specify the home and external networks. Note that by default the +### ENABLE_INTF_LOCAL_NETS is enabled, so psad automatically detects +### all of the directly connected subnets and uses this information as +### the HOME_NET variable. 
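As an aside on the `60-runnable_sysctl.conf.j2` template above: after the role runs and the `apply sysctl by service procps start` handler fires, the hardening values can be spot-checked on a dock. This is a hedged sketch that assumes only the file path installed by the role.

```bash
# Illustrative check of a few of the values set by 60-runnable_sysctl.conf on a dock.
sudo sysctl -p /etc/sysctl.d/60-runnable_sysctl.conf   # re-apply the file by hand if needed
sysctl net.ipv4.icmp_echo_ignore_all \
       net.ipv4.conf.all.rp_filter \
       net.ipv4.conf.all.accept_redirects \
       net.ipv4.tcp_syncookies
```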
+HOME_NET any; +EXTERNAL_NET any; + +### The FW_SEARCH_ALL variable controls how psad will parse iptables +### messages. If it is set to "Y" then psad will parse all iptables +### messages for evidence of scan activity. If it is set to "N" then +### psad will only parse those iptables messages that contain logging +### prefixes specified by the FW_MSG_SEARCH variable below. Logging +### prefixes are set with the --log-prefix command line option to iptables. +### Setting FW_SEARCH_ALL to "N" is useful for having psad only analyze +### iptables messages that are logged out of a specific iptables chain +### (multiple strings can be searched for, see the comment above the +### FW_MSG_SEARCH variable below) or a specific logging rule for example. +### FW_SEARCH_ALL is set to "Y" by default since usually people want psad +### to parse all iptables messages. +FW_SEARCH_ALL Y; + +### The FW_MSG_SEARCH variable can be modified to look for logging messages +### that are specific to your firewall configuration (specified by the +### "--log-prefix" option. For example, if your firewall uses the +### string "Audit" for packets that have been blocked, then you could +### set FW_MSG_SEARCH to "Audit"; The default string to search for is +### "DROP". Both psad and kmsgsd reference this file. NOTE: You can +### specify this variable multiple times to have psad search for multiple +### strings. For example to have psad search for the strings "Audit" and +### "Reject", you would use the following two lines: +#FW_MSG_SEARCH Audit; +#FW_MSG_SEARCH REJECT; +FW_MSG_SEARCH DROP; + +### Set the type of syslog daemon that is used. The SYSLOG_DAEMON +### variable accepts four possible values: syslogd, syslog-ng, ulogd, +### or metalog. Note: this variable is only used if ENABLE_SYSLOG_FILE is +### disabled, and this in turn will mean that the legacy kmsgsd daemon will +### collect firewall logs from syslog via the old named pipe mechanism. +SYSLOG_DAEMON syslogd; + +### What type of interface configuration do you use? Set this variable to +### "iproute2" if you want to use the iproute2 type configuration. +### iproute2 does not use aliases for multi-homed interfaces and +### ifconfig does not show secondary addresses for multi-homed interfaces. +#IFCFGTYPE iproute2; +IFCFGTYPE ifconfig; + +### Danger levels. These represent the total number of +### packets required for a scan to reach each danger level. +### A scan may also reach a danger level if the scan trips +### a signature or if the scanning ip is listed in +### auto_ips so a danger level is automatically +### assigned. +DANGER_LEVEL1 5; ### Number of packets. +DANGER_LEVEL2 15; +DANGER_LEVEL3 150; +DANGER_LEVEL4 1500; +DANGER_LEVEL5 10000; + +### Set the interval (in seconds) psad will use to sleep before +### checking for new iptables log messages +CHECK_INTERVAL 5; + +### Search for snort "sid" values generated by fwsnort +### or snort2iptables +SNORT_SID_STR SID; + +### Set the minimum range of ports that must be scanned before +### psad will send an alert. The default is 1 so that at +### least two port must be scanned (p2-p1 >= 1). This can be set +### to 0 if you want psad to be extra paranoid, or 30000 if not. +PORT_RANGE_SCAN_THRESHOLD 5; + +### For IP protocol scan detection (nmap -sO). While it may be relatively +### common for a host to trigger on tcp, udp, and icmp, it is more unusual if +### a host triggers on, say, five different IP protocols +PROTOCOL_SCAN_THRESHOLD 5; + +### If "Y", means that scans will never timeout. 
This is useful +### for catching scans that take place over long periods of time +### where the attacker is trying to slip beneath the IDS thresholds. +ENABLE_PERSISTENCE Y; + +### This is used only if ENABLE_PERSISTENCE = "N"; +SCAN_TIMEOUT 3600; ### seconds + +### Specify how often to timeout old scan data relative to CHECK_INTERVAL +### iterations. This feature is only used if ENABLE_PERSISTENCE is disabled. +### Note that for psad processes that have tracked a lot of scans, it is +### advisable to leave this threshold at the default value of 5 or greater +### because the scan tracking hash may be quite large. +PERSISTENCE_CTR_THRESHOLD 5; + +### Limit the number of src->dst IP pairs that psad will track. The default +### is zero (i.e. unlimited), but if psad is running on a system with limited +### memory, this can be handy to restrict psad's memory usage. It is best to +### combine this option with disabling ENABLE_PERSISTENCE so that older scans +### are deleted and therefore newer scans will on average continue to be +### tracked. A good non-zero value is, say, 50000, but this will vary +### depending on available system memory. +MAX_SCAN_IP_PAIRS 0; + +### If "Y", means all signatures will be shown since +### the scan started instead of just the current ones. +SHOW_ALL_SIGNATURES N; + +### Allow reporting methods to be enabled/restricted. This keyword can +### accept values of "nosyslog" (don't write any messages to syslog), +### "noemail" (don't send any email messages), or "ALL" (to generate both +### syslog and email messages). "ALL" is the default. Both "nosyslog" +### and "noemail" can be combined with a comma to disable all logging +### and alerting. +ALERTING_METHODS noemail; + +### By default, psad acquires iptables log data from the /var/log/messages +### file which the local syslog daemon (usually) writes iptables log messages +### to. If the ENABLE_SYSLOG_FILE variable below is set to "N", then psad +### reconfigures syslog to write iptables log data to the +### /var/lib/psad/psadfifo fifo file where the messages are picked up by kmsgsd +### written to the file /var/log/psad/fwdata for analysis by psad. On some +### systems, having syslog communicate log data to kmsgsd can be problematic +### (syslog configs and external factors such as Apparmor and SELinux can play +### a role here), so leaving the ENABLE_SYSLOG_FILE variable set to "Y" is +### usually recommended. +ENABLE_SYSLOG_FILE Y; +IPT_WRITE_FWDATA Y; +IPT_SYSLOG_FILE /var/log/syslog; + +### When enabled, this instructs psad to write the "msg" field +### associated with Snort rule matches to syslog. +ENABLE_SIG_MSG_SYSLOG Y; +SIG_MSG_SYSLOG_THRESHOLD 10; +SIG_SID_SYSLOG_THRESHOLD 10; + +### TTL values are decremented depending on the number of hops +### the packet has taken before it hits the firewall. We will +### assume packets will not jump through more than 20 hops on +### average. +MAX_HOPS 20; + +### Do not include any timestamp included within kernel logging +### messages (Ubuntu systems commonly have this) +IGNORE_KERNEL_TIMESTAMP Y; + +### FIXME: try to mitigate the affects of the iptables connection +### tracking bug by ignoring tcp packets that have the ack bit set. +### Read the "BUGS" section of the psad man page. Note that +### if a packet matches a snort SID generated by fwsnort (see +### http://www.cipherdyne.org/fwsnort/) +### then psad will see it even if the ack bit is set. See the +### SNORT_SID_STR variable. 
+IGNORE_CONNTRACK_BUG_PKTS Y; + +### define a set of ports to ignore (this is useful particularly +### for port knocking applications since the knock sequence will +### look to psad like a scan). This variable may be defined as +### a comma-separated list of port numbers or port ranges and +### corresponding protocol, For example, to have psad ignore all +### tcp in the range 61000-61356 and udp ports 53 and 5000, use: +### IGNORE_PORTS tcp/61000-61356, udp/53, udp/5000; +IGNORE_PORTS NONE; + +### allow entire protocols to be ignored. This keyword can accept +### a comma separated list of protocols. Each protocol must match +### the protocol that is specified in an iptables log message (case +### insensitively, so both "TCP" or "tcp" is ok). +### IGNORE_PROTOCOL tcp,udp; +IGNORE_PROTOCOLS NONE; + +### allow packets to be ignored based on interface (this is the +### "IN" interface in iptables logging messages). +IGNORE_INTERFACES NONE; + +### Ignore these specific logging prefixes +IGNORE_LOG_PREFIXES NONE; + +### Minimum danger level a scan must reach before any logging or +### alerting is done. The EMAIL_ALERT_DANGER_LEVEL variable below +### only refers to email alerts; the MIN_DANGER_LEVEL variable +### applies to everything from email alerts to whether or not the +### IP directory is created within /var/log/psad/. Hence +### MIN_DANGER_LEVEL should be set less than or equal to the value +### assigned to the EMAIL_ALERT_DANGER_LEVEL variable. +MIN_DANGER_LEVEL 1; + +### Only send email alert if danger level >= to this value. +EMAIL_ALERT_DANGER_LEVEL 1; + +### Enable detection of malicious activity that is delivered via IPv6. If +### ip6tables is not logging any traffic, then psad won't know anything +### about IPv6, or this variable can be set to "N" (this would be slightly +### faster if ip6tables isn't logging anything). +ENABLE_IPV6_DETECTION Y; + +### Treat all subnets on local interfaces as part of HOME_NET (this +### means that these networks do not have to be manually defined) +ENABLE_INTF_LOCAL_NETS Y; + +### Include MAC addresses in email alert +ENABLE_MAC_ADDR_REPORTING N; + +### Look for the iptables logging rule (fwcheck_psad is executed) +ENABLE_FW_LOGGING_CHECK Y; + +### Send no more than this number of emails for a single +### scanning source IP. Note that enabling this feature may cause +### alerts for real attacks to not be generated if an attack is sent +### after the email threshold has been reached for an IP address. +### This is why the default is set to "0". +EMAIL_LIMIT 0; + +### By default, psad maintains a counter for each scanning source address, +### but by enabling this variable psad will maintain email counters for +### each victim address that is scanned as well. +ENABLE_EMAIL_LIMIT_PER_DST N; + +### If "Y", send a status email message when an IP has reached the +### EMAIL_LIMIT threshold. +EMAIL_LIMIT_STATUS_MSG Y; + +### This variable is used to have psad throttle the email alerts it sends, +### and implemented as a per-IP threshold. That is, if EMAIL_THROTTLE +### is set to "10", then psad will only send 1/10th as many emails for each +### scanning IP as it would have normally. All other variables also apply, +### so this throttle value is taken into account after everything else. The +### default of zero means to not apply any throttling. +EMAIL_THROTTLE 0; + +### If "Y", send email for all newly logged packets from the same +### source ip instead of just when a danger level increases. 
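Several of the thresholds above (danger levels, email limits, and `ALERT_ALL` just below) only become meaningful once psad has tracked some traffic. A hedged way to see what psad currently reports on a dock where this role has been applied:

```bash
# Illustrative: summarize what psad has tracked so far (scan sources, danger levels,
# top signatures) and look at the raw per-source data it keeps on disk.
sudo psad -S
sudo ls /var/log/psad/   # one directory per scanning source IP
```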
+ALERT_ALL Y; + +### If "Y", then psad will import old scan source ip directories +### as current scans instead of moving the directories into the +### archive directory. +IMPORT_OLD_SCANS N; + +### syslog facility and priority (the defaults are usually ok) +### The SYSLOG_FACILITY variable can be set to one of LOG_LOCAL{0-7}, and +### SYSLOG_PRIORITY can be set to one of LOG_INFO, LOG_DEBUG, LOG_NOTICE, +### LOG_WARNING, LOG_ERR, LOG_CRIT, LOG_ALERT, or LOG_EMERG +SYSLOG_IDENTITY psad; +SYSLOG_FACILITY LOG_LOCAL7; +SYSLOG_PRIORITY LOG_INFO; + +### Port thresholds for logging and -S and -A output. +TOP_PORTS_LOG_THRESHOLD 500; +STATUS_PORTS_THRESHOLD 20; + +### Signature thresholds for logging and -S and -A output. +TOP_SIGS_LOG_THRESHOLD 500; +STATUS_SIGS_THRESHOLD 50; + +### Attackers thresholds for logging and -S and -A output. +TOP_IP_LOG_THRESHOLD 500; +STATUS_IP_THRESHOLD 25; + +### Specify how often to log the TOP_* information (i.e. how many +### CHECK_INTERVAL iterations before the data is logged again). +TOP_SCANS_CTR_THRESHOLD 1; + +### Send scan logs to dshield.org. This is disabled by default, +### but is a good idea to enable it (subject to your site security +### policy) since the DShield service helps to track the bad guys. +### For more information visit http://www.dshield.org +ENABLE_DSHIELD_ALERTS N; + +### dshield.org alert email address; this should not be changed +### unless the guys at DShield have changed it. +DSHIELD_ALERT_EMAIL reports@dshield.org; + +### Time interval (hours) to send email alerts to dshield.org. +### The default is 6 hours, and cannot be less than 1 hour or +### more than 24 hours. +DSHIELD_ALERT_INTERVAL 6; ### hours + +### If you have a DShield user id you can set it here. The +### default is "0". +DSHIELD_USER_ID 0; + +### If you want the outbound DShield email to appear as though it +### is coming from a particular user address then set it here. +DSHIELD_USER_EMAIL NONE; + +### Threshold danger level for DShield data; a scan must reach this +### danger level before associated packets will be included in an +### alert to DShield. Note that zero is the default since this +### will allow DShield to apply its own logic to determine what +### constitutes a scan (_all_ iptables log messages will be included +### in DShield email alerts). +DSHIELD_DL_THRESHOLD 0; + +### List of servers. Fwsnort supports the same variable resolution as +#### Snort. +HTTP_SERVERS $HOME_NET; +SMTP_SERVERS $HOME_NET; +DNS_SERVERS $HOME_NET; +SQL_SERVERS $HOME_NET; +TELNET_SERVERS $HOME_NET; + +#### AOL AIM server nets +AIM_SERVERS [64.12.24.0/24, 64.12.25.0/24, 64.12.26.14/24, 64.12.28.0/24, 64.12.29.0/24, 64.12.161.0/24, 64.12.163.0/24, 205.188.5.0/24, 205.188.9.0/24]; + +### Configurable port numbers +HTTP_PORTS 80; +SHELLCODE_PORTS !80; +ORACLE_PORTS 1521; + +### If this is enabled, then psad will die if a rule in the +### /etc/psad/signatures file contains an unsupported option (otherwise +### a syslog warning will be generated). +ENABLE_SNORT_SIG_STRICT Y; + +### If "Y", enable automated IDS response (auto manages +### firewall rulesets). +ENABLE_AUTO_IDS N; + +### Block all traffic from offending IP if danger +### level >= to this value +AUTO_IDS_DANGER_LEVEL 5; + +### Set the auto-blocked timeout in seconds (the default +### is one hour). +AUTO_BLOCK_TIMEOUT 3600; + +### Set the auto-blocked timeout in seconds for each danger +### level - zero means to block permanently. 
Each of these +### can be set independently +AUTO_BLOCK_DL1_TIMEOUT $AUTO_BLOCK_TIMEOUT; +AUTO_BLOCK_DL2_TIMEOUT $AUTO_BLOCK_TIMEOUT; +AUTO_BLOCK_DL3_TIMEOUT $AUTO_BLOCK_TIMEOUT; +AUTO_BLOCK_DL4_TIMEOUT $AUTO_BLOCK_TIMEOUT; +AUTO_BLOCK_DL5_TIMEOUT 0; ### permanent + +### Enable regex checking on log prefixes for active response +ENABLE_AUTO_IDS_REGEX N; + +### Only block if the iptables log message matches the following regex +AUTO_BLOCK_REGEX ESTAB; ### from fwsnort logging prefixes + +### Control whether "renew" auto-block emails get sent. This is disabled +### by default because lots of IPs could have been blocked, and psad +### should not generate a renew email for each of them. +ENABLE_RENEW_BLOCK_EMAILS N; + +### By setting this variable to N, all auto-blocking emails can be +### suppressed. +ENABLE_AUTO_IDS_EMAILS Y; + +### Enable iptables blocking (only gets enabled if +### ENABLE_AUTO_IDS is also set) +IPTABLES_BLOCK_METHOD Y; + +### Specify chain names to which iptables blocking rules will be +### added with the IPT_AUTO_CHAIN{n} keyword. There is no limit on the +### number of IPT_AUTO_CHAIN{n} keywords; just increment the {n} number +### to add an additional IPT_AUTO_CHAIN requirement. The format for this +### variable is: ,,,,, \ +### ,. +### "Target": Can be any legitimate iptables target, but should usually +### just be "DROP". +### "Direction": Can be "src", "dst", or "both", which correspond to the +### INPUT, OUTPUT, and FORWARD chains. +### "Table": Can be any iptables table, but the default is "filter". +### "From_chain": Is the chain from which packets will be jumped. +### "Jump_rule_position": Defines the position within the From_chain where +### the jump rule is added. +### "To_chain": Is the chain to which packets will be jumped. This is the +### main chain where psad rules are added. +### "Rule_position": Defines the position where rule are added within the +### To_chain. +### +### The following defaults make sense for most installations, but note +### it is possible to include blocking rules in, say, the "nat" table +### using this functionality as well. The following three lines provide +### usage examples: +#IPT_AUTO_CHAIN1 DROP, src, filter, INPUT, 1, PSAD_BLOCK_INPUT, 1; +#IPT_AUTO_CHAIN2 DROP, dst, filter, OUTPUT, 1, PSAD_BLOCK_OUTPUT, 1; +#IPT_AUTO_CHAIN3 DROP, both, filter, FORWARD, 1, PSAD_BLOCK_FORWARD, 1; +IPT_AUTO_CHAIN1 DROP, src, filter, INPUT, 1, PSAD_BLOCK_INPUT, 1; +IPT_AUTO_CHAIN2 DROP, dst, filter, OUTPUT, 1, PSAD_BLOCK_OUTPUT, 1; +IPT_AUTO_CHAIN3 DROP, both, filter, FORWARD, 1, PSAD_BLOCK_FORWARD, 1; + +### Flush all existing rules in the psad chains at psad start time. +FLUSH_IPT_AT_INIT Y; + +### Prerequisite check for existence of psad chains and jump rules +IPTABLES_PREREQ_CHECK 1; + +### Enable tcp wrappers blocking (only gets enabled if +### ENABLE_AUTO_IDS is also set) +TCPWRAPPERS_BLOCK_METHOD N; + +### Set the whois timeout +WHOIS_TIMEOUT 60; ### seconds + +### Set the number of times an ip can be seen before another whois +### lookup is issued. +WHOIS_LOOKUP_THRESHOLD 20; + +### Use this option to force all whois information to contain ascii-only data. +### Sometime whois information for IP addresses in China and other countries +### can contain non-ascii data. If this option is enabled, then any non- +### ascii characters will be replaced with "NA". +ENABLE_WHOIS_FORCE_ASCII N; + +### This variable forces all whois lookups to be done against the source IP +### even when they are associated with a directly connected local network. 
IT +### is usually a good idea to leave this setting as the default of 'N'. +ENABLE_WHOIS_FORCE_SRC_IP N; + +### Set the number of times an ip can be seen before another dns +### lookup is issued. +DNS_LOOKUP_THRESHOLD 20; + +### Enable psad to run an external script or program (use at your +### own risk!) +ENABLE_EXT_SCRIPT_EXEC Y; + +### Define an external program to run after a scan is caught. +### Note that the scan source ip can be specified on the command +### line to the external program through the use of the "SRCIP" +### string (along with some appropriate switch for the program). +### Of course this is only useful if the external program knows +### what to do with this information. +### Example: EXTERNAL_SCRIPT /path/to/script --ip SRCIP -v; +EXTERNAL_SCRIPT {{ psad_script_folder }}/psad-script.sh SRCIP >> {{ psad_script_folder }}/psad-script.log 2>&1; + +### Control execution of EXTERNAL_SCRIPT (only once per IP, or +### every time a scan is detected for an ip). +EXEC_EXT_SCRIPT_PER_ALERT Y; + +### Disk usage variables +DISK_CHECK_INTERVAL 300; ### seconds + +### This can be set to 0 to disable disk checking altogether +DISK_MAX_PERCENTAGE 95; + +### This can be set to 0 to have psad not place any limit on the +### number of times it will attempt to remove data from +### /var/log/psad/. +DISK_MAX_RM_RETRIES 10; + +### Enable archiving of old scan directories at psad startup. +ENABLE_SCAN_ARCHIVE N; + +### Truncate fwdata file at startup +TRUNCATE_FWDATA Y; + +### Only archive scanning IP directories that have reached a danger +### level greater than or equal to this value. Archiving old +### scanning ip directories only takes place at psad startup. +MIN_ARCHIVE_DANGER_LEVEL 1; + +### Email subject line config. Change these prefixes if you want +### psad to generate email alerts that say something other than +### the following. 
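The `EXTERNAL_SCRIPT` hook above is what ties psad to the `psad-script.sh` template from this role: psad substitutes the scanning source address for `SRCIP`, and the script then finds the matching container and POSTs its logs to drake. A hedged way to exercise the script by hand on a dock; the container IP below is a placeholder.

```bash
# Illustrative manual run of the alert script installed by this role.
# 172.17.0.5 is a placeholder container IP; psad normally supplies it via SRCIP.
sudo bash /opt/runnable/psad/psad-script.sh 172.17.0.5
# Output from psad's own invocations of the hook is appended here:
sudo tail /opt/runnable/psad/psad-script.log
```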
+MAIL_ALERT_PREFIX [psad-alert]; +MAIL_STATUS_PREFIX [psad-status]; +MAIL_ERROR_PREFIX [psad-error]; +MAIL_FATAL_PREFIX [psad-fatal]; + +### URL for getting the latest psad signatures +SIG_UPDATE_URL http://www.cipherdyne.org/psad/signatures; + +### These next two are psadwatchd vars +PSADWATCHD_CHECK_INTERVAL 5; ### seconds +PSADWATCHD_MAX_RETRIES 10; + +### Directories +INSTALL_ROOT /; +PSAD_DIR $INSTALL_ROOT/var/log/psad; +PSAD_RUN_DIR $INSTALL_ROOT/var/run/psad; +PSAD_FIFO_DIR $INSTALL_ROOT/var/lib/psad; +PSAD_LIBS_DIR $INSTALL_ROOT/usr/lib/psad; +PSAD_CONF_DIR $INSTALL_ROOT/etc/psad; +PSAD_ERR_DIR $PSAD_DIR/errs; +CONF_ARCHIVE_DIR $PSAD_CONF_DIR/archive; +SCAN_DATA_ARCHIVE_DIR $PSAD_DIR/scan_archive; +ANALYSIS_MODE_DIR $PSAD_DIR/ipt_analysis; +SNORT_RULES_DIR $PSAD_CONF_DIR/snort_rules; +FWSNORT_RULES_DIR /etc/fwsnort/snort_rules; ### may not exist + +### Files +FW_DATA_FILE $PSAD_DIR/fwdata; +ULOG_DATA_FILE $PSAD_DIR/ulogd.log; +FW_CHECK_FILE $PSAD_DIR/fw_check; +DSHIELD_EMAIL_FILE $PSAD_DIR/dshield.email; +SIGS_FILE $PSAD_CONF_DIR/signatures; +PROTOCOLS_FILE $PSAD_CONF_DIR/protocols; +ICMP_TYPES_FILE $PSAD_CONF_DIR/icmp_types; +ICMP6_TYPES_FILE $PSAD_CONF_DIR/icmp6_types; +AUTO_DL_FILE $PSAD_CONF_DIR/auto_dl; +SNORT_RULE_DL_FILE $PSAD_CONF_DIR/snort_rule_dl; +POSF_FILE $PSAD_CONF_DIR/posf; +P0F_FILE $PSAD_CONF_DIR/pf.os; +IP_OPTS_FILE $PSAD_CONF_DIR/ip_options; +PSAD_FIFO_FILE $PSAD_FIFO_DIR/psadfifo; +ETC_HOSTS_DENY_FILE /etc/hosts.deny; +ETC_SYSLOG_CONF /etc/syslog.conf; +ETC_RSYSLOG_CONF /etc/rsyslog.conf; +ETC_SYSLOGNG_CONF /etc/syslog-ng/syslog-ng.conf; +ETC_METALOG_CONF /etc/metalog/metalog.conf; +STATUS_OUTPUT_FILE $PSAD_DIR/status.out; +ANALYSIS_OUTPUT_FILE $PSAD_DIR/analysis.out; +INSTALL_LOG_FILE $PSAD_DIR/install.log; + +### PID files +PSAD_PID_FILE $PSAD_RUN_DIR/psad.pid; +PSAD_CMDLINE_FILE $PSAD_RUN_DIR/psad.cmd; +KMSGSD_PID_FILE $PSAD_RUN_DIR/kmsgsd.pid; +PSADWATCHD_PID_FILE $PSAD_RUN_DIR/psadwatchd.pid; + +### List of ips that have been auto blocked by iptables +### or tcpwrappers (the auto blocking feature is disabled by +### default, see the psad man page and the ENABLE_AUTO_IDS +### variable). 
+AUTO_BLOCK_IPT_FILE $PSAD_DIR/auto_blocked_iptables; +AUTO_BLOCK_TCPWR_FILE $PSAD_DIR/auto_blocked_tcpwr; + +### File used internally by psad to add iptables blocking +### rules to a running psad process +AUTO_IPT_SOCK $PSAD_RUN_DIR/auto_ipt.sock; + +FW_ERROR_LOG $PSAD_ERR_DIR/fwerrorlog; +PRINT_SCAN_HASH $PSAD_DIR/scan_hash; + +### /proc interface for controlling ip forwarding +PROC_FORWARD_FILE /proc/sys/net/ipv4/ip_forward; + +### Packet counters for tcp, udp, and icmp protocols +PACKET_COUNTER_FILE $PSAD_DIR/packet_ctr; + +### Top scanned ports +TOP_SCANNED_PORTS_FILE $PSAD_DIR/top_ports; + +### Top signature matches +TOP_SIGS_FILE $PSAD_DIR/top_sigs; + +### Top attackers +TOP_ATTACKERS_FILE $PSAD_DIR/top_attackers; + +### Counter file for Dshield alerts +DSHIELD_COUNTER_FILE $PSAD_DIR/dshield_ctr; + +### Counter file for iptables prefixes +IPT_PREFIX_COUNTER_FILE $PSAD_DIR/ipt_prefix_ctr; + +### iptables command output and error collection files; these are +### used by IPTables::ChainMgr +IPT_OUTPUT_FILE $PSAD_DIR/psad.iptout; +IPT_ERROR_FILE $PSAD_DIR/psad.ipterr; + +### system binaries +iptablesCmd /sbin/iptables; +ip6tablesCmd /sbin/ip6tables; +shCmd /bin/sh; +wgetCmd /usr/bin/wget; +gzipCmd /bin/gzip; +mknodCmd /bin/mknod; +psCmd /bin/ps; +mailCmd /bin/mail; +sendmailCmd /usr/sbin/sendmail; +ifconfigCmd /sbin/ifconfig; +ipCmd /sbin/ip; +killallCmd /usr/bin/killall; +netstatCmd /bin/netstat; +unameCmd /bin/uname; +whoisCmd $INSTALL_ROOT/usr/bin/whois_psad; +dfCmd /bin/df; +fwcheck_psadCmd $INSTALL_ROOT/usr/sbin/fwcheck_psad; +psadwatchdCmd $INSTALL_ROOT/usr/sbin/psadwatchd; +kmsgsdCmd $INSTALL_ROOT/usr/sbin/kmsgsd; +psadCmd $INSTALL_ROOT/usr/sbin/psad; diff --git a/ansible/roles/ec2/sg_configure/tasks/main.yml b/ansible/roles/ec2/sg_configure/tasks/main.yml new file mode 100644 index 00000000..b71708ec --- /dev/null +++ b/ansible/roles/ec2/sg_configure/tasks/main.yml @@ -0,0 +1,475 @@ +#- name: install Python Boto +# become: true +# apt: name=python-boto state=latest + +- name: Bastion + tags: + - bastion + ec2_group: + name: "{{ env }}-bastion" + description: "{{ env }} Bastion Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ bastion_sshd_port }}" + to_port: "{{ bastion_sshd_port }}" + cidr_ip: "{{ ip_all }}" + +- name: API SG + tags: + - api + ec2_group: + name: "{{ env }}-api" + description: "{{ env }} API Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: "{{ container_min_port }}" + to_port: "{{ container_max_port }}" + group_id: "{{ sg_hipache }}" + +- name: Consul Service SG + tags: + - consul + ec2_group: + name: "{{ env }}-consul" + description: "{{ env }} Consul Service Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: icmp + from_port: -1 + to_port: -1 + group_id: "{{ sg_consul }}" + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: "{{ consul_rpc_min_port }}" + to_port: "{{ consul_rpc_max_port }}" + group_id: "{{ sg_consul }}" + - proto: udp + from_port: "{{ consul_rpc_min_port }}" + to_port: "{{ 
consul_rpc_max_port }}" + group_id: "{{ sg_consul }}" + - proto: tcp + from_port: "{{ consul_cli_port }}" + to_port: "{{ consul_cli_port }}" + group_id: "{{ sg_consul }}" + - proto: tcp + from_port: "{{ vault_api_port }}" + to_port: "{{ vault_api_ssl_port }}" + group_id: "{{ sg_consul }}" + - proto: tcp + from_port: "{{ vault_api_port }}" + to_port: "{{ vault_api_ssl_port }}" + group_id: "{{ sg_dock }}" + - proto: tcp + from_port: "{{ consul_api_port }}" + to_port: "{{ consul_api_ssl_port }}" + group_id: "{{ sg_dock_services }}" + - proto: tcp + from_port: "{{ consul_api_port }}" + to_port: "{{ consul_api_port }}" + group_id: "{{ sg_app_services }}" + - proto: tcp + from_port: "{{ consul_api_port }}" + to_port: "{{ consul_api_ssl_port }}" + group_id: "{{ sg_dock }}" + - proto: tcp + from_port: "{{ consul_api_port }}" + to_port: "{{ consul_api_ssl_port }}" + group_id: "{{ sg_navi }}" + - proto: tcp + from_port: "{{ consul_api_port }}" + to_port: "{{ consul_api_ssl_port }}" + group_id: "{{ sg_userland }}" + +- name: On-Dock Services SG + tags: + - dock + ec2_group: + name: "{{ env }}-dock" + description: "{{ env }} Dock Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: udp + from_port: "{{ named_port }}" + to_port: "{{ named_port }}" + group_id: "{{ sg_dock }}" + - proto: tcp + from_port: "{{ named_port }}" + to_port: "{{ named_port }}" + group_id: "{{ sg_dock }}" + - proto: tcp + from_port: "{{ krain_port }}" + to_port: "{{ krain_port }}" + group_id: "{{ sg_api }}" + - proto: tcp + from_port: "{{ docker_port }}" + to_port: "{{ docker_port }}" + group_id: "{{ sg_api }}" + - proto: tcp + from_port: "{{ docker_port }}" + to_port: "{{ docker_port }}" + group_id: "{{ sg_dock_services }}" + - proto: udp + from_port: "{{ weave_port }}" + to_port: "{{ weave_port }}" + group_id: "{{ sg_dock }}" + - proto: tcp + from_port: "{{ weave_port }}" + to_port: "{{ weave_port }}" + group_id: "{{ sg_dock }}" + - proto: tcp + from_port: "{{ vault_api_port }}" + to_port: "{{ vault_api_ssl_port }}" + group_id: "{{ sg_dock }}" + - proto: tcp + from_port: "{{ container_min_port }}" + to_port: "{{ container_max_port }}" + group_id: "{{ sg_userland }}" + - proto: tcp + from_port: "{{ container_min_port }}" + to_port: "{{ container_max_port }}" + group_id: "{{ sg_navi }}" + +- name: Hipache SG + tags: + - hipache + ec2_group: + name: "{{ env }}-hipache" + description: "{{ env }} Hipache Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: icmp + from_port: -1 + to_port: -1 + cidr_ip: "{{ ip_all }}" + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + cidr_ip: "{{ ip_all }}" + - proto: tcp + from_port: "{{ https_port }}" + to_port: "{{ https_port }}" + cidr_ip: "{{ ip_all }}" + +- name: MongoDB SG + tags: + - mongo + ec2_group: + name: "{{ env }}-mongo" + description: "{{ env }} MongoDB Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: 
tcp + from_port: 27000 + to_port: 27020 + group_id: "{{ sg_mongo }}" + - proto: tcp + from_port: 27000 + to_port: 27020 + group_id: "{{ sg_api }}" + - proto: tcp + from_port: 27000 + to_port: 27020 + group_id: "{{ sg_dock_services }}" + - proto: tcp + from_port: 27000 + to_port: 27020 + group_id: "{{ sg_app_services }}" + +- name: Navi SG + tags: + - navi + ec2_group: + name: "{{ env }}-navi" + description: "{{ env }} Navi Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: "{{ container_min_port }}" + to_port: "{{ container_max_port }}" + group_id: "{{ sg_hipache }}" + +- name: RabbitMQ SG + tags: + - rabbit + ec2_group: + name: "{{ env }}-rabbit" + description: "{{ env }} RabbitMQ Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: 54320 + to_port: 54321 + group_id: "{{ sg_api }}" + - proto: tcp + from_port: 54320 + to_port: 54321 + group_id: "{{ sg_navi }}" + - proto: tcp + from_port: 54320 + to_port: 54321 + group_id: "{{ sg_app_services }}" + - proto: tcp + from_port: 54320 + to_port: 54321 + group_id: "{{ sg_dock_services }}" + +- name: RDS SG + tags: + - rds + ec2_group: + name: "{{ env }}-rds" + description: "{{ env }} RDS Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: 5432 + to_port: 5432 + group_id: "{{ sg_app_services }}" + +- name: Redis SG + tags: + - redis + ec2_group: + name: "{{ env }}-redis" + description: "{{ env }} Redis Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: "{{ redis_port }}" + to_port: "{{ redis_tls_port }}" + group_id: "{{ sg_api }}" + - proto: tcp + from_port: "{{ redis_port }}" + to_port: "{{ redis_tls_port }}" + group_id: "{{ sg_dock }}" + - proto: tcp + from_port: "{{ redis_port }}" + to_port: "{{ redis_tls_port }}" + group_id: "{{ sg_hipache }}" + - proto: tcp + from_port: "{{ redis_port }}" + to_port: "{{ redis_tls_port }}" + group_id: "{{ sg_navi }}" + - proto: tcp + from_port: "{{ redis_port }}" + to_port: "{{ redis_tls_port }}" + group_id: "{{ sg_app_services }}" + - proto: tcp + from_port: "{{ redis_port }}" + to_port: "{{ redis_tls_port }}" + group_id: "{{ sg_dock_services }}" + +- name: Registry SG + tags: + - registry + ec2_group: + name: "{{ env }}-registry" + description: "{{ env }} Registry Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_api }}" + - proto: tcp + from_port: "{{ http_port 
}}" + to_port: "{{ http_port }}" + group_id: "{{ sg_app_services }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_consul }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_dock }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_dock_services }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_hipache }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_mongo }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_navi }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_rabbit }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_redis }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_userland }}" + +- name: App Services SG + tags: + - app-services + ec2_group: + name: "{{ env }}-app-services" + description: "{{ env }} Application Services Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_api }}" + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ http_port }}" + group_id: "{{ sg_hipache }}" + - proto: tcp + from_port: "{{ swarm_port }}" + to_port: "{{ swarm_port }}" + group_id: "{{ sg_api }}" + - proto: tcp + from_port: "{{ detention_port }}" + to_port: "{{ detention_port }}" + group_id: "{{ sg_navi }}" + - proto: tcp + from_port: "{{ container_min_port }}" + to_port: "{{ container_max_port }}" + group_id: "{{ sg_hipache }}" + +- name: Dock Services + tags: + - dock-services + ec2_group: + name: "{{ env }}-dock-services" + description: "{{ env }} Dock Services Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ sshd_port }}" + to_port: "{{ sshd_port }}" + group_id: "{{ sg_bastion }}" + - proto: tcp + from_port: "{{ swarm_port }}" + to_port: "{{ swarm_port }}" + group_id: "{{ sg_api }}" + - proto: tcp + from_port: "{{ swarm_port }}" + to_port: "{{ swarm_port }}" + group_id: "{{ sg_dock_services }}" + - proto: tcp + from_port: "{{ container_min_port }}" + to_port: "{{ container_max_port }}" + group_id: "{{ sg_api }}" + - proto: tcp + from_port: "{{ container_min_port }}" + to_port: "{{ container_max_port }}" + group_id: "{{ sg_hipache }}" + +- name: Userland Hipache + tags: + - userland + ec2_group: + name: "{{ env }}-userland" + description: "{{ env }} Userland Hipache Security Policy" + vpc_id: "{{ vpc_id }}" + region: "{{ region }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_access_key: "{{ aws_access_key }}" + rules: + - proto: tcp + from_port: "{{ http_port }}" + to_port: "{{ container_max_port }}" + cidr_ip: "{{ ip_all }}" diff --git a/ansible/roles/git/README.md b/ansible/roles/git/README.md new file mode 100644 index 00000000..3c7dcd9f --- /dev/null +++ b/ansible/roles/git/README.md @@ -0,0 +1,26 @@ +Role Name +======== + 
+Ansible Role to Install Docker on CentOS 6.5 + +Role Variables +-------------- + +``` +docker_centos_packages: + - { package: "docker" } +``` + +Example Playbook +------------------------- + + - hosts: docker-servers + roles: + - { role: docker-centos, + tags: ["docker"] } + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/git/tasks/main.yml b/ansible/roles/git/tasks/main.yml new file mode 100644 index 00000000..16d60db9 --- /dev/null +++ b/ansible/roles/git/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: install git + apt: name=git state=present + become: true + when: "ansible_distribution == 'Ubuntu'" diff --git a/ansible/roles/git_repo/meta/main.yml b/ansible/roles/git_repo/meta/main.yml new file mode 100644 index 00000000..1a46c351 --- /dev/null +++ b/ansible/roles/git_repo/meta/main.yml @@ -0,0 +1,3 @@ +--- + dependencies: + - { role: git } diff --git a/ansible/roles/git_repo/tasks/main.yml b/ansible/roles/git_repo/tasks/main.yml new file mode 100644 index 00000000..c30e2279 --- /dev/null +++ b/ansible/roles/git_repo/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Ensure Tag Deploy For Prod + tags: deploy + when: not git_branch | match("^v([0-9]+)\.([0-9]+)\.([0-9]+)$") and node_env=="production-delta" + fail: msg="only tag can be deployed on prod not {{ git_branch }}" + +- name: Display Git Repo Name + debug: + msg: "application Installed: {{ app_name }}, branch : {{ git_branch }} " + +- name: create repository dir + file: + path=/opt/runnable/{{ app_name }} + state=directory + owner={{ ansible_env.USER }} + +- name: pull the git repository + tags: deploy + git: + repo={{ app_repo }} + dest=/opt/runnable/{{ app_name }} + version={{ git_branch }} + update=yes + accept_hostkey=True + force=yes diff --git a/ansible/roles/hipache/tasks/main.yml b/ansible/roles/hipache/tasks/main.yml new file mode 100644 index 00000000..356566b3 --- /dev/null +++ b/ansible/roles/hipache/tasks/main.yml @@ -0,0 +1,46 @@ +--- +- name: create certs path + become: true + file: + path=/etc/ssl/certs/{{ domain }} + state=directory + +- name: create private cert path + become: true + file: + path=/etc/ssl/private + state=directory + +# the keys below must be placed inside devops-scripts/ansible/roles/hipache/files +- name: copy ca.crt + tags: certs + become: true + copy: + src={{ domain }}/ca.pem + dest=/etc/ssl/certs/{{ domain }}/ca.crt + +- name: copy {{ domain }}.crt + tags: certs + become: true + copy: + src={{ domain }}/cert.pem + dest=/etc/ssl/certs/{{ domain }}/{{ domain }}.crt + +- name: copy private ssl key + tags: certs + become: true + copy: + src={{ domain }}/key.pem + dest=/etc/ssl/private/{{ domain }}.key + +- name: create hipache config directory + become: true + file: + path=/opt/hipache-conf + state=directory + +- name: copy configs + become: true + template: + src=config.json + dest=/opt/hipache-conf diff --git a/ansible/roles/hipache/templates/config.json b/ansible/roles/hipache/templates/config.json new file mode 100644 index 00000000..4e74d305 --- /dev/null +++ b/ansible/roles/hipache/templates/config.json @@ -0,0 +1,30 @@ +{ + "server": { + "datadogHost": "{{ ansible_default_ipv4.address }}", + "datadogPort": "8125", + "prependPort": {{ prependIncomingPort | default("false") }}, + "subDomainDepth": {{ subDomainDepth }}, + "accessLog": "/host/access.log", + "workers": 10, + "maxSockets": 10000, + "deadBackendTTL": 30, + "tcpTimeout": 120, + "retryOnError": {{ retry_on_error }}, + "deadBackendOn500": false, + "httpKeepAlive": false + }, + "http": { + 
"port": 80, + "bind": "0.0.0.0" + }, + "https": { + "port": 443, + "bind": "0.0.0.0", + "key": "/etc/ssl/private/{{ domain }}.key", + "cert": "/etc/ssl/certs/{{ domain }}/{{ domain }}.crt", + "ca": "/etc/ssl/certs/{{ domain }}/ca.crt", + "ciphers": "ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:DHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA256:HIGH:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!SRP:!CAMELLIA", + "checkForBackend80": {{ httpsCheckForBackend80 | default("false") }} + }, + "driver": ["redis://{{ redis_host_address }}:{{ redis_port }}"] +} diff --git a/ansible/roles/image-builder/tasks/main.yml b/ansible/roles/image-builder/tasks/main.yml new file mode 100644 index 00000000..ac71aa5e --- /dev/null +++ b/ansible/roles/image-builder/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: npm install + tags: deploy + npm: + path=/opt/runnable/image-builder + state=latest + +- name: build the image-builder + tags: deploy + command: sudo docker build --no-cache --tag="registry.runnable.com/{{ image_builder_docker_namespace }}:{{ git_branch }}" /opt/runnable/image-builder + +- name: push image-builder + tags: deploy + run_once: true + command: sudo docker push "registry.runnable.com/{{ image_builder_docker_namespace }}:{{ git_branch }}" + when: dock is not defined diff --git a/ansible/roles/install-ssm/tasks/main.yml b/ansible/roles/install-ssm/tasks/main.yml new file mode 100644 index 00000000..01a710ad --- /dev/null +++ b/ansible/roles/install-ssm/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: get amazon simple systems management + get_url: + url=https://amazon-ssm-us-west-2.s3.amazonaws.com/latest/debian_amd64/amazon-ssm-agent.deb + dest=/tmp + +- name: start amazon simple systems management + command: dpkg -i amazon-ssm-agent.deb + become: true + args: + chdir: /tmp diff --git a/ansible/roles/khronos/README.md b/ansible/roles/khronos/README.md new file mode 100644 index 00000000..b36230c3 --- /dev/null +++ b/ansible/roles/khronos/README.md @@ -0,0 +1,3 @@ +Run Khronos CLI tool out of cron once daily. 
+ +`tasks/main.yml` - install cron entry to run `/khronos/bin/cli.js` and output to `{{ app_log_dir }}/khonos_cron.log` diff --git a/ansible/roles/khronos/meta/main.yml b/ansible/roles/khronos/meta/main.yml new file mode 100644 index 00000000..37a34a20 --- /dev/null +++ b/ansible/roles/khronos/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- { role: tls-client-cert, tls_service: mongodb} diff --git a/ansible/roles/khronos/tasks/main.yml b/ansible/roles/khronos/tasks/main.yml new file mode 100644 index 00000000..5aa53dea --- /dev/null +++ b/ansible/roles/khronos/tasks/main.yml @@ -0,0 +1,91 @@ +- name: Put khronos cron queues scripts in place + become: yes + tags: cron + template: + src: "khronos-cron-queues.sh" + mode: 0744 + dest: /opt/runnable/{{ item.script }} + with_items: + - { cron_queues: "{{ main_cron_queues }}", script: "main-cron.sh"} + - { cron_queues: "{{ canary_cron_queues }}", script: "canary-cron.sh"} + - { cron_queues: "{{ hourly_canary_cron_queues }}", script: "hourly-canary-cron.sh"} + +- name: Put khronos cron events scripts in place + become: yes + tags: cron + template: + src: "khronos-cron-events.sh" + mode: 0744 + dest: /opt/runnable/{{ item.script }} + with_items: + - { cron_event: "time.one-day.passed", script: "1_day_passed_cron_event.sh"} + - { cron_event: "time.four-hours.passed", script: "4_hours_passed_cron_event.sh"} + - { cron_event: "time.one-hour.passsed", script: "1_hour_passed_cron_event.sh"} + - { cron_event: "time.thirty-minutes.passed", script: "30_minutes_passed_cron_event.sh"} + - { cron_event: "time.five-minutes.passed", script: "5_minutes_passed_cron_event.sh"} + +- name: Put Khronos script into crontab + become: yes + tags: cron + cron: + name: "{{ item.name }}" + job: /opt/runnable/{{ item.script }} >> {{ app_log_dir }}/{{ item.script }}.log 2>&1 + minute: "{{ item.minute | default('*') }}" + hour: "{{ item.hour | default('*') }}" + state: "{{ item.state | default('present') }}" + with_items: + - name: Khronos CLI - Fourth Hour Cleanup Queue + minute: 13 + hour: 1,4,7,10,13,16,19,22 + script: main-cron.sh + - name: Khronos CLI - Canary Queue + minute: "*/5" + script: canary-cron.sh + state: "{% if node_env == 'production-delta' %}present{% else %}absent{% endif %}" + - name: Khronos CLI - Ever 30 Minutes Canary Queue + minute: "*/30" + script: hourly-canary-cron.sh + state: "{% if node_env == 'production-delta' %}present{% else %}absent{% endif %}" + - name: Khronos CLI - Daily Time Event + hour: 10 # 10 am UTC / 2 am PST + script: 1_day_passed_cron_event.sh + - name: Khronos CLI - 4-Hourly Time Event + minute: 13 + hour: 1,4,7,10,13,16,19,22 + script: 4_hours_passed_cron_event.sh + - name: Khronos CLI - Hourly Time Event + minute: 49 + script: 1_hour_passed_cron_event.sh + - name: Khronos CLI - Half-Hourly Time Event + minute: "*/30" + script: 30_minutes_passed_cron_event.sh + - name: Khronos CLI - Every 5 minutes Time Event + minute: "*/5" + script: 5_minutes_passed_cron_event.sh + +- name: make directory for mongo certificates + become: yes + file: + dest: /opt/ssl/mongo-client + state: directory + +- name: put client CA in place for mongo + become: yes + copy: + dest: /opt/ssl/mongo-client/ca.pem + content: "{{ new_client_certs.data.issuing_ca }}" + mode: 0400 + +- name: put client certificate in place for mongo + become: yes + copy: + dest: /opt/ssl/mongo-client/cert.pem + content: "{{ new_client_certs.data.certificate }}" + mode: 0400 + +- name: put client private key in place for mongo + become: yes + copy: + dest: 
/opt/ssl/mongo-client/key.pem + content: "{{ new_client_certs.data.private_key }}" + mode: 0400 diff --git a/ansible/roles/khronos/templates/khronos-cron-events.sh b/ansible/roles/khronos/templates/khronos-cron-events.sh new file mode 100755 index 00000000..d48bff3b --- /dev/null +++ b/ansible/roles/khronos/templates/khronos-cron-events.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +docker run --rm {{ container_image }}:{{ container_tag }} bash -c " \ + /khronos/bin/cli.js --event {{ item.cron_event }} --job '{}' --host {{ cron_rabbit_host_address }} {{ cron_rabbit_auth }};" diff --git a/ansible/roles/khronos/templates/khronos-cron-queues.sh b/ansible/roles/khronos/templates/khronos-cron-queues.sh new file mode 100755 index 00000000..1900e800 --- /dev/null +++ b/ansible/roles/khronos/templates/khronos-cron-queues.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +docker run --rm {{ container_image }}:{{ container_tag }} bash -c " \ + for QUEUE in {{ item.cron_queues }}; do \ + /khronos/bin/cli.js --queue \$QUEUE --job '{}' --host {{ cron_rabbit_host_address }} {{ cron_rabbit_auth }}; \ + done" diff --git a/ansible/roles/load/files/test.sh b/ansible/roles/load/files/test.sh new file mode 100755 index 00000000..dcd2e9ae --- /dev/null +++ b/ansible/roles/load/files/test.sh @@ -0,0 +1,15 @@ +#!/bin/bash +for i in `seq 1 $CUR`; do + CUR=1 + cd /runnable/node-hello-world + echo "" > data + for i in `seq 1 $CUR`; do + ./load.sh & + done + + for job in `jobs -p`; do + wait $job + done + + cat data +done \ No newline at end of file diff --git a/ansible/roles/load/tasks/main.yml b/ansible/roles/load/tasks/main.yml new file mode 100644 index 00000000..b154e4e7 --- /dev/null +++ b/ansible/roles/load/tasks/main.yml @@ -0,0 +1,22 @@ +--- +- name: create node-hello-world dir + become: true + file: + path=/runnable/node-hello-world/ + state=directory + owner={{ ansible_env.USER }} + +- name: git pull repo + git: + repo=https://github.com/anandkumarpatel/node-hello-world.git + dest=/runnable/node-hello-world + version=master + update=yes + accept_hostkey=True + +- script: test.sh + register: out + tags: run + +- debug: var=out + tags: run diff --git a/ansible/roles/local-vault/handlers/main.yml b/ansible/roles/local-vault/handlers/main.yml new file mode 100644 index 00000000..7c055ec5 --- /dev/null +++ b/ansible/roles/local-vault/handlers/main.yml @@ -0,0 +1,8 @@ +--- +- name: stop vault + local_action: + shell kill $(ps aux | grep "vault server" | grep -v grep | cut -d' ' -f3) + +- name: remove vault config + local_action: + command rm -f /tmp/vault.pid /tmp/vault.hcl diff --git a/ansible/roles/local-vault/tasks/main.yml b/ansible/roles/local-vault/tasks/main.yml new file mode 100644 index 00000000..29befa21 --- /dev/null +++ b/ansible/roles/local-vault/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: create vault config + run_once: true + local_action: + template + dest=/tmp/vault.hcl + src=vault.hcl.j2 + notify: + - remove vault config + +- name: start vault daemon + run_once: true + local_action: + shell vault server --config=/tmp/vault.hcl > /tmp/log 2>&1 & + notify: + - stop vault + +- name: pause for start + pause: + seconds: 5 + +- name: check vault seal + tags: [ unseal ] + run_once: true + local_action: + command vault status + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + ignore_errors: True + register: seal_data + changed_when: "'Sealed: true' in seal_data.stdout" + +- name: unseal vault + tags: [ unseal ] + run_once: true + when: seal_data.changed + local_action: + command vault 
unseal {{ item }} + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + with_items: + - "{{ vault_unseal_tokens.one }}" + - "{{ vault_unseal_tokens.two }}" + - "{{ vault_unseal_tokens.three }}" + +- name: pause to unseal + pause: + seconds: 1 diff --git a/ansible/roles/local-vault/templates/vault.hcl.j2 b/ansible/roles/local-vault/templates/vault.hcl.j2 new file mode 100644 index 00000000..f753aa83 --- /dev/null +++ b/ansible/roles/local-vault/templates/vault.hcl.j2 @@ -0,0 +1,13 @@ +disable_mlock = true + +backend "s3" { + bucket = "runnable.vault" + access_key = "{{ vault_aws_access_key }}" + secret_key = "{{ vault_aws_secret_key }}" + region = "us-west-2" +} + +listener "tcp" { + address = "127.0.0.1:{{ vault_local_port }}" + tls_disable = 1 +} diff --git a/ansible/roles/local-vault/vars/main.yml b/ansible/roles/local-vault/vars/main.yml new file mode 100644 index 00000000..dbf9d1f6 --- /dev/null +++ b/ansible/roles/local-vault/vars/main.yml @@ -0,0 +1,34 @@ +$ANSIBLE_VAULT;1.1;AES256 +35373162356535346134653633636333643665633235316564326661643333303038636666323635 +6535373634323066383834393134656362393963626666340a616534346338313735646265353964 +39363062316130666133643866346338366439626236666639383933616164336435353134373833 +3535636133396331370a386633383861336133643534313037353937396662333533303362393235 +35333039623438313030393233373961666464666436626434343332653661373266373736356664 +61663033623038613231373165373362393937356433623865656362626333643461663436396231 +66346264623238613661666463336635646232366639643331626363353131323838643763396635 +66393935643863383531613231346337326662396230313361616563346632353139636337373030 +34663062613565363035663763383065353866356330386639626264666361656436316639663863 +61303464303266316463356163636662353236376566383136333334393462356638316236613565 +33623536663763633263666234653336633962663737303835336161393430316334313031623035 +32393230633064373839363562366533663838656331323836303735303733313739306362313433 +33343030333535393664616334383430656336633636326233366233656332366665653733623064 +61353933303530306566363533303031333234373038623963323738666135303434373233326537 +37653661626536653230303433663534666439373331346535316438373063323634643662663438 +62383234346336633863343461336662613662323939663263356537393537303661363534333563 +35306262656432343861666261333631323666616636313034323931613937653130313163343633 +61643931303563626561396337303830646663616638366632376432363963353536636633656364 +31363661303635643462643563333733356431353861623432386130646434653264646561366363 +30396263613130643463646464383763623565376533303936646362376437643835303638316232 +35623531366530346335343461653636613930633664646433376665313431363139663530623337 +37343362373535383766373232626337336530356636373138333133383832653831396433393733 +32623963303764663538353335663136383964643031343434663134663964646164393230353337 +66636566346363626434363432323731656630353266653263323231376632613133393965643233 +31303831383463383565666665353136353439613161613864383766333133643837626239643962 +33393733326334323534383564613934323432326536326138356437653363313062366636366261 +65626137303561366630623933353064363438363735363438303735636434353530353563333935 +61396537666464646562323262663164316437623331343430633630343962383533366163316234 +65366134396130383364626436326434646566303330626330623830653162376266316336613365 +38653336363365393064666130363536333638643333663330346132323133663037383032363530 
+66653537376665613065353166373433313634656665383964366264613235386165346264306265 +61623335346264393937333761303765613433336531353764386437373432646139313732633131 +6365 diff --git a/ansible/roles/log-mkdir/tasks/main.yml b/ansible/roles/log-mkdir/tasks/main.yml new file mode 100644 index 00000000..942b5d69 --- /dev/null +++ b/ansible/roles/log-mkdir/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: create application logs directory + tags: rsyslog + when: app_log_dir != "/var/log" + become: true + file: + path="{{ app_log_dir }}" + state=directory + owner=root + group=syslog + mode=0775 + diff --git a/ansible/roles/loggly/files/20-loggly-tls.conf b/ansible/roles/loggly/files/20-loggly-tls.conf new file mode 100644 index 00000000..1f98fa12 --- /dev/null +++ b/ansible/roles/loggly/files/20-loggly-tls.conf @@ -0,0 +1,6 @@ +#RsyslogGnuTLS +$DefaultNetstreamDriverCAFile /etc/rsyslog.d/keys/ca.d/logs-01.loggly.com_sha12.crt +$ActionSendStreamDriver gtls +$ActionSendStreamDriverMode 1 +$ActionSendStreamDriverAuthMode x509/name +$ActionSendStreamDriverPermittedPeer *.loggly.com diff --git a/ansible/roles/loggly/files/50-default.conf b/ansible/roles/loggly/files/50-default.conf new file mode 100644 index 00000000..fe332701 --- /dev/null +++ b/ansible/roles/loggly/files/50-default.conf @@ -0,0 +1,71 @@ +# Default rules for rsyslog. +# +# For more information see rsyslog.conf(5) and /etc/rsyslog.conf + +# No buffering +$ActionQueueType Direct + +# +# First some standard log files. Log by facility. +# +auth,authpriv.* /var/log/auth.log +*.*;auth,authpriv.none -/var/log/syslog +#cron.* /var/log/cron.log +#daemon.* -/var/log/daemon.log +kern.* -/var/log/kern.log +#lpr.* -/var/log/lpr.log +mail.* -/var/log/mail.log +#user.* -/var/log/user.log + +# +# Logging for the mail system. Split it up so that +# it is easy to write scripts to parse these files. +# +#mail.info -/var/log/mail.info +#mail.warn -/var/log/mail.warn +mail.err /var/log/mail.err + +# +# Logging for INN news system. +# +news.crit /var/log/news/news.crit +news.err /var/log/news/news.err +news.notice -/var/log/news/news.notice + +# +# Some "catch-all" log files. +# +#*.=debug;\ +# auth,authpriv.none;\ +# news.none;mail.none -/var/log/debug +#*.=info;*.=notice;*.=warn;\ +# auth,authpriv.none;\ +# cron,daemon.none;\ +# mail,news.none -/var/log/messages + +# +# Emergencies are sent to everybody logged in. +# +*.emerg :omusrmsg:* + +# +# I like to have messages displayed on the console, but only on a virtual +# console I usually leave idle. +# +#daemon,mail.*;\ +# news.=crit;news.=err;news.=notice;\ +# *.=debug;*.=info;\ +# *.=notice;*.=warn /dev/tty8 + +# The named pipe /dev/xconsole is for the `xconsole' utility. To use it, +# you must invoke `xconsole' with the `-file' option: +# +# $ xconsole -file /dev/xconsole [...] +# +# NOTE: adjust the list below, or you'll go crazy if you have a reasonably +# busy site.. +# +daemon.*;mail.*;\ + news.err;\ + *.=debug;*.=info;\ + *.=notice;*.=warn |/dev/xconsole diff --git a/ansible/roles/loggly/files/archiveOldLogs.sh b/ansible/roles/loggly/files/archiveOldLogs.sh new file mode 100644 index 00000000..54bda4cf --- /dev/null +++ b/ansible/roles/loggly/files/archiveOldLogs.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +# +# This is to be run only for legacy logs in /var/log, not application logs in /var/log/runnable +# + +# we only want this run as root +if [ "root" != `whoami` ] ; then + echo "${0}: ERROR - This script needs to be run as root." 
+ exit 127 +fi + +# legacy log path +logdir=/var/log +# store log archives here, purge manually +archdir=/docker/archive +datetime=`date +%Y%m%d%H%m` + +# these logfiles haven't been modified in > 24 hours, so moving them without cleanup up filehandles first should be OK: +echo "Compressing logs > 24h" +find "${logdir}" -maxdepth 2 -type f -mmin +1440 -name '*.log' -exec bzip2 -9 {} \; -print +echo "Restarting rsyslogd" +# but we do need to clean the filehandles after, just in case +service rsyslog restart + +# archive anything > 6h +echo "Archiving logs > 6h" +mkdir -p "${archdir}" 2>&1 +find "${logdir}" -maxdepth 2 -type f -mtime +6 -name '*z' | xargs tar jcvpf "${archdir}"/log-archive-"${datetime}".tbz +echo "Purging logs > 1wk" +find "${logdir}" -maxdepth 2 -type f -mtime +6 -exec rm -f {} \; -print diff --git a/ansible/roles/loggly/files/logs-01.loggly.com_sha12.crt b/ansible/roles/loggly/files/logs-01.loggly.com_sha12.crt new file mode 100644 index 00000000..31a70521 --- /dev/null +++ b/ansible/roles/loggly/files/logs-01.loggly.com_sha12.crt @@ -0,0 +1,197 @@ +-----BEGIN CERTIFICATE----- +MIIFYTCCBEmgAwIBAgIIB1i8CkNiTSUwDQYJKoZIhvcNAQELBQAwgcYxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUw +IwYDVQQKExxTdGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTMwMQYDVQQLEypo +dHRwOi8vY2VydHMuc3RhcmZpZWxkdGVjaC5jb20vcmVwb3NpdG9yeS8xNDAyBgNV +BAMTK1N0YXJmaWVsZCBTZWN1cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIw +HhcNMTUwOTIzMjI1NjM4WhcNMTgwNDEwMDAxMDQ3WjBAMSEwHwYDVQQLExhEb21h +aW4gQ29udHJvbCBWYWxpZGF0ZWQxGzAZBgNVBAMTEmxvZ3MtMDEubG9nZ2x5LmNv +bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM17p0KBWDUaWuDmjD6U +B3geyK1/LfpZprDHSjJ+7HfdJX22KpEaO6FuLJra9CaSwyq8vwvVko93Is0BkdWt +hcFgE89I5UX16ieFCVb/WfADmEM8lIB3EkTmSkHglbaeI+wYEh5hUzOhavpQVTZ9 +XGvZZLyPvwUaNPwr/PfjTqWIlKLyNYZIfQwuADeb0C5meoSwmI+yC7ca4rCvMDLQ +EyzCJdDYV9/ugJUqGq2uhA2c+EFOP/Mvc51N11upWIMFgGoPWgGTuqbOAKf7w46x +oSNxgNaMFA0VsHW+HmxKnAQ5PuPNsheECQT3NqxvXMqez0voc8QuyBlrKVRrxeu5 +h00CAwEAAaOCAdYwggHSMAwGA1UdEwEB/wQCMAAwHQYDVR0lBBYwFAYIKwYBBQUH +AwEGCCsGAQUFBwMCMA4GA1UdDwEB/wQEAwIFoDA8BgNVHR8ENTAzMDGgL6Athito +dHRwOi8vY3JsLnN0YXJmaWVsZHRlY2guY29tL3NmaWcyczEtMTYuY3JsMFkGA1Ud +IARSMFAwTgYLYIZIAYb9bgEHFwEwPzA9BggrBgEFBQcCARYxaHR0cDovL2NlcnRp +ZmljYXRlcy5zdGFyZmllbGR0ZWNoLmNvbS9yZXBvc2l0b3J5LzCBggYIKwYBBQUH +AQEEdjB0MCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5zdGFyZmllbGR0ZWNoLmNv +bS8wRgYIKwYBBQUHMAKGOmh0dHA6Ly9jZXJ0aWZpY2F0ZXMuc3RhcmZpZWxkdGVj +aC5jb20vcmVwb3NpdG9yeS9zZmlnMi5jcnQwHwYDVR0jBBgwFoAUJUWBaFAmOD07 +LSy+zWrZtj2zZmMwNQYDVR0RBC4wLIISbG9ncy0wMS5sb2dnbHkuY29tghZ3d3cu +bG9ncy0wMS5sb2dnbHkuY29tMB0GA1UdDgQWBBRyVaLpluYAt1XsqWNl1g8v68GW +gDANBgkqhkiG9w0BAQsFAAOCAQEAEWHC5d39uw1r2qORpNBLLubB5N0R6dHCzwmH +E1b/TPXIkP3KZXbBGQqezRbRjj5NjxjNIt7C0PI2IzOBpgqscPYQ1JiApok8qiFJ +/pEOgnDwhI4Ao0dd/g0urnD6AmD6l5YjoXdFZtA1s0pa1EUar9/rIkeHbcpRY+je +Z9hi+Yt+NvllAgbML+EyFCp/hB9OSNmYrZGxkz7MeptC2iM4aNtFObmnl1sFec49 +hO766oe06grj/ToKLcCDcKgtsfiTt6yWWcN5c+NPp6LbciUE5VE8qcUSbyu6XjDK +m/vYlGMCycwqmXC/6xXjsfBh+R3smpSFIJvRjqVKzXdx8yc21Q== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFADCCA+igAwIBAgIBBzANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTExMDUwMzA3MDAw +MFoXDTMxMDUwMzA3MDAwMFowgcYxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTMwMQYDVQQLEypodHRwOi8vY2VydHMuc3RhcmZpZWxk 
+dGVjaC5jb20vcmVwb3NpdG9yeS8xNDAyBgNVBAMTK1N0YXJmaWVsZCBTZWN1cmUg +Q2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDlkGZL7PlGcakgg77pbL9KyUhpgXVObST2yxcT+LBxWYR6ayuF +pDS1FuXLzOlBcCykLtb6Mn3hqN6UEKwxwcDYav9ZJ6t21vwLdGu4p64/xFT0tDFE +3ZNWjKRMXpuJyySDm+JXfbfYEh/JhW300YDxUJuHrtQLEAX7J7oobRfpDtZNuTlV +Bv8KJAV+L8YdcmzUiymMV33a2etmGtNPp99/UsQwxaXJDgLFU793OGgGJMNmyDd+ +MB5FcSM1/5DYKp2N57CSTTx/KgqT3M0WRmX3YISLdkuRJ3MUkuDq7o8W6o0OPnYX +v32JgIBEQ+ct4EMJddo26K3biTr1XRKOIwSDAgMBAAGjggEsMIIBKDAPBgNVHRMB +Af8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUJUWBaFAmOD07LSy+ +zWrZtj2zZmMwHwYDVR0jBBgwFoAUfAwyH6fZMH/EfWijYqihzqsHWycwOgYIKwYB +BQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5zdGFyZmllbGR0ZWNo +LmNvbS8wOwYDVR0fBDQwMjAwoC6gLIYqaHR0cDovL2NybC5zdGFyZmllbGR0ZWNo +LmNvbS9zZnJvb3QtZzIuY3JsMEwGA1UdIARFMEMwQQYEVR0gADA5MDcGCCsGAQUF +BwIBFitodHRwczovL2NlcnRzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkv +MA0GCSqGSIb3DQEBCwUAA4IBAQBWZcr+8z8KqJOLGMfeQ2kTNCC+Tl94qGuc22pN +QdvBE+zcMQAiXvcAngzgNGU0+bE6TkjIEoGIXFs+CFN69xpk37hQYcxTUUApS8L0 +rjpf5MqtJsxOYUPl/VemN3DOQyuwlMOS6eFfqhBJt2nk4NAfZKQrzR9voPiEJBjO +eT2pkb9UGBOJmVQRDVXFJgt5T1ocbvlj2xSApAer+rKluYjdkf5lO6Sjeb6JTeHQ +sPTIFwwKlhR8Cbds4cLYVdQYoKpBaXAko7nv6VrcPuuUSvC33l8Odvr7+2kDRUBQ +7nIMpBKGgc0T0U7EPMpODdIm8QC3tKai4W56gf0wrHofx1l7 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEoDCCA4igAwIBAgIDORSEMA0GCSqGSIb3DQEBCwUAMGgxCzAJBgNVBAYTAlVT +MSUwIwYDVQQKExxTdGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQL +EylTdGFyZmllbGQgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x +NDAxMDEwNzAwMDBaFw0zMTA1MzAwNzAwMDBaMIGPMQswCQYDVQQGEwJVUzEQMA4G +A1UECBMHQXJpem9uYTETMBEGA1UEBxMKU2NvdHRzZGFsZTElMCMGA1UEChMcU3Rh +cmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UEAxMpU3RhcmZpZWxkIFJv +b3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQC97cED/PaP/AKxb1ufSNmdeeKitwNhVhjDR7bXyj01LolD +96Fpm96KGv0TIJy0SXcyKVb9ueyM3SL6ctwnYZfu9lqE7G4ZuYks3IRb1XT7a1/F +iaUQUolGVfS4dRzmf+RUrkv4VXJXAhn4F3FZ6x4oB3TFnUi+bLT0pLDzZDd5ksDs +Rl5/4W1TTGKvzR8LY7s6nfv8eQCYYXTPJoJAY/OycmoZDZnK1A51zDf7i4nBWfFi +f1+zX2Uw+Ke3TXZaHnZeNMDollaZirPwf6TNvdwyMXyRz+BfEfhrqklc0ZmU0aLj +Y1sJdrVWYuFLdB2W1CbUCARZ0JgODube/MPsH5DxAgMBAAGjggEpMIIBJTAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUfAwyH6fZMH/E +fWijYqihzqsHWycwHwYDVR0jBBgwFoAUv1+30c7dH4b0W1Ws3NcQwg6piOcwOgYI +KwYBBQUHAQEELjAsMCoGCCsGAQUFBzABhh5odHRwOi8vb2NzcC5zdGFyZmllbGR0 +ZWNoLmNvbS8wOAYDVR0fBDEwLzAtoCugKYYnaHR0cDovL2NybC5zdGFyZmllbGR0 +ZWNoLmNvbS9zZnJvb3QuY3JsMEwGA1UdIARFMEMwQQYEVR0gADA5MDcGCCsGAQUF +BwIBFitodHRwczovL2NlcnRzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkv +MA0GCSqGSIb3DQEBCwUAA4IBAQCFY8HZ3bn/qb2mGdy/EzoROCJUsawFEPt8s5Y/ +MYtm/4jz4b/7xx8A/0Zqi2EyyQFRdvuaxvogUchGxJjXeaPjBHI/i000U2fsMyx7 +6JQBKHw6NFsCdxaNQCUzsLxsl9cFev+Mhc5voFMAF24ebL0i1wqIN/Z965lB7yfL +jGBrTAF+ZVALT7iVmppuNP1zOjPxkdXzTi106O/TkDXxBmhk1NAT/VLTxm3BOoox +3QUmNUqMZbhSa4Hs0py1NBCXnD7GL+2OQkIkLulzmiX5EfHyI2nL5ZRpoNLcsPxE +iawXqMzVN3cWxYC5DI9XAlWZhXtJ8C5boMJXU12i6KY3wwH6 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf 
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFBzCCA++gAwIBAgICAgEwDQYJKoZIhvcNAQEFBQAwaDELMAkGA1UEBhMCVVMx +JTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsT +KVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2 +MTExNjAxMTU0MFoXDTI2MTExNjAxMTU0MFowgdwxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTkwNwYDVQQLEzBodHRwOi8vY2VydGlm +aWNhdGVzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkxMTAvBgNVBAMTKFN0 +YXJmaWVsZCBTZWN1cmUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxETAPBgNVBAUT +CDEwNjg4NDM1MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4qddo+1m +72ovKzYf3Y3TBQKgyg9eGa44cs8W2lRKy0gK9KFzEWWFQ8lbFwyaK74PmFF6YCkN +bN7i6OUVTVb/kNGnpgQ/YAdKym+lEOez+FyxvCsq3AF59R019Xoog/KTc4KJrGBt +y8JIwh3UBkQXPKwBR6s+cIQJC7ggCEAgh6FjGso+g9I3s5iNMj83v6G3W1/eXDOS +zz4HzrlIS+LwVVAv+HBCidGTlopj2WYN5lhuuW2QvcrchGbyOY5bplhVc8tibBvX +IBY7LFn1y8hWMkpQJ7pV06gBy3KpdIsMrTrlFbYq32X43or174Q7+edUZQuAvUdF +pfBE2FM7voDxLwIDAQABo4IBRDCCAUAwHQYDVR0OBBYEFElLUifRG7zyoSFqYntR +QnqK19VWMB8GA1UdIwQYMBaAFL9ft9HO3R+G9FtVrNzXEMIOqYjnMBIGA1UdEwEB +/wQIMAYBAf8CAQAwOQYIKwYBBQUHAQEELTArMCkGCCsGAQUFBzABhh1odHRwOi8v +b2NzcC5zdGFyZmllbGR0ZWNoLmNvbTBMBgNVHR8ERTBDMEGgP6A9hjtodHRwOi8v +Y2VydGlmaWNhdGVzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkvc2Zyb290 +LmNybDBRBgNVHSAESjBIMEYGBFUdIAAwPjA8BggrBgEFBQcCARYwaHR0cDovL2Nl +cnRpZmljYXRlcy5zdGFyZmllbGR0ZWNoLmNvbS9yZXBvc2l0b3J5MA4GA1UdDwEB +/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAhlK6sx+mXmuQpmQq/EWyrp8+s2Kv +2x9nxL3KoS/HnA0hV9D4NiHOOiU+eHaz2d283vtshF8Mow0S6xE7cV+AHvEfbQ5f +wezUpfdlux9MlQETsmqcC+sfnbHn7RkNvIV88xe9WWOupxoFzUfjLZZiUTIKCGhL +Indf90XcYd70yysiKUQl0p8Ld3qhJnxK1w/C0Ty6DqeVmlsFChD5VV/Bl4t0zF4o +aRN+0AqNnQ9gVHrEjBs1D3R6cLKCzx214orbKsayUWm/EheSYBeqPVsJ+IdlHaek +KOUiAgOCRJo0Y577KM/ozS4OUiDtSss4fJ2ubnnXlSyokfOGASGRS7VApA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa 
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFfzCCBGegAwIBAgIILqvAG0gVC3QwDQYJKoZIhvcNAQEFBQAwgdwxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUw +IwYDVQQKExxTdGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTkwNwYDVQQLEzBo +dHRwOi8vY2VydGlmaWNhdGVzLnN0YXJmaWVsZHRlY2guY29tL3JlcG9zaXRvcnkx +MTAvBgNVBAMTKFN0YXJmaWVsZCBTZWN1cmUgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkxETAPBgNVBAUTCDEwNjg4NDM1MB4XDTE1MDQwNjA1NDIzOFoXDTE2MDQwNjA1 +MzgzOFowQDEhMB8GA1UECxMYRG9tYWluIENvbnRyb2wgVmFsaWRhdGVkMRswGQYD +VQQDExJsb2dzLTAxLmxvZ2dseS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQCo6JQSJCK8y67Wfg3bVCBndVmjzF52shm/Qn+dDxk6ge06zVgfZ5cP +D35YDQqdbHdyu1Jq59Ak4pu/Ta5uWvOhUuqsDUuYyfu9Bh6NGyCzvUiNFwOa9dH9 +W7JpFz/CUJqpsKAmwYNDeXuB0VFrLRxYCQTzqWBDuXnDvtfiMmvBvFiUKFfm4lUh +WV37ixUiXtx7xu+qJOqBeRwo0X5En5pk1oSTzHZaTtEExbdezV3vOQixHtchkrRN +KlbohdkrUpZZn9Z21K+FhOTmp/u03DhgiQXav6bxkW1Po8ZBPlyJRlHXe27XbqZm +o1yJJn2F33M7gNuKlspO3cdS1UqDqWelAgMBAAGjggHeMIIB2jAMBgNVHRMBAf8E +AjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAOBgNVHQ8BAf8EBAMC +BaAwOQYDVR0fBDIwMDAuoCygKoYoaHR0cDovL2NybC5zdGFyZmllbGR0ZWNoLmNv +bS9zZnMxLTI3LmNybDBZBgNVHSAEUjBQME4GC2CGSAGG/W4BBxcBMD8wPQYIKwYB +BQUHAgEWMWh0dHA6Ly9jZXJ0aWZpY2F0ZXMuc3RhcmZpZWxkdGVjaC5jb20vcmVw +b3NpdG9yeS8wgY0GCCsGAQUFBwEBBIGAMH4wKgYIKwYBBQUHMAGGHmh0dHA6Ly9v +Y3NwLnN0YXJmaWVsZHRlY2guY29tLzBQBggrBgEFBQcwAoZEaHR0cDovL2NlcnRp +ZmljYXRlcy5zdGFyZmllbGR0ZWNoLmNvbS9yZXBvc2l0b3J5L3NmX2ludGVybWVk +aWF0ZS5jcnQwHwYDVR0jBBgwFoAUSUtSJ9EbvPKhIWpie1FCeorX1VYwNQYDVR0R +BC4wLIISbG9ncy0wMS5sb2dnbHkuY29tghZ3d3cubG9ncy0wMS5sb2dnbHkuY29t +MB0GA1UdDgQWBBT0MM8oRzFYKeB0kuQkvExGRBXwWjANBgkqhkiG9w0BAQUFAAOC +AQEAQ9JNeNIPx+DacFSPG+AV3blBhgfZQXfLO2Wbls2Vuol7PtDKHuaoBSQE1RYE +A/iyXI3OJnNivGU/V2p4weHgitpNpQ8AJ7uZVERIUCOlCYJaDSevpFfoALQK2rWr +gegZZ6gVkdFanhHCRW4a2apLCRnUbt//7k1G6Fw8v+YCzyVtf31AnY/bhknWAfDc +oldME9cCeAPT8WvCC3Xmrrd1FxlVkEGyshAzEpA1BNeVQM4iB17Up2tXQIv+ehsU +cUJz4IKut0lglszuanEfAazOzEn37n/2Q3cNx5IDEHv3z4fBLwNfd9yT14izqKLJ +ODRffuOanfiyg+bXxdmuhfXUqQ== +-----END CERTIFICATE----- + diff --git a/ansible/roles/loggly/files/purgeLogs.sh b/ansible/roles/loggly/files/purgeLogs.sh new file mode 100644 index 00000000..cb000fc2 --- /dev/null +++ b/ansible/roles/loggly/files/purgeLogs.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash + +# +# This is to be run logs in {{ app_log_dir }}. +# Runs from crontab. 
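+# Usage: purgeLogs.sh <log-directory>
+# (The loggly role's cron entry invokes it as: /opt/runnable/bin/purgeLogs.sh {{ app_log_dir }})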
+# + +logdir="${1}" + +# We can compress anything older than 6 hours +find "${logdir}" -mindepth 2 -type f -mmin +360 -name '*.log' -exec bzip2 -9 {} \; + +# We automatically purge anything > 1wk +find "${logdir}" -maxdepth 2 -type f -mtime +7 -exec rm -f {} \; diff --git a/ansible/roles/loggly/files/rsyslog.conf b/ansible/roles/loggly/files/rsyslog.conf new file mode 100644 index 00000000..2301c962 --- /dev/null +++ b/ansible/roles/loggly/files/rsyslog.conf @@ -0,0 +1,71 @@ +# /etc/rsyslog.conf Configuration file for rsyslog. +# +# For more information see +# /usr/share/doc/rsyslog-doc/html/rsyslog_conf.html +# +# Default logging rules can be found in /etc/rsyslog.d/50-default.conf +$MaxMessageSize 1024k + +################# +#### MODULES #### +################# + +$ModLoad imuxsock # provides support for local system logging +$ModLoad imklog # provides kernel logging support +#$ModLoad immark # provides --MARK-- message capability + +# provides UDP syslog reception +$ModLoad imudp +$UDPServerRun 514 + +# provides TCP syslog reception +$ModLoad imtcp +$InputTCPServerRun 514 + +# Enable non-kernel facility klog messages +$KLogPermitNonKernelFacility on + +########################### +#### GLOBAL DIRECTIVES #### +########################### + +# +# Set main message queue to use direct queuing (not queued) mode +# + +$MainMsgQueueType Direct + +# +# Use traditional timestamp format. +# To enable high precision timestamps, comment out the following line. +# +$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat + +# Runnable JSON logging format +# Creates an "output formatter" template that accepts as input JSON and prints it out without any further processing ("raw JSON"). +# The formatting around the %msg% string is as such: start printing at the second character "2" until the end of the line "$" using the raw JSON format type. +$template RunnableJSON,"%msg:2:$:%\n" + +# Filter duplicated messages +$RepeatedMsgReduction on + +# +# Set the default permissions for all log files. 
+# +$FileOwner syslog +$FileGroup adm +$FileCreateMode 0644 +$DirCreateMode 0755 +$Umask 0022 +$PrivDropToUser syslog +$PrivDropToGroup adm + +# +# Where to place spool and state files +# +$WorkDirectory /var/spool/rsyslog + +# +# Include all config files in /etc/rsyslog.d/ +# +$IncludeConfig /etc/rsyslog.d/*.conf diff --git a/ansible/roles/loggly/meta/main.yml b/ansible/roles/loggly/meta/main.yml new file mode 100644 index 00000000..1d1930df --- /dev/null +++ b/ansible/roles/loggly/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: bash_aliases } diff --git a/ansible/roles/loggly/tasks/main.yml b/ansible/roles/loggly/tasks/main.yml new file mode 100644 index 00000000..a9556ae1 --- /dev/null +++ b/ansible/roles/loggly/tasks/main.yml @@ -0,0 +1,156 @@ +--- +- name: Install rsyslog-gnutls + become: yes + tags: loggly + apt: + pkg=rsyslog-gnutls + state=latest + update_cache=yes + cache_valid_time=604800 + install_recommends=yes + +- name: copy rsyslog default config + tags: loggly + become: true + copy: + src=50-default.conf + dest=/etc/rsyslog.d/50-default.conf + owner=syslog + group=syslog + +- name: copy loggly TLS config + tags: loggly + become: true + copy: + src=20-loggly-tls.conf + dest=/etc/rsyslog.d/20-loggly-tls.conf + owner=syslog + group=syslog + +- name: create loggly TLS cert dir + tags: loggly + become: true + file: + path=/etc/rsyslog.d/keys/ca.d + state=directory + owner=syslog + group=syslog + +- name: copy loggly TLS certs + tags: loggly + become: true + copy: + src=logs-01.loggly.com_sha12.crt + dest=/etc/rsyslog.d/keys/ca.d/logs-01.loggly.com_sha12.crt + owner=syslog + group=syslog + mode=400 + +- name: copy loggly config + tags: loggly + become: true + template: + src=22-loggly.conf.j2 + dest=/etc/rsyslog.d/22-loggly.conf + owner=syslog + group=syslog + +- name: copy docker upstart logs config + when: node_env != 'default' + tags: loggly + become: true + template: + src=21-rotated-docker.conf.j2 + dest=/etc/rsyslog.d/21-rotated-docker.conf + owner=syslog + group=syslog + +- name: copy app config + when: name is defined + tags: loggly + become: true + template: + src=21-output-syslog.conf.j2 + dest=/etc/rsyslog.d/21-rotated-{{ name }}.conf + owner=syslog + group=syslog + +- name: copy rsyslog config + tags: loggly + become: true + copy: + src=rsyslog.conf + dest=/etc/rsyslog.conf + owner=syslog + group=syslog + +- name: stop rsyslog + tags: [loggly, deploy] + become: true + service: name=rsyslog state=stopped + +- name: clear rsyslog state file + when: name is defined + tags: [loggly, deploy] + become: true + file: + path=/var/spool/rsyslog/stat-{{ name }} + state=absent + +- name: ensure log path + tags: loggly + become: true + file: + path="{{ app_log_dir }}" + state=directory + owner=syslog + group=adm + mode=0755 + recurse=yes + +- name: restart rsyslog + tags: [ loggly, deploy ] + become: true + service: name=rsyslog state=restarted + +- name: add loggly monitor + tags: cron + cron: + name="loggly monitor" + minute="*/2" + job="echo 'loggly monitoring' 2>&1 | /usr/bin/logger -t LogMonitor" + state=present + +- name: runnable bin directory + tags: [ loggly, clean ] + become: true + file: + path=/opt/runnable/bin + state=directory + owner=ubuntu + group=ubuntu + mode=0700 + +- name: copy log purge script + tags: [ loggly, clean ] + copy: + src=purgeLogs.sh + dest=/opt/runnable/bin/purgeLogs.sh + owner=ubuntu + group=ubuntu + mode=0700 + +- name: purge log files + tags: [ loggly, clean ] + become: true + cron: + name="purge log files" + minute=0 + 
job="/opt/runnable/bin/purgeLogs.sh {{ app_log_dir }}" + state=present + +- name: clean legacy log files + tags: [ loggly, clean ] + become: true + script: archiveOldLogs.sh + register: purge_out diff --git a/ansible/roles/loggly/templates/21-output-syslog.conf.j2 b/ansible/roles/loggly/templates/21-output-syslog.conf.j2 new file mode 100644 index 00000000..b384c89a --- /dev/null +++ b/ansible/roles/loggly/templates/21-output-syslog.conf.j2 @@ -0,0 +1,12 @@ +$WorkDirectory /var/spool/rsyslog + +# Rotate per hour +$ActionQueueType Direct +$template RotateHourly_{{ name }},"{{ app_log_dir }}/%$YEAR%/%$MONTH%/%$DAY%/%$HOUR%/{{ name }}.log" +if $syslogtag contains '{{ name }}' and $syslogfacility-text == 'local7' then { action (type="omfile" DynaFile="RotateHourly_{{ name }}" template="RunnableJSON" dirCreateMode="0755" FileCreateMode="0644") } + +# Loggly: Add a tag for {{ name }} events +$ActionQueueType LinkedList +$template LogglyFormat_{{ name }},"<%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% %procid% %syslogtag% [{{ loggly_token }}@41058 tag=\"runnable\" tag=\"{{ node_env }}\"] %msg%\n" +if $syslogtag contains '{{ name }}' and $syslogfacility-text == 'local7' then @@logs-01.loggly.com:6514;LogglyFormat_{{ name }} +if $syslogtag contains '{{ name }}' and $syslogfacility-text == 'local7' then stop diff --git a/ansible/roles/loggly/templates/21-rotated-docker.conf.j2 b/ansible/roles/loggly/templates/21-rotated-docker.conf.j2 new file mode 100644 index 00000000..abcbaad2 --- /dev/null +++ b/ansible/roles/loggly/templates/21-rotated-docker.conf.j2 @@ -0,0 +1,12 @@ +$WorkDirectory /var/spool/rsyslog + +# Rotate per hour +$ActionQueueType Direct +$template RotateHourly_docker_engine,"{{ app_log_dir }}/%$YEAR%/%$MONTH%/%$DAY%/%$HOUR%/docker_engine.log" +if $syslogtag contains 'docker_engine' and $syslogfacility-text == 'local7' then { action (type="omfile" DynaFile="RotateHourly_docker_engine" template="RunnableJSON" dirCreateMode="0755" FileCreateMode="0644") } + +# Loggly: Add a tag for docker_engine events +$ActionQueueType LinkedList +$template LogglyFormat_docker_engine,"<%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% %procid% %syslogtag% [{{ loggly_token }}@41058 tag=\"runnable\" tag=\"{{ node_env }}\"] %msg%\n" +if $syslogtag contains 'docker_engine' and $syslogfacility-text == 'local7' then @@logs-01.loggly.com:6514;LogglyFormat_docker_engine +if $syslogtag contains 'docker_engine' and $syslogfacility-text == 'local7' then stop diff --git a/ansible/roles/loggly/templates/22-loggly.conf.j2 b/ansible/roles/loggly/templates/22-loggly.conf.j2 new file mode 100644 index 00000000..3ee1a95b --- /dev/null +++ b/ansible/roles/loggly/templates/22-loggly.conf.j2 @@ -0,0 +1,18 @@ +# ------------------------------------------------------- +# Syslog Logging Directives for Loggly (sandboxes.loggly.com) +# ------------------------------------------------------- + +# Define the template used for sending logs to Loggly. Do not change this format. 
+$template LogglyFormat,"<%pri%>%protocol-version% %timestamp:::date-rfc3339% %HOSTNAME% %app-name% %procid% %msgid% [{{ loggly_token }}@41058 tag=\"{{ node_env }}\"] %msg%\n" + +$WorkDirectory /var/spool/rsyslog # where to place spool files +$ActionQueueFileName fwdRule1 # unique name prefix for spool files +$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible) +$ActionQueueSaveOnShutdown on # save messages to disk on shutdown +$ActionQueueType LinkedList # run asynchronously +$ActionResumeRetryCount -1 # infinite retries if host is down + +# Send messages to Loggly over TCP using the template. +*.* @@logs-01.loggly.com:6514;LogglyFormat + +# ------------------------------------------------------- diff --git a/ansible/roles/mongo-server/meta/main.yml b/ansible/roles/mongo-server/meta/main.yml new file mode 100644 index 00000000..a86d80f9 --- /dev/null +++ b/ansible/roles/mongo-server/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- { role: tls-server-cert, name: mongodb } diff --git a/ansible/roles/mongo-server/tasks/main.yml b/ansible/roles/mongo-server/tasks/main.yml new file mode 100644 index 00000000..9368e698 --- /dev/null +++ b/ansible/roles/mongo-server/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- name: make folder for certificates + tags: [ tls ] + become: true + file: + dest: /opt/ssl/mongo-server + state: directory + owner: mongodb + group: mongodb + +- name: create server pem file + tags: [ tls ] + become: true + copy: + dest: /opt/ssl/mongo-server/mongo.pem + content: "{{ new_certs.data.private_key }}\n{{ new_certs.data.certificate }}\n" + owner: mongodb + group: mongodb + mode: 0440 + +- name: create server CA file + tags: [ tls ] + become: true + copy: + dest: /opt/ssl/mongo-server/ca.pem + content: "{{ new_certs.data.issuing_ca }}\n" + owner: mongodb + group: mongodb + mode: 0440 diff --git a/ansible/roles/nginx-proxied-service/tasks/main.yml b/ansible/roles/nginx-proxied-service/tasks/main.yml new file mode 100644 index 00000000..dded3fbb --- /dev/null +++ b/ansible/roles/nginx-proxied-service/tasks/main.yml @@ -0,0 +1,44 @@ +--- +# Get port information from the hosted service +- name: get socket server ports + tags: [ configure_proxy, deploy ] + become: true + when: hosted_ports is defined + shell: | + for c in {{ new_container_ids.stdout_lines|join(' ') }}; do + {% for hosted_port in hosted_ports %} + docker port $c {{ hosted_port }} | cut -d ':' -f 2 + {% endfor %} + done + args: + executable: /bin/bash + register: proxy_target_ports + +# everything from this point on is delegated to the nginx host +- name: print target info + delegate_to: "{{ nginx_host }}" + tags: [ configure_proxy, deploy ] + debug: + msg: | + proxy target ports -- {{ proxy_target_ports }} + ip -- {{ target_ip_address }} + +- name: put configuration in place + delegate_to: "{{ nginx_host }}" + tags: [ configure_proxy, deploy ] + become: yes + template: + src: "{{ item }}" + dest: /etc/nginx/sites-enabled/{{ item }} + with_items: "{{ templates }}" + +- name: reload nginx + delegate_to: "{{ nginx_host }}" + tags: [ configure_proxy, deploy ] + become: yes + shell: > + docker ps | + awk '/nginx/{ print $1 }' | + xargs -n 1 docker kill --signal SIGHUP + args: + executable: /bin/bash diff --git a/ansible/roles/nginx-proxied-service/templates/00-nginx-status.conf b/ansible/roles/nginx-proxied-service/templates/00-nginx-status.conf new file mode 100644 index 00000000..24cbcc58 --- /dev/null +++ b/ansible/roles/nginx-proxied-service/templates/00-nginx-status.conf @@ -0,0 +1,8 @@ +server { + 
listen 80; + server_name {{ ansible_default_ipv4.address }}; + + location /basic_status { + stub_status; + } +} diff --git a/ansible/roles/nginx-proxied-service/templates/01-socket-server.conf b/ansible/roles/nginx-proxied-service/templates/01-socket-server.conf new file mode 100644 index 00000000..c7bd65d3 --- /dev/null +++ b/ansible/roles/nginx-proxied-service/templates/01-socket-server.conf @@ -0,0 +1,56 @@ +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +upstream socketserver { + sticky; + {% for port in proxy_target_ports.stdout_lines -%} + server {{ target_ip_address }}:{{ port }}; + {% endfor %} +} + +server { + listen 80; + server_name apisock.{{ domain }}; + + return 301 https://$server_name$request_uri; +} + +server { + listen 443 ssl; + server_name apisock.{{ domain }}; + gzip off; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ domain }}/chained.pem; + ssl_certificate_key /etc/ssl/certs/{{ domain }}/key.pem; + ssl_trusted_certificate /etc/ssl/certs/{{ domain }}/ca.pem; + ssl_dhparam /etc/ssl/certs/{{ domain }}/dhparam.pem; + + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + + # add_header Strict-Transport-Security "max-age=15724800; includeSubdomains; preload"; + # add_header X-Frame-Options "DENY"; + + ssl_stapling on; + ssl_stapling_verify on; + resolver 8.8.8.8 8.8.4.4 valid=300s; + resolver_timeout 5s; + + location / { + proxy_pass http://socketserver; + proxy_http_version 1.1; + proxy_set_header upgrade $http_upgrade; + proxy_set_header connection $connection_upgrade; + + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol 'https'; + } +} diff --git a/ansible/roles/nginx-proxied-service/templates/11-eru-server.conf b/ansible/roles/nginx-proxied-service/templates/11-eru-server.conf new file mode 100644 index 00000000..930d758a --- /dev/null +++ b/ansible/roles/nginx-proxied-service/templates/11-eru-server.conf @@ -0,0 +1,68 @@ +server { + listen 80; + server_name eru.{{ domain }}; + + return 301 https://$server_name$request_uri; +} + +server { + listen 443 ssl; + server_name eru.{{ domain }}; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ domain }}/chained.pem; + ssl_certificate_key /etc/ssl/certs/{{ domain }}/key.pem; + ssl_trusted_certificate /etc/ssl/certs/{{ domain }}/ca.pem; + ssl_dhparam /etc/ssl/certs/{{ domain }}/dhparam.pem; + + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + + add_header Strict-Transport-Security "max-age=15724800; includeSubdomains; preload"; + add_header X-Frame-Options "DENY"; + + ssl_stapling on; + ssl_stapling_verify on; + resolver 8.8.8.8 8.8.4.4 valid=300s; + resolver_timeout 5s; + + gzip on; + gzip_vary on; + gzip_proxied any; + gzip_types text/plain text/css application/json application/x-javascript application/javascript text/xml application/xml application/xml+rss text/javascript; + + location / { + expires 300; + proxy_pass http://{{ target_ip_address }}:{{ proxy_target_ports.stdout_lines[0] | trim }}; + proxy_set_header Host $host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol 'https'; + proxy_set_header 
x-forwarded-proto 'https'; + } + + location ~ ^/app/(.*)$ { + rewrite /app/(.*) /app/ break; + expires 300; + proxy_pass http://{{ target_ip_address }}:{{ proxy_target_ports.stdout_lines[0] | trim }}; + proxy_set_header Host $host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol 'https'; + proxy_set_header x-forwarded-proto 'https'; + proxy_set_header x-forwarded-path '/app/$1'; + } + + location /graphql { + proxy_pass http://{{ target_ip_address }}:{{ proxy_target_ports.stdout_lines[1] | trim }}; + proxy_set_header Host $host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol 'https'; + proxy_set_header x-forwarded-proto 'https'; + } +} diff --git a/ansible/roles/nginx/files/genNginxConf.js b/ansible/roles/nginx/files/genNginxConf.js new file mode 100644 index 00000000..19eefd46 --- /dev/null +++ b/ansible/roles/nginx/files/genNginxConf.js @@ -0,0 +1,138 @@ +var fs = require('fs'); +var xml2js = require('xml2js'); +var request = require('request'); +var exists = require('101/exists'); +var async = require('async'); +var parser = new xml2js.Parser(); + +var paths = { + 'login': true, + 'signup': true, + 'jobs': true, + 'about': true, + 'privacy': true +}; + +async.waterfall([ + nginxWebServicesRewrite, + nginxConfigHeader, + nginxManualLocations, + fs.readFile.bind(fs, 'sitemap.xml'), + parser.parseString.bind(parser), + function (result, cb) { + result.urlset.url.forEach(function (o) { + var path = o.loc[0].replace('http://runnable.com/', ''); + if (path.indexOf('/') !== -1) { + path = path.split('/').shift(); + } + // escape " + path = path.replace(/"/g, '\\"'); + // drop any including $ (messes w/ nginx) + if (path.indexOf('$') !== -1) { return; } + // drop ones where path now is undefined + if (!exists(path) || path.length === 0) { return; } + // monthly are our manual pages + if (o.changefreq[0] === 'monthly') { return; } + // keep track of dupes + if (paths[path]) { return; } + else { paths[path] = true; } + if (o.changefreq[0] === 'daily') { + if (o.priority[0] === '0.5') { + console.log('# CHANNEL:', path); + locationEqualsDirective(path); + } + } + else if (o.changefreq[0] === 'weekly') { + if (o.priority[0] === '0.6') { + console.log('# EXAMPLE:', path); + locationRegexDirective(path); + } + } + }); + cb(); + }, + nginxConfigFooter +], function (err) { + if (err) { + console.error('error:', err); + return process.exit(1); + } + process.exit(0); +}); + +function nginxWebServicesRewrite (cb) { + console.log([ + 'server {', + '\tserver_name ~^web-(?<token>.+)\.runnable.com$;', + '\tlocation = / {', + '\t\treturn 301 "$scheme://web-$token.runnablecodesnippets.com$request_uri";', + '\t}', + '}', + 'server {', + '\t server_name ~^services-(?<token>.+)\.runnable.com$;', + '\tlocation = / {', + '\t\treturn 301 "$scheme://services-$token.runnablecodesnippets.com$request_uri";', + '\t}', + '}', + '' + ].join('\n')); + cb(); +} + +function nginxConfigHeader (cb) { + console.log([ + 'server {', + '\tserver_name runnable.com;' + ].join('\n')); + cb(); +} + +function nginxManualLocations (cb) { + console.log([ + '### Directives we do not want to move to sandbox app yet', + 'location = / {', + '\treturn 302 $scheme://runnable.io;', + '}', + 'location = /login {', + '\treturn 302 $scheme://code.runnable.com/login;', + '}', + 'location = /signup {', + '\treturn 302 $scheme://code.runnable.com/signup;', + '}', 
'location = /about {', + '\treturn 302 $scheme://code.runnable.com/about;', + '}', + 'location = /jobs {', + '\treturn 302 $scheme://code.runnable.com/jobs;', + '}', + 'location = /privacy {', + '\treturn 302 $scheme://code.runnable.com/privacy;', + '}', + ].join('\n')); + cb(); +} + +function nginxConfigFooter (cb) { + console.log([ + '} # !server', + ].join('\n')); + cb(); +} + +function locationEqualsDirective (path) { + var quote = (path.indexOf(' ') !== -1) ? '"' : ''; + console.log([ + '\tlocation = ' + quote + '/' + path + quote + ' {', + '\t\treturn 301 ' + quote + '$scheme://code.runnable.com/' + path + quote + ';', + '\t}' + ].join('\n')); +} + +function locationRegexDirective (path) { + // these won't need any quotes to escape + console.log([ + '\tlocation ~ ^\\/' + path + '(\\/.+)?$ {', + '\t\treturn 301 $scheme://code.runnable.com/' + path + '$1;', + '\t}' + ].join('\n')); +} diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml new file mode 100644 index 00000000..d315b284 --- /dev/null +++ b/ansible/roles/nginx/tasks/main.yml @@ -0,0 +1,82 @@ +--- +- name: install nginx + become: true + apt: + update_cache=yes + cache_valid_time=604800 + pkg=nginx + state=present + +- name: install node dependencies (build-essential) + become: true + apt: + update_cache=yes + cache_valid_time=604800 + pkg=build-essential + state=present + +- name: get node install script + become: true + get_url: + url=https://deb.nodesource.com/setup + dest=~/node-install.sh + mode=744 + +- name: check for node PPA + become: true + register: ppa_check + file: + path=/etc/apt/sources.list.d/nodesource.list + +- name: install node PPA + become: true + when: ppa_check.changed + shell: ~/node-install.sh + +- name: install node + become: true + apt: + update_cache=yes + cache_valid_time=604800 + pkg=nodejs + state=present + +- name: copy node script for nginx + become: true + copy: + src=genNginxConf.js + dest=/tmp/genNginxConf.js + +# - name: download sitemap +# become: true +# get_url: +# url=http://runnable.com/sitemap.xml +# dest=/tmp/sitemap.xml +# mode=444 +# timeout=30 +# force=yes + +- name: install packages + become: true + shell: + npm install 101 request xml2js async + chdir=/tmp + +- name: generate nginx config + become: true + shell: node /tmp/genNginxConf.js > /etc/nginx/conf.d/runnable-migration.conf + chdir=/tmp + +- name: remove default nginx config + become: true + file: + path=/etc/nginx/sites-enabled/default + state=absent + +- name: test nginx config + become: true + shell: nginx -t + +- name: reload nginx + become: true + shell: nginx -s reload diff --git a/ansible/roles/node/README.md b/ansible/roles/node/README.md new file mode 100644 index 00000000..3c7dcd9f --- /dev/null +++ b/ansible/roles/node/README.md @@ -0,0 +1,26 @@ +Role Name +======== + +Ansible Role to Install Docker on CentOS 6.5 + +Role Variables +-------------- + +``` +docker_centos_packages: + - { package: "docker" } +``` + +Example Playbook +------------------------- + + - hosts: docker-servers + roles: + - { role: docker-centos, + tags: ["docker"] } + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/node/tasks/main.yml b/ansible/roles/node/tasks/main.yml new file mode 100644 index 00000000..752373c8 --- /dev/null +++ b/ansible/roles/node/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: install node.js + become: true + nodejs: + version={{node_version|default('0.10.40')}} + +- name: upgrade npm + become: true + npm: + name=npm + 
version={{npm_version|default('2.15.3')}} + global=yes diff --git a/ansible/roles/node_service/meta/main.yml b/ansible/roles/node_service/meta/main.yml new file mode 100644 index 00000000..26ed4014 --- /dev/null +++ b/ansible/roles/node_service/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: node } diff --git a/ansible/roles/node_service/tasks/main.yml b/ansible/roles/node_service/tasks/main.yml new file mode 100644 index 00000000..8b454355 --- /dev/null +++ b/ansible/roles/node_service/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: remove node_modules + tags: deploy + when: remove_node_modules is defined + file: + path=/opt/runnable/{{ app_name }}/node_modules + state=absent + +- name: npm install {{ app_name }} + tags: deploy + npm: + path=/opt/runnable/{{ app_name }} + state=latest + production=yes + +- name: make override file + tags: deploy + become: true + lineinfile: + dest="/etc/init/{{ app_name }}.override" + line="manual" + create=yes + +- name: create new config file + tags: deploy + become: true + template: + src=upstart.conf + dest=/etc/init/{{ app_name }}.conf + backup=yes + +- name: restart service {{ app_name }} + tags: deploy + become: true + when: dock is not defined + service: + name={{ app_name }} + state=restarted + enabled=yes diff --git a/ansible/roles/node_service/templates/upstart.conf b/ansible/roles/node_service/templates/upstart.conf new file mode 100644 index 00000000..7fac67c5 --- /dev/null +++ b/ansible/roles/node_service/templates/upstart.conf @@ -0,0 +1,41 @@ +#!upstart +description "{{ app_name }}" +author "Anandkumar Patel" + +env NPM_BIN=/usr/local/bin/npm +env APP_DIR=/opt/runnable/{{ app_name }} +env LOG_FILE={{ app_log_dir }}/{{ app_name }}.log +env NODE_ENV={{ node_env }} + +{% if enviroment_vars is defined %} +{% for name, value in enviroment_vars.iteritems() %} +env {{ name }}={{ value }} +{% endfor %} +{% endif %} + +start on (local-filesystems and net-device-up IFACE=eth0) +stop on shutdown + +script + touch $LOG_FILE + chdir $APP_DIR + echo $$ > /var/run/{{ app_name }}.pid + exec $NPM_BIN start >> $LOG_FILE 2>&1 +end script + +pre-start script + # Date format same as (new Date()).toISOString() for consistency + echo "[`date -u +%Y-%m-%dT%T.%3NZ`] (sys) Starting" >> $LOG_FILE +end script + +pre-stop script + rm /var/run/{{ app_name }}.pid + echo "[`date -u +%Y-%m-%dT%T.%3NZ`] (sys) Stopping" >> $LOG_FILE +end script + +post-start script + echo "===== App restarted =====" >> $LOG_FILE +end script + +respawn +respawn limit 5 1 # give up restart after 5 respawns in 1 seconds diff --git a/ansible/roles/notify/handlers/main.yml b/ansible/roles/notify/handlers/main.yml new file mode 100644 index 00000000..dd956a7a --- /dev/null +++ b/ansible/roles/notify/handlers/main.yml @@ -0,0 +1,51 @@ +--- +- name: send done message + tags: + - always + slack: + token={{ slack_token }} + channel={{ item }} + msg="{{ local_username.stdout }} is {{ notify_end_message }}" + with_items: + - "{{ slack_channels }}" + - "{{ extra_channels | default([]) }}" + changed_when: true + +- name: send rollbar message for deploy + when: rollbar_token is defined and node_env is defined and git_branch is defined + tags: + - rollbar + - always + rollbar_deployment: + token={{ rollbar_token }} + environment={{ node_env }} + revision={{ git_branch }} + rollbar_user={{ local_username.stdout }} + changed_when: true + +- name: send datadog message for deploy + when: datadog_api_key is defined and datadog_tags is defined and git_branch is defined + ignore_errors: true + tags: + 
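# ignore_errors above keeps a failed Datadog event post from breaking the rest of the deploy notification +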
- datadog + - always + datadog_event: + api_key={{ datadog_api_key }} + title="{{ name }} Deployment" + text="{{ local_username.stdout }} deployed {{ name }}@{{ git_branch }} to {{ node_env }}" + alert_type=success + tags="{{ datadog_tags }},host:{{ ansible_hostname }}" + changed_when: true + +- name: put deployed version in consul + delegate_to: "{{ groups['consul'][0] }}" + run_once: true + when: git_branch is defined and name is defined and consul_host_address is defined + tags: + - consul-environment + - always + uri: + method=PUT + url=http://{{ consul_host_address }}:8500/v1/kv/runnable/environment/{{ name }} + body="{{ git_branch }}" + changed_when: true diff --git a/ansible/roles/notify/tasks/main.yml b/ansible/roles/notify/tasks/main.yml new file mode 100644 index 00000000..9184b173 --- /dev/null +++ b/ansible/roles/notify/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: get the username running the deploy + tags: + - always + local_action: command whoami + register: local_username + delegate_to: 127.0.0.1 + +- name: send start message + run_once: yes + changed_when: True + tags: + - always + slack: + token={{ slack_token }} + channel={{ item }} + msg="{{ local_username.stdout }} is {{ notify_start_message }}" + with_items: + - "{{ slack_channels }}" + - "{{ extra_channels | default([]) }}" + notify: + - send done message + - send rollbar message for deploy + - send datadog message for deploy + - put deployed version in consul diff --git a/ansible/roles/notify/vars/main.yml b/ansible/roles/notify/vars/main.yml new file mode 100644 index 00000000..a544062c --- /dev/null +++ b/ansible/roles/notify/vars/main.yml @@ -0,0 +1,4 @@ +# message to say when starting deploy +notify_start_message: "starting to deploy {{ container_image | default(app_name) }} {{ container_tag | default(git_branch) }} to {{ ansible_hostname }}" +# message printed when deploy finished +notify_end_message: "finished deploying {{ container_image | default(app_name) }} {{ container_tag | default(git_branch) }} to {{ ansible_hostname }}" diff --git a/ansible/roles/package-aws/tasks/main.yml b/ansible/roles/package-aws/tasks/main.yml new file mode 100644 index 00000000..34ac6d50 --- /dev/null +++ b/ansible/roles/package-aws/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- name: Install ec2-metadata in /usr/local/bin + become: true + get_url: + url=http://s3.amazonaws.com/ec2metadata/ec2-metadata + dest=/usr/local/bin + mode=0755 + +- name: Download the zip file for ec2-api-tools + become: true + get_url: + url=http://s3.amazonaws.com/ec2-downloads/ec2-api-tools.zip + dest=/usr/local + mode=0755 + +- name: Unzip the ec2-api-tool + become: true + unarchive: + src: /usr/local/ec2-api-tools.zip + dest: /usr/local + copy: no + mode: 0755 + +- name: remove old ec2 dir/link + become: true + file: + path: /usr/local/ec2 + state: absent + +- name: Link to ec2 + become: true + file: + src=/usr/local/ec2-api-tools-1.7.5.1 + dest=/usr/local/ec2 + state=link diff --git a/ansible/roles/package-dock/tasks/main.yml b/ansible/roles/package-dock/tasks/main.yml new file mode 100644 index 00000000..880f453f --- /dev/null +++ b/ansible/roles/package-dock/tasks/main.yml @@ -0,0 +1,69 @@ +--- +- name: Install make + become: true + apt: + pkg=make + state=latest + update_cache=yes + cache_valid_time=604800 + +- name: Install unzip + become: true + apt: + pkg=unzip + state=latest + update_cache=yes + cache_valid_time=604800 + +- name: Install openjdk-7-jdk + become: true + apt: + pkg=openjdk-7-jdk + state=latest + update_cache=yes + 
cache_valid_time=604800 + +- name: Install jq + become: true + tags: "Install jq" + apt: + pkg=jq + state=latest + update_cache=yes + cache_valid_time=604800 + + +- name: Download Vault 041 + become: true + get_url: + url=https://releases.hashicorp.com/vault/0.4.1/vault_0.4.1_linux_amd64.zip + dest=/usr/local/bin + +- name: unzip vault_0.4.1_linux_amd64.zip + become: true + unarchive: + src: /usr/local/bin/vault_0.4.1_linux_amd64.zip + dest: /usr/local/bin + copy: no + mode: 0755 + +- name: Download Consul-Template + become: true + get_url: + url=https://releases.hashicorp.com/consul-template/0.11.1/consul-template_0.11.1_linux_amd64.zip + dest=/usr/local/bin + +- name: unzip + become: true + unarchive: + src: /usr/local/bin/consul-template_0.11.1_linux_amd64.zip + dest: /usr/local/bin + copy: no + mode: 0755 + +- name: Download weave 1.5.0 + become: true + get_url: + url=https://github.com/weaveworks/weave/releases/download/v1.5.0/weave + dest=/usr/local/bin + mode=0755 diff --git a/ansible/roles/package_ntp/defaults/main.yml b/ansible/roles/package_ntp/defaults/main.yml new file mode 100644 index 00000000..6cf338f4 --- /dev/null +++ b/ansible/roles/package_ntp/defaults/main.yml @@ -0,0 +1,6 @@ +--- +ntp_servers: +- 0.north-america.pool.ntp.org +- 1.north-america.pool.ntp.org +- 2.north-america.pool.ntp.org +- 3.north-america.pool.ntp.org diff --git a/ansible/roles/package_ntp/handlers/main.yml b/ansible/roles/package_ntp/handlers/main.yml new file mode 100644 index 00000000..067ffc8d --- /dev/null +++ b/ansible/roles/package_ntp/handlers/main.yml @@ -0,0 +1,7 @@ +--- +- name: Start and Enable ntp + become: true + service: + name=ntp + state=started + enabled=yes diff --git a/ansible/roles/package_ntp/tasks/main.yml b/ansible/roles/package_ntp/tasks/main.yml new file mode 100644 index 00000000..f9a2a526 --- /dev/null +++ b/ansible/roles/package_ntp/tasks/main.yml @@ -0,0 +1,18 @@ +--- +- name: Install ntp package + become: true + apt: + name=ntp + state=present + force=yes + update_cache=yes + cache_valid_time=604800 + +- name: Copy the ntp.conf template file + become: true + template: + src=ntp.conf.j2 + dest=/etc/ntp.conf + mode=0644 + notify: Start and Enable ntp + diff --git a/ansible/roles/package_ntp/templates/ntp.conf.j2 b/ansible/roles/package_ntp/templates/ntp.conf.j2 new file mode 100644 index 00000000..149c6c52 --- /dev/null +++ b/ansible/roles/package_ntp/templates/ntp.conf.j2 @@ -0,0 +1,19 @@ +# {{ ansible_managed }} + +driftfile /var/lib/ntp/ntp.drift + +statsdir /var/log/ntpstats/ +statistics loopstats peerstats clockstats +filegen loopstats file loopstats type day enable +filegen peerstats file peerstats type day enable +filegen clockstats file clockstats type day enable + +restrict -4 default kdo notrap nomodify nopeer noquery +restrict -6 default kdo notrap nomodify nopeer noquery + +restrict 127.0.0.1 +restrict ::1 + +{% for server in ntp_servers %} +server {{ server }} iburst +{% endfor %} diff --git a/ansible/roles/prometheus-alerts/tasks/main.yml b/ansible/roles/prometheus-alerts/tasks/main.yml new file mode 100644 index 00000000..61de0de9 --- /dev/null +++ b/ansible/roles/prometheus-alerts/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: create prometheus folder + become: true + file: + path: "{{ prometheus_alert_root }}" + state: directory + +- name: copy prometheus config to server + tags: [ deploy ] + become: true + template: + src=prometheus-alerts.yml + dest={{ prometheus_alert_root }}/prometheus-alerts.yml + owner=ubuntu + group=ubuntu + mode=0700 diff --git 
a/ansible/roles/prometheus-alerts/templates/prometheus-alerts.yml b/ansible/roles/prometheus-alerts/templates/prometheus-alerts.yml new file mode 100644 index 00000000..cf998db2 --- /dev/null +++ b/ansible/roles/prometheus-alerts/templates/prometheus-alerts.yml @@ -0,0 +1,54 @@ +global: + # ResolveTimeout is the time after which an alert is declared resolved + # if it has not been updated. + resolve_timeout: 10m + + slack_api_url: "{{ ops_slack_channel_url }}" + + pagerduty_url: "https://events.pagerduty.com/generic/2010-04-15/create_event.json" + +# The root node of the routing tree. +route: + # A default receiver + receiver: slack + + continue: true + + routes: + - match_re: + reportTo: .*pagerduty.* + receiver: pagerduty + + - match_re: + reportTo: .*drake.* + receiver: drake + +# A list of notification receivers. +receivers: +- name: "pagerduty" + pagerduty_configs: + - send_resolved: true + service_key: "{{ pager_duty_key }}" + description: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.description{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + slack_configs: + - send_resolved: true + channel: ops + title: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.summary{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + text: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.description{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + +- name: "slack" + slack_configs: + - send_resolved: true + channel: ops + title: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.summary{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + text: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.description{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + +- name: "drake" + webhook_configs: + - send_resolved: true + url: "http://{{ drake_hostname }}/prometheus" + slack_configs: + - send_resolved: true + channel: ops + title: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.summary{{ '}}' }} {{ '{{' }} end {{ '}}' }}" + text: "{{ '{{' }} range .Alerts {{ '}}' }} {{ '{{' }} .Annotations.description{{ '}}' }} {{ '{{' }} end {{ '}}' }}" diff --git a/ansible/roles/prometheus/files/alerts.conf b/ansible/roles/prometheus/files/alerts.conf new file mode 100644 index 00000000..8dd02a0d --- /dev/null +++ b/ansible/roles/prometheus/files/alerts.conf @@ -0,0 +1,90 @@ +ALERT HookDockUnresponsive + IF up == 0 + FOR 10m + LABELS { + reportTo = "drake", + type = "unresponsive" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock unresponsive host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}", + description = "(hook) Dock unresponsive host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}" + } + +ALERT DockUnresponsive + IF up == 0 + FOR 1h + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock unresponsive host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}", + description = "Dock unresponsive host={{ $labels.hostIp }} org={{ $labels.githubOrgId }" + } + +ALERT HookDockDockerDiskFull + IF (node_filesystem_size{device="/dev/xvdb"} - node_filesystem_free{device="/dev/xvdb"}) / node_filesystem_size{device="/dev/xvdb"} * 100 > 70 + FOR 5m + LABELS { + reportTo = "drake", + type = "disk_filled" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock /docker disk 70% host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}", + description = "(hook) Dock /docker disk 70% host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}" + } + +ALERT DockDockerDiskFull + IF (node_filesystem_size{device="/dev/xvdb"} - node_filesystem_free{device="/dev/xvdb"}) / 
node_filesystem_size{device="/dev/xvdb"} * 100 > 90 + FOR 30m + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock /docker disk 90% host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}", + description = "Playbook here: https://github.com/CodeNow/devops-scripts/wiki/server-out-of-disk" + } + +ALERT DockRootDiskFull + IF (node_filesystem_size{device="/dev/xvda1"} - node_filesystem_free{device="/dev/xvda1"}) / node_filesystem_size{device="/dev/xvda1"} * 100 > 90 + FOR 5m + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock root disk 90% host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}", + description = "Playbook here: https://github.com/CodeNow/devops-scripts/wiki/server-out-of-disk" + } + +ALERT HookDockOutOfRam + IF (node_memory_MemFree + node_memory_Buffers + node_memory_Cached) < 150000000 + FOR 5m + LABELS { + reportTo = "drake", + type = "memory_exhausted" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock out of ram host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}", + description = "(hook) Dock out of ram host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}" + } + +ALERT DockOutOfRam + IF (node_memory_MemFree + node_memory_Buffers + node_memory_Cached) < 130000000 + FOR 30m + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock out of ram host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}", + description = "unhealthy dock {{ $labels.hostIp } using dock-cli and message slack #customer channel with org={{ $labels.githubOrgId }}" + } + +ALERT DockHighLoad + IF node_load15 > 90 + FOR 30m + LABELS { + reportTo = "pagerduty" + } + ANNOTATIONS { + summary = "({{ $labels.env }}) Dock is experiencing high load host={{ $labels.hostIp }} org={{ $labels.githubOrgId }}", + description = "ssh {{ $labels.hostIp }} into dock make sure it is responsive, if it is not, unhealthy. 
`docks unhealthy -e delta {{ $labels.hostIp }}`" + } diff --git a/ansible/roles/prometheus/tasks/main.yml b/ansible/roles/prometheus/tasks/main.yml new file mode 100644 index 00000000..1730e999 --- /dev/null +++ b/ansible/roles/prometheus/tasks/main.yml @@ -0,0 +1,26 @@ +--- +- name: create prometheus folder + become: true + file: + path: "{{ prometheus_root }}" + state: directory + +- name: copy prometheus config to server + tags: [ deploy ] + become: true + template: + src=prometheus.yml + dest={{ prometheus_root }}/prometheus.yml + owner=ubuntu + group=ubuntu + mode=0700 + +- name: copy alert config to server + tags: [ deploy ] + become: true + copy: + src=alerts.conf + dest={{ prometheus_root }}/alerts.conf + owner=ubuntu + group=ubuntu + mode=0700 diff --git a/ansible/roles/prometheus/templates/prometheus.yml b/ansible/roles/prometheus/templates/prometheus.yml new file mode 100644 index 00000000..84209776 --- /dev/null +++ b/ansible/roles/prometheus/templates/prometheus.yml @@ -0,0 +1,80 @@ +# my global config +global: + scrape_interval: 1m + evaluation_interval: 1m + +rule_files: +- "alerts.conf" + +scrape_configs: +# monitor self to get metrics for prometheus +- job_name: prometheus + metrics_path: /metrics + static_configs: + - targets: [ 'localhost:{{ prometheus_port }}' ] + + +# pulls server list from ec2 and drops all servers that are not in env or a dock +- job_name: container_info + scrape_interval: 30m + # keys to access this region and port of prom + ec2_sd_configs: + - region: us-west-2 + access_key: {{ prometheus_aws_access_key }} + secret_key: {{ prometheus_aws_secret_key }} + port: {{ cadvisor_port }} + + # drop all servers not in this env and not a dock + relabel_configs: + - source_labels: [__meta_ec2_tag_aws_autoscaling_groupName] + regex: {{ env }}-asg-dock-pool + action: drop + + - source_labels: [__meta_ec2_tag_env] + regex: {{ node_env }} + action: keep + + - source_labels: [__meta_ec2_tag_role] + regex: dock + action: keep + + - source_labels: [__meta_ec2_tag_org] + target_label: githubOrgId + + - source_labels: [__meta_ec2_private_ip] + target_label: hostIp + + - source_labels: [__meta_ec2_tag_env] + target_label: env + +# pulls server list from ec2 and drops all servers that are not production gamma or a dock +- job_name: server_info + # keys to access this region and port of prom + ec2_sd_configs: + - region: us-west-2 + access_key: {{ prometheus_aws_access_key }} + secret_key: {{ prometheus_aws_secret_key }} + port: {{ node_exporter_port }} + + # drop all servers not in this env and not a dock + relabel_configs: + - source_labels: [__meta_ec2_tag_aws_autoscaling_groupName] + regex: {{ env }}-asg-dock-pool + action: drop + + - source_labels: [__meta_ec2_tag_env] + regex: {{ node_env }} + action: keep + + - source_labels: [__meta_ec2_tag_role] + regex: dock + action: keep + + - source_labels: [__meta_ec2_tag_org] + target_label: githubOrgId + + - source_labels: [__meta_ec2_private_ip] + target_label: hostIp + + - source_labels: [__meta_ec2_tag_env] + target_label: env diff --git a/ansible/roles/rabbitmq/tasks/main.yml b/ansible/roles/rabbitmq/tasks/main.yml new file mode 100644 index 00000000..ee2473a3 --- /dev/null +++ b/ansible/roles/rabbitmq/tasks/main.yml @@ -0,0 +1,14 @@ +--- +# commands to build an image +- name: create build folder + become: true + file: + path="{{ config_path }}" + state=directory + +- name: copy dockerfile to build folder + become: true + template: + src="rabbitmq.config" + dest="{{ config_path }}" + tags: 'genConfig' diff --git 
a/ansible/roles/rabbitmq/templates/rabbitmq.config b/ansible/roles/rabbitmq/templates/rabbitmq.config new file mode 100644 index 00000000..a1802f4d --- /dev/null +++ b/ansible/roles/rabbitmq/templates/rabbitmq.config @@ -0,0 +1,36 @@ +%% -*- mode: erlang -*- +%% ---------------------------------------------------------------------------- +%% RabbitMQ Sample Configuration File. +%% +%% See http://www.rabbitmq.com/configure.html for details. +%% ---------------------------------------------------------------------------- +[ +{rabbit, + [ + %% + %% Default User / VHost + %% ==================== + %% + + %% On first start RabbitMQ will create a vhost and a user. These + %% config items control what gets created. See + %% http://www.rabbitmq.com/access-control.html for further + %% information about vhosts and access control. + %% + {default_vhost, <<"/">>}, + {default_user, <<"{{ rabbit_username }}">>}, + {default_pass, <<"{{ rabbit_password }}">>}, + {default_permissions, [<<".*">>, <<".*">>, <<".*">>]} + + ]}, + {rabbitmq_management, + [%% Pre-Load schema definitions from the following JSON file. See + %% http://www.rabbitmq.com/management.html#load-definitions + %% + %% {load_definitions, "/path/to/schema.json"}, + + %% Log all requests to the management HTTP API to a file. + %% + {http_log_dir, "{{ app_log_dir }}/access.log"} + ]} +]. diff --git a/ansible/roles/redis-tls/meta/main.yml b/ansible/roles/redis-tls/meta/main.yml new file mode 100644 index 00000000..d7bd3971 --- /dev/null +++ b/ansible/roles/redis-tls/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- { role: tls-server-cert } diff --git a/ansible/roles/redis-tls/tasks/main.yml b/ansible/roles/redis-tls/tasks/main.yml new file mode 100644 index 00000000..7f284252 --- /dev/null +++ b/ansible/roles/redis-tls/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: make folder for certificate + become: true + file: + dest: /opt/redis-tls + state: directory + +- name: create private key + become: true + copy: + dest: /opt/redis-tls/stunnel.pem + content: "{{ new_certs.data.private_key }}\n{{ new_certs.data.certificate }}\n" + owner: root + group: root + mode: 0400 diff --git a/ansible/roles/redis_key/README.md b/ansible/roles/redis_key/README.md new file mode 100644 index 00000000..d1c259d0 --- /dev/null +++ b/ansible/roles/redis_key/README.md @@ -0,0 +1,10 @@ +Role Name +======== + +Ansible Role to setup redis key + +Author Information +------------------ + +# anandkumarpatel +### # diff --git a/ansible/roles/redis_key/meta/main.yml b/ansible/roles/redis_key/meta/main.yml new file mode 100644 index 00000000..4a1f6483 --- /dev/null +++ b/ansible/roles/redis_key/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: docker } diff --git a/ansible/roles/redis_key/tasks/main.yml b/ansible/roles/redis_key/tasks/main.yml new file mode 100644 index 00000000..65819c6b --- /dev/null +++ b/ansible/roles/redis_key/tasks/main.yml @@ -0,0 +1,11 @@ +--- +- name: check keys + become: yes + command: docker run --rm redis redis-cli -h {{ redis_host_address }} LLEN {{ redis_key }} + register: key_length + changed_when: key_length.stdout == "0" + +- name: create key if not exist + when: key_length.changed + become: yes + command: docker run --rm redis redis-cli -h {{ redis_host_address }} RPUSH {{ redis_key }} {{ name }} {{ name }} diff --git a/ansible/roles/runnable-domain-proxy/tasks/main.yml b/ansible/roles/runnable-domain-proxy/tasks/main.yml new file mode 100644 index 00000000..73dd73f4 --- /dev/null +++ 
b/ansible/roles/runnable-domain-proxy/tasks/main.yml @@ -0,0 +1,81 @@ +--- +- name: make sure cert directory is in place + tags: [ configure_proxy, certs ] + become: true + file: + dest: /etc/ssl/certs/{{ domain }} + state: directory + +- name: put certs in place + tags: [ configure_proxy, certs ] + become: true + register: add_certs + copy: + src: "{{ domain }}/{{ item }}" + dest: /etc/ssl/certs/{{ domain }}/{{ item }} + mode: 0400 + owner: root + group: root + with_items: + - ca.pem + - key.pem + - cert.pem + +- name: create chained cert + tags: [ configure_proxy, certs ] + become: true + when: add_certs.changed + shell: > + cat + /etc/ssl/certs/{{ domain }}/cert.pem + /etc/ssl/certs/{{ domain }}/ca.pem + > + /etc/ssl/certs/{{ domain }}/chained.pem + +- name: create dhparam.pem + tags: [ configure_proxy, certs ] + when: add_certs.changed + become: yes + command: openssl dhparam -out /etc/ssl/certs/{{ domain }}/dhparam.pem 2048 + +- name: make sure nginx directory is in place + tags: [ configure_proxy, configure_files ] + become: true + file: + dest: /etc/nginx + state: directory + +- name: put nginx configuration in place + tags: [ configure_proxy, configure_files ] + become: yes + template: + src: proxy-nginx.conf + dest: /etc/nginx/nginx.conf + +- name: assert nginx template directory + tags: [ configure_proxy, configure_files ] + become: yes + file: + state: directory + dest: /etc/nginx/template + +- name: assert nginx other-sites-enabled directory + tags: [ configure_proxy, configure_files ] + become: yes + file: + state: directory + dest: /etc/nginx/other-sites-enabled + +- name: put mixpanel template in place + tags: [ configure_proxy, configure_files ] + become: yes + template: + src: mixpanel.tmpl + dest: /etc/nginx/other-sites-enabled/mixpanel.conf + +- name: put api template in place + tags: [ configure_proxy, configure_files ] + become: yes + template: + src: api.tmpl + dest: /etc/nginx/template/api-{{ domain }}.tmpl diff --git a/ansible/roles/runnable-domain-proxy/templates/api.tmpl b/ansible/roles/runnable-domain-proxy/templates/api.tmpl new file mode 100644 index 00000000..ad28e399 --- /dev/null +++ b/ansible/roles/runnable-domain-proxy/templates/api.tmpl @@ -0,0 +1,70 @@ +map $http_upgrade $connection_upgrade { + default upgrade; + '' close; +} + +upstream {{ proxy_service_name }} { + {{ '{{' }}range service "{{ proxy_service_name }}"{{ '}}' }} server {{ '{{' }}.Address{{ '}}' }}:{{ '{{' }}.Port{{ '}}' }} max_fails=0 fail_timeout=1s; + {{ '{{' }}end{{ '}}' }} +} + +server { + listen 80; + client_max_body_size 5g; + server_name {{ domain }}; + access_log /var/log/nginx/{{ proxy_service_name }}.access.log; + + location / { + proxy_pass http://{{ proxy_service_name }}; + proxy_http_version 1.1; + proxy_set_header upgrade $http_upgrade; + proxy_set_header connection $connection_upgrade; + + proxy_set_header Host $http_host; + proxy_set_header x-forwarded-host $http_host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol $scheme; + proxy_set_header x-forwarded-proto $scheme; + } +} + +server { + listen 443 ssl; + client_max_body_size 5g; + server_name {{ domain }}; + access_log /var/log/nginx/{{ proxy_service_name }}.ssl.access.log; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ domain }}/chained.pem; + ssl_certificate_key /etc/ssl/certs/{{ domain }}/key.pem; + ssl_trusted_certificate /etc/ssl/certs/{{ domain }}/ca.pem; + ssl_dhparam /etc/ssl/certs/{{ domain }}/dhparam.pem; + + 
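# chained.pem and dhparam.pem are produced by this role's tasks above (cert.pem + ca.pem concatenation, openssl dhparam 2048) +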
ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + + ssl_stapling on; + ssl_stapling_verify on; + resolver 8.8.8.8 8.8.4.4 valid=300s; + resolver_timeout 5s; + + location / { + proxy_pass http://{{ proxy_service_name }}; + proxy_http_version 1.1; + proxy_set_header upgrade $http_upgrade; + proxy_set_header connection $connection_upgrade; + + + proxy_set_header Host $http_host; + proxy_set_header x-forwarded-host $http_host; + proxy_set_header x-real-ip $remote_addr; + proxy_set_header x-forwarded-for $proxy_add_x_forwarded_for; + proxy_set_header x-forwarded-protocol $scheme; + proxy_set_header x-forwarded-proto $scheme; + } +} diff --git a/ansible/roles/runnable-domain-proxy/templates/mixpanel.tmpl b/ansible/roles/runnable-domain-proxy/templates/mixpanel.tmpl new file mode 100644 index 00000000..016097c1 --- /dev/null +++ b/ansible/roles/runnable-domain-proxy/templates/mixpanel.tmpl @@ -0,0 +1,19 @@ +server { + listen 80; + listen [::]:80; + server_name mixpanel.{{ domain }}; + return 301 https://$server_name$request_uri; +} + +server { + listen 443 ssl; + server_name mixpanel.{{ domain }}; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ domain }}/chained.pem; + ssl_certificate_key /etc/ssl/certs/{{ domain }}/key.pem; + + location / { + proxy_pass https://api.mixpanel.com; + } +} diff --git a/ansible/roles/runnable-domain-proxy/templates/proxy-nginx.conf b/ansible/roles/runnable-domain-proxy/templates/proxy-nginx.conf new file mode 100644 index 00000000..95db9226 --- /dev/null +++ b/ansible/roles/runnable-domain-proxy/templates/proxy-nginx.conf @@ -0,0 +1,29 @@ +user www-data; +worker_processes 4; +pid /run/nginx.pid; + +events { + worker_connections 5000; +} + +http { + ## + # Basic Settings + ## + tcp_nodelay on; + keepalive_timeout 65; + server_tokens off; + + ## + # Logging Settings + ## + + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + ## + # Virtual Host Configs + ## + include /etc/nginx/sites-enabled/*; + include /etc/nginx/other-sites-enabled/*; +} diff --git a/ansible/roles/service_node/tasks/main.yml b/ansible/roles/service_node/tasks/main.yml new file mode 100644 index 00000000..1172a255 --- /dev/null +++ b/ansible/roles/service_node/tasks/main.yml @@ -0,0 +1,97 @@ +--- +#- name: Install service-tier node and utilities +# become: true +# script: nodeInstall.sh +- name: "remove ax25-node" + become: yes + apt: + name=node + force=yes + purge=yes + state=absent + +- stat: path=/usr/bin/node + register: bin_node + +- stat: path=/usr/bin/sbin + register: sbin_node + +- name: "remove any linked node in /usr/bin" + become: yes + file: + path=/usr/bin/node + state=absent + when: bin_node.stat.islnk is defined and bin_node.stat.islnk + +- name: "remove any linked node in /usr/sbin" + become: yes + file: + path=/usr/sbin/node + state=absent + when: sbin_node.stat.islnk is defined and sbin_node.stat.islnk + +- name: "download node 4.x dpkg update script" + get_url: + url=https://deb.nodesource.com/setup_4.x + dest=/tmp/setup_4.x + +- name: "update package repo for node 4.x" + become: yes + command: /usr/bin/env bash /tmp/setup_4.x + +- name: "clean up node 4.x package repo updater" + file: + path=/tmp/setup_4.x + state=absent + +- name: "install node 4.x" + become: yes + apt: + name=nodejs + state=latest + +- name: "link nodejs to node" + become: yes + file: + src=/usr/bin/nodejs + dest=/usr/bin/node + 
owner=root + group=root + state=link + +- name: "install build-essential" + become: yes + apt: + name: build-essential + state: latest + +- name: "install jq" + become: yes + apt: + name=jq + state=latest + +- name: "download nvm installer" + get_url: + url=https://raw.githubusercontent.com/creationix/nvm/v0.31.0/install.sh + dest=/tmp/install.sh + +- name: "install nvm" + command: /usr/bin/env bash /tmp/install.sh + +- name: "clean up nvm installer" + file: + path=/tmp/install.sh + state=absent + +- name: "install bunyan globally" + become: yes + npm: + name=bunyan + global=yes + +- name: "install json globally" + become: yes + npm: + name=json + global=yes diff --git a/ansible/roles/ssh-keys/README.md b/ansible/roles/ssh-keys/README.md new file mode 100644 index 00000000..56049f55 --- /dev/null +++ b/ansible/roles/ssh-keys/README.md @@ -0,0 +1,3 @@ +# SSH-KEYS + +This is fun. In the `vars` file, you can add groups that the user is added to (use comma seperated values) diff --git a/ansible/roles/ssh-keys/tasks/main.yml b/ansible/roles/ssh-keys/tasks/main.yml new file mode 100644 index 00000000..cb39a7f5 --- /dev/null +++ b/ansible/roles/ssh-keys/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- include_vars: users.yml + +- name: ensure runnable admin group exists + become: true + group: + name={{ runnable_admin_group }} + state=present + +- name: ensure 'sudoers' has runnable admin group + become: true + lineinfile: " + dest=/etc/sudoers + regexp='^%{{ runnable_admin_group }}' + line='%{{ runnable_admin_group }} ALL=(ALL) NOPASSWD: ALL' + state=present + " + +- name: make users + become: true + with_dict: "{{ users }}" + user: + name={{ item.key }} + append=yes + groups={{ item.value.groups | default("") }} + shell=/bin/bash + state=present + +- name: "set user's keys" + become: true + with_dict: "{{ users }}" + authorized_key: + exclusive=yes + user={{ item.key }} + key="{{ item.value.pub_key }}" + +- name: remove users + become: true + with_dict: "{{ delete }}" + user: + name={{ item.key }} + state="absent" diff --git a/ansible/roles/ssh-keys/vars/users.yml b/ansible/roles/ssh-keys/vars/users.yml new file mode 100644 index 00000000..13230c3b --- /dev/null +++ b/ansible/roles/ssh-keys/vars/users.yml @@ -0,0 +1,24 @@ +--- +runnable_admin_group: runnadmin + +users: + bkendall: + name: Bryan Kendall + groups: runnadmin + pub_key: > + ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC40ubXFxuA9VmslDPtVSlzjtPNDbq5hcun5Td9Znjt7Lyk8dw2DQrM4wbzbB8dqn+J9yWJGv3xOj/043tpam69rf77VxZOrzNBL3fe9S+1LdQ9c8VBHZDYvSrQKdmAPO61RheCA6P96fUW4MgxsmTovXbF4Bl9RMw1vLE5xJaagtiF3L5LU3SpMt56oPrkUKP9lglVLymjqvguFTJBEfqzBJflxx0mMjQ6dX2HXA8iUTI3vZ6Q3Ti2vask90qgK5fREckkx6IhDgVeoeD/IOJNPYWkm7CgSgvrzkBxKLNRl7MZx85b524LhjVjVOEyfEIg9jDsEejtwVanOY7gdQ/vKmxXxAGvQJKEXBwqBSFI4zZnfJQ6D5f95Isxz8PCIqtnJScMmIKmTo7vSFWGhEWT8vfon67Y5nzNuNLrnW9cQIIZ60jeG2NesPYnsTCMRCS0lxRhb1nJdW1xtoeS7mnc8SHAyFDpKKarE8WXsi1XAfUTaSTgZ9BuAxMynvxUgqN7Ru9DHJ05QNu/ZNAgtvLCy5lvGjk9+6wuXy2gU27fvt6XIdWC064y/fURpq/qSd+irSNJ3Nk37X2rOJuRN9HlscxUtfbJTnvtIo026MXCn65xOh+Vj4Dv1ZugJs0mLyIK2BBi5SSHMBkiUOi4MOqEfIND604pXnG/yWsK9c33Fw== bryan@init.me + + anand: + name: Anandkumar Patel + groups: runnadmin + pub_key: > + ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCv3cxgdayF7qF8vuGUMt5rWIfaTd3sqOhaK82CIe/aTSlyIuXqGKhTcHWzttAvTlfLFC5qXBJBScZPhXYE7t0YCxaDrmQvuhJ40fu8ZpROEmY9T7WzgBx1uB0mmqPXCrX1oyuUF2n4YVcG5dF1Y3p9d/1ZFCcwKEh8Pi3H4LMRHaph72yNFJUit0Bp8b3sTvQQZ62g9ztCzL5tgaY0jc8vnXGssOGm/TuNMPaXydDVGSK1JDqw0xUhbofZ4/I3JFEhZ9mlPlhu4f1Ts+uX6DnXExgfOM/uN7XuaK6VJBggJRielRlGWxztkzT2owtJsJE9Ave5cQfifjCwYB3TRLQx anandkumarpatel@gmail.com + + kahn: + name: Ryan Kahn + groups: runnadmin + pub_key: > + ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCtaCZTriyt5xsjIfKMqwimK+IgtSe8AokOcMnaAMMZ6LB9zeaq42/oWe0J6GqpqBAJ2EPFQkmdT5qVKbf5ragGhpNkkFFLzziMibeDXauc9aIryox2KoZBv8E8g3zPmV7caOrrLqz7NR9Gf4P3xYv5drUNUWsrDaUD0E022PEJHwmTa86PCcHmguI56KyicNi2MRFa4J+E4yIHcIZ3fHMXtvh7DxQnmRyDuZFjGb+Xl00HbyPzSBychq92cN49S3XupEPH7xtEkmar2DlOZaW3wDXgQ2VD+2ax11hefV/XgfI/ToXCNnR9gf+F5sLOpCrqszM57HoA0GWAkiNwrjmD kahn@runnable.com + +delete: + casey: diff --git a/ansible/roles/tls-client-cert/meta/main.yml b/ansible/roles/tls-client-cert/meta/main.yml new file mode 100644 index 00000000..397baf22 --- /dev/null +++ b/ansible/roles/tls-client-cert/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { role: local-vault } + - { role: tls-server-ca } diff --git a/ansible/roles/tls-client-cert/tasks/main.yml b/ansible/roles/tls-client-cert/tasks/main.yml new file mode 100644 index 00000000..283dc01c --- /dev/null +++ b/ansible/roles/tls-client-cert/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: check for role for client + local_action: + command vault read --format=json {{ node_env }}/roles/{{ tls_service }}-client + ignore_errors: True + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_role + changed_when: raw_role.rc != 0 + +- name: create role for client + when: raw_role.rc != 0 + local_action: + command vault write --format=json {{ node_env }}/roles/{{ tls_service }}-client allow_any_name=true ttl=87600h + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + +- name: create certificate for client + when: save_on_certs is undefined + local_action: + command vault write --format=json {{ node_env }}/issue/{{ tls_service }}-client common_name="{{ name }}.client.{{ tls_service }}.runnable" ttl=8760h + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_new_certs + +- when: raw_new_certs is defined and save_on_certs is undefined + set_fact: + new_client_certs: "{{ raw_new_certs.stdout | from_json }}" + +- when: save_on_certs is defined + set_fact: + new_client_certs: + data: + certificate: fake certificate + issuing_ca: fake CA + private_key: fake private key diff --git a/ansible/roles/tls-client/meta/main.yml b/ansible/roles/tls-client/meta/main.yml new file mode 100644 index 00000000..c162aab8 --- /dev/null +++ b/ansible/roles/tls-client/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: tls-client-cert } diff --git a/ansible/roles/tls-client/tasks/main.yml b/ansible/roles/tls-client/tasks/main.yml new file mode 100644 index 00000000..4de959a0 --- /dev/null +++ b/ansible/roles/tls-client/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: make directory for client certificates + tags: [ tls_client ] + become: yes + file: + dest: /opt/ssl/{{ tls_service }}-client + state: directory + +- name: put client CA in place for service + tags: [ tls_client ] + become: yes + copy: + dest: /opt/ssl/{{ tls_service }}-client/ca.pem + content: "{{ new_client_certs.data.issuing_ca }}" + mode: 0400 + +- name: put client certificate in place for service 
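+ # cert.pem below pairs with key.pem from the same vault-issued client certificate (tls-client-cert dependency)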
+ tags: [ tls_client ] + become: yes + copy: + dest: /opt/ssl/{{ tls_service }}-client/cert.pem + content: "{{ new_client_certs.data.certificate }}" + mode: 0400 + +- name: put client private key in place for service + tags: [ tls_client ] + become: yes + copy: + dest: /opt/ssl/{{ tls_service }}-client/key.pem + content: "{{ new_client_certs.data.private_key }}" + mode: 0400 diff --git a/ansible/roles/tls-server-ca/meta/main.yml b/ansible/roles/tls-server-ca/meta/main.yml new file mode 100644 index 00000000..cb0b2731 --- /dev/null +++ b/ansible/roles/tls-server-ca/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - { role: local-vault } diff --git a/ansible/roles/tls-server-ca/tasks/main.yml b/ansible/roles/tls-server-ca/tasks/main.yml new file mode 100644 index 00000000..28f6c7ea --- /dev/null +++ b/ansible/roles/tls-server-ca/tasks/main.yml @@ -0,0 +1,75 @@ +--- +- name: check for pki endpoint for environment + local_action: + command vault mounts + register: mounts + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + changed_when: "'{{ node_env }}' not in mounts.stdout" + +- name: mount endpoint for environment + when: mounts.changed + local_action: + command vault mount --path={{ node_env }} pki + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + +- name: tune endpoint for environment + when: mounts.changed + local_action: + command vault mount-tune --max-lease-ttl=87600h {{ node_env }} + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + +- name: read root CA certificate + local_action: + command vault read --format=json {{ node_env }}/cert/ca + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_ca + +- set_fact: + ca_data: "{{ raw_ca.stdout | from_json }}" + +- name: set ca variable + set_fact: + ca: "{{ ca_data.data.certificate }}" + changed_when: "ca_data.data.certificate == ''" + +- name: generate root CA certificate + when: ca == "" + local_action: + command vault write --format=json {{ node_env }}/root/generate/internal common_name="runnable" ttl="87600h" + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_new_ca + +- when: ca == "" + set_fact: + ca_data: "{{ raw_new_ca.stdout | from_json }}" + +- when: ca == "" + set_fact: + ca: "{{ ca_data.data.certificate }}" + +- name: make sure CA directory is in place + when: ca_dest is defined + become: true + file: + dest: "{{ ca_dest | dirname }}" + state: directory + +- name: put CA in place + when: ca_dest is defined + become: true + copy: + content: "{{ ca }}" + dest: "{{ ca_dest }}" + mode: 0400 + owner: root + group: root diff --git a/ansible/roles/tls-server-cert/meta/main.yml b/ansible/roles/tls-server-cert/meta/main.yml new file mode 100644 index 00000000..397baf22 --- /dev/null +++ b/ansible/roles/tls-server-cert/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { role: local-vault } + - { role: tls-server-ca } diff --git a/ansible/roles/tls-server-cert/tasks/main.yml b/ansible/roles/tls-server-cert/tasks/main.yml new file mode 100644 index 00000000..dd6e4c70 --- /dev/null +++ b/ansible/roles/tls-server-cert/tasks/main.yml @@ -0,0 +1,44 @@ +--- +- name: check for role for server + run_once: true + local_action: + command vault read --format=json {{ node_env }}/roles/server-{{ name }} + ignore_errors: True + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: 
raw_role + changed_when: raw_role.rc != 0 + +- name: create role + when: raw_role.rc != 0 + run_once: true + local_action: + command vault write --format=json {{ node_env }}/roles/server-{{ name }} allow_any_name=true ttl=87600h + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + +- name: create certificate for server + when: save_on_certs is undefined + run_once: true + local_action: + command vault write --format=json {{ node_env }}/issue/server-{{ name }} common_name="server.{{ name }}.runnable" alt_names="{{ ansible_fqdn }}" ip_sans="{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" ttl=8760h + environment: + VAULT_ADDR: "{{ vault_addr }}" + VAULT_TOKEN: "{{ vault_root_token }}" + register: raw_new_certs + +- when: raw_new_certs is defined and save_on_certs is undefined + run_once: true + set_fact: + new_certs: "{{ raw_new_certs.stdout | from_json }}" + +- when: save_on_certs is defined + run_once: true + set_fact: + new_certs: + data: + certificate: fake certificate + issuing_ca: fake CA + private_key: fake private key diff --git a/ansible/roles/ulimits/files/limits.conf b/ansible/roles/ulimits/files/limits.conf new file mode 100644 index 00000000..a51d3dbb --- /dev/null +++ b/ansible/roles/ulimits/files/limits.conf @@ -0,0 +1,56 @@ +# /etc/security/limits.conf +# +#Each line describes a limit for a user in the form: +# +# +# +#Where: +# can be: +# - a user name +# - a group name, with @group syntax +# - the wildcard *, for default entry +# - the wildcard %, can be also used with %group syntax, +# for maxlogin limit +# - NOTE: group and wildcard limits are not applied to root. +# To apply a limit to the root user, must be +# the literal username root. +# +# can have the two values: +# - "soft" for enforcing the soft limits +# - "hard" for enforcing hard limits +# +# can be one of the following: +# - core - limits the core file size (KB) +# - data - max data size (KB) +# - fsize - maximum filesize (KB) +# - memlock - max locked-in-memory address space (KB) +# - nofile - max number of open files +# - rss - max resident set size (KB) +# - stack - max stack size (KB) +# - cpu - max CPU time (MIN) +# - nproc - max number of processes +# - as - address space limit (KB) +# - maxlogins - max number of logins for this user +# - maxsyslogins - max number of logins on the system +# - priority - the priority to run user process with +# - locks - max number of file locks the user can hold +# - sigpending - max number of pending signals +# - msgqueue - max memory used by POSIX message queues (bytes) +# - nice - max nice priority allowed to raise to values: [-20, 19] +# - rtprio - max realtime priority +# - chroot - change root to directory (Debian-specific) +# +# +# + +root soft nofile 1048576 +root hard nofile 1048576 +* soft nofile 1048576 +* hard nofile 1048576 +root soft nproc 1048576 +root hard nproc 1048576 +* soft nproc 1048576 +* hard nproc 1048576 + +# End of file + diff --git a/ansible/roles/ulimits/files/sysctl.conf b/ansible/roles/ulimits/files/sysctl.conf new file mode 100644 index 00000000..65541618 --- /dev/null +++ b/ansible/roles/ulimits/files/sysctl.conf @@ -0,0 +1,5 @@ +fs.file-max = 100000000 +fs.inotify.max_user_watches = 524288 +fs.inotify.max_queued_events = 65536 +fs.inotify.max_user_instances = 8192 +net.ipv4.ip_local_port_range = 64535 65535 diff --git a/ansible/roles/ulimits/handlers/main.yml b/ansible/roles/ulimits/handlers/main.yml new file mode 100644 index 00000000..d024f257 --- /dev/null +++ 
b/ansible/roles/ulimits/handlers/main.yml @@ -0,0 +1,9 @@ +- name: reboot server + become: true + shell: + reboot + +- name: load kernel parameters + become: true + shell: + sysctl -p diff --git a/ansible/roles/ulimits/tasks/main.yml b/ansible/roles/ulimits/tasks/main.yml new file mode 100644 index 00000000..22307ca4 --- /dev/null +++ b/ansible/roles/ulimits/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: update sysctl.conf + become: true + copy: + src=sysctl.conf + dest=/etc/sysctl.conf + notify: load kernel parameters + +- name: update limits.conf + become: true + copy: + src=limits.conf + dest=/etc/security/limits.conf + +- name: force reboot server + become: true + command: echo rebooting + when: force_restart is defined + notify: reboot server diff --git a/ansible/roles/user-content-pixel/tasks/main.yml b/ansible/roles/user-content-pixel/tasks/main.yml new file mode 100644 index 00000000..e0c92b69 --- /dev/null +++ b/ansible/roles/user-content-pixel/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: assert nginx config directory + tags: [ deploy ] + become: yes + file: + state: directory + dest: /etc/nginx + +- name: assert nginx sites-available directory + tags: [ deploy ] + become: yes + file: + state: directory + dest: /etc/nginx/sites-available + +- name: assert nginx sites-enable directory + tags: [ deploy ] + become: yes + file: + state: directory + dest: /etc/nginx/sites-enable + +- name: put configuration in place + tags: [ deploy ] + become: yes + template: + src: "{{ item }}" + dest: /etc/nginx/sites-available/{{ item }} + with_items: + - 90-user-content-pixel.conf + +- name: link configuration to enable + tags: [ deploy ] + become: yes + file: + state: link + dest: /etc/nginx/sites-enabled/{{ item }} + src: /etc/nginx/sites-available/{{ item }} + with_items: + - 90-user-content-pixel.conf + +- name: reload nginx + tags: [ deploy ] + become: yes + shell: > + docker ps | + awk '/nginx/{ print $1 }' | + xargs -n 1 docker kill --signal SIGHUP + args: + executable: /bin/bash diff --git a/ansible/roles/user-content-pixel/templates/90-user-content-pixel.conf b/ansible/roles/user-content-pixel/templates/90-user-content-pixel.conf new file mode 100644 index 00000000..864e9848 --- /dev/null +++ b/ansible/roles/user-content-pixel/templates/90-user-content-pixel.conf @@ -0,0 +1,40 @@ +server { + listen 80; + server_name blue.{{ user_content_domain }}; + location / { + return 404; + } +} + +server { + listen 443 ssl; + server_name blue.{{ user_content_domain }}; + gzip off; + + ssl on; + ssl_certificate /etc/ssl/certs/{{ user_content_domain }}/{{ user_content_domain }}.chained.crt; + ssl_certificate_key /etc/ssl/private/{{ user_content_domain }}.key; + ssl_trusted_certificate /etc/ssl/certs/{{ user_content_domain }}/ca.pem; + + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + ssl_dhparam /etc/nginx/ssl/dhparam.pem; + + ssl_stapling on; + ssl_stapling_verify on; + resolver 8.8.8.8 8.8.4.4 valid=300s; + resolver_timeout 5s; + + location = /pixel.gif { + add_header Set-Cookie "isModerating=1; Domain=.{{ user_content_domain }}; Path=/; HttpOnly;"; + empty_gif; + } + + location / { + return 404; + } +} diff --git a/ansible/roles/vault/meta/main.yml b/ansible/roles/vault/meta/main.yml new file mode 100644 index 00000000..89d64d2e --- /dev/null +++ b/ansible/roles/vault/meta/main.yml @@ -0,0 +1,4 @@ +--- +dependencies: + - { role: tls-server-cert } + 
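# the consul client certificate issued here is what vault.hcl references under /opt/vault/client-consul +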
- { role: tls-client-cert, tls_service: consul } diff --git a/ansible/roles/vault/tasks/main.yml b/ansible/roles/vault/tasks/main.yml new file mode 100644 index 00000000..df5a4825 --- /dev/null +++ b/ansible/roles/vault/tasks/main.yml @@ -0,0 +1,60 @@ +--- +- name: create configuration directory + become: yes + file: + path=/opt/runnable/vault + state=directory + recurse=yes + +- name: create consul client certificate directory + become: yes + file: + path=/opt/vault/client-consul + state=directory + recurse=yes + +- name: create vault server tls certificate directory + become: yes + file: + path=/opt/vault/server + state=directory + recurse=yes + +- name: copy vault config + tags: [ deploy ] + become: true + template: + src=vault.hcl + dest=/opt/runnable/vault/vault.hcl + +- name: put consul certificates in place + become: yes + copy: + content: "{{ item.content }}" + dest: /opt/vault/client-consul/{{ item.file }} + mode: 0400 + owner: root + group: root + with_items: + - file: ca.pem + content: "{{ new_client_certs.data.issuing_ca }}" + - file: cert.pem + content: "{{ new_client_certs.data.certificate }}" + - file: key.pem + content: "{{ new_client_certs.data.private_key }}" + +- name: put vault server certificates in place + become: yes + copy: + content: "{{ item.content }}" + dest: /opt/vault/server/{{ item.file }} + mode: 0400 + owner: root + group: root + with_items: + - file: ca.pem + content: "{{ new_certs.data.issuing_ca }}" + - file: cert.pem + content: "{{ new_certs.data.certificate }}" + - file: key.pem + content: "{{ new_certs.data.private_key }}" diff --git a/ansible/roles/vault/templates/vault.hcl b/ansible/roles/vault/templates/vault.hcl new file mode 100644 index 00000000..53da0bf2 --- /dev/null +++ b/ansible/roles/vault/templates/vault.hcl @@ -0,0 +1,23 @@ +backend "consul" { + advertise_addr = "http://{{ ansible_default_ipv4.address }}:8200" + address = "{{ ansible_default_ipv4.address }}:{{ consul_https_port }}" + scheme = "https" + path = "vault" + tls_ca_file = "/opt/vault/client-consul/ca.pem" + tls_cert_file = "/opt/vault/client-consul/cert.pem" + tls_key_file = "/opt/vault/client-consul/key.pem" +} + +listener "tcp" { + address = "0.0.0.0:8200" + tls_disable = 1 +} + +listener "tcp" { + address = "0.0.0.0:8201" + tls_ca_file = "/opt/vault/server/ca.pem" + tls_cert_file = "/opt/vault/server/cert.pem" + tls_key_file = "/opt/vault/server/key.pem" +} + +max_lease_ttl = "8760h" diff --git a/ansible/sauron.yml b/ansible/sauron.yml new file mode 100644 index 00000000..9b54179e --- /dev/null +++ b/ansible/sauron.yml @@ -0,0 +1,15 @@ +--- +- hosts: rabbitmq +- hosts: consul +- hosts: swarm-manager + +- hosts: sauron + vars_files: + - group_vars/alpha-sauron.yml + roles: + - role: notify + rollbar_token: "{{ sauron_rollbar_token }}" + tags: [ notify ] + - { role: docker_client } + - { role: builder, tags: [build] } + - { role: container_kill_start } diff --git a/ansible/sg_configure.yml b/ansible/sg_configure.yml new file mode 100644 index 00000000..bfc1a8e3 --- /dev/null +++ b/ansible/sg_configure.yml @@ -0,0 +1,8 @@ +--- +- hosts: ec2 + connection: local + vars_files: + - "group_vars/ec2_sg.yml" + roles: + - { role: notify, tags: "notify" } + - { role: ec2/sg_configure } diff --git a/ansible/sg_create.yml b/ansible/sg_create.yml new file mode 100644 index 00000000..51d6c2f3 --- /dev/null +++ b/ansible/sg_create.yml @@ -0,0 +1,6 @@ +--- +- hosts: ec2 + vars_files: + - "group_vars/ec2_sg.yml" + roles: + - { role: create_sg } diff --git a/ansible/shiva.yml 
b/ansible/shiva.yml new file mode 100644 index 00000000..7ecc6c7e --- /dev/null +++ b/ansible/shiva.yml @@ -0,0 +1,15 @@ +--- +- hosts: redis +- hosts: rabbitmq +- hosts: consul + +- hosts: shiva + vars_files: + - group_vars/alpha-shiva.yml + roles: + - role: notify + rollbar_token: "{{ shiva_rollbar_token }}" + tags: [ notify ] + - { role: builder, tags: [ build ] } + - { role: tls-server-ca, ca_dest: "{{ redis_ca_cert_path }}" } + - { role: container_kill_start } diff --git a/ansible/site.yml b/ansible/site.yml new file mode 100644 index 00000000..95eacc1d --- /dev/null +++ b/ansible/site.yml @@ -0,0 +1,20 @@ +--- +# deploy entire site + +# start with DB's +# - include: registry.yml +# redis +- include: redis.yml +# mongo +- include: mongo.yml + +# Now do app servers + +# docks +- include: docks.yml +# api +- include: api.yml +# web +- include: web.yml +# hipache +- include: hipache.yml diff --git a/ansible/socket-server-proxy.yml b/ansible/socket-server-proxy.yml new file mode 100644 index 00000000..9e10fc8a --- /dev/null +++ b/ansible/socket-server-proxy.yml @@ -0,0 +1,11 @@ +--- +- hosts: socket-server-proxy + vars_files: + - group_vars/alpha-proxy-socket-server.yml + roles: + - role: datadog + has_dd_integration: yes + + - role: runnable-domain-proxy + + - role: container_restart diff --git a/ansible/socket-server.yml b/ansible/socket-server.yml new file mode 100644 index 00000000..067ef6b8 --- /dev/null +++ b/ansible/socket-server.yml @@ -0,0 +1,30 @@ +--- +- hosts: mongodb +- hosts: navi +- hosts: charon +- hosts: rabbitmq +- hosts: redis +- hosts: swarm-manager +- hosts: consul +- hosts: big-poppa +- hosts: cream +- hosts: socket-server-proxy + +- hosts: socket-server + vars_files: + - group_vars/alpha-api-base.yml + - group_vars/alpha-socket-server.yml + roles: + - role: notify + rollbar_token: "{{ api_socket_server_rollbar_key }}" + tags: [ notify ] + - { role: redis_key, tags: [ setup, redis_key ] } + - { role: builder, tags: [ build ] } + - { role: docker_client } + - { role: tls-client, tls_service: mongodb, tags: [ tls ] } + - { role: datadog, tags: [ datadog ] } + - { role: container_start, number_of_containers: 8 } + - role: nginx-proxied-service + nginx_host: "{{ groups['socket-server-proxy'][0] }}" + target_ip_address: "{{ hostvars[groups['socket-server'][0]]['ansible_default_ipv4']['address'] }}" + templates: [ 01-socket-server.conf ] diff --git a/ansible/stack.yml b/ansible/stack.yml new file mode 100644 index 00000000..0220b254 --- /dev/null +++ b/ansible/stack.yml @@ -0,0 +1,37 @@ +## configure security group policy +- include: sg_configure.yml +# +## Install Datadog Agent +- include: datadog.yml +# +## begin with databases: +- include: consul.yml +- include: vault.yml +#- include: mongo-navi.yml +- include: rabbitmq.yml +- include: redis.yml +#- include: redis-slave.yml +- include: registry.yml +# +## base service tier +#- include: consul-values.yml +- include: detention.yml git_branch="{{ detention_branch }}" -t deploy +- include: eru.yml git_branch="{{ eru_branch }}" -t deploy +- include: hipache.yml +- include: khronos.yml git_branch="{{ khronos_branch }}" -t deploy +- include: link.yml git_branch="{{ link_branch }}" -t deploy +- include: metis.yml git_branch="{{ metis_branch }}" -t deploy +- include: optimus.yml git_branch="{{ optimus_branch }}" -t deploy +- include: palantiri.yml git_branch="{{ palantiri_branch }}" -t deploy +- include: sauron.yml git_branch="{{ sauron_branch }}" -t deploy +- include: shiva.yml git_branch="{{ astral_branch }}" -t deploy +- 
include: swarm-manager.yml +- include: web.yml git_branch="{{ angular_branch }}" -t deploy +# +## secondary services and configuration +- include: api.yml git_branch="{{ api_branch }}" -t deploy +- include: navi.yml git_branch="{{ navi_branch }}" -t deploy +- include: vault-values.yml -e write_values="true" +- include: consul-services.yml +- include: consul-values.yml -e write_values="true" +# run with: `--extra-vars "@current_versions.yml"` diff --git a/ansible/stage-hosts/docks.js b/ansible/stage-hosts/docks.js new file mode 100755 index 00000000..e4b2c7a5 --- /dev/null +++ b/ansible/stage-hosts/docks.js @@ -0,0 +1,75 @@ +#!/usr/bin/env node + +'use strict'; + +var aws = require('aws-sdk'); +var ec2 = new aws.EC2({ + accessKeyId: 'AKIAJ3RCYU6FCULAJP2Q', + secretAccessKey: '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', + region: 'us-west-2' +}); + +var params = { + Filters: [ + // Only fetch instances that are tagged as docks + { + Name: 'tag:role', + Values: ['dock'] + }, + // Only fetch running instances + { + Name: 'instance-state-name', + Values: ['running'] + }, + // Only fetch docks with the tag "env" equal to "staging" + { + Name: 'tag:env', + Values: ['staging'] + } + ] +}; + +ec2.describeInstances(params, function (err, data) { + if (err) { + console.error("An error occurred: ", err); + process.exit(1); + } + + // Get a set of instances from the describe response + var instances = []; + data.Reservations.forEach(function (res) { + res.Instances.forEach(function (instance) { + instances.push(instance); + }); + }); + + // Map the instances to their private ip addresses + // NOTE This will work locally because of the wilcard ssh proxy in the config + var hosts = instances.map(function (instance) { + return instance.PrivateIpAddress; + }); + + var hostVars = {}; + instances.forEach(function (instance) { + for (var i = 0; i < instance.Tags.length; i++) { + if (instance.Tags[i].Key === 'org') { + hostVars[instance.PrivateIpAddress] = { + host_tags: instance.Tags[i].Value + }; + } + } + }); + + // Output the resulting JSON + // NOTE http://docs.ansible.com/ansible/developing_inventory.html + console.log(JSON.stringify( + { + docks: { + hosts: hosts + }, + _meta : { + hostvars : hostVars + } + } + )); +}); diff --git a/ansible/stage-hosts/hosts b/ansible/stage-hosts/hosts new file mode 100644 index 00000000..cdfd5111 --- /dev/null +++ b/ansible/stage-hosts/hosts @@ -0,0 +1,22 @@ +[dock] + +[docks] + +[targets] +localhost ansible_connection=local bastion_name=alpha-bastion + +[redis] +delta-staging-data + +[vault] +delta-staging-data + +[consul] +delta-staging-data + +[stage:children] +dock +docks +redis +vault +consul \ No newline at end of file diff --git a/ansible/stage-hosts/variables b/ansible/stage-hosts/variables new file mode 100644 index 00000000..22de2e30 --- /dev/null +++ b/ansible/stage-hosts/variables @@ -0,0 +1,27 @@ +[stage:vars] +api_hostname=api-staging-codenow.runnableapp.com +api_socket_server_hostname=api-staging-codenow.runnableapp.com +aws_access_key_id=AKIAJ3RCYU6FCULAJP2Q +aws_secret_access_key=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 +consul_host_address=consul-server-staging-codenow.runnableapp.com +detention_hostname=detention-staging-codenow.runnableapp.com +docks_rollbar_key=d1af6567ed0f464fb1d676f38fd31751 +domain=runnable-angular-staging-codenow.runnableapp.com +eru_hostname=admin-staging-codenow.runnableapp.com +fluffy_hostname=fluffy-staging-codenow.runnableapp.com +node_env=staging +optimus_hostname=optimus-staging-codenow.runnableapp.com 
+rabbit_password=h7n972sPLs5tWGCWe6QPSCnaxDWjvnpEFEA9c9mBqauH3f22 +rabbit_username=5WE5wsEQmfw9sLsppJ4CEq9Auea8mPC4kgVt3xYqKxbYHPfP +registry_host=10.8.4.126 +user_content_domain=runnable3.net +vault_auth_token=db6bcf6c-eace-35b2-915a-46aeed706ad6 +vault_token_01=739b5afb7d5aa075153f750a4d0d7697f2b90ef610cc440627ab43ab60ad7bc601 +vault_token_02=7593c543320e82850d15827fb43ae7c553e6e480856a8bfaff658ae3dd75043602 +vault_token_03=e8f75fab3bc960784c7c802318207204e8f31b3bb9f1a615efd90371d05dc97803 + +[vault:vars] +vault_hello_runnable_github_token=88ddc423c2312d02a8bbcaad76dd4c374a30e4af +vault_aws_access_key_id=AKIAJ7R4UIM45KH2WGWQ +vault_aws_secret_key=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 +vault_aws_region=us-east-1 diff --git a/ansible/swarm-cloudwatch-reporter.yml b/ansible/swarm-cloudwatch-reporter.yml new file mode 100644 index 00000000..8d4d26a9 --- /dev/null +++ b/ansible/swarm-cloudwatch-reporter.yml @@ -0,0 +1,33 @@ +--- +- hosts: swarm-manager + vars_files: + - group_vars/alpha-swarm-manager-metrics.yml + roles: + - role: builder + tags: [ build ] + tasks: + - name: run container + tags: test_swarm_stats + when: test_swarm_stats is defined + become: yes + shell: >- + docker run + -e DRY_RUN=true + {{ container_run_opts }} + {{ container_image }}:{{ container_tag }} + {{ container_run_args }} + + - name: put script into cron + tags: [ deploy ] + become: yes + cron: + name: swarm-cloudwatch-reporter + cron_file: 10-swarm-cloudwatch + user: root + state: present + job: >- + docker run + --rm + {{ container_run_opts }} + {{ container_image }}:{{ container_tag }} + {{ container_run_args }} diff --git a/ansible/swarm-daemon.yml b/ansible/swarm-daemon.yml new file mode 100644 index 00000000..bc48e93f --- /dev/null +++ b/ansible/swarm-daemon.yml @@ -0,0 +1,10 @@ +--- +- hosts: consul + +- hosts: "{{ dock | default('docks') }}" + vars_files: + - "group_vars/alpha-swarm-daemon.yml" + roles: + - { role: notify, tags: "notify" } + - { role: container_kill_start } + - { role: consul_value, tags: [consul_value] } diff --git a/ansible/swarm-manager.yml b/ansible/swarm-manager.yml new file mode 100644 index 00000000..9dcca5ea --- /dev/null +++ b/ansible/swarm-manager.yml @@ -0,0 +1,10 @@ +--- +- hosts: consul + +- hosts: swarm-manager + vars_files: + - "group_vars/alpha-swarm-manager.yml" + roles: + - { role: notify, tags: "notify" } + - { role: docker_client, tags: "docker_client" } + - { role: container_kill_start } diff --git a/ansible/user-content-pixel.yml b/ansible/user-content-pixel.yml new file mode 100644 index 00000000..e77c7195 --- /dev/null +++ b/ansible/user-content-pixel.yml @@ -0,0 +1,4 @@ +--- +- hosts: socket-server-proxy + roles: + - { role: user-content-pixel } diff --git a/ansible/users.yml b/ansible/users.yml new file mode 100644 index 00000000..de825365 --- /dev/null +++ b/ansible/users.yml @@ -0,0 +1,4 @@ +--- +- hosts: all + roles: + - { role: ssh-keys, tags: ["ssh-keys"] } diff --git a/ansible/vault-values.yml b/ansible/vault-values.yml new file mode 100644 index 00000000..d856017e --- /dev/null +++ b/ansible/vault-values.yml @@ -0,0 +1,125 @@ +--- +- hosts: vault + vars_files: + - group_vars/alpha-vault.yml + tasks: + - name: make sure httplib2 is installed + run_once: true + become: true + apt: package=python-httplib2 state=present + + - name: put values into vault + run_once: true + when: write_values is defined + uri: + method=PUT + url=http://{{ ansible_default_ipv4.address }}:8200/v1/{{ item.key }} + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + 
body='{{ item.data | to_json }}' + status_code=200,204 + with_items: "{{ vault_seed_values }}" + + - name: check for aws backend in vault + run_once: true + when: write_values is defined + uri: + method=GET + url=http://{{ ansible_default_ipv4.address }}:8200/v1/sys/mounts + HEADER_X-Vault-Token="{{ vault_auth_token }}" + return_content=yes + register: mounts + + - name: mount 1h aws backend in vault + run_once: true + when: write_values is defined and mounts.json['aws_1h/'] is not defined + uri: + method=POST + follow_redirects=all + url=http://{{ ansible_default_ipv4.address }}:8200/v1/sys/mounts/aws_1h + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + status_code=204 + with_items: + - type: "aws" + config: + default_lease_ttl: "3600s" # 1 hour, in seconds + max_lease_ttl: "3600s" # 1 hour, in seconds + + - name: mount 1yr aws backend in vault + run_once: true + when: write_values is defined and mounts.json['aws_1yr/'] is not defined + uri: + method=POST + follow_redirects=all + url=http://{{ ansible_default_ipv4.address }}:8200/v1/sys/mounts/aws_1yr + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + status_code=204 + with_items: + - type: "aws" + config: + default_lease_ttl: "8760h" # 1 year, in hours + max_lease_ttl: "8760h" # 1 year, in hours + + - name: configure 1h aws root credentials + run_once: true + when: (write_values is defined and write_root_creds is defined) or (write_values is defined and mounts.json['aws_1h/'] is not defined) + uri: + method=POST + follow_redirects=all + url=http://{{ ansible_default_ipv4.address }}:8200/v1/aws_1h/config/root + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + status_code=204 + register: creds + with_items: + - access_key: "{{ vault_aws_access_key_id }}" + secret_key: "{{ vault_aws_secret_key }}" + region: "{{ vault_aws_region }}" + + - name: configure 1yr aws root credentials + run_once: true + when: (write_values is defined and write_root_creds is defined) or (write_values is defined and mounts.json['aws_1yr/'] is not defined) + uri: + method=POST + follow_redirects=all + url=http://{{ ansible_default_ipv4.address }}:8200/v1/aws_1yr/config/root + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + status_code=204 + register: creds + with_items: + - access_key: "{{ vault_aws_access_key_id }}" + secret_key: "{{ vault_aws_secret_key }}" + region: "{{ vault_aws_region }}" + + - name: check for the dock-init role + run_once: true + when: write_values is defined + uri: + method=GET + follow_redirects=all + url=http://{{ ansible_default_ipv4.address }}:8200/v1/aws_1h/roles/dock-init + HEADER_X-Vault-Token="{{ vault_auth_token }}" + status_code=200,404 + register: role + + - name: write the dock-init role + run_once: true + when: write_values is defined and role.status == 404 + uri: + method=POST + follow_redirects=all + url=http://{{ ansible_default_ipv4.address }}:8200/v1/aws_1h/roles/dock-init + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json | replace("\\\\", "") }}' + status_code=204 + register: creds + with_items: + - policy: "{{ vault_seed_policy }}" diff --git a/ansible/vault.yml b/ansible/vault.yml new file mode 100644 index 00000000..fe9b3357 --- /dev/null +++ b/ansible/vault.yml @@ -0,0 +1,33 @@ +--- +- hosts: vault + vars_files: + - group_vars/alpha-vault.yml + roles: + - { role: notify, tags: notify } + 
- { role: database } + - { role: vault } + - { role: container_kill_start } + + tasks: + - name: get seal status + tags: [ deploy ] + uri: + method=GET + url=http://{{ ansible_default_ipv4.address }}:8200/v1/sys/seal-status + HEADER_X-Vault-Token="{{ vault_auth_token }}" + return_content=yes + register: seal_status + + - name: unseal vault + tags: [ deploy ] + when: seal_status.json.sealed + uri: + method=PUT + url=http://{{ ansible_default_ipv4.address }}:8200/v1/sys/unseal + HEADER_X-Vault-Token="{{ vault_auth_token }}" + body_format=json + body='{{ item | to_json }}' + with_items: + - key: "{{ vault_token_01 }}" + - key: "{{ vault_token_02 }}" + - key: "{{ vault_token_03 }}" diff --git a/ansible/web.yml b/ansible/web.yml new file mode 100644 index 00000000..f6ec07b3 --- /dev/null +++ b/ansible/web.yml @@ -0,0 +1,11 @@ +--- +- hosts: consul + +- hosts: web + vars_files: + - group_vars/alpha-web.yml + roles: + - role: notify + rollbar_token: "{{ rollbar_web_token }}" + tags: [ notify ] + - { role: builder, tags: "build" } diff --git a/ansible/workers.yml b/ansible/workers.yml new file mode 100644 index 00000000..1b3e203b --- /dev/null +++ b/ansible/workers.yml @@ -0,0 +1,24 @@ +--- +- hosts: mongodb +- hosts: navi +- hosts: charon +- hosts: rabbitmq +- hosts: redis +- hosts: swarm-manager +- hosts: big-poppa +- hosts: cream +- hosts: consul + +- hosts: worker + vars_files: + - group_vars/alpha-api-base.yml + - group_vars/alpha-workers.yml + roles: + - role: notify + rollbar_token: "{{ api_workers_rollbar_key }}" + tags: [ notify ] + - { role: builder, tags: [ build ] } + - { role: docker_client } + - { role: tls-client, tls_service: mongodb, tags: [ tls ] } + - { role: datadog, tags: [ datadog ] } + - { role: container_start, number_of_containers: 4 } diff --git a/deployer/README.md b/deployer/README.md new file mode 100644 index 00000000..14945be0 --- /dev/null +++ b/deployer/README.md @@ -0,0 +1,10 @@ +# Deployer +![Deployer](https://cloud.githubusercontent.com/assets/2194285/21335997/6f51847c-c617-11e6-999d-4db7794d6be0.jpg) + +## Purpose +Deployer is the application in charge of deploying code here at Runnable. + + +## How it works +Deployer is a thin wrapper around Ansible. It takes jobs from the `deploy.requested` exchange and converts them into `ansible-playbook` commands.
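For example, based on the `deploy.requested` worker in this repo and the defaults in `deployer/configs/.env` (`ANSIBLE_DIR=/ansible`, `SECRET_DIR=/root/.ssh`), a job such as `{ "version": "v1.2.3", "env": "gamma", "service": "api" }` (illustrative values) would translate into roughly the following command, run from the ansible directory:

```bash
# Sketch of the command the worker spawns for the example job above.
# v1.2.3, gamma, and api are hypothetical example values.
ansible-playbook \
  -i gamma-hosts \
  --vault-password-file=/root/.ssh/vault-pass \
  -e git_branch=v1.2.3 \
  -t deploy \
  api.yml
```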
+ diff --git a/deployer/configs/.env b/deployer/configs/.env new file mode 100644 index 00000000..10217616 --- /dev/null +++ b/deployer/configs/.env @@ -0,0 +1,8 @@ +ANSIBLE_BIN=ansible-playbook +ANSIBLE_DIR=/ansible +APP_NAME=deployer +LOG_LEVEL=trace +SECRET_DIR=/root/.ssh + +# ponos vars +WORKER_PREFETCH=1 diff --git a/deployer/configs/.env.test b/deployer/configs/.env.test new file mode 100644 index 00000000..cbfecf86 --- /dev/null +++ b/deployer/configs/.env.test @@ -0,0 +1,2 @@ +ANSIBLE_BIN=./ansible-playbook-mock +ANSIBLE_DIR=./test/fixtures/ diff --git a/deployer/index.js b/deployer/index.js new file mode 100644 index 00000000..81ff7370 --- /dev/null +++ b/deployer/index.js @@ -0,0 +1,14 @@ +'use strict' +require('loadenv')() + +const log = require('./logger') +const server = require('./worker-server') + +server.start() +.then(() => { + log.trace('server started') +}) +.catch((err) => { + log.error({ err }, 'server error:') + throw err +}) diff --git a/deployer/logger.js b/deployer/logger.js new file mode 100644 index 00000000..4d1a998d --- /dev/null +++ b/deployer/logger.js @@ -0,0 +1,37 @@ +'use strict' +require('loadenv')() +const bunyan = require('bunyan') +const cls = require('continuation-local-storage') + +const serializers = { + tx: () => { + let out + try { + out = { + tid: cls.getNamespace('ponos').get('tid') + } + } catch (e) { + // cant do anything here + } + return out + } +} + +const logger = bunyan.createLogger({ + name: process.env.APP_NAME, + streams: [ + { + level: process.env.LOG_LEVEL, + stream: process.stdout + } + ], + serializers: module.exports.serializers, + src: true, + branch: process.env._VERSION_GIT_COMMIT, + commit: process.env._VERSION_GIT_BRANCH, + environment: process.env.NODE_ENV +}) + +module.exports = logger.child({ tx: true }) + +module.exports.serializers = serializers diff --git a/deployer/package.json b/deployer/package.json new file mode 100644 index 00000000..589c8f29 --- /dev/null +++ b/deployer/package.json @@ -0,0 +1,34 @@ +{ + "name": "deployer", + "version": "2.1.0", + "description": "deploy things", + "main": "index.js", + "scripts": { + "start": "NODE_PATH=./lib node index.js", + "test": "NODE_PATH=./lib lab -v -c ./test" + }, + "repository": { + "type": "git", + "url": "git+https://github.com/CodeNow/devops-scripts.git" + }, + "author": "Anandkumar Patel", + "license": "MIT", + "bugs": { + "url": "https://github.com/CodeNow/devops-scripts/issues" + }, + "homepage": "https://github.com/CodeNow/devops-scripts#readme", + "dependencies": { + "101": "^1.6.2", + "bluebird": "^3.4.6", + "bunyan": "^1.8.5", + "continuation-local-storage": "^3.2.0", + "error-cat": "^3.0.0", + "joi": "^10.0.5", + "loadenv": "^2.2.0", + "ponos": "^5.7.1" + }, + "devDependencies": { + "code": "^4.0.0", + "lab": "^11.2.1" + } +} diff --git a/deployer/test/fixtures/ansible-playbook-mock b/deployer/test/fixtures/ansible-playbook-mock new file mode 100755 index 00000000..18054649 --- /dev/null +++ b/deployer/test/fixtures/ansible-playbook-mock @@ -0,0 +1,2 @@ +echo $* +echo $* > ./ansibleMockArgs diff --git a/deployer/test/integration/deploy.js b/deployer/test/integration/deploy.js new file mode 100644 index 00000000..9e15af44 --- /dev/null +++ b/deployer/test/integration/deploy.js @@ -0,0 +1,53 @@ +'use strict' +const Code = require('code') +const Lab = require('lab') +const Promise = require('bluebird') +const Publisher = require('ponos/lib/rabbitmq') + +const lab = exports.lab = Lab.script() +const app = require('../../index') +const workerServer = 
require('../../worker-server') + +const after = lab.after +const afterEach = lab.afterEach +const before = lab.before +const beforeEach = lab.beforeEach +const describe = lab.describe +const expect = Code.expect +const it = lab.it + +const publisher = new Publisher({ + name: process.env.APP_NAME, + hostname: process.env.RABBITMQ_HOSTNAME, + port: process.env.RABBITMQ_PORT, + username: process.env.RABBITMQ_USERNAME, + password: process.env.RABBITMQ_PASSWORD, + events: ['deploy.requested'] +}) + +describe('deploy test', () => { + beforeEach((done) => { + publisher.connect() + .then(() => { + return app.start() + }) + .asCallback(done) + }) + + afterEach((done) => { + publisher.disconnect() + .then(() => { + return workerServer.disconnect() + }).asCallback(done) + done() + }) + + it('should run deploy', (done) => { + publisher.publishEvent('deploy.requested', { + version: 'master', + env: 'gamma', + service: 'deployer' + }) + done() + }) +}) diff --git a/deployer/worker-server.js b/deployer/worker-server.js new file mode 100644 index 00000000..92bbc3a2 --- /dev/null +++ b/deployer/worker-server.js @@ -0,0 +1,22 @@ +'use strict' +require('loadenv')() +const Ponos = require('ponos') +const log = require('./logger') + +module.exports = new Ponos.Server({ + name: process.env.APP_NAME, + enableErrorEvents: true, + log: log, + rabbitmq: { + channel: { + prefetch: process.env.WORKER_PREFETCH + }, + hostname: process.env.RABBITMQ_HOSTNAME, + port: process.env.RABBITMQ_PORT, + username: process.env.RABBITMQ_USERNAME, + password: process.env.RABBITMQ_PASSWORD + }, + events: { + 'deploy.requested': require('./workers/deploy.requested') + } +}) diff --git a/deployer/workers/deploy.requested.js b/deployer/workers/deploy.requested.js new file mode 100644 index 00000000..0fc38948 --- /dev/null +++ b/deployer/workers/deploy.requested.js @@ -0,0 +1,62 @@ +'use strict' +require('loadenv')() +const joi = require('joi') +const Promise = require('bluebird') +const spawn = require('child_process').spawn + +const logger = require('../logger') +const ansibleRoot = process.env.ANSIBLE_DIR +const secretRoot = process.env.SECRET_DIR + +module.exports.jobSchema = joi.object({ + version: joi.string().required(), + env: joi.string().required(), + service: joi.string().required() +}).required() + +module.exports.task = (job) => { + const log = logger.child({ job, worker: 'deploy.requested' }) + + return Promise.fromCallback((cb) => { + const version = job.version + const env = job.env + const service = job.service + + const commandArgs = [ + '-i', `${env}-hosts`, + `--vault-password-file=${secretRoot}/vault-pass`, + '-e', `git_branch=${version}`, + '-t', 'deploy', + `${service}.yml` + ] + + const spawnOpts = { + cwd: ansibleRoot + } + + log.trace({ + commandArgs, + spawnOpts + }, `about to call ${process.env.ANSIBLE_BIN}`) + + const cmd = spawn(process.env.ANSIBLE_BIN, commandArgs, spawnOpts) + + cmd.stdout.on('data', (data) => { + log.trace({ type: 'stdout' }, data.toString()) + }) + + cmd.stderr.on('data', (data) => { + log.error({ type: 'stderr' }, data.toString()) + }) + + cmd.on('close', (code) => { + log.trace(`ansible-playbook exited with code ${code}`) + cb() + }) + + cmd.on('error', (err) => { + log.error({ err }, 'Failed to start ansible-playbook process.') + cb(err) + }) + }) +} diff --git a/graph/.gitignore b/graph/.gitignore new file mode 100644 index 00000000..e653629b --- /dev/null +++ b/graph/.gitignore @@ -0,0 +1 @@ +graph.png diff --git a/graph/README.md b/graph/README.md new file mode 100644 index 
00000000..661bdc24 --- /dev/null +++ b/graph/README.md @@ -0,0 +1,13 @@ +# Runnable Graph + +## Tools + +```bash +brew install graphviz +``` + +### Making Graph + +```bash +cat runnable.dot | dot -Tpng > graph.png && open graph.png +``` diff --git a/graph/runnable.dot b/graph/runnable.dot new file mode 100644 index 00000000..06b794e7 --- /dev/null +++ b/graph/runnable.dot @@ -0,0 +1,60 @@ +digraph { + # describe the groups + subgraph cluster_0 { + label = "Data Layer" + mongodb + rabbitmq + redis + neo4j + } + subgraph cluster_1 { + label = "App Services" + api + api_socket_server + api_workers + detention + drake + drake_workers + eru + github_varnish + pheidi + shiva + } + subgraph cluster_4 { + label = "Dock Services" + docker_listener + khronos + mavis + optimus + palantiri + sauron + } + subgraph cluster_2 { + label = "Proxy Layer" + nginx + hipache + } + subgraph cluster_3 { + label = "Customer Clusters" + docker + } + + # here's the actual interactions + rankdir=LR + api -> mongodb, redis, neo4j, rabbitmq, github_varnish + api_socket_server -> mongodb, redis, docker, github_varnish + api_workers -> mongodb, redis, neo4j, rabbitmq, docker, github_varnish + drake -> rabbitmq + drake_workers -> api + eru -> rabbitmq + docker_listener -> docker + khronos -> mongodb, docker, rabbitmq + mavis -> docker + palantiri -> docker + sauron -> docker + hipache -> drake, api, detention, optimus, mavis + nginx -> api_socket_server, eru + rabbitmq -> api_workers, drake_workers, pheidi, shiva, khronos, palantiri, eru + docker -> docker_listener + the_internet -> nginx, hipache +} diff --git a/lambda/asg-events/index.js b/lambda/asg-events/index.js new file mode 100644 index 00000000..3fb399fb --- /dev/null +++ b/lambda/asg-events/index.js @@ -0,0 +1,53 @@ +'use strict' +const https = require('https') + +exports.handler = function (event, context) { + let message + try { + message = JSON.parse(event.Records[0].Sns.Message) + } catch (e) { + message = { Description: event.Records[0].Sns.Message } + } + + const postData = { + channel: '#aws-notifications', + username: 'AWS', + text: '*' + event.Records[0].Sns.Subject + '*', + icon_emoji: ':cloud:' + } + + let slackMessage = [ + message.Description, + '', + message.Cause + ].join('\n') + + if (!message.Description && !message.Cause) { + slackMessage = 'test notification :partyparrot:' + } + + postData.attachments = [{ + color: 'good', + text: slackMessage + }] + + const options = { + method: 'POST', + hostname: 'hooks.slack.com', + port: 443, + path: '/services/T029DEC10/B141U5BHT/UaAwljeWydJaiW6RmD09C4Wx' + } + + const req = https.request(options, (res) => { + res.setEncoding('utf8') + res.on('data', () => {}) + res.on('end', () => { context.done(null) }) + }) + + req.on('error', (e) => { + console.error('problem with request: ' + e.message) + }) + + req.write(JSON.stringify(postData)) + req.end() +} diff --git a/lambda/asg-events/package.json b/lambda/asg-events/package.json new file mode 100644 index 00000000..722a7ec8 --- /dev/null +++ b/lambda/asg-events/package.json @@ -0,0 +1,31 @@ +{ + "name": "asg-event-handler", + "version": "1.0.0", + "description": "AWS Lambda Handler for ASG Events.", + "main": "index.js", + "directories": { + "test": "test" + }, + "scripts": { + "test": "standard && $npm_package_options_testCommand", + "testonly": "$npm_package_options_testCommand" + }, + "options": { + "testCommand": "ava --serial --fail-fast --timeout 5000 --verbose" + }, + "repository": { + "type": "git", + "url": 
"git+https://github.com/CodeNow/devops-scripts.git" + }, + "author": "Bryan Kendall ", + "license": "UNLICENSED", + "bugs": { + "url": "https://github.com/CodeNow/devops-scripts/issues" + }, + "homepage": "https://github.com/CodeNow/devops-scripts#readme", + "devDependencies": { + "ava": "^0.15.2", + "sinon": "^1.17.4", + "standard": "^7.1.2" + } +} diff --git a/lambda/asg-events/test/fixtures/new-ec2-instances.json b/lambda/asg-events/test/fixtures/new-ec2-instances.json new file mode 100644 index 00000000..c1a26d20 --- /dev/null +++ b/lambda/asg-events/test/fixtures/new-ec2-instances.json @@ -0,0 +1,22 @@ +{ + "Records": [ + { + "EventSource": "aws:sns", + "EventVersion": "1.0", + "EventSubscriptionArn": "arn:aws:sns:us-west-2:437258487404:asg-events-production-delta:2467eda4-9382-441d-b00f-4baec59c583a", + "Sns": { + "Type": "Notification", + "MessageId": "302ece4d-4377-52fc-87da-81b353f6530d", + "TopicArn": "arn:aws:sns:us-west-2:437258487404:asg-events-production-delta", + "Subject": "Auto Scaling: launch for group \"asg-production-delta-2513635\"", + "Message": "{\"Progress\":50,\"AccountId\":\"437258487404\",\"Description\":\"Launching a new EC2 instance: i-8844d555\",\"RequestId\":\"9d2dc323-d5e1-42a3-9945-515767249fa7\",\"EndTime\":\"2016-07-11T21:23:13.840Z\",\"AutoScalingGroupARN\":\"arn:aws:autoscaling:us-west-2:437258487404:autoScalingGroup:a1b93c0d-4566-420a-a753-4cb9df982cff:autoScalingGroupName/asg-production-delta-2513635\",\"ActivityId\":\"9d2dc323-d5e1-42a3-9945-515767249fa7\",\"StartTime\":\"2016-07-11T21:22:07.785Z\",\"Service\":\"AWS Auto Scaling\",\"Time\":\"2016-07-11T21:23:13.840Z\",\"EC2InstanceId\":\"i-8844d555\",\"StatusCode\":\"InProgress\",\"StatusMessage\":\"\",\"Details\":{\"Subnet ID\":\"subnet-3343206a\",\"Availability Zone\":\"us-west-2c\",\"InvokingAlarms\":[{\"Trigger\":{\"MetricName\":\"Swarm Reserved Memory Maximum Available\",\"ComparisonOperator\":\"LessThanThreshold\",\"Statistic\":\"AVERAGE\",\"Dimensions\":[{\"name\":\"AutoScalingGroupName\",\"value\":\"asg-production-delta-2513635\"}],\"Period\":300,\"EvaluationPeriods\":1,\"Unit\":null,\"Namespace\":\"Runnable/Swarm\",\"Threshold\":2},\"AlarmName\":\"asg-production-delta-2513635-max-available-memory\",\"AlarmDescription\":null,\"AWSAccountId\":\"437258487404\",\"OldStateValue\":\"OK\",\"NewStateReason\":\"Threshold Crossed: 1 datapoint (1.7499999999999996) was less than the threshold (2.0).\",\"Region\":\"US West - Oregon\",\"NewStateValue\":\"ALARM\",\"StateChangeTime\":1468272105152}]},\"AutoScalingGroupName\":\"asg-production-delta-2513635\",\"Cause\":\"At 2016-07-11T21:21:45Z a monitor alarm asg-production-delta-2513635-max-available-memory in state ALARM triggered policy scale-out changing the desired capacity from 11 to 12. 
At 2016-07-11T21:22:06Z an instance was started in response to a difference between desired and actual capacity, increasing the capacity from 11 to 12.\",\"Event\":\"autoscaling:EC2_INSTANCE_LAUNCH\"}", + "Timestamp": "2016-07-11T21:23:13.907Z", + "SignatureVersion": "1", + "Signature": "RBO7jTwwXwNHyW2x4O39FN2Drz7FwvbmfdbUsm+OFFjGXITAkwGms6OFMVYsaGClTXbeycfFcCCCjsSZPeY6o2jZSrfeteYPHUcLs/xSyz1FCaUJN99qxwGYqRVdP/O3wXi65NwRqOTHSK9Ptm9SjRGzCnJfwwLQIjQimmaVaUsVMIGbkVgLPlCJ59qALXXeEJ5zNVVrym3E4b/LsO8D+WEmLySsJQ/92UTBX7ZwQUb/aoEVnqFqgCrCaQQpxztyURjbjl1C4BkXAuC1bHqfJ3czXv/WbuwjLbhvUNK//eutXBaqN+k7aO/VU3wwWQ8Tv5TPQ8qvDaiAbnWUrgUVaA==", + "SigningCertUrl": "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-bb750dd426d95ee9390147a5624348ee.pem", + "UnsubscribeUrl": "https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:437258487404:asg-events-production-delta:2467eda4-9382-441d-b00f-4baec59c583a", + "MessageAttributes": {} + } + } + ] +} diff --git a/lambda/asg-events/test/fixtures/test-notification.json b/lambda/asg-events/test/fixtures/test-notification.json new file mode 100644 index 00000000..7dfcaacb --- /dev/null +++ b/lambda/asg-events/test/fixtures/test-notification.json @@ -0,0 +1,22 @@ +{ + "Records": [ + { + "EventSource": "aws:sns", + "EventVersion": "1.0", + "EventSubscriptionArn": "arn:aws:sns:us-west-2:437258487404:asg-events-production-delta:2467eda4-9382-441d-b00f-4baec59c583a", + "Sns": { + "Type": "Notification", + "MessageId": "ab6d7f62-fd75-5025-bfe8-309cfbd0f8eb", + "TopicArn": "arn:aws:sns:us-west-2:437258487404:asg-events-production-delta", + "Subject": "Auto Scaling: test notification for group \"asg-production-delta-2685575\"", + "Message": "{\"AccountId\":\"437258487404\",\"RequestId\":\"cc86deb5-47a7-11e6-b529-354c7d92db8f\",\"AutoScalingGroupARN\":\"arn:aws:autoscaling:us-west-2:437258487404:autoScalingGroup:886e6b80-b398-4857-9fe8-16d45b429e2b:autoScalingGroupName/asg-production-delta-2685575\",\"AutoScalingGroupName\":\"asg-production-delta-2685575\",\"Service\":\"AWS Auto Scaling\",\"Event\":\"autoscaling:TEST_NOTIFICATION\",\"Time\":\"2016-07-11T20:41:09.815Z\"}", + "Timestamp": "2016-07-11T20:41:09.950Z", + "SignatureVersion": "1", + "Signature": "V/grO2ol3DgM17q87x+ltyp6C2U5EfEpTeeKI4iwCXsUTfVvARkuRRnAuHTcsAZ4cl2mXQEZXVfJIdXfM6jAYx7RVpQQ+Yn5FpuFR8aBcZmDZWCJxNMVGX/HubQXHI9M33/e2sLPZAD8Bok38o8PyuTS77Wfy//jJMtaJ7KsIToHNxrNERFv1zPiURQFQ3xUA9bo87aaU/8MXha91UWglsyJWxZRKdIc/v2dcRQnPHfOFxAbi5wThOHFoNHmrJupaimKwfjf/QVv9kEfVaz8/vQb6Uz/RxLZPxlgfeOTm8HckyizzZsdmYWANk2rD1xlVHZZAU6TNuXoL9by7yiYWQ==", + "SigningCertUrl": "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-bb750dd426d95ee9390147a5624348ee.pem", + "UnsubscribeUrl": "https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:437258487404:asg-events-production-delta:2467eda4-9382-441d-b00f-4baec59c583a", + "MessageAttributes": {} + } + } + ] +} diff --git a/lambda/asg-events/test/index.js b/lambda/asg-events/test/index.js new file mode 100644 index 00000000..d35ef6a5 --- /dev/null +++ b/lambda/asg-events/test/index.js @@ -0,0 +1,110 @@ +const https = require('https') +const EventEmitter = require('events') + +class MockRequest extends EventEmitter { + constructor () { + super() + this._written_data = [] + } + + write (data) { + this._written_data.push(data) + } + + end () { + this._ended = true + } +} + +class MockResponse extends EventEmitter { + setEncoding (encoding) { + this.encoding = encoding + } +} + +const test = 
require('ava') +const sinon = require('sinon') +const NEW_EC2_INSTANCE = require('./fixtures/new-ec2-instances.json') +const TEST_NOTIFICATION = require('./fixtures/test-notification.json') + +const handler = require('../').handler + +test.before((t) => { + sinon.stub(https, 'request') +}) + +test.beforeEach((t) => { + https.request.reset() +}) + +test.cb('it should call done in the context', (t) => { + const request = new MockRequest() + const response = new MockResponse() + https.request.returns(request) + https.request.yieldsAsync(response) + sinon.stub(request, 'end', () => { + setTimeout(() => { response.emit('end') }) + }) + handler(NEW_EC2_INSTANCE, { + done: () => { + t.pass() + t.end() + } + }) +}) + +test.cb('it should post to slack', (t) => { + const request = new MockRequest() + const response = new MockResponse() + https.request.returns(request) + https.request.yieldsAsync(response) + sinon.stub(request, 'end', () => { + setTimeout(() => { response.emit('end') }) + }) + handler(NEW_EC2_INSTANCE, { done: () => { + sinon.assert.calledOnce(https.request) + sinon.assert.calledWithExactly( + https.request, + { + method: 'POST', + hostname: 'hooks.slack.com', + port: 443, + path: sinon.match(/\/services\/[^\/]+\/[^\/]+\/[^\/]+/) + }, + sinon.match.func + ) + t.end() + } }) +}) + +test.cb('it should send the description and cause', (t) => { + const request = new MockRequest() + const response = new MockResponse() + https.request.returns(request) + https.request.yieldsAsync(response) + sinon.stub(request, 'end', () => { + setTimeout(() => { response.emit('end') }) + }) + handler(NEW_EC2_INSTANCE, { done: () => { + t.is(request._written_data.length, 1, 'one write') + const s = JSON.parse(request._written_data.pop()) + t.regex(s.attachments[0].text, /Launching a new EC2 instance/) + t.end() + } }) +}) + +test.cb('it should send a test message', (t) => { + const request = new MockRequest() + const response = new MockResponse() + https.request.returns(request) + https.request.yieldsAsync(response) + sinon.stub(request, 'end', () => { + setTimeout(() => { response.emit('end') }) + }) + handler(TEST_NOTIFICATION, { done: () => { + t.is(request._written_data.length, 1, 'one write') + const s = JSON.parse(request._written_data.pop()) + t.regex(s.attachments[0].text, /test notification :partyparrot:/) + t.end() + } }) +}) diff --git a/ssh/config b/ssh/config new file mode 100644 index 00000000..ce3455b2 --- /dev/null +++ b/ssh/config @@ -0,0 +1,215 @@ +Host gamma* + User ubuntu + ForwardAgent yes + StrictHostKeyChecking no + Identityfile ~/.ssh/gamma.pem + +Host delta* + User ubuntu + ForwardAgent yes + StrictHostKeyChecking no + Identityfile ~/.ssh/delta.pem + + +################################################################################ +# utility +################################################################################ +Host migration-router + HostName 52.24.117.16 + User ubuntu + ForwardAgent yes + IdentityFile ~/.ssh/oregon.pem + +################################################################################ +# DELTA staging runnable.io +################################################################################ + +Host delta-staging-data + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.6.59 22 + +################################################################################ +# Gamma +################################################################################ + +Host gamma-bastion + HostName 54.69.34.161 + Port 60709 + +Host 10.4.*.* + User ubuntu + 
ForwardAgent yes + StrictHostKeyChecking no + ProxyCommand ssh -o StrictHostKeyChecking=no -q ubuntu@gamma-bastion nc %h %p + IdentityFile ~/.ssh/gamma.pem + +## Gamma Ingress +Host gamma-ingress + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.0.236 22 + +## Gamma Userland Hipache +Host gamma-userland + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.2.208 22 + +## Gamma App Hipache +Host gamma-hipache + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.0.165 22 + +## Gamma API +Host gamma-api + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.14.3 22 + +Host gamma-api-worker + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.9.84 22 + +Host gamma-api-socket + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.14.4 22 + +Host gamma-api-socket-proxy + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.0.69 22 + +Host gamma-consul-a + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.5.144 22 + +Host gamma-consul-b + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.11.220 22 + +Host gamma-consul-c + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.13.196 22 + +## Gamma MongoDB +Host gamma-mongo-a + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.7.56 22 + +Host gamma-mongo-b + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.8.241 22 + +Host gamma-mongo-c + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.13.116 22 + +## Gamma Rabbit +Host gamma-rabbit + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.19.17 22 + +## Gamma Redis +Host gamma-redis + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.6.45 22 + +Host gamma-redis-slave + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.10.84 22 + +## Gamma Neo4J +Host gamma-neo4j + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.4.210 22 + +# Gamma App Services +Host gamma-app-services + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.13.234 22 + +# Gamma Dock Services +Host gamma-dock-services + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.6.251 22 + +# Gamma Navi +Host gamma-navi + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.7.108 22 + +# Gamma Registry +Host gamma-registry + ProxyCommand ssh -q ubuntu@gamma-bastion nc 10.4.4.82 22 + +################################################################################ +# Delta +################################################################################ + +Host delta-bastion + HostName 52.37.51.230 + Port 60506 + +Host 10.8.*.* + User ubuntu + ForwardAgent yes + StrictHostKeyChecking no + ProxyCommand ssh -o StrictHostKeyChecking=no -q ubuntu@delta-bastion nc %h %p + IdentityFile ~/.ssh/delta.pem + +## Delta API +Host delta-api + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.14.25 22 + +Host delta-api-worker + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.14.24 22 + +Host delta-api-socket + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.14.23 22 + +Host delta-api-socket-proxy + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.0.46 22 + +Host delta-app-services + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.13.102 22 + +Host delta-dock-services + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.5.63 22 + +Host delta-consul-a + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.6.122 22 + +Host delta-consul-b + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.8.130 22 + +Host delta-consul-c + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.14.86 22 + +Host delta-ingress + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.0.219 22 + +Host delta-metabase + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.14.193 22 + +Host delta-registry + ProxyCommand ssh -q ubuntu@delta-bastion 
nc 10.8.4.126 22 + +Host delta-redis + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.4.9 22 + +Host delta-redis-slave + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.9.0 22 + +Host delta-rabbit + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.4.195 22 + +Host delta-neo4j + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.7.191 22 + +Host delta-hipache + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.0.135 22 + +Host delta-userland + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.0.244 22 + +Host delta-mongo-a + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.4.216 22 + +Host delta-mongo-b + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.10.254 22 + +Host delta-mongo-c + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.13.89 22 + +Host delta-prometheus + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.5.172 22 + +Host delta-navi + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.6.41 22 + +Host delta-swarm-manager + ProxyCommand ssh -q ubuntu@delta-bastion nc 10.8.4.40 22 + +################################################################################ +# other +################################################################################ +Host 127.0.0.1 + StrictHostKeyChecking no + UserKnownHostsFile /dev/null + User core + LogLevel QUIET diff --git a/terraform/Makefile b/terraform/Makefile new file mode 100644 index 00000000..bbdf3b42 --- /dev/null +++ b/terraform/Makefile @@ -0,0 +1,58 @@ +.DEFAULT_GOAL := help +.PHONY: help plan apply deps + +TF_SOURCE := sandbox +BUILD_DIR := .build +BUILD_TARGET := sandbox.tf +CREDENTIALS_FILE := credentials.tfvars + +help: + @echo "Builds Runnable AWS infrastructures with Terraform" + @echo "" + @echo "Environment Variables:" + @echo " TERRAFORM_ENVIRONMENT - Name of the environment for which to apply changes" + @echo "" + @echo "Targets:" + @echo " apply Commits the plan and builds the infrastructure" + @echo " deps Ensures system requirements are met to run Terraform" + @echo " env Display the working environment (TERRAFORM_ENVIRONMENT)" + @echo " help Displays this message" + @echo " plan Builds a new Terraform plan" + @echo "" + @echo "For an indepth guide see: https://github.com/codenow/devops-scripts README" + +env: + @echo "Working environment: ${TERRAFORM_ENVIRONMENT}" + +deps: + @hash terraform > /dev/null 2>&1 || \ + (echo "Terraform not installed (try: brew install terraform)"; exit 1) + @test -n "$(TERRAFORM_ENVIRONMENT)" || \ + (echo "Variable TERRAFORM_ENVIRONMENT is missing"; exit 1) + @test -e "${CREDENTIALS_FILE}" || \ + (echo "Cannot find credentials variables, ask someone for '${CREDENTIALS_FILE}'") + +compile: + @echo "Compiling .tf files from sandbox/" + @mkdir -p .build + @find ${TF_SOURCE} \ + | grep -E '${TF_SOURCE}/.*[.]tf' \ + | xargs cat > ${BUILD_DIR}/${BUILD_TARGET} + +apply: compile deps + terraform apply \ + -var-file="${CREDENTIALS_FILE}" \ + -var-file="environment/${TERRAFORM_ENVIRONMENT}.tfvars" \ + ${BUILD_DIR}/ + +destroy: compile deps + terraform destroy \ + -var-file="${CREDENTIALS_FILE}" \ + -var-file="environment/${TERRAFORM_ENVIRONMENT}.tfvars" \ + ${BUILD_DIR}/ + +plan: compile deps + terraform plan \ + -var-file="${CREDENTIALS_FILE}" \ + -var-file="environment/${TERRAFORM_ENVIRONMENT}.tfvars" \ + ${BUILD_DIR}/ diff --git a/terraform/README.md b/terraform/README.md new file mode 100644 index 00000000..9cb98411 --- /dev/null +++ b/terraform/README.md @@ -0,0 +1,24 @@ +# devops-scripts/terraform + +## Overview +The `terraform/` directory in `devops-scripts` defines our sandboxes 
infrastructure +as code via [Hashicorp's Terraform Tool](https://terraform.io). To begin, let's look +at the overall directory structure: + +- `Makefile` - Makefile used to build, destroy, and mutate the sandbox AWS infrastructure. +- `environment/` - Holds variable definitions for each of the environments we maintain. +- `sandbox/` - Contains the Terraform files (`*.tf`) that describe the sandbox infrastructure. + +Take a moment to familiarize yourself with the layout of the files listed above. + +## How to develop + +1. Set the `TERRAFORM_ENVIRONMENT` environment variable to `zeta` in your shell. +2. Get a copy of the `credentials.tfvars` file from @rsandor. +3. Make changes to the `*.tf` files that describe your infrastructure change. +4. Run `make plan` from the `terraform/` directory. +5. Once satisfied with the resulting diff, run `make apply` to apply changes. + +**Note:** Currently only members of the devops team are allowed to have credentials +that can mutate the entire infrastructure. For the foreseeable future no exceptions +will be made. diff --git a/terraform/environment/zeta.tfvars b/terraform/environment/zeta.tfvars new file mode 100644 index 00000000..a50d725f --- /dev/null +++ b/terraform/environment/zeta.tfvars @@ -0,0 +1,32 @@ +/** + * Variable definitions for the Runnable sandboxes `zeta` environment. This + * environment will be used to migrate from our old "by hand" infrastructure to + * management by terraform. + */ + +environment = "zeta" +key_name = "zeta" +provider.region = "us-west-2" + +/** + * VPC resource configuration. + */ +vpc.cidr_block = "10.248.0.0/16" + +/** + * Subnet configuration. + */ +public-subnet.cidr_block_a = "10.248.0.0/24" +public-subnet.cidr_block_b = "10.248.1.0/24" +public-subnet.cidr_block_c = "10.248.2.0/24" +public-subnet.cidr_block_reserved = "10.248.3.0/24" + +private-subnet.cidr_block_a = "10.248.4.0/24" +private-subnet.cidr_block_b = "10.248.5.0/24" +private-subnet.cidr_block_c = "10.248.6.0/24" +private-subnet.cidr_block_reserved = "10.248.7.0/24" + +/** + * Instance level configuration. + */ +bastion.ssh_port = "22" diff --git a/terraform/sandbox/instance/bastion.tf b/terraform/sandbox/instance/bastion.tf new file mode 100644 index 00000000..03049e1c --- /dev/null +++ b/terraform/sandbox/instance/bastion.tf @@ -0,0 +1,19 @@ +/** + * Bastion server for the VPC. The bastion server allows those with credentials + * (e.g. a signed pem file) to SSH through it and into the private subnet. + */ +resource "aws_instance" "bastion" { + tags { + Name = "bastion" + Environment = "${var.environment}" + } + + ami = "${var.bastion.ami}" + associate_public_ip_address = true + instance_type = "${var.bastion.instance_type}" + key_name = "${var.key_name}" + security_groups = [ + "${aws_security_group.bastion.id}" + ] + subnet_id = "${aws_subnet.public-a.id}" +} diff --git a/terraform/sandbox/instance/private-example.tf b/terraform/sandbox/instance/private-example.tf new file mode 100644 index 00000000..b7974d17 --- /dev/null +++ b/terraform/sandbox/instance/private-example.tf @@ -0,0 +1,19 @@ +/** + * Private example server. This is to test whether or not bastion is working + * correctly in the vpc. 
+ */ +resource "aws_instance" "private-example" { + tags { + Name = "private-example" + Environment = "${var.environment}" + } + + ami = "ami-9abea4fb" + associate_public_ip_address = false + instance_type = "t2.micro" + key_name = "${var.key_name}" + security_groups = [ + "${aws_security_group.private-example.id}" + ] + subnet_id = "${aws_subnet.private-a.id}" +} diff --git a/terraform/sandbox/provider.tf b/terraform/sandbox/provider.tf new file mode 100644 index 00000000..e5813105 --- /dev/null +++ b/terraform/sandbox/provider.tf @@ -0,0 +1,10 @@ +/* + * Default provider for all Runnable AWS resources. + * @see https://www.terraform.io/docs/providers/aws/index.html + * @author Ryan Sandor Richards + */ +provider "aws" { + region = "us-west-2" + access_key = "${var.provider.access_key}" + secret_key = "${var.provider.secret_key}" +} diff --git a/terraform/sandbox/security_group/bastion.tf b/terraform/sandbox/security_group/bastion.tf new file mode 100644 index 00000000..de1a1f6a --- /dev/null +++ b/terraform/sandbox/security_group/bastion.tf @@ -0,0 +1,38 @@ +/** + * Bastion security group. This allows trusted external sources to talk to + * machines within the packer VPCs private subnet. + */ +resource "aws_security_group" "bastion" { + tags { + Name = "bastion" + Environment = "${var.environment}" + } + + vpc_id = "${aws_vpc.sandbox.id}" + name = "bastion" + description = "Ingress/Egress rules for the VPC bastion server" +} + +/** + * Allows inbound SSH connections from the internet to the bastion server. + */ +resource "aws_security_group_rule" "bastion-ingress-ssh" { + type = "ingress" + security_group_id = "${aws_security_group.bastion.id}" + from_port = "${var.bastion.ssh_port}" + to_port = "${var.bastion.ssh_port}" + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] +} + +/** + * Allows all outbound traffic from the bastion server. + */ +resource "aws_security_group_rule" "bastion-egress-all" { + type = "egress" + security_group_id = "${aws_security_group.bastion.id}" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] +} diff --git a/terraform/sandbox/security_group/private-example.tf b/terraform/sandbox/security_group/private-example.tf new file mode 100644 index 00000000..acda9b28 --- /dev/null +++ b/terraform/sandbox/security_group/private-example.tf @@ -0,0 +1,38 @@ +/** + * Security group for the private-example server. + */ +resource "aws_security_group" "private-example" { + tags { + Name = "private-example" + Environment = "${var.environment}" + } + + vpc_id = "${aws_vpc.sandbox.id}" + name = "private-example" + description = "Example private subnet security group" +} + +/** + * Allows SSH (port 22) traffic inbound to private-example security group via + * the bastion security group. + */ +resource "aws_security_group_rule" "private-example-inbound-bastion" { + type = "ingress" + security_group_id = "${aws_security_group.private-example.id}" + source_security_group_id = "${aws_security_group.bastion.id}" + from_port = 22 + to_port = 22 + protocol = "tcp" +} + +/** + * Allows all outbound on the private-example security group. 
+ */ +resource "aws_security_group_rule" "private-example-outbound-all" { + type = "egress" + security_group_id = "${aws_security_group.private-example.id}" + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] +} diff --git a/terraform/sandbox/subnet/private-a.tf b/terraform/sandbox/subnet/private-a.tf new file mode 100644 index 00000000..811e20f4 --- /dev/null +++ b/terraform/sandbox/subnet/private-a.tf @@ -0,0 +1,43 @@ +/** + * Private subnet for the VPC bound to the first availability zone. + */ +resource "aws_subnet" "private-a" { + tags { + Name = "private-subnet-a" + Environment = "${var.environment}" + } + + vpc_id = "${aws_vpc.sandbox.id}" + cidr_block = "${var.private-subnet.cidr_block_a}" + availability_zone = "${var.provider.region}a" + map_public_ip_on_launch = false +} + +/** + * Route table for the sanbox private-a subnet. + */ +resource "aws_route_table" "private-a" { + tags { + Name = "private-subnet-a-route-table" + Environment = "${var.environment}" + } + + vpc_id = "${aws_vpc.sandbox.id}" +} + +/** + * Routes all outbound traffic from the private-a subnet to the NAT. + */ +resource "aws_route" "private-a-to-nat" { + route_table_id = "${aws_route_table.private-a.id}" + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = "${aws_nat_gateway.nat.id}" +} + +/** + * Maps the route table to the private-a subnet. + */ +resource "aws_route_table_association" "private-a" { + subnet_id = "${aws_subnet.private-a.id}" + route_table_id = "${aws_route_table.private-a.id}" +} diff --git a/terraform/sandbox/subnet/public-a.tf b/terraform/sandbox/subnet/public-a.tf new file mode 100644 index 00000000..406ada80 --- /dev/null +++ b/terraform/sandbox/subnet/public-a.tf @@ -0,0 +1,43 @@ +/** + * Public subnet for the VPC bound to the first availability zone. + */ +resource "aws_subnet" "public-a" { + tags { + Name = "public-subnet-a" + Environment = "${var.environment}" + } + + vpc_id = "${aws_vpc.sandbox.id}" + cidr_block = "${var.public-subnet.cidr_block_a}" + availability_zone = "${var.provider.region}a" + map_public_ip_on_launch = false +} + +/** + * Route table for the sanbox public-a subnet. + */ +resource "aws_route_table" "public-a" { + tags { + Name = "public-subnet-a-route-table" + Environment = "${var.environment}" + } + + vpc_id = "${aws_vpc.sandbox.id}" +} + +/** + * Routes all traffic from the public-a subnet through the internet gateway. + */ +resource "aws_route" "public-a-to-internet-gateway" { + route_table_id = "${aws_route_table.public-a.id}" + destination_cidr_block = "0.0.0.0/0" + gateway_id = "${aws_internet_gateway.gateway.id}" +} + +/** + * Maps the route table to the public-a subnet. + */ +resource "aws_route_table_association" "public-a" { + subnet_id = "${aws_subnet.public-a.id}" + route_table_id = "${aws_route_table.public-a.id}" +} diff --git a/terraform/sandbox/variables.tf b/terraform/sandbox/variables.tf new file mode 100644 index 00000000..36c6b2dd --- /dev/null +++ b/terraform/sandbox/variables.tf @@ -0,0 +1,77 @@ +/** + * Defines the environment for which to build the infrastructure. This allows + * us to change the basic environment labels for all resources in AWS to easily + * differentiate between VPCs in oregon (us-west-2) + */ +variable "environment" { + type = "string" + default = "undefined" +} + +/** + * AWS key name to use for instances in the VPC. + */ +variable "key_name" { + type = "string" + default = "undefined" +} + +/** + * Details for the AWS provider. Includes region, access keys, etc. 
+ */ +variable "provider" { + type = "map" + default = { + access_key = "undefined" + secret_key = "undefined" + region = "undefined" + } +} + +/** + * VPC specific configuration. + */ +variable "vpc" { + type = "map" + default = { + cidr_block = "10.undefined.0.0/16" + } +} + +/** + * Defines options for changing details about public subnets in the VPC. + */ +variable "public-subnet" { + type = "map" + default = { + cidr_block_a = "10.undefined.0.0/24" + cidr_block_b = "10.undefined.1.0/24" + cidr_block_c = "10.undefined.2.0/24" + cidr_block_reserved = "10.undefined.3.0/24" + } +} + +/** + * Defines options for changing details about private subnets in the VPC. + */ +variable "private-subnet" { + type = "map" + default = { + cidr_block_a = "10.undefined.0.0/24" + cidr_block_b = "10.undefined.1.0/24" + cidr_block_c = "10.undefined.2.0/24" + cidr_block_reserved = "10.undefined.3.0/24" + } +} + +/** + * Bastion options for the VPC. + */ +variable "bastion" { + type = "map" + default = { + ami = "ami-9abea4fb" # Ubuntu Server 14.04 LTS (HVM), SSD Volume Type + instance_type = "t2.micro" + ssh_port = -1 + } +} diff --git a/terraform/sandbox/vpc/gateway.tf b/terraform/sandbox/vpc/gateway.tf new file mode 100644 index 00000000..4e65b9cf --- /dev/null +++ b/terraform/sandbox/vpc/gateway.tf @@ -0,0 +1,11 @@ +/** + * Internet gateway for the sanbox infrastructure. + */ +resource "aws_internet_gateway" "gateway" { + tags { + Name = "gateway" + Environment = "${var.environment}" + } + + vpc_id = "${aws_vpc.sandbox.id}" +} diff --git a/terraform/sandbox/vpc/nat.tf b/terraform/sandbox/vpc/nat.tf new file mode 100644 index 00000000..7e73998c --- /dev/null +++ b/terraform/sandbox/vpc/nat.tf @@ -0,0 +1,16 @@ +/** + * NAT for the VPC. Allows private subnet instances to talk to the internet + * through the vpc gateway. + */ +resource "aws_nat_gateway" "nat" { + allocation_id = "${aws_eip.nat.id}" + subnet_id = "${aws_subnet.public-a.id}" + depends_on = ["aws_internet_gateway.gateway"] +} + +/** + * Elastic IP for the NAT. + */ +resource "aws_eip" "nat" { + vpc = true +} diff --git a/terraform/sandbox/vpc/vpc.tf b/terraform/sandbox/vpc/vpc.tf new file mode 100644 index 00000000..cd22ead0 --- /dev/null +++ b/terraform/sandbox/vpc/vpc.tf @@ -0,0 +1,11 @@ +/* + * VPC Resource for the sandbox product infrastructure. 
+ */ +resource "aws_vpc" "sandbox" { + tags { + Name = "vpc" + Environment = "${var.environment}" + } + + cidr_block = "${var.vpc.cidr_block}" +} diff --git a/wiki/dailynews.png b/wiki/dailynews.png new file mode 100644 index 00000000..dba25023 Binary files /dev/null and b/wiki/dailynews.png differ diff --git a/wiki/rollbar-01-overview.png b/wiki/rollbar-01-overview.png new file mode 100644 index 00000000..e2d4fc2f Binary files /dev/null and b/wiki/rollbar-01-overview.png differ diff --git a/wiki/rollbar-02-projects-dropdown.png b/wiki/rollbar-02-projects-dropdown.png new file mode 100644 index 00000000..b324fb18 Binary files /dev/null and b/wiki/rollbar-02-projects-dropdown.png differ diff --git a/wiki/rollbar-03-error-vs-warning.png b/wiki/rollbar-03-error-vs-warning.png new file mode 100644 index 00000000..7a3f68ec Binary files /dev/null and b/wiki/rollbar-03-error-vs-warning.png differ diff --git a/wiki/rollbar-04-occurrences.png b/wiki/rollbar-04-occurrences.png new file mode 100644 index 00000000..1af3cda3 Binary files /dev/null and b/wiki/rollbar-04-occurrences.png differ diff --git a/wiki/rollbar-05-environment-dropdown.png b/wiki/rollbar-05-environment-dropdown.png new file mode 100644 index 00000000..ba9e4bbc Binary files /dev/null and b/wiki/rollbar-05-environment-dropdown.png differ diff --git a/wiki/rollbar-06-error-details.png b/wiki/rollbar-06-error-details.png new file mode 100644 index 00000000..e7d4282e Binary files /dev/null and b/wiki/rollbar-06-error-details.png differ diff --git a/wiki/rollbar-07-error-controls.png b/wiki/rollbar-07-error-controls.png new file mode 100644 index 00000000..3352428c Binary files /dev/null and b/wiki/rollbar-07-error-controls.png differ diff --git a/wiki/rollbar-08-items-view.png b/wiki/rollbar-08-items-view.png new file mode 100644 index 00000000..82eaa2ef Binary files /dev/null and b/wiki/rollbar-08-items-view.png differ diff --git a/wiki/scurry.jpg b/wiki/scurry.jpg new file mode 100644 index 00000000..b839f7ef Binary files /dev/null and b/wiki/scurry.jpg differ diff --git a/wiki/staging-environment-arch.png b/wiki/staging-environment-arch.png new file mode 100644 index 00000000..edd87087 Binary files /dev/null and b/wiki/staging-environment-arch.png differ diff --git a/wiki/swarm-setup.gif b/wiki/swarm-setup.gif new file mode 100644 index 00000000..b41e6eb1 Binary files /dev/null and b/wiki/swarm-setup.gif differ