From 179200de31589a06b51123f1d77064322e09c551 Mon Sep 17 00:00:00 2001
From: Madhav Puri
Date: Fri, 5 Feb 2016 07:50:17 -0800
Subject: [PATCH 1/2] Squashed 'vendor/ansible/' changes from 7aa70f5..ce33ba8

ce33ba8 Merge pull request #68 from mapuri/cephfix
bd6e93d Merge pull request #65 from abhinandanpb/master
347319d Merge pull request #62 from mapuri/scheduler
fa1f88d remove prebake-for-dev tag from ucp task
963eaea don't set -i flag in ucp image download task.
2502d07 add ceph-install as a dev dependency and move vbox, vagrant, packer to test role
bb8dafe updating default and accepted values
a0aa815 adding fwd_mode to start netplugin
520a929 changes to bring in ucp based swarm stack
d66de22 Merge pull request #57 from DivyaVavili/aci_task_fixes
49bea5c Fixing aci service and mode setup
4d0efd8 Merge pull request #31 from unclejack/bump_go_1.5.3
2632710 Merge pull request #52 from vijayvikrant/bash_autocomplete
80b479d Adding the default values for netplugin_mode. Also mentioning the possible values
9031317 os_agnostic_tasks.yml: bump Go to 1.5.3
3cecf04 Merge pull request #56 from mapuri/cleanup
c4e1b42 adding the support for specifying netplugin mode
9502205 consolidate all netplugin binaries/files under /usr/bin/contiv/netplugin and create symlinks to the paths expected
cfed34c - add bash autocompletion support for netctl command
        - install bash-completion package via yum/apt. This is added to the existing base role.
        - updated the netplugin tar link to the latest release. The tar now has a contrib/completion/bash/netctl packaged in it.
        - copy the packaged netctl file to /etc/bash_completion.d This needs sudo access.
6f85c9e remove the unused file

git-subtree-dir: vendor/ansible
git-subtree-split: ce33ba8c141d28b056e7bd47500e982ef1f4d493
---
 cleanup.yml                                 |  5 ++
 roles/base/tasks/redhat_tasks.yml           |  1 +
 roles/base/tasks/ubuntu_tasks.yml           |  1 +
 roles/contiv_network/defaults/main.yml      |  2 +
 roles/contiv_network/files/netplugin        |  1 -
 roles/contiv_network/tasks/aci_tasks.yml    |  2 +-
 roles/contiv_network/tasks/main.yml         | 24 +++++--
 roles/contiv_network/templates/aci_gw.j2    |  8 +--
 roles/contiv_network/templates/netplugin.j2 |  2 +-
 roles/dev/meta/main.yml                     |  2 +
 roles/dev/tasks/main.yml                    | 11 ---
 roles/dev/tasks/os_agnostic_tasks.yml       | 27 ++------
 roles/scheduler_stack/defaults/main.yml     |  4 ++
 roles/scheduler_stack/meta/main.yml         |  7 ++
 roles/swarm/templates/swarm.j2              |  2 -
 roles/test/tasks/main.yml                   | 17 +++++
 roles/test/tasks/os_agnostic_tasks.yml      | 18 +++++
 roles/{dev => test}/tasks/redhat_tasks.yml  |  0
 roles/{dev => test}/tasks/ubuntu_tasks.yml  |  0
 roles/ucp/defaults/main.yml                 |  9 +++
 roles/ucp/files/ucp.service                 |  8 +++
 roles/ucp/tasks/cleanup.yml                 | 12 ++++
 roles/ucp/tasks/main.yml                    | 52 +++++++++++++++
 roles/ucp/templates/ucp.j2                  | 74 +++++++++++++++++++++
 site.yml                                    |  5 +-
 25 files changed, 244 insertions(+), 50 deletions(-)
 delete mode 100644 roles/contiv_network/files/netplugin
 create mode 100644 roles/scheduler_stack/defaults/main.yml
 create mode 100644 roles/scheduler_stack/meta/main.yml
 create mode 100644 roles/test/tasks/main.yml
 create mode 100644 roles/test/tasks/os_agnostic_tasks.yml
 rename roles/{dev => test}/tasks/redhat_tasks.yml (100%)
 rename roles/{dev => test}/tasks/ubuntu_tasks.yml (100%)
 create mode 100644 roles/ucp/defaults/main.yml
 create mode 100644 roles/ucp/files/ucp.service
 create mode 100644 roles/ucp/tasks/cleanup.yml
 create mode 100644 roles/ucp/tasks/main.yml
 create mode 100644 roles/ucp/templates/ucp.j2

diff --git a/cleanup.yml b/cleanup.yml
index b69d227..a2b270a 100644
--- a/cleanup.yml
+++ b/cleanup.yml
@@ -9,6 +9,9 @@
   - include_vars: roles/{{ item }}/vars/main.yml
     with_items:
       - "etcd"
+  - include_vars: roles/{{ item }}/defaults/main.yml
+    with_items:
+      - "ucp"
   - include: roles/contiv_network/tasks/cleanup.yml
     ignore_errors: yes
   - include: roles/contiv_storage/tasks/cleanup.yml
@@ -17,6 +20,8 @@
     ignore_errors: yes
   - include: roles/swarm/tasks/cleanup.yml
     ignore_errors: yes
+  - include: roles/ucp/tasks/cleanup.yml
+    ignore_errors: yes
   - include: roles/docker/tasks/cleanup.yml
     ignore_errors: yes
   - include: roles/etcd/tasks/cleanup.yml
diff --git a/roles/base/tasks/redhat_tasks.yml b/roles/base/tasks/redhat_tasks.yml
index c7e77f9..4f64ed5 100644
--- a/roles/base/tasks/redhat_tasks.yml
+++ b/roles/base/tasks/redhat_tasks.yml
@@ -25,6 +25,7 @@
     - librbd1-devel
     - lshw
     - python-requests # XXX required by ceph repo, but it has a bad package on it
+    - bash-completion

 - name: install and start ntp
   shell: systemctl enable ntpd
diff --git a/roles/base/tasks/ubuntu_tasks.yml b/roles/base/tasks/ubuntu_tasks.yml
index 570e8a8..35ffdf9 100644
--- a/roles/base/tasks/ubuntu_tasks.yml
+++ b/roles/base/tasks/ubuntu_tasks.yml
@@ -18,6 +18,7 @@
     - perl
     - librbd-dev
    - lshw
+    - bash-completion

 - name: add ansible apt repository (debian)
   apt_repository:
diff --git a/roles/contiv_network/defaults/main.yml b/roles/contiv_network/defaults/main.yml
index 24fcbc7..b13d8c8 100644
--- a/roles/contiv_network/defaults/main.yml
+++ b/roles/contiv_network/defaults/main.yml
@@ -5,3 +5,5 @@
 # Include variables which need to be overridden by inventory vars here.

 contiv_network_mode: "standalone" # Accepted values: standalone, aci
+netplugin_mode: "docker" # Accepted values: docker, kubernetes
+fwd_mode: "bridge" #Accepted values: bridge , routing
diff --git a/roles/contiv_network/files/netplugin b/roles/contiv_network/files/netplugin
deleted file mode 100644
index 08a154c..0000000
--- a/roles/contiv_network/files/netplugin
+++ /dev/null
@@ -1 +0,0 @@
-NETPLUGIN_ARGS='--native-integration --docker-plugin'
diff --git a/roles/contiv_network/tasks/aci_tasks.yml b/roles/contiv_network/tasks/aci_tasks.yml
index bd59f69..d4bbcd8 100644
--- a/roles/contiv_network/tasks/aci_tasks.yml
+++ b/roles/contiv_network/tasks/aci_tasks.yml
@@ -24,5 +24,5 @@
   run_once: true

 - name: set aci mode
-  shell: contivctl global set -nwinfra aci
+  shell: contivctl net global set --fabric-mode aci
   run_once: true
diff --git a/roles/contiv_network/tasks/main.yml b/roles/contiv_network/tasks/main.yml
index f923218..93c6db9 100644
--- a/roles/contiv_network/tasks/main.yml
+++ b/roles/contiv_network/tasks/main.yml
@@ -20,21 +20,35 @@
 - name: download netmaster and netplugin
   get_url:
     validate_certs: "{{ validate_certs }}"
-    url: https://github.com/contiv/netplugin/releases/download/v0.0.0-12-11-2015.20-54-40.UTC/netplugin-v0.0.0-12-11-2015.20-54-40.UTC.tar.bz2
+    url: https://github.com/contiv/netplugin/releases/download/v0.1-01-28-2016.03-55-05.UTC/netplugin-v0.1-01-28-2016.03-55-05.UTC.tar.bz2
     dest: /tmp/contivnet.tar.bz2

+- name: ensure netplugin directory exists
+  file: path=/usr/bin/contiv/netplugin state=directory
+
 - name: install netmaster and netplugin
   shell: tar vxjf /tmp/contivnet.tar.bz2
   args:
-    chdir: /usr/bin/
+    chdir: /usr/bin/contiv/netplugin
    creates: netmaster

+- name: create links for netplugin binaries
+  file: src=/usr/bin/contiv/netplugin/{{ item }} dest=/usr/bin/{{ item }} state=link
+  with_items:
+    - netctl
+    - netmaster
+    - netplugin
+    - contivk8s
+
 - name: copy environment file for netplugin
   template: src=netplugin.j2 dest=/etc/default/netplugin

 - name: copy systemd units for netplugin
   copy: src=netplugin.service dest=/etc/systemd/system/netplugin.service

+- name: copy bash auto complete file for netctl
+  file: src=/usr/bin/contiv/netplugin/contrib/completion/bash/netctl dest=/etc/bash_completion.d/netctl state=link
+
 - name: start netplugin
   shell: systemctl daemon-reload && systemctl start netplugin

@@ -59,12 +73,12 @@
 - name: download contivctl
   get_url:
     validate_certs: "{{ validate_certs }}"
-    url: https://github.com/contiv/contivctl/releases/download/v0.0.0-01-14-2016.01-27-38.UTC/contivctl-v0.0.0-01-14-2016.01-27-38.UTC.tar.bz2
-    dest: /tmp/contivctl-v0.0.0-01-14-2016.01-27-38.UTC.tar.bz2
+    url: https://github.com/contiv/contivctl/releases/download/v0.0.0-01-31-2016.17-56-53.UTC/contivctl-v0.0.0-01-31-2016.17-56-53.UTC.tar.bz2
+    dest: /tmp/contivctl-v0.0.0-01-31-2016.17-56-53.UTC.tar.bz2
     force: no

 - name: install contivctl
-  shell: tar vxjf /tmp/contivctl-v0.0.0-01-14-2016.01-27-38.UTC.tar.bz2
+  shell: tar vxjf /tmp/contivctl-v0.0.0-01-31-2016.17-56-53.UTC.tar.bz2
   args:
     chdir: /usr/bin/
     creates: contivctl
diff --git a/roles/contiv_network/templates/aci_gw.j2 b/roles/contiv_network/templates/aci_gw.j2
index 480c710..94eefe5 100644
--- a/roles/contiv_network/templates/aci_gw.j2
+++ b/roles/contiv_network/templates/aci_gw.j2
@@ -11,10 +11,10 @@ start)
     set -e

     /usr/bin/docker run --net=host \
-        -e "APIC_URL: {{ apic_url }}" \
-        -e "APIC_USERNAME: {{ apic_username }}" \
-        -e "APIC_PASSWORD: {{ apic_password }}" \
-        -e "APIC_LEAF_NODE: {{ apic_leaf_nodes }}" \
+        -e "APIC_URL={{ apic_url }}" \
+        -e "APIC_USERNAME={{ apic_username }}" \
+        -e "APIC_PASSWORD={{ apic_password }}" \
+        -e "APIC_LEAF_NODE={{ apic_leaf_nodes }}" \
         --name=contiv-aci-gw \
         contiv/aci-gw
     ;;
diff --git a/roles/contiv_network/templates/netplugin.j2 b/roles/contiv_network/templates/netplugin.j2
index 783762c..b725e4e 100644
--- a/roles/contiv_network/templates/netplugin.j2
+++ b/roles/contiv_network/templates/netplugin.j2
@@ -1 +1 @@
-NETPLUGIN_ARGS='-docker-plugin -vlan-if {{netplugin_if}} -vtep-ip {{node_addr}}'
+NETPLUGIN_ARGS='-plugin-mode {{netplugin_mode}} -vlan-if {{netplugin_if}} -vtep-ip {{node_addr}} -fwd-mode {{fwd_mode}}'
diff --git a/roles/dev/meta/main.yml b/roles/dev/meta/main.yml
index 9ff458d..517c36a 100644
--- a/roles/dev/meta/main.yml
+++ b/roles/dev/meta/main.yml
@@ -12,9 +12,11 @@
 # 'prebake-for-dev' tag in respective roles.

 dependencies:
+- { role: ceph-install, tags: 'prebake-for-dev' }
 - { role: etcd }
 - { role: docker }
 - { role: swarm }
+- { role: ucp }
 - { role: contiv_cluster }
 - { role: contiv_network }
 - { role: contiv_storage }
diff --git a/roles/dev/tasks/main.yml b/roles/dev/tasks/main.yml
index 73af3d7..a104705 100644
--- a/roles/dev/tasks/main.yml
+++ b/roles/dev/tasks/main.yml
@@ -16,14 +16,3 @@
 - include: os_agnostic_tasks.yml
   tags:
     - prebake-for-dev
-
-- include: ubuntu_tasks.yml
-  when: ansible_os_family == "Debian"
-  tags:
-    - prebake-for-dev
-
-
-- include: redhat_tasks.yml
-  when: ansible_os_family == "RedHat"
-  tags:
-    - prebake-for-dev
diff --git a/roles/dev/tasks/os_agnostic_tasks.yml b/roles/dev/tasks/os_agnostic_tasks.yml
index 8d85a2a..af9526b 100644
--- a/roles/dev/tasks/os_agnostic_tasks.yml
+++ b/roles/dev/tasks/os_agnostic_tasks.yml
@@ -1,12 +1,12 @@
--- name: download Golang v1.5.2
+- name: download Golang v1.5.3
   get_url:
     validate_certs: "{{ validate_certs }}"
-    url: https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz
-    dest: /tmp/go1.5.2.linux-amd64.tar.gz
+    url: https://storage.googleapis.com/golang/go1.5.3.linux-amd64.tar.gz
+    dest: /tmp/go1.5.3.linux-amd64.tar.gz
     force: no

 - name: install Golang
-  shell: tar xfvz /tmp/go1.5.2.linux-amd64.tar.gz
+  shell: tar xfvz /tmp/go1.5.3.linux-amd64.tar.gz
   args:
     chdir: /usr/local/
     creates: /usr/local/go/bin/go
@@ -15,22 +15,3 @@
   copy:
     dest: /etc/profile.d/00golang.sh
     content: "export PATH=/opt/golang/bin:/usr/local/go/bin:$PATH; export GOPATH=/opt/golang"
-
-- name: check packer's version
-  shell: packer --version
-  register: packer_version
-  ignore_errors: yes
-
-- name: download packer
-  get_url:
-    validate_certs: "{{ validate_certs }}"
-    url: https://releases.hashicorp.com/packer/0.8.6/packer_0.8.6_linux_amd64.zip
-    dest: /tmp/packer_0.8.6_linux_amd64.zip
-    force: no
-  when: packer_version.stdout != "0.8.6"
-
-- name: install packer
-  shell: rm -f packer* && unzip /tmp/packer_0.8.6_linux_amd64.zip
-  args:
-    chdir: /usr/local/bin
-  when: packer_version.stdout != "0.8.6"
diff --git a/roles/scheduler_stack/defaults/main.yml b/roles/scheduler_stack/defaults/main.yml
new file mode 100644
index 0000000..61b7bc9
--- /dev/null
+++ b/roles/scheduler_stack/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# Role defaults for contiv_scheduler
+
+scheduler_provider: "native-swarm" # Accepted values: native-swarm, ucp-swarm
diff --git a/roles/scheduler_stack/meta/main.yml b/roles/scheduler_stack/meta/main.yml
new file mode 100644
index 0000000..393ced7
--- /dev/null
+++ b/roles/scheduler_stack/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# This role contains tasks for configuring and starting the scheduler stacks
+# like native-swarm, ucp-swarm, k8s, mesos etc
+
+dependencies:
+- { role: swarm, when: scheduler_provider == "native-swarm" }
+- { role: ucp, when: scheduler_provider == "ucp-swarm" }
diff --git a/roles/swarm/templates/swarm.j2 b/roles/swarm/templates/swarm.j2
index dde61f7..1668185 100644
--- a/roles/swarm/templates/swarm.j2
+++ b/roles/swarm/templates/swarm.j2
@@ -9,8 +9,6 @@ fi
 case $1 in
 start)
     echo starting swarm as {{ run_as }} on {{ node_name }}[{{ node_addr }}]
-    # XXX: we run etcd as master every where so it is fine to use the node-address for etcd, revisit this once we have
-    # etcd as master only on a subset of nodes
     if [[ "{{ run_as }}" == "master" ]]; then
         /usr/bin/swarm join --advertise={{ node_addr }}:{{ docker_api_port }} etcd://{{ node_addr }}:{{ etcd_client_port1 }} &
         /usr/bin/swarm manage -H tcp://{{ node_addr }}:{{ swarm_api_port }} etcd://{{ node_addr }}:{{ etcd_client_port1 }}
diff --git a/roles/test/tasks/main.yml b/roles/test/tasks/main.yml
new file mode 100644
index 0000000..627e1c4
--- /dev/null
+++ b/roles/test/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+# This role contains tasks for installing test environment packages
+
+- include: os_agnostic_tasks.yml
+  tags:
+    - prebake-for-test
+
+- include: ubuntu_tasks.yml
+  when: ansible_os_family == "Debian"
+  tags:
+    - prebake-for-test
+
+
+- include: redhat_tasks.yml
+  when: ansible_os_family == "RedHat"
+  tags:
+    - prebake-for-test
diff --git a/roles/test/tasks/os_agnostic_tasks.yml b/roles/test/tasks/os_agnostic_tasks.yml
new file mode 100644
index 0000000..9ea05e7
--- /dev/null
+++ b/roles/test/tasks/os_agnostic_tasks.yml
@@ -0,0 +1,18 @@
+- name: check packer's version
+  shell: packer --version
+  register: packer_version
+  ignore_errors: yes
+
+- name: download packer
+  get_url:
+    validate_certs: "{{ validate_certs }}"
+    url: https://releases.hashicorp.com/packer/0.8.6/packer_0.8.6_linux_amd64.zip
+    dest: /tmp/packer_0.8.6_linux_amd64.zip
+    force: no
+  when: packer_version.stdout != "0.8.6"
+
+- name: install packer
+  shell: rm -f packer* && unzip /tmp/packer_0.8.6_linux_amd64.zip
+  args:
+    chdir: /usr/local/bin
+  when: packer_version.stdout != "0.8.6"
diff --git a/roles/dev/tasks/redhat_tasks.yml b/roles/test/tasks/redhat_tasks.yml
similarity index 100%
rename from roles/dev/tasks/redhat_tasks.yml
rename to roles/test/tasks/redhat_tasks.yml
diff --git a/roles/dev/tasks/ubuntu_tasks.yml b/roles/test/tasks/ubuntu_tasks.yml
similarity index 100%
rename from roles/dev/tasks/ubuntu_tasks.yml
rename to roles/test/tasks/ubuntu_tasks.yml
diff --git a/roles/ucp/defaults/main.yml b/roles/ucp/defaults/main.yml
new file mode 100644
index 0000000..f2dd8ee
--- /dev/null
+++ b/roles/ucp/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Role defaults for ucp
+
+ucp_version: "0.7.1"
+ucp_local_dir: "fetch/ucp"
+ucp_remote_dir: "/tmp"
+ucp_instance_id_file: "ucp-instance-id"
+ucp_fingerprint_file: "ucp-fingerprint"
+ucp_fifo_file: "ucp-fifo"
diff --git a/roles/ucp/files/ucp.service b/roles/ucp/files/ucp.service
new file mode 100644
index 0000000..d809f14
--- /dev/null
+++ b/roles/ucp/files/ucp.service
@@ -0,0 +1,8 @@
+[Unit]
+Description=Ucp
+After=auditd.service systemd-user-sessions.service time-sync.target docker.service
+
+[Service]
+ExecStart=/usr/bin/ucp.sh start
+ExecStop=/usr/bin/ucp.sh stop
+KillMode=control-group
diff --git a/roles/ucp/tasks/cleanup.yml b/roles/ucp/tasks/cleanup.yml
new file mode 100644
index 0000000..0cd867c
--- /dev/null
+++ b/roles/ucp/tasks/cleanup.yml
@@ -0,0 +1,12 @@
+---
+# This play contains tasks for cleaning up ucp
+
+- name: stop ucp
+  service: name=ucp state=stopped
+
+- name: cleanup ucp files from remote
+  file: name="{{ ucp_remote_dir }}/{{ item }}" state=absent
+  with_items:
+    - "{{ ucp_fingerprint_file }}"
+    - "{{ ucp_instance_id_file }}"
+    - "{{ ucp_fifo_file }}"
diff --git a/roles/ucp/tasks/main.yml b/roles/ucp/tasks/main.yml
new file mode 100644
index 0000000..53523bb
--- /dev/null
+++ b/roles/ucp/tasks/main.yml
@@ -0,0 +1,52 @@
+---
+# This role contains tasks for configuring and starting the swarm stack using ucp
+
+- name: download and install ucp images
+  shell: >
+    docker run --rm -t \
+      --name ucp \
+      -v /var/run/docker.sock:/var/run/docker.sock \
+      docker/ucp \
+      images --image-version={{ ucp_version }}
+
+- name: copy the ucp files to worker nodes
+  copy:
+    src: "{{ ucp_local_dir }}/{{ item }}"
+    dest: "{{ ucp_remote_dir }}/{{ item }}"
+  with_items:
+    - "{{ ucp_fingerprint_file }}"
+    - "{{ ucp_instance_id_file }}"
+  when: run_as == "worker"
+
+- name: copy the ucp start/stop script
+  template: src=ucp.j2 dest=/usr/bin/ucp.sh mode=u=rwx,g=rx,o=rx
+
+- name: copy systemd units for ucp
+  copy: src=ucp.service dest=/etc/systemd/system/ucp.service
+
+- name: start ucp
+  service: name=ucp state=started
+
+- name: create a local fetch directory if it doesn't exist
+  local_action: file path={{ ucp_local_dir }} state=directory
+  when: run_as == "master"
+
+- name: wait for ucp files to be created, which ensures the service has started
+  wait_for:
+    path: "{{ ucp_remote_dir }}/{{ item }}"
+    state: present
+  with_items:
+    - "{{ ucp_fingerprint_file }}"
+    - "{{ ucp_instance_id_file }}"
+  when: run_as == "master"
+
+- name: fetch the ucp files from master nodes
+  fetch:
+    src: "{{ ucp_remote_dir }}/{{ item }}"
+    dest: "{{ ucp_local_dir }}/{{ item }}"
+    flat: yes
+    fail_on_missing: yes
+  with_items:
+    - "{{ ucp_fingerprint_file }}"
+    - "{{ ucp_instance_id_file }}"
+  when: run_as == "master"
diff --git a/roles/ucp/templates/ucp.j2 b/roles/ucp/templates/ucp.j2
new file mode 100644
index 0000000..f767f54
--- /dev/null
+++ b/roles/ucp/templates/ucp.j2
@@ -0,0 +1,74 @@
+#!/bin/bash
+
+usage="$0 start|stop"
+if [ $# -ne 1 ]; then
+    echo USAGE: $usage
+    exit 1
+fi
+
+case $1 in
+start)
+    set -e
+
+    echo starting ucp as {{ run_as }} on {{ node_name }}[{{ node_addr }}]
+
+    if [[ "{{ run_as }}" == "master" ]]; then
+        out=$(/usr/bin/docker run --rm -t --name ucp \
+            -v /var/run/docker.sock:/var/run/docker.sock \
+            docker/ucp install --host-address={{ node_addr }} \
+            --image-version={{ ucp_version }})
+        echo ${out}
+
+        # copy out the instance ID
+        instanceId=$(echo ${out} | egrep -o 'UCP instance ID: [a-zA-Z0-9:_]*' | \
+            awk --field-separator='UCP instance ID: ' '{print $2}')
+        echo instance-id: $instanceId
+        echo ${instanceId} > "{{ ucp_remote_dir }}"/"{{ ucp_instance_id_file }}"
+
+        # copy out the fingerprint.
+        # XXX: we store the output in variable first than redirecting
+        # the output directly to file as we wait on this file to be created. So
+        # redirecting the output let's that task move forward even before the
+        # file contents have been written.
+        # XXX: we need a way for this fingerprint to stick around wherever
+        # contiv/cluster service is running. May be we can save this file on a
+        # distributed filesystem
+        out=$(/usr/bin/docker run --rm -t --name ucp \
+            -v /var/run/docker.sock:/var/run/docker.sock \
+            docker/ucp fingerprint)
+        echo ${out} > "{{ ucp_remote_dir }}"/"{{ ucp_fingerprint_file }}"
+    else
+        /usr/bin/docker run --rm -t --name ucp \
+            -v /var/run/docker.sock:/var/run/docker.sock \
+            -e UCP_ADMIN_USER="admin" -e UCP_ADMIN_PASSWORD="orca" \
+            docker/ucp join --host-address={{ node_addr }} \
+            --image-version={{ ucp_version }} \
+            --url="https://{{ service_vip }}:443" \
+            --fingerprint=`cat "{{ ucp_remote_dir }}"/"{{ ucp_fingerprint_file }}"`
+    fi
+
+    # now just sleep to keep the service up
+    mkfifo "{{ ucp_remote_dir }}"/"{{ ucp_fifo_file }}"
+    < "{{ ucp_remote_dir }}"/"{{ ucp_fifo_file }}"
+    ;;
+
+stop)
+    # don't `set -e` as we shouldn't stop on error
+
+    #stop the ucp containers and associated volumes
+    docker ps -a | grep 'ucp-' | awk '{print $1}' | xargs docker stop
+
+    #remove the ucp containers and associated volumes
+    docker ps -a | grep 'ucp-' | awk '{print $1}' | xargs docker rm -v
+
+    # XXX: do we need to uninstall ucp too?
+    #/usr/bin/docker run --rm -t --name ucp \
+    #    -v /var/run/docker.sock:/var/run/docker.sock \
+    #    docker/ucp uninstall --id=`cat {{ ucp_remote_dir }}/{{ ucp_instance_id_file }}` \
+    ;;
+
+*)
+    echo USAGE: $usage
+    exit 1
+    ;;
+esac
diff --git a/site.yml b/site.yml
index 00f7626..18f8b51 100644
--- a/site.yml
+++ b/site.yml
@@ -14,6 +14,7 @@
     - { role: base }
     - { role: serf }
     - { role: dev }
+    - { role: test }

 - hosts: volplugin-test
   sudo: true
@@ -50,7 +51,7 @@
     - { role: etcd, run_as: master }
     - { role: ceph-mon, mon_group_name: service-master }
    - { role: ceph-osd, mon_group_name: service-master, osd_group_name: service-master }
-    - { role: swarm, run_as: master }
+    - { role: scheduler_stack, run_as: master }
     - { role: contiv_network, run_as: master }
     - { role: contiv_storage, run_as: master }

@@ -64,7 +65,7 @@
     - { role: docker }
     - { role: etcd, run_as: worker }
     - { role: ceph-osd, mon_group_name: service-master, osd_group_name: service-worker }
-    - { role: swarm, run_as: worker }
+    - { role: scheduler_stack, run_as: worker }
     - { role: contiv_network, run_as: worker }
     - { role: contiv_storage, run_as: worker }

From acaaa196be279f9911f421d65bfed95f14e933ef Mon Sep 17 00:00:00 2001
From: Madhav Puri
Date: Fri, 5 Feb 2016 07:50:38 -0800
Subject: [PATCH 2/2] cleanup the dummy file from node2 as well

Signed-off-by: Madhav Puri
---
 management/src/systemtests/cli_test.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/management/src/systemtests/cli_test.go b/management/src/systemtests/cli_test.go
index 9ff816c..5a7ff59 100644
--- a/management/src/systemtests/cli_test.go
+++ b/management/src/systemtests/cli_test.go
@@ -105,6 +105,7 @@ func (s *CliTestSuite) TearDownSuite(c *C) {
 		return
 	}
 	s.tbn1 = nil
+	s.tbn2 = nil
 	s.tb.Teardown()
 }

@@ -125,6 +126,8 @@ func (s *CliTestSuite) SetUpTest(c *C) {
 	file := dummyAnsibleFile
 	out, err := s.tbn1.RunCommandWithOutput(fmt.Sprintf("rm %s", file))
 	c.Logf("dummy file cleanup. Error: %s, Output: %s", err, out)
+	out, err = s.tbn2.RunCommandWithOutput(fmt.Sprintf("rm %s", file))
+	c.Logf("dummy file cleanup. Error: %s, Output: %s", err, out)

 	// XXX: we cleanup up assets from collins instead of restarting it to save test time.
 	for _, name := range validNodeNames {