diff --git a/vendor/ansible/Vagrantfile b/vendor/ansible/Vagrantfile
index d166bd2..4c11b45 100644
--- a/vendor/ansible/Vagrantfile
+++ b/vendor/ansible/Vagrantfile
@@ -43,6 +43,9 @@ Vagrant.configure(2) do |config|
     node.vm.provider "virtualbox" do |vb|
       vb.customize ['modifyvm', :id, '--memory', "4096"]
       vb.customize ["modifyvm", :id, "--cpus", "2"]
+      vb.customize ['modifyvm', :id, '--paravirtprovider', 'kvm']
+      vb.customize ['modifyvm', :id, '--natdnshostresolver1', 'on']
+      vb.customize ['modifyvm', :id, '--natdnsproxy1', 'on']
     end
 
     if ansible_groups["devtest"] == nil then
diff --git a/vendor/ansible/cleanup.yml b/vendor/ansible/cleanup.yml
index 7f1f606..61d74e0 100644
--- a/vendor/ansible/cleanup.yml
+++ b/vendor/ansible/cleanup.yml
@@ -8,34 +8,21 @@
   tasks:
   - include_vars: roles/{{ item }}/defaults/main.yml
     with_items:
-    - "etcd"
-    - "ucp"
-  - include: roles/contiv_network/tasks/cleanup.yml
-    ignore_errors: yes
-  - include: roles/contiv_storage/tasks/cleanup.yml
-    ignore_errors: yes
-  - include: roles/contiv_cluster/tasks/cleanup.yml
-    ignore_errors: yes
-  - include: roles/swarm/tasks/cleanup.yml
-    ignore_errors: yes
-  - include: roles/ucp/tasks/cleanup.yml
-    ignore_errors: yes
-  - include: roles/docker/tasks/cleanup.yml
-    ignore_errors: yes
-  - include: roles/etcd/tasks/cleanup.yml
-    ignore_errors: yes
-  - include: roles/ucarp/tasks/cleanup.yml
+    - "contiv_network"
+    - "contiv_storage"
+    - "contiv_cluster"
+    - "swarm"
+    - "ucp"
+    - "docker"
+    - "etcd"
+  - include: roles/{{ item }}/tasks/cleanup.yml
+    with_items:
+    - contiv_network
+    - contiv_storage
+    - contiv_cluster
+    - swarm
+    - ucp
+    - docker
+    - etcd
+    - ucarp
     ignore_errors: yes
-
-  # XXX: following syntax is much cleaner but is available only in v2.
-  # Will move to this once our packer images and hosts have consistently moved to Ansiblev2
-  #- include: roles/{{ item }}/tasks/cleanup.yml
-  #  with_items:
-  #  - contiv_network
-  #  - contiv_storage
-  #  - contiv_cluster
-  #  - swarm
-  #  - ucp
-  #  - docker
-  #  - etcd
-  #  - ucarp
-  #  ignore_errors: yes
diff --git a/vendor/ansible/group_vars/all b/vendor/ansible/group_vars/all
index 813731b..126d0a4 100644
--- a/vendor/ansible/group_vars/all
+++ b/vendor/ansible/group_vars/all
@@ -25,3 +25,5 @@ validate_certs: "yes"
 # env:
 # service_vip:
 # control_interface:
+
+host_capability: "can-run-user-containers, storage"
diff --git a/vendor/ansible/roles/ansible/tasks/main.yml b/vendor/ansible/roles/ansible/tasks/main.yml
new file mode 100644
index 0000000..1b50903
--- /dev/null
+++ b/vendor/ansible/roles/ansible/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+# This role contains tasks for installing ansible
+
+- name: install ansible (redhat)
+  yum:
+    name: ansible
+    enablerepo: epel-testing
+    state: latest
+  when: ansible_os_family == "RedHat"
+
+- name: add ansible apt repository (debian)
+  apt_repository:
+    repo: ppa:ansible/ansible
+    state: present
+    validate_certs: "{{ validate_certs }}"
+  when: ansible_os_family == "Debian"
+
+- name: install ansible (debian)
+  apt: name=ansible state=latest
+  when: ansible_os_family == "Debian"
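
Note on cleanup.yml above: `include` with `with_items` runs the included file once per item, and the preceding `include_vars` loop loads each role's defaults first, so the per-role cleanup tasks can resolve their own variables. A sketch of what one loop iteration is equivalent to, using the etcd role as the example:

  - include_vars: roles/etcd/defaults/main.yml   # loads etcd_rule_comment, port vars, etc.
  - include: roles/etcd/tasks/cleanup.yml        # can now reference those defaults
    ignore_errors: yes                           # a half-provisioned host should not abort cleanup
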
diff --git a/vendor/ansible/roles/base/tasks/redhat_tasks.yml b/vendor/ansible/roles/base/tasks/redhat_tasks.yml
index 58095f0..c5dad57 100644
--- a/vendor/ansible/roles/base/tasks/redhat_tasks.yml
+++ b/vendor/ansible/roles/base/tasks/redhat_tasks.yml
@@ -1,46 +1,23 @@
-- name: upgrade system (redhat)
-  yum:
-    update_cache: true
-    name: '*'
-    state: latest
-
 # install epel-release first to ensure the extra packages can be installed later
 - name: install epel release package (redhat)
   yum:
     name: epel-release
+    state: latest
 
-- name: install base packages (redhat)
+- name: install/upgrade base packages (redhat)
   yum:
     name: "{{ item }}"
+    update_cache: true
+    state: latest
   with_items:
     - ntp
     - unzip
     - bzip2
-    - vim
     - curl
-    - git
-    - mercurial
-    - gcc
-    - perl
     - librbd1-devel
-    - lshw
     - python-requests # XXX required by ceph repo, but it has a bad package on it
     - bash-completion
+    - kernel # keep kernel up to date
 
 - name: install and start ntp
   shell: systemctl enable ntpd
-
-- name: install python-crypto
-  yum: name=python-crypto state=present
-  register: python_crypto_result
-  ignore_errors: yes
-
-- name: remove python crypt egg file to work-around https://bugs.centos.org/view.php?id=9896&nbn=2
-  shell: rm -rf /usr/lib64/python2.7/site-packages/pycrypto-*.egg-info
-  when: '"Error unpacking rpm package python2-crypto-" in python_crypto_result.msg'
-
-- name: install ansible (redhat)
-  yum:
-    name: ansible
-    enablerepo: epel-testing
-    state: latest
diff --git a/vendor/ansible/roles/base/tasks/ubuntu_tasks.yml b/vendor/ansible/roles/base/tasks/ubuntu_tasks.yml
index 35ffdf9..4536cb9 100644
--- a/vendor/ansible/roles/base/tasks/ubuntu_tasks.yml
+++ b/vendor/ansible/roles/base/tasks/ubuntu_tasks.yml
@@ -6,25 +6,11 @@
 - name: install base packages (debian)
   apt:
     name: "{{ item }}"
+    state: latest
   with_items:
     - unzip
     - bzip2
-    - vim-nox
     - curl
     - python-software-properties
-    - git
-    - mercurial
-    - build-essential
-    - perl
     - librbd-dev
-    - lshw
     - bash-completion
-
-- name: add ansible apt repository (debian)
-  apt_repository:
-    repo: ppa:ansible/ansible
-    state: present
-    validate_certs: "{{ validate_certs }}"
-
-- name: install ansible (debian)
-  apt: name=ansible state=present
diff --git a/vendor/ansible/roles/ceph-common/handlers/main.yml b/vendor/ansible/roles/ceph-common/handlers/main.yml
index ef109b0..8080c60 100644
--- a/vendor/ansible/roles/ceph-common/handlers/main.yml
+++ b/vendor/ansible/roles/ceph-common/handlers/main.yml
@@ -3,6 +3,18 @@
   apt:
     update-cache: yes
 
+- name: check for a ceph socket
+  shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
+  changed_when: false
+  failed_when: false
+  register: socket
+
+- name: check for a rados gateway socket
+  shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
+  changed_when: false
+  failed_when: false
+  register: socketrgw
+
 - name: restart ceph mons
   command: service ceph restart mon
   when:
diff --git a/vendor/ansible/roles/ceph-common/tasks/main.yml b/vendor/ansible/roles/ceph-common/tasks/main.yml
index adae3e6..9fc665c 100644
--- a/vendor/ansible/roles/ceph-common/tasks/main.yml
+++ b/vendor/ansible/roles/ceph-common/tasks/main.yml
@@ -37,18 +37,6 @@
     radosgw_frontend == 'apache' and
     rgw_group_name in group_names
 
-- name: check for a ceph socket
-  shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
-  changed_when: false
-  failed_when: false
-  register: socket
-
-- name: check for a rados gateway socket
-  shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
-  changed_when: false
-  failed_when: false
-  register: socketrgw
-
 - name: create a local fetch directory if it doesn't exist
   local_action: file path={{ fetch_directory }} state=directory
   changed_when: false
@@ -86,6 +74,8 @@
     group: root
     mode: 0644
   notify:
+    - check for a ceph socket
+    - check for a rados gateway socket
     - restart ceph mons
     - restart ceph mons on ubuntu
     - restart ceph mons with systemd
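
Note on the ceph-common change above: the socket checks move from tasks to handlers so they run only when a configuration change actually fires the notify list. Ansible executes handlers in the order they are defined in the handlers file, not the order they are notified, which is why the checks are defined ahead of the restart handlers that consume `socket`/`socketrgw`. A minimal sketch of the pattern (the `when` guard is illustrative, not the role's exact condition):

  # handlers/main.yml -- definition order is execution order
  - name: check for a ceph socket
    shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
    changed_when: false
    failed_when: false
    register: socket

  - name: restart ceph mons
    command: service ceph restart mon
    when: socket.rc == 0   # illustrative: only restart daemons that were running
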
index 257d58e..4dcbb14 100644
--- a/vendor/ansible/roles/contiv_cluster/defaults/main.yml
+++ b/vendor/ansible/roles/contiv_cluster/defaults/main.yml
@@ -1,11 +1,15 @@
 ---
 # role variable for the cluster manager service
 
-collins_image: contiv/collins:02_25_2016
+collins_image: contiv/collins
+collins_image_version: "02_25_2016"
 collins_host_port: 9000
 collins_guest_port: 9000
 
-contiv_cluster_version: "v0.0.0-03-08-2016.00-06-26.UTC"
+clusterm_args_file: "clusterm.args"
+clusterm_conf_file: "clusterm.conf"
+
+contiv_cluster_version: "v0.0.0-03-13-2016.03-44-45.UTC"
 contiv_cluster_tar_file: "cluster-{{ contiv_cluster_version }}.tar.bz2"
 contiv_cluster_src_file: "https://github.com/contiv/cluster/releases/download/{{ contiv_cluster_version }}/{{ contiv_cluster_tar_file }}"
 contiv_cluster_dest_file: "/tmp/{{ contiv_cluster_tar_file }}"
diff --git a/vendor/ansible/roles/contiv_cluster/files/clusterm b/vendor/ansible/roles/contiv_cluster/files/clusterm
deleted file mode 100644
index 079efbd..0000000
--- a/vendor/ansible/roles/contiv_cluster/files/clusterm
+++ /dev/null
@@ -1 +0,0 @@
-CLUSTERM_ARGS=""
diff --git a/vendor/ansible/roles/contiv_cluster/files/clusterm.args b/vendor/ansible/roles/contiv_cluster/files/clusterm.args
new file mode 100644
index 0000000..5e27b80
--- /dev/null
+++ b/vendor/ansible/roles/contiv_cluster/files/clusterm.args
@@ -0,0 +1 @@
+CLUSTERM_ARGS="--config=/etc/default/clusterm/clusterm.conf"
diff --git a/vendor/ansible/roles/contiv_cluster/files/clusterm.conf b/vendor/ansible/roles/contiv_cluster/files/clusterm.conf
new file mode 100644
index 0000000..6aee63c
--- /dev/null
+++ b/vendor/ansible/roles/contiv_cluster/files/clusterm.conf
@@ -0,0 +1,3 @@
+{
+    "comment" : "An empty JSON object loads the default clusterm configuration. Add configuration here and restart the clusterm service to load a non-default configuration."
+}
diff --git a/vendor/ansible/roles/contiv_cluster/meta/main.yml b/vendor/ansible/roles/contiv_cluster/meta/main.yml
new file mode 100644
index 0000000..fd088d3
--- /dev/null
+++ b/vendor/ansible/roles/contiv_cluster/meta/main.yml
@@ -0,0 +1,5 @@
+---
+# The dependencies for cluster-mgr
+
+dependencies:
+- { role: ansible }
diff --git a/vendor/ansible/roles/contiv_cluster/tasks/main.yml b/vendor/ansible/roles/contiv_cluster/tasks/main.yml
index ca8195b..91731ed 100644
--- a/vendor/ansible/roles/contiv_cluster/tasks/main.yml
+++ b/vendor/ansible/roles/contiv_cluster/tasks/main.yml
@@ -11,8 +11,16 @@
   tags:
     - prebake-for-dev
 
+- name: check for collins image
+  shell: "docker images | grep {{ collins_image }} | grep -q {{ collins_image_version }}"
+  ignore_errors: true
+  register: collins_exists
+  tags:
+    - prebake-for-dev
+
 - name: pull collins container image
-  shell: docker pull {{ collins_image }}
+  shell: "docker pull {{ collins_image }}:{{ collins_image_version }}"
+  when: not collins_exists|success
   tags:
     - prebake-for-dev
 
@@ -33,11 +41,22 @@
     chdir: /usr/bin/
   when: download_result | changed
 
-- name: copy environment file for clusterm
-  copy: src=clusterm dest=/etc/default/clusterm
+- name: create conf dir for clusterm
+  file:
+    name: /etc/default/clusterm/
+    state: directory
+
+- name: copy conf files for clusterm
+  copy:
+    src: "{{ item }}"
+    dest: /etc/default/clusterm/{{ item }}
+    force: yes
+  with_items:
+    - "{{ clusterm_args_file }}"
+    - "{{ clusterm_conf_file }}"
 
 - name: copy systemd units for clusterm
-  copy: src=clusterm.service dest=/etc/systemd/system/clusterm.service
+  template: src=clusterm.j2 dest=/etc/systemd/system/clusterm.service
 
 - name: start clusterm
   service: name=clusterm state=started
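
The check-before-pull pattern above (repeated for the swarm image later in this diff) keeps re-provisioning fast: `docker pull` always contacts the registry, so the play greps the local image list first and pulls only on a miss. Generic sketch, with `myimage` as a placeholder name:

  - name: check for image
    shell: "docker images | grep myimage | grep -q {{ image_version }}"
    ignore_errors: true
    register: image_exists

  - name: pull image only when missing
    shell: "docker pull myimage:{{ image_version }}"
    when: not image_exists|success
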
diff --git a/vendor/ansible/roles/contiv_cluster/files/clusterm.service b/vendor/ansible/roles/contiv_cluster/templates/clusterm.j2
similarity index 78%
rename from vendor/ansible/roles/contiv_cluster/files/clusterm.service
rename to vendor/ansible/roles/contiv_cluster/templates/clusterm.j2
index eae327f..7a7d8ee 100644
--- a/vendor/ansible/roles/contiv_cluster/files/clusterm.service
+++ b/vendor/ansible/roles/contiv_cluster/templates/clusterm.j2
@@ -3,7 +3,7 @@ Description=Clusterm
 After=auditd.service systemd-user-sessions.service time-sync.target serf.service collins.service
 
 [Service]
-EnvironmentFile=/etc/default/clusterm
+EnvironmentFile=/etc/default/clusterm/{{ clusterm_args_file }}
 ExecStart=/usr/bin/clusterm $CLUSTERM_ARGS
 Restart=on-failure
 RestartSec=10
diff --git a/vendor/ansible/roles/contiv_cluster/templates/collins.j2 b/vendor/ansible/roles/contiv_cluster/templates/collins.j2
index b0de63b..2174579 100644
--- a/vendor/ansible/roles/contiv_cluster/templates/collins.j2
+++ b/vendor/ansible/roles/contiv_cluster/templates/collins.j2
@@ -11,7 +11,7 @@ start)
     set -e
 
     /usr/bin/docker run -t -p {{ collins_host_port }}:{{ collins_guest_port }} \
-        --name collins {{ collins_image }}
+        --name collins {{ collins_image }}:{{ collins_image_version }}
     ;;
 
 stop)
diff --git a/vendor/ansible/roles/contiv_network/defaults/main.yml b/vendor/ansible/roles/contiv_network/defaults/main.yml
index 9d44582..951aaa1 100644
--- a/vendor/ansible/roles/contiv_network/defaults/main.yml
+++ b/vendor/ansible/roles/contiv_network/defaults/main.yml
@@ -7,13 +7,24 @@ contiv_network_mode: "standalone" # Accepted values: standalone, aci
 netplugin_mode: "docker" # Accepted values: docker, kubernetes
 fwd_mode: "bridge" #Accepted values: bridge , routing
 
+ofnet_master_port: 9001
+ofnet_agent_port1: 9002
+ofnet_agent_port2: 9003
+netmaster_port: 9999
+gobgp_grpc_port: 8080
+bgp_port: 179
+vxlan_port: 4789
+netplugin_rule_comment: "contiv network traffic"
+
-contiv_network_version: "v0.1-03-05-2016.09-42-48.UTC"
+contiv_network_version: "v0.1-03-16-2016.13-43-59.UTC"
 contiv_network_tar_file: "netplugin-{{ contiv_network_version }}.tar.bz2"
 contiv_network_src_file: "https://github.com/contiv/netplugin/releases/download/{{ contiv_network_version }}/{{ contiv_network_tar_file }}"
 contiv_network_dest_file: "/tmp/{{ contiv_network_tar_file }}"
 
-contivctl_version: "v0.0.0-03-07-2016.23-26-25.UTC"
+contivctl_version: "v0.0.0-03-10-2016.22-13-24.UTC"
 contivctl_tar_file: "contivctl-{{ contivctl_version }}.tar.bz2"
 contivctl_src_file: "https://github.com/contiv/contivctl/releases/download/{{ contivctl_version }}/{{ contivctl_tar_file }}"
 contivctl_dest_file: "/tmp/{{ contivctl_tar_file }}"
+
+apic_epg_bridge_domain: "not_specified"
+apic_contracts_unrestricted_mode: "no"
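
The port variables and `netplugin_rule_comment` above let setup and cleanup address the same iptables rule: `iptables -D` deletes by exact rule specification, so tagging each rule with a per-port comment makes the delete unambiguous. For `netmaster_port` (9999) the rendered pair is, roughly:

  # setup (idempotent: probe first, insert only if absent):
  #   ( iptables -L INPUT | grep "contiv network traffic (9999)" ) || \
  #     iptables -I INPUT 1 -p tcp --dport 9999 -j ACCEPT \
  #       -m comment --comment "contiv network traffic (9999)"
  # cleanup deletes the identical specification:
  #   iptables -D INPUT -p tcp --dport 9999 -j ACCEPT \
  #     -m comment --comment "contiv network traffic (9999)"
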
diff --git a/vendor/ansible/roles/contiv_network/tasks/cleanup.yml b/vendor/ansible/roles/contiv_network/tasks/cleanup.yml
index 804181b..d495b0a 100644
--- a/vendor/ansible/roles/contiv_network/tasks/cleanup.yml
+++ b/vendor/ansible/roles/contiv_network/tasks/cleanup.yml
@@ -7,4 +7,22 @@
 - name: stop netplugin
   service: name=netplugin state=stopped
 
+- name: cleanup netmaster host alias
+  lineinfile:
+    dest: /etc/hosts
+    regexp: " netmaster$"
+    state: absent
+  become: true
+
+- name: cleanup iptables for contiv network control plane
+  shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ netplugin_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ ofnet_master_port }}"
+    - "{{ ofnet_agent_port1 }}"
+    - "{{ ofnet_agent_port2 }}"
+    - "{{ netmaster_port }}"
+    - "{{ gobgp_grpc_port }}"
+    - "{{ bgp_port }}"
+
 - include: ovs_cleanup.yml
diff --git a/vendor/ansible/roles/contiv_network/tasks/main.yml b/vendor/ansible/roles/contiv_network/tasks/main.yml
index c0a0a4d..f3e944e 100644
--- a/vendor/ansible/roles/contiv_network/tasks/main.yml
+++ b/vendor/ansible/roles/contiv_network/tasks/main.yml
@@ -17,6 +17,19 @@
 # install can be conditional based on deployment environment.
 - include: ovs.yml
 
+- name: setup iptables for contiv network control plane
+  shell: >
+    ( iptables -L INPUT | grep "{{ netplugin_rule_comment }} ({{ item }})" ) || \
+    iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ netplugin_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ ofnet_master_port }}"
+    - "{{ ofnet_agent_port1 }}"
+    - "{{ ofnet_agent_port2 }}"
+    - "{{ netmaster_port }}"
+    - "{{ gobgp_grpc_port }}"
+    - "{{ bgp_port }}"
+
 - name: download netmaster and netplugin
   get_url:
     validate_certs: "{{ validate_certs }}"
@@ -54,7 +67,12 @@
   shell: systemctl daemon-reload && systemctl start netplugin
 
 - name: setup netmaster host alias
-  shell: echo "{{ service_vip }} netmaster" >> /etc/hosts
+  lineinfile:
+    dest: /etc/hosts
+    line: "{{ service_vip }} netmaster"
+    regexp: " netmaster$"
+    state: present
+  become: true
 
 - name: copy environment file for netmaster
   copy: src=netmaster dest=/etc/default/netmaster
diff --git a/vendor/ansible/roles/contiv_network/tasks/ovs.yml b/vendor/ansible/roles/contiv_network/tasks/ovs.yml
index f25d061..da0e052 100644
--- a/vendor/ansible/roles/contiv_network/tasks/ovs.yml
+++ b/vendor/ansible/roles/contiv_network/tasks/ovs.yml
@@ -52,3 +52,11 @@
   with_items:
     - "tcp:127.0.0.1:6640"
     - "ptcp:6640"
+
+- name: setup iptables for vxlan vtep port
+  shell: >
+    ( iptables -L INPUT | grep "{{ netplugin_rule_comment }} ({{ item }})" ) || \
+    iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ netplugin_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ vxlan_port }}"
diff --git a/vendor/ansible/roles/contiv_network/tasks/ovs_cleanup.yml b/vendor/ansible/roles/contiv_network/tasks/ovs_cleanup.yml
index 2c9b0a5..4aa396d 100644
--- a/vendor/ansible/roles/contiv_network/tasks/ovs_cleanup.yml
+++ b/vendor/ansible/roles/contiv_network/tasks/ovs_cleanup.yml
@@ -24,3 +24,9 @@
   register: ports
 
 - debug: var=ports
+
+- name: cleanup iptables for vxlan vtep port
+  shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ netplugin_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ vxlan_port }}"
diff --git a/vendor/ansible/roles/contiv_network/templates/aci_gw.j2 b/vendor/ansible/roles/contiv_network/templates/aci_gw.j2
index 94eefe5..dc72948 100644
--- a/vendor/ansible/roles/contiv_network/templates/aci_gw.j2
+++ b/vendor/ansible/roles/contiv_network/templates/aci_gw.j2
@@ -15,6 +15,9 @@ start)
         -e "APIC_USERNAME={{ apic_username }}" \
         -e "APIC_PASSWORD={{ apic_password }}" \
         -e "APIC_LEAF_NODE={{ apic_leaf_nodes }}" \
+        -e "APIC_PHYS_DOMAIN={{ apic_phys_domain }}" \
+        -e "APIC_EPG_BRIDGE_DOMAIN={{ apic_epg_bridge_domain }}" \
+        -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ apic_contracts_unrestricted_mode }}" \
         --name=contiv-aci-gw \
         contiv/aci-gw
     ;;
diff --git a/vendor/ansible/roles/contiv_storage/defaults/main.yml b/vendor/ansible/roles/contiv_storage/defaults/main.yml
index 6a63281..afcd2c2 100644
--- a/vendor/ansible/roles/contiv_storage/defaults/main.yml
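
Replacing `echo ... >> /etc/hosts` with `lineinfile` in the tasks above makes the alias idempotent: the `regexp` matches any existing ` netmaster` entry, so reruns rewrite the line in place instead of appending duplicates. The same module call with a concrete example VIP (the real value comes from `service_vip`):

  - name: setup netmaster host alias
    lineinfile:
      dest: /etc/hosts
      line: "192.168.2.10 netmaster"   # example VIP, for illustration only
      regexp: " netmaster$"
      state: present
    become: true
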
+++ b/vendor/ansible/roles/contiv_storage/defaults/main.yml
@@ -2,7 +2,7 @@
 # Role defaults for contiv_storage
 
-contiv_storage_version: "v0.0.0-03-07-2016.09-29-13.UTC"
+contiv_storage_version: "v0.0.0-03-19-2016.02-39-28.UTC"
 contiv_storage_tar_file: "volplugin-{{ contiv_storage_version }}.tar.bz2"
 contiv_storage_src_file: "https://github.com/contiv/volplugin/releases/download/{{ contiv_storage_version }}/{{ contiv_storage_tar_file }}"
 contiv_storage_dest_file: "/tmp/{{ contiv_storage_tar_file }}"
diff --git a/vendor/ansible/roles/dev/meta/main.yml b/vendor/ansible/roles/dev/meta/main.yml
index 517c36a..568f773 100644
--- a/vendor/ansible/roles/dev/meta/main.yml
+++ b/vendor/ansible/roles/dev/meta/main.yml
@@ -13,6 +13,7 @@
 
 dependencies:
 - { role: ceph-install, tags: 'prebake-for-dev' }
+- { role: ansible, tags: 'prebake-for-dev' }
 - { role: etcd }
 - { role: docker }
 - { role: swarm }
diff --git a/vendor/ansible/roles/dev/tasks/main.yml b/vendor/ansible/roles/dev/tasks/main.yml
index a104705..357a07b 100644
--- a/vendor/ansible/roles/dev/tasks/main.yml
+++ b/vendor/ansible/roles/dev/tasks/main.yml
@@ -13,6 +13,16 @@
 # it explicitly here
 
+- include: ubuntu_tasks.yml
+  when: ansible_os_family == "Debian"
+  tags:
+    - prebake-for-dev
+
+- include: redhat_tasks.yml
+  when: ansible_os_family == "RedHat"
+  tags:
+    - prebake-for-dev
+
 - include: os_agnostic_tasks.yml
   tags:
     - prebake-for-dev
diff --git a/vendor/ansible/roles/dev/tasks/os_agnostic_tasks.yml b/vendor/ansible/roles/dev/tasks/os_agnostic_tasks.yml
index 7e6e150..68c25aa 100644
--- a/vendor/ansible/roles/dev/tasks/os_agnostic_tasks.yml
+++ b/vendor/ansible/roles/dev/tasks/os_agnostic_tasks.yml
@@ -4,12 +4,13 @@
     url: https://storage.googleapis.com/golang/go1.6.linux-amd64.tar.gz
     dest: /tmp/go1.6.linux-amd64.tar.gz
     force: no
+  register: download_result
 
 - name: install Golang
-  shell: tar xfvz /tmp/go1.6.linux-amd64.tar.gz
+  shell: rm -rf go/ && tar xfvz /tmp/go1.6.linux-amd64.tar.gz
   args:
     chdir: /usr/local/
-    creates: /usr/local/go/bin/go
+  when: download_result | changed
 
 - name: setup golang environment
   copy:
diff --git a/vendor/ansible/roles/dev/tasks/redhat_tasks.yml b/vendor/ansible/roles/dev/tasks/redhat_tasks.yml
new file mode 100644
index 0000000..6d81bf3
--- /dev/null
+++ b/vendor/ansible/roles/dev/tasks/redhat_tasks.yml
@@ -0,0 +1,11 @@
+- name: install/upgrade base packages (redhat)
+  yum:
+    name: "{{ item }}"
+    update_cache: true
+    state: latest
+  with_items:
+    - vim
+    - git
+    - mercurial
+    - gcc
+    - perl
diff --git a/vendor/ansible/roles/dev/tasks/ubuntu_tasks.yml b/vendor/ansible/roles/dev/tasks/ubuntu_tasks.yml
new file mode 100644
index 0000000..193cccb
--- /dev/null
+++ b/vendor/ansible/roles/dev/tasks/ubuntu_tasks.yml
@@ -0,0 +1,9 @@
+- name: install base packages (debian)
+  apt:
+    name: "{{ item }}"
+  with_items:
+    - vim-nox
+    - git
+    - mercurial
+    - build-essential
+    - perl
diff --git a/vendor/ansible/roles/docker/defaults/main.yml b/vendor/ansible/roles/docker/defaults/main.yml
index 1d767ae..ea50e66 100644
--- a/vendor/ansible/roles/docker/defaults/main.yml
+++ b/vendor/ansible/roles/docker/defaults/main.yml
@@ -2,5 +2,7 @@
 # Default values for docker role
 
 docker_api_port: 2385
-
 docker_version: 1.9.1
+docker_rule_comment: "docker api"
+docker_device: ""
+docker_device_size: "10000MB"
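
`docker_device` defaults to empty, which keeps the current storage behavior; pointing it at a spare block device switches the docker role onto the LVM thin-pool path added below. A hypothetical override (values are examples, not shipped defaults):

  # group_vars/storage-nodes (hypothetical)
  docker_device: "/dev/sdb"
  docker_device_size: "50000MB"
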
diff --git a/vendor/ansible/roles/docker/tasks/cleanup.yml b/vendor/ansible/roles/docker/tasks/cleanup.yml
index 4b7548f..0780220 100644
--- a/vendor/ansible/roles/docker/tasks/cleanup.yml
+++ b/vendor/ansible/roles/docker/tasks/cleanup.yml
@@ -6,3 +6,9 @@
 
 - name: stop docker tcp socket
   service: name=docker-tcp.socket state=stopped
+
+- name: cleanup iptables for docker
+  shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ docker_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ docker_api_port }}"
diff --git a/vendor/ansible/roles/docker/tasks/create_docker_device.yml b/vendor/ansible/roles/docker/tasks/create_docker_device.yml
new file mode 100644
index 0000000..8568da2
--- /dev/null
+++ b/vendor/ansible/roles/docker/tasks/create_docker_device.yml
@@ -0,0 +1,29 @@
+---
+
+- name: pvcreate check for {{ docker_device }}
+  shell: "pvdisplay {{ docker_device }}"
+  register: pvcreated
+  ignore_errors: true
+
+- name: pvcreate {{ docker_device }}
+  shell: "pvcreate {{ docker_device }}"
+  when: pvcreated|failed
+
+- name: vgcreate check for {{ docker_device }}
+  shell: "vgdisplay contiv"
+  register: vgcreated
+  ignore_errors: true
+
+- name: vgcreate contiv
+  shell: "vgcreate contiv {{ docker_device }}"
+  when: vgcreated|failed
+
+- name: lvcreate check for {{ docker_device }}
+  shell: "lvdisplay contiv | grep -q dockerthin"
+  register: lvcreated
+  ignore_errors: true
+
+- name: lvcreate contiv-dockerthin
+  shell: lvcreate -n dockerthin -T contiv --size {{ docker_device_size }}
+  when: lvcreated|failed
+  register: thin_provisioned
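
Each pvcreate/vgcreate/lvcreate step above is guarded by a probe (`pvdisplay`/`vgdisplay`/`lvdisplay`), so the include is rerunnable: a create task fires only when its probe failed. For a hypothetical `docker_device=/dev/sdb`, the net effect is equivalent to:

  #   pvcreate /dev/sdb
  #   vgcreate contiv /dev/sdb
  #   lvcreate -n dockerthin -T contiv --size 10000MB
  # yielding /dev/mapper/contiv-dockerthin, the thin pool that docker-svc.j2
  # (below) passes to --storage-opt dm.thinpooldev when docker_device is set.
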
diff --git a/vendor/ansible/roles/docker/tasks/main.yml b/vendor/ansible/roles/docker/tasks/main.yml
index 45a53eb..06d80ec 100644
--- a/vendor/ansible/roles/docker/tasks/main.yml
+++ b/vendor/ansible/roles/docker/tasks/main.yml
@@ -1,5 +1,6 @@
 ---
 # This role contains tasks for configuring and starting docker service
+#
 
 - name: check docker version
   shell: docker --version
@@ -36,6 +37,14 @@
   tags:
     - prebake-for-dev
 
+- name: setup iptables for docker
+  shell: >
+    ( iptables -L INPUT | grep "{{ docker_rule_comment }} ({{ item }})" ) || \
+    iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ docker_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ docker_api_port }}"
+
 - name: copy systemd units for docker(enable cluster store) (debian)
   template: src=docker-svc.j2 dest=/lib/systemd/system/docker.service
   when: ansible_os_family == "Debian"
@@ -46,19 +55,36 @@
 
 - name: check docker-tcp socket state
   shell: systemctl status docker-tcp.socket | grep 'Active.*active' -o
+  ignore_errors: true
   register: docker_tcp_socket_state
 
+- include: create_docker_device.yml
+  when: docker_device != ""
+
 - name: copy systemd units for docker tcp socket settings
   template: src=docker-tcp.j2 dest=/etc/systemd/system/docker-tcp.socket
   register: docker_tcp_socket
 
 # tcp socket service requires docker service to be started after it
-- name: start docker tcp socket service
-  shell: sudo systemctl daemon-reload && sudo systemctl stop docker && sudo systemctl start docker-tcp.socket && sudo systemctl start docker
+- name: reload systemd configuration
+  shell: sudo systemctl daemon-reload
+  when: "(docker_tcp_socket | changed) or (docker_tcp_socket_state.stdout != 'Active: active')"
+
+- name: stop docker
+  service:
+    name: docker
+    state: stopped
+  when: "(docker_tcp_socket | changed) or (docker_tcp_socket_state.stdout != 'Active: active')"
+
+- name: start docker-tcp service
+  service:
+    name: docker-tcp.socket
+    state: started
   when: "(docker_tcp_socket | changed) or (docker_tcp_socket_state.stdout != 'Active: active')"
 
 - name: check docker service state
   shell: sudo systemctl status docker | grep 'Active.*active' -o
+  ignore_errors: true
   register: docker_service_state
   tags:
     - prebake-for-dev
@@ -69,13 +95,38 @@
 
 # XXX: service module doesn't do daemon-reload yet, so need to use shell module here
 # https://github.com/ansible/ansible-modules-core/issues/191
-- name: restart docker
+- name: reload docker systemd configuration
   #service: name=docker state=restarted
-  shell: sudo systemctl daemon-reload && sudo systemctl restart docker
+  shell: sudo systemctl daemon-reload
   when: "(docker_service_state.stderr | match('.*docker.service changed on disk.*')) or (docker_service_state.stdout != 'Active: active')"
   tags:
     - prebake-for-dev
 
+# XXX: this needs to happen twice after setting up the thin provisioner because
+# of some docker bug I've not investigated.
+- name: restart docker (first time)
+  service:
+    name: docker
+    state: restarted
+  when: thin_provisioned|changed
+  ignore_errors: true
+  tags:
+    - prebake-for-dev
+
+- name: ensure docker is started
+  service:
+    name: docker
+    state: started
+  tags:
+    - prebake-for-dev
+
+- stat: path=/var/docker_images
+  register: docker_images
+
+- name: import saved docker images
+  shell: set -e; cd /var/docker_images; images=$(ls); for i in $images; do docker load -i $i; rm $i; done
+  when: docker_images.stat.isdir is defined and docker_images.stat.isdir
+
 - name: check docker-compose version
   shell: docker-compose --version
   register: docker_compose_version
diff --git a/vendor/ansible/roles/docker/templates/docker-svc.j2 b/vendor/ansible/roles/docker/templates/docker-svc.j2
index 8511784..4a1d87f 100644
--- a/vendor/ansible/roles/docker/templates/docker-svc.j2
+++ b/vendor/ansible/roles/docker/templates/docker-svc.j2
@@ -6,7 +6,11 @@ Requires=docker.socket
 
 [Service]
 Type=notify
-ExecStart=/usr/bin/docker daemon -s overlay -H fd:// --cluster-store=etcd://localhost:{{ etcd_client_port1 }}
+{% if docker_device != "" %}
+ExecStart=/usr/bin/docker daemon --storage-opt dm.thinpooldev=/dev/mapper/contiv-dockerthin -H fd:// --cluster-store=etcd://localhost:{{ etcd_client_port1 }}
+{% else %}
+ExecStart=/usr/bin/docker daemon -H fd:// --cluster-store=etcd://localhost:{{ etcd_client_port1 }}
+{% endif %}
 MountFlags=slave
 LimitNOFILE=1048576
 LimitNPROC=1048576
diff --git a/vendor/ansible/roles/docker/vars/main.yml b/vendor/ansible/roles/docker/vars/main.yml
new file mode 100644
index 0000000..21e7e7d
--- /dev/null
+++ b/vendor/ansible/roles/docker/vars/main.yml
@@ -0,0 +1,2 @@
+---
+thin_provisioned: false
diff --git a/vendor/ansible/roles/etcd/defaults/main.yml b/vendor/ansible/roles/etcd/defaults/main.yml
index fdaa933..9f38316 100644
--- a/vendor/ansible/roles/etcd/defaults/main.yml
+++ b/vendor/ansible/roles/etcd/defaults/main.yml
@@ -6,9 +6,9 @@ etcd_client_port2: 4001
 etcd_peer_port1: 2380
 etcd_peer_port2: 7001
 etcd_peers_group: "service-master"
-etcd_peer_interface: "{{ control_interface }}"
 etcd_init_cluster: true
 etcd_tmp_filename: "/tmp/etcd.existing"
+etcd_rule_comment: "etcd traffic"
 
 # following variables are used in one or more roles, but have no good default value to pick from.
 # Leaving them as commented so that playbooks can fail early with variable not defined error.
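
Dropping the global `etcd_peer_interface` above pairs with the etcd.j2 change below: the template now resolves each peer's address through that host's own `control_interface`, so a cluster can mix interface names across machines. Hypothetical per-host settings:

  # host_vars/node1.yml (hypothetical)
  control_interface: eth1

  # host_vars/node2.yml (hypothetical)
  control_interface: enp0s8
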
diff --git a/vendor/ansible/roles/etcd/tasks/cleanup.yml b/vendor/ansible/roles/etcd/tasks/cleanup.yml
index e505daa..73cda0d 100644
--- a/vendor/ansible/roles/etcd/tasks/cleanup.yml
+++ b/vendor/ansible/roles/etcd/tasks/cleanup.yml
@@ -6,3 +6,12 @@
 
 - name: remove the temp etcd file
   file: name={{ etcd_tmp_filename }} state=absent
+
+- name: cleanup iptables for etcd
+  shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ etcd_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ etcd_client_port1 }}"
+    - "{{ etcd_client_port2 }}"
+    - "{{ etcd_peer_port1 }}"
+    - "{{ etcd_peer_port2 }}"
diff --git a/vendor/ansible/roles/etcd/tasks/main.yml b/vendor/ansible/roles/etcd/tasks/main.yml
index 4f6a4d3..9d4bb34 100644
--- a/vendor/ansible/roles/etcd/tasks/main.yml
+++ b/vendor/ansible/roles/etcd/tasks/main.yml
@@ -15,6 +15,17 @@
   tags:
     - prebake-for-dev
 
+- name: setup iptables for etcd
+  shell: >
+    ( iptables -L INPUT | grep "{{ etcd_rule_comment }} ({{ item }})" ) || \
+    iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ etcd_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ etcd_client_port1 }}"
+    - "{{ etcd_client_port2 }}"
+    - "{{ etcd_peer_port1 }}"
+    - "{{ etcd_peer_port2 }}"
+
 # The second part of the condition avoids reconfiguring master if it was already present in the host-group
 - name: copy the etcd start/stop script
   template: src=etcd.j2 dest=/usr/bin/etcd.sh mode=u=rwx,g=rx,o=rx
@@ -25,3 +36,5 @@
 
 - name: start etcd
   service: name=etcd state=started
+
+
diff --git a/vendor/ansible/roles/etcd/templates/etcd.j2 b/vendor/ansible/roles/etcd/templates/etcd.j2
index dbb75bb..1635a94 100644
--- a/vendor/ansible/roles/etcd/templates/etcd.j2
+++ b/vendor/ansible/roles/etcd/templates/etcd.j2
@@ -41,9 +41,9 @@ start)
     export ETCD_INITIAL_CLUSTER="
     {%- for host in groups[etcd_peers_group] -%}
     {%- if loop.last -%}
-    {{ hostvars[host]['inventory_hostname'] }}=http://{{ hostvars[host]['ansible_' + etcd_peer_interface]['ipv4']['address'] }}:{{ etcd_peer_port1 }},{{ hostvars[host]['inventory_hostname'] }}=http://{{ hostvars[host]['ansible_' + etcd_peer_interface]['ipv4']['address'] }}:{{ etcd_peer_port2 }}
+    {{ hostvars[host]['inventory_hostname'] }}=http://{{ hostvars[host]['ansible_' + hostvars[host]['control_interface']]['ipv4']['address'] }}:{{ etcd_peer_port1 }},{{ hostvars[host]['inventory_hostname'] }}=http://{{ hostvars[host]['ansible_' + hostvars[host]['control_interface']]['ipv4']['address'] }}:{{ etcd_peer_port2 }}
     {%- else -%}
-    {{ hostvars[host]['inventory_hostname'] }}=http://{{ hostvars[host]['ansible_' + etcd_peer_interface]['ipv4']['address'] }}:{{ etcd_peer_port1 }},{{ hostvars[host]['inventory_hostname'] }}=http://{{ hostvars[host]['ansible_' + etcd_peer_interface]['ipv4']['address'] }}:{{ etcd_peer_port2 }},
+    {{ hostvars[host]['inventory_hostname'] }}=http://{{ hostvars[host]['ansible_' + hostvars[host]['control_interface']]['ipv4']['address'] }}:{{ etcd_peer_port1 }},{{ hostvars[host]['inventory_hostname'] }}=http://{{ hostvars[host]['ansible_' + hostvars[host]['control_interface']]['ipv4']['address'] }}:{{ etcd_peer_port2 }},
     {%- endif -%}
     {% endfor -%}
     "
@@ -59,7 +59,7 @@ start)
     {%- endif %}
     {% endfor -%}
     {% if peers %}
-    {% set peer_addr=hostvars[peers[0]]['ansible_' + etcd_peer_interface]['ipv4']['address'] -%}
+    {% set peer_addr=hostvars[peers[0]]['ansible_' + hostvars[peers[0]]['control_interface']]['ipv4']['address'] -%}
     {{ add_member(peer_addr=peer_addr) }}
     {%- else -%}
     {# This condition shall not arise, so fail early #}
diff --git a/vendor/ansible/roles/serf/files/serf.service b/vendor/ansible/roles/serf/files/serf.service
index 4b4e874..d502b4c 100644
--- a/vendor/ansible/roles/serf/files/serf.service
+++ b/vendor/ansible/roles/serf/files/serf.service
@@ -4,6 +4,7 @@ After=auditd.service systemd-user-sessions.service time-sync.target
 
 [Service]
 ExecStart=/usr/bin/serf.sh start
+ExecStop=/usr/bin/serf.sh stop
 Restart=on-failure
 RestartSec=10
 KillMode=control-group
diff --git a/vendor/ansible/roles/serf/tasks/main.yml b/vendor/ansible/roles/serf/tasks/main.yml
index 3d525f8..3e8cffb 100644
--- a/vendor/ansible/roles/serf/tasks/main.yml
+++ b/vendor/ansible/roles/serf/tasks/main.yml
@@ -1,6 +1,23 @@
 ---
 # This role contains tasks for configuring and starting serf service
 
+- name: install lshw (debian)
+  apt:
+    name: "{{ item }}"
+    state: latest
+  with_items:
+    - lshw
+  when: ansible_os_family == "Debian"
+
+- name: install lshw (redhat)
+  yum:
+    name: "{{ item }}"
+    update_cache: true
+    state: latest
+  with_items:
+    - lshw
+  when: ansible_os_family == "RedHat"
+
 - name: download serf binary
   get_url:
     validate_certs: "{{ validate_certs }}"
diff --git a/vendor/ansible/roles/serf/templates/serf.j2 b/vendor/ansible/roles/serf/templates/serf.j2
index 162341a..61edcf4 100644
--- a/vendor/ansible/roles/serf/templates/serf.j2
+++ b/vendor/ansible/roles/serf/templates/serf.j2
@@ -6,19 +6,38 @@ if [ $# -ne 1 ]; then
     exit 1
 fi
 
+{% set mdns_sport_comment="'serf discovery sport'" -%}
+{%- set mdns_sport_rule="-p udp --sport 5353 -i " +
+       serf_discovery_interface +
+       " -j ACCEPT -m comment --comment " +
+       mdns_sport_comment -%}
+{%- set mdns_dport_comment="'serf discovery dport'" -%}
+{%- set mdns_dport_rule="-p udp --dport 5353 -i " +
+       serf_discovery_interface +
+       " -j ACCEPT -m comment --comment " +
+       mdns_dport_comment -%}
+{%- set serf_tcp_comment="'serf control'" -%}
+{%- set serf_tcp_rule="-p tcp --dport 7946 -i " +
+       serf_discovery_interface +
+       " -j ACCEPT -m comment --comment " +
+       serf_tcp_comment -%}
+
 case $1 in
 start)
     # fail on error
     set -e
 
-    # install necessary iptables to let mdns work
-    # XXX: the interface name should be discovered
-    echo setting up iptables for mdns
-    iptables -I INPUT -p udp --dport 5353 -i {{ serf_discovery_interface }} -j ACCEPT && \
-        iptables -I INPUT -p udp --sport 5353 -i {{ serf_discovery_interface }} -j ACCEPT
+    # install necessary iptables to let serf work
+    echo setting up iptables for serf
+    ( /sbin/iptables -L INPUT | grep {{ mdns_sport_comment }} || \
+        /sbin/iptables -I INPUT 1 {{ mdns_sport_rule }} )
+    ( /sbin/iptables -L INPUT | grep {{ mdns_dport_comment }} || \
+        /sbin/iptables -I INPUT 1 {{ mdns_dport_rule }} )
+    ( /sbin/iptables -L INPUT | grep {{ serf_tcp_comment }} || \
+        /sbin/iptables -I INPUT 1 {{ serf_tcp_rule }} )
 
     echo starting serf
-    label=$(hostname)
+    label=$(hostname -s)
     serial=$(lshw -c system | grep serial | awk '{print $2}')
     addr=$(ip addr list dev {{ serf_discovery_interface }} | \
         grep inet | grep {{ serf_discovery_interface }} | \
@@ -29,12 +48,19 @@ start)
     fi
 
     # start serf
-    serf agent -discover mycluster -iface eth1 \
+    /usr/bin/serf agent -node="$label-$serial" -discover mycluster -iface {{ serf_discovery_interface }} \
        -tag NodeLabel=$label \
        -tag NodeSerial=$serial \
        -tag NodeAddr=$addr
    ;;
 
+stop)
+    # cleanup iptables
+    /sbin/iptables -D INPUT {{ mdns_sport_rule }}
+    /sbin/iptables -D INPUT {{ mdns_dport_rule }}
+    /sbin/iptables -D INPUT {{ serf_tcp_rule }}
+    ;;
+
 *)
     echo USAGE: $usage
     exit 1
diff --git a/vendor/ansible/roles/swarm/defaults/main.yml b/vendor/ansible/roles/swarm/defaults/main.yml
index eedb394..6db3841 100644
--- a/vendor/ansible/roles/swarm/defaults/main.yml
+++ b/vendor/ansible/roles/swarm/defaults/main.yml
@@ -3,3 +3,4 @@
 #
 swarm_api_port: 2375
 swarm_version: "1.1.2"
+swarm_rule_comment: "swarm traffic"
diff --git a/vendor/ansible/roles/swarm/tasks/cleanup.yml b/vendor/ansible/roles/swarm/tasks/cleanup.yml
index 85707ce..0d23cd6 100644
--- a/vendor/ansible/roles/swarm/tasks/cleanup.yml
+++ b/vendor/ansible/roles/swarm/tasks/cleanup.yml
@@ -3,3 +3,9 @@
 
 - name: stop swarm
   service: name=swarm state=stopped
+
+- name: cleanup iptables for swarm
+  shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ swarm_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ swarm_api_port }}"
diff --git a/vendor/ansible/roles/swarm/tasks/main.yml b/vendor/ansible/roles/swarm/tasks/main.yml
index 5ce5ab2..8b27e33 100644
--- a/vendor/ansible/roles/swarm/tasks/main.yml
+++ b/vendor/ansible/roles/swarm/tasks/main.yml
@@ -1,10 +1,25 @@
 ---
 # This role contains tasks for configuring and starting swarm service
 
+- name: check for swarm image
+  shell: "docker images | grep swarm | grep -q {{ swarm_version }}"
+  ignore_errors: true
+  register: swarm_exists
+  tags:
+    - prebake-for-dev
+
 - name: download swarm container image
   shell: docker pull swarm:{{ swarm_version }}
   tags:
     - prebake-for-dev
+  when: not swarm_exists|success
+
+- name: setup iptables for swarm
+  shell: >
+    ( iptables -L INPUT | grep "{{ swarm_rule_comment }} ({{ item }})" ) || \
+    iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ swarm_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ swarm_api_port }}"
 
 - name: copy the swarm start/stop script
   template: src=swarm.j2 dest=/usr/bin/swarm.sh mode=u=rwx,g=rx,o=rx
diff --git a/vendor/ansible/roles/ucarp/files/ucarp/vip_up.sh b/vendor/ansible/roles/ucarp/files/ucarp/vip_up.sh
index 5385199..46327a6 100644
--- a/vendor/ansible/roles/ucarp/files/ucarp/vip_up.sh
+++ b/vendor/ansible/roles/ucarp/files/ucarp/vip_up.sh
@@ -13,7 +13,6 @@ vip=$2
 
 /sbin/ip link add name ${intf}_0 type dummy
 
-# XXX: the subnet needs to be derived from underlying parent interface
-/sbin/ip addr add ${vip}/24 dev ${intf}_0
+/sbin/ip addr add ${vip} dev ${intf}_0
 
 /sbin/ip link set dev ${intf}_0 up
diff --git a/vendor/ansible/roles/ucp/defaults/main.yml b/vendor/ansible/roles/ucp/defaults/main.yml
index 80c0c32..6f39317 100644
--- a/vendor/ansible/roles/ucp/defaults/main.yml
+++ b/vendor/ansible/roles/ucp/defaults/main.yml
@@ -8,7 +8,14 @@ ucp_instance_id_file: "ucp-instance-id"
 ucp_fingerprint_file: "ucp-fingerprint"
 ucp_fifo_file: "ucp-fifo"
 ucp_bootstrap_node_name: ""
-
 ucp_admin_user: "admin"
 ucp_admin_password: "orca"
 ucp_controller_replica: "--replica"
+ucp_rule_comment: "ucp traffic"
+ucp_port1: "12376"
+ucp_port2: "12379"
+ucp_port3: "12380"
+ucp_port4: "12381"
+ucp_port5: "12382"
+ucp_swarm_port: "2376"
+ucp_controller_port: "443"
diff --git a/vendor/ansible/roles/ucp/tasks/cleanup.yml b/vendor/ansible/roles/ucp/tasks/cleanup.yml
index 0cd867c..b7258a9 100644
--- a/vendor/ansible/roles/ucp/tasks/cleanup.yml
+++ b/vendor/ansible/roles/ucp/tasks/cleanup.yml
@@ -10,3 +10,15 @@
     - "{{ ucp_fingerprint_file }}"
    - "{{ ucp_instance_id_file }}"
    - "{{ ucp_fifo_file }}"
+
+- name: cleanup iptables for ucp
+  shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ ucp_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ ucp_port1 }}"
+    - "{{ ucp_port2 }}"
+    - "{{ ucp_port3 }}"
+    - "{{ ucp_port4 }}"
+    - "{{ ucp_port5 }}"
+    - "{{ ucp_swarm_port }}"
+    - "{{ ucp_controller_port }}"
diff --git a/vendor/ansible/roles/ucp/tasks/main.yml b/vendor/ansible/roles/ucp/tasks/main.yml
index 0519b18..5cd4dea 100644
--- a/vendor/ansible/roles/ucp/tasks/main.yml
+++ b/vendor/ansible/roles/ucp/tasks/main.yml
@@ -9,6 +9,20 @@
       docker/ucp \
       images --image-version={{ ucp_version }}
 
+- name: setup iptables for ucp
+  shell: >
+    ( iptables -L INPUT | grep "{{ ucp_rule_comment }} ({{ item }})" ) || \
+    iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ ucp_rule_comment }} ({{ item }})"
+  become: true
+  with_items:
+    - "{{ ucp_port1 }}"
+    - "{{ ucp_port2 }}"
+    - "{{ ucp_port3 }}"
+    - "{{ ucp_port4 }}"
+    - "{{ ucp_port5 }}"
+    - "{{ ucp_swarm_port }}"
+    - "{{ ucp_controller_port }}"
+
 - name: copy the ucp files to worker nodes
   copy:
     src: "{{ ucp_local_dir }}/{{ item }}"
diff --git a/vendor/ansible/roles/ucp/templates/ucp.j2 b/vendor/ansible/roles/ucp/templates/ucp.j2
index bc12b34..128587f 100644
--- a/vendor/ansible/roles/ucp/templates/ucp.j2
+++ b/vendor/ansible/roles/ucp/templates/ucp.j2
@@ -16,8 +16,8 @@ start)
     out=$(/usr/bin/docker run --rm -t --name ucp \
         -v /var/run/docker.sock:/var/run/docker.sock \
         docker/ucp install --host-address={{ node_addr }} \
-        --image-version={{ ucp_version }})
-    echo ${out}
+        --swarm-port={{ ucp_swarm_port }} --controller-port={{ ucp_controller_port }} \
+        --image-version={{ ucp_version }} | tee /dev/stdout)
 
     # copy out the instance ID
     instanceId=$(echo ${out} | egrep -o 'UCP instance ID: [a-zA-Z0-9:_]*' | \
@@ -42,6 +42,7 @@ start)
         -v /var/run/docker.sock:/var/run/docker.sock \
         -e UCP_ADMIN_USER={{ ucp_admin_user }} -e UCP_ADMIN_PASSWORD={{ ucp_admin_password }} \
         docker/ucp join --host-address={{ node_addr }} \
+        --swarm-port={{ ucp_swarm_port }} --controller-port={{ ucp_controller_port }} \
         --image-version={{ ucp_version }} \
         --url="https://{{ service_vip }}:443" \
 {% if run_as == "master" -%}
diff --git a/vendor/ansible/site.yml b/vendor/ansible/site.yml
index 9d5fad5..b057d64 100644
--- a/vendor/ansible/site.yml
+++ b/vendor/ansible/site.yml
@@ -59,11 +59,11 @@
   - { role: ucarp }
   - { role: docker }
   - { role: etcd, run_as: master }
-  #- { role: ceph-mon, mon_group_name: service-master }
-  #- { role: ceph-osd, mon_group_name: service-master, osd_group_name: service-master }
-  - { role: scheduler_stack, run_as: master }
-  - { role: contiv_network, run_as: master }
-  #- { role: contiv_storage, run_as: master }
+#  - { role: ceph-mon, mon_group_name: service-master, when: host_capability|match('.*can-run-user-containers.*') }
+#  - { role: ceph-osd, mon_group_name: service-master, osd_group_name: service-master, when: host_capability|match('.*storage.*') }
+  - { role: scheduler_stack, run_as: master, when: host_capability|match('.*can-run-user-containers.*') }
+  - { role: contiv_network, run_as: master, when: host_capability|match('.*can-run-user-containers.*') }
+#  - { role: contiv_storage, run_as: master }
 
 # service-worker hosts correspond to cluster machines that run the worker/driver
 # logic of the infra services.
@@ -74,10 +74,10 @@
   - { role: base }
   - { role: docker }
   - { role: etcd, run_as: worker }
-  #- { role: ceph-osd, mon_group_name: service-master, osd_group_name: service-worker }
-  - { role: scheduler_stack, run_as: worker }
-  - { role: contiv_network, run_as: worker }
-  #- { role: contiv_storage, run_as: worker }
+#  - { role: ceph-osd, mon_group_name: service-master, osd_group_name: service-worker, when: host_capability|match('.*storage.*') }
+  - { role: scheduler_stack, run_as: worker, when: host_capability|match('.*can-run-user-containers.*') }
+  - { role: contiv_network, run_as: worker, when: host_capability|match('.*can-run-user-containers.*') }
+#  - { role: contiv_storage, run_as: worker }
 
 # netplugin-node hosts set up netmaster/netplugin in a cluster
 - hosts: netplugin-node
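
The `when: host_capability|match(...)` guards above pair with the `host_capability` default added to group_vars/all earlier in this diff. A host that should not run user containers can narrow its capability string, e.g.:

  # host_vars/storage-node1.yml (hypothetical)
  host_capability: "storage"
  # scheduler_stack and contiv_network are then skipped on this host, while a
  # role gated on '.*storage.*' would still run.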