diff --git a/Vagrantfile b/Vagrantfile
index b303d0a..1b162dd 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -49,6 +49,9 @@ ansible_extra_vars = {
   "env" => host_env,
   "service_vip" => "#{base_ip}252",
   "validate_certs" => "no",
+  "control_interface" => "eth1",
+  "netplugin_if" => "eth2",
+  "docker_version" => "1.10.1",
 }
 
 ansible_extra_vars = ansible_extra_vars.merge(ceph_vars)
@@ -79,17 +82,21 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   # use a private key from within the repo for demo environment. This is used for
   # pushing configuration
   config.ssh.private_key_path = "./management/src/demo/files/insecure_private_key"
-  num_nodes.times do |n|
+  (0..num_nodes-1).reverse_each do |n|
     node_name = node_names[n]
     node_addr = node_ips[n]
     node_vars = {
       "etcd_master_addr" => node_ips[0],
       "etcd_master_name" => node_names[0],
+      "swarm_bootstrap_node_addr" => node_ips[0],
+      "ucp_bootstrap_node_addr" => node_ips[0],
     }
     config.vm.define node_name do |node|
       node.vm.hostname = node_name
       # create an interface for cluster (control) traffic
       node.vm.network :private_network, ip: node_addr, virtualbox__intnet: "true"
+      # create an interface for cluster (data) traffic
+      node.vm.network :private_network, ip: "0.0.0.0", virtualbox__intnet: "true"
       node.vm.provider "virtualbox" do |v|
         # give enough ram and memory for docker to run fine
         v.customize ['modifyvm', :id, '--memory', "4096"]
@@ -99,7 +106,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
         # which are used by default by virtualbox
         v.customize ['modifyvm', :id, '--nictype1', 'virtio']
         v.customize ['modifyvm', :id, '--nictype2', 'virtio']
+        v.customize ['modifyvm', :id, '--nictype3', 'virtio']
         v.customize ['modifyvm', :id, '--nicpromisc2', 'allow-all']
+        v.customize ['modifyvm', :id, '--nicpromisc3', 'allow-all']
         # create disks for ceph
         (0..1).each do |d|
           disk_path = "disk-#{n}-#{d}"
@@ -118,6 +127,13 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
                        '--medium', vdi_disk_path]
         end
       end
+
+      # provision base packages needed for cluster management
+      if ansible_groups["cluster-node"] == nil then
+        ansible_groups["cluster-node"] = [ ]
+      end
+      ansible_groups["cluster-node"] << node_name
+
       # The first vm stimulates the first manually **configured** nodes
       # in a cluster
       if n == 0 then
@@ -134,9 +150,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
 
         # add this node to cluster-control host group
         ansible_groups["cluster-control"] = [node_name]
-        node.vm.provision "shell" do |s|
-          s.inline = shell_provision
-        end
       end
 
       if service_init
@@ -144,9 +157,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
         node.vm.synced_folder "shared", "/shared"
 
         ansible_extra_vars = ansible_extra_vars.merge(node_vars)
-        if n == 0 then
+        if n <= 2 then
           # if we are bringing up services as part of the cluster, then start
-          # master services on the first vm
+          # master services on the first three vms
           if ansible_groups["service-master"] == nil then
             ansible_groups["service-master"] = [ ]
           end
@@ -162,13 +175,18 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
         end
 
         # Run the provisioner after all machines are up
-        if n == (num_nodes - 1) then
+        if n == 0 then
           node.vm.provision 'ansible' do |ansible|
             ansible.groups = ansible_groups
             ansible.playbook = ansible_playbook
             ansible.extra_vars = ansible_extra_vars
             ansible.limit = 'all'
           end
+          # run shell provisioner for first node to correctly mount dev
+          # binaries if needed
+          node.vm.provision "shell" do |s|
+            s.inline = shell_provision
+          end
         end
       end
     end
diff --git a/management/src/clusterm/manager/utils.go b/management/src/clusterm/manager/utils.go
index 1c98f5f..386b2ae 100644
--- a/management/src/clusterm/manager/utils.go
+++ b/management/src/clusterm/manager/utils.go
@@ -3,6 +3,7 @@ package manager
 import (
 	"fmt"
 
+	log "github.com/Sirupsen/logrus"
 	"github.com/contiv/cluster/management/src/inventory"
 )
 
@@ -34,6 +35,7 @@ func (m *Manager) isMasterNode(name string) (bool, error) {
 	if n.cInfo == nil {
 		return false, nodeConfigNotExistsError(name)
 	}
+	log.Debugf("node: %q, group: %q", name, n.cInfo.GetGroup())
 	return n.cInfo.GetGroup() == ansibleMasterGroupName, nil
 }
 
@@ -45,6 +47,7 @@ func (m *Manager) isWorkerNode(name string) (bool, error) {
 	if n.cInfo == nil {
 		return false, nodeConfigNotExistsError(name)
 	}
+	log.Debugf("node: %q, group: %q", name, n.cInfo.GetGroup())
 	return n.cInfo.GetGroup() == ansibleWorkerGroupName, nil
 }
 
@@ -57,5 +60,6 @@ func (m *Manager) isDiscoveredAndAllocatedNode(name string) (bool, error) {
 		return false, nodeInventoryNotExistsError(name)
 	}
 	status, state := n.iInfo.GetStatus()
+	log.Debugf("node: %q, status: %q, state: %q", name, status, state)
 	return state == inventory.Discovered && status == inventory.Allocated, nil
 }
diff --git a/management/src/systemtests/cli_test.go b/management/src/systemtests/cli_test.go
index 5a7ff59..d1c7601 100644
--- a/management/src/systemtests/cli_test.go
+++ b/management/src/systemtests/cli_test.go
@@ -112,9 +112,9 @@ func (s *CliTestSuite) TearDownSuite(c *C) {
 func (s *CliTestSuite) nukeNodeInCollins(c *C, nodeName string) {
 	// Ignore errors here as asset might not exist.
 	out, err := s.tbn1.RunCommandWithOutput(fmt.Sprintf(`curl --basic -u blake:admin:first -d status="Decommissioned" -d reason="test" -X POST http://localhost:9000/api/asset/%s`, nodeName))
-	c.Logf("asset decommission result: %s. Output: %s", err, out)
+	c.Logf("asset decommission result: %v. Output: %s", err, out)
 	out, err = s.tbn1.RunCommandWithOutput(fmt.Sprintf(`curl --basic -u blake:admin:first -d reason=test -X DELETE http://localhost:9000/api/asset/%s`, nodeName))
-	c.Logf("asset deletion result: %s. Output: %s", err, out)
+	c.Logf("asset deletion result: %v. Output: %s", err, out)
 }
 
 func (s *CliTestSuite) SetUpTest(c *C) {
@@ -125,9 +125,9 @@ func (s *CliTestSuite) SetUpTest(c *C) {
 	//cleanup an existing dummy file, if any that our test ansible will create. Ignore error, if any.
 	file := dummyAnsibleFile
 	out, err := s.tbn1.RunCommandWithOutput(fmt.Sprintf("rm %s", file))
-	c.Logf("dummy file cleanup. Error: %s, Output: %s", err, out)
+	c.Logf("dummy file cleanup. Error: %v, Output: %s", err, out)
 	out, err = s.tbn2.RunCommandWithOutput(fmt.Sprintf("rm %s", file))
-	c.Logf("dummy file cleanup. Error: %s, Output: %s", err, out)
+	c.Logf("dummy file cleanup. Error: %v, Output: %s", err, out)
 	// XXX: we cleanup up assets from collins instead of restarting it to save test time.
 	for _, name := range validNodeNames {
diff --git a/vendor/ansible/Vagrantfile b/vendor/ansible/Vagrantfile
index c64e5a9..dd6242b 100644
--- a/vendor/ansible/Vagrantfile
+++ b/vendor/ansible/Vagrantfile
@@ -23,6 +23,7 @@ ansible_tags = ENV["CONTIV_ANSIBLE_TAGS"] || "prebake-for-dev"
 ansible_extra_vars = {
   "env" => host_env,
   "validate_certs" => "no",
+  "control_interface" => "eth1",
 }
 
 puts "Host environment: #{host_env}"
diff --git a/vendor/ansible/roles/base/tasks/os_agnostic_tasks.yml b/vendor/ansible/roles/base/tasks/os_agnostic_tasks.yml
index 41876fe..efcd540 100644
--- a/vendor/ansible/roles/base/tasks/os_agnostic_tasks.yml
+++ b/vendor/ansible/roles/base/tasks/os_agnostic_tasks.yml
@@ -1,7 +1,7 @@
 - name: download consul binary
   get_url:
     validate_certs: "{{ validate_certs }}"
-    url: https://dl.bintray.com/mitchellh/consul/0.5.2_linux_amd64.zip
+    url: https://releases.hashicorp.com/consul/0.5.2/consul_0.5.2_linux_amd64.zip
     dest: /tmp/consul.zip
 
 - name: install consul
diff --git a/vendor/ansible/roles/contiv_cluster/defaults/main.yml b/vendor/ansible/roles/contiv_cluster/defaults/main.yml
new file mode 100644
index 0000000..6188d39
--- /dev/null
+++ b/vendor/ansible/roles/contiv_cluster/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+# role variable for the cluster manager service
+
+collins_image: contiv/collins:01_25_2016
+collins_host_port: 9000
+collins_guest_port: 9000
+
+contiv_cluster_version: "v0.0.0-01-05-2016.22-21-32.UTC"
+contiv_cluster_tar_file: "cluster-{{ contiv_cluster_version }}.tar.bz2"
+contiv_cluster_src_file: "https://github.com/contiv/cluster/releases/download/{{ contiv_cluster_version }}/{{ contiv_cluster_tar_file }}"
+contiv_cluster_dest_file: "/tmp/{{ contiv_cluster_tar_file }}"
diff --git a/vendor/ansible/roles/contiv_cluster/tasks/main.yml b/vendor/ansible/roles/contiv_cluster/tasks/main.yml
index b49dea9..ca8195b 100644
--- a/vendor/ansible/roles/contiv_cluster/tasks/main.yml
+++ b/vendor/ansible/roles/contiv_cluster/tasks/main.yml
@@ -22,15 +22,16 @@
 - name: download clusterm
   get_url:
     validate_certs: "{{ validate_certs }}"
-    url: https://github.com/contiv/cluster/releases/download/v0.0.0-01-05-2016.22-21-32.UTC/cluster-v0.0.0-01-05-2016.22-21-32.UTC.tar.bz2
-    dest: /tmp/cluster-v0.0.0-01-05-2016.22-21-32.UTC.tar.bz2
+    url: "{{ contiv_cluster_src_file }}"
+    dest: "{{ contiv_cluster_dest_file }}"
     force: no
+  register: download_result
 
 - name: install clusterm
-  shell: tar vxjf /tmp/cluster-v0.0.0-01-05-2016.22-21-32.UTC.tar.bz2
+  shell: tar vxjf {{ contiv_cluster_dest_file }}
   args:
     chdir: /usr/bin/
-  creates: clusterm
+  when: download_result | changed
 
 - name: copy environment file for clusterm
   copy: src=clusterm dest=/etc/default/clusterm
diff --git a/vendor/ansible/roles/contiv_cluster/vars/main.yml b/vendor/ansible/roles/contiv_cluster/vars/main.yml
deleted file mode 100644
index 5587596..0000000
--- a/vendor/ansible/roles/contiv_cluster/vars/main.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# role variable for the cluster manager service
-
-collins_image: contiv/collins:01_25_2016
-collins_host_port: 9000
-collins_guest_port: 9000
diff --git a/vendor/ansible/roles/contiv_network/defaults/main.yml b/vendor/ansible/roles/contiv_network/defaults/main.yml
index b13d8c8..7a6b384 100644
--- a/vendor/ansible/roles/contiv_network/defaults/main.yml
+++ b/vendor/ansible/roles/contiv_network/defaults/main.yml
@@ -7,3 +7,8 @@ contiv_network_mode: "standalone" # Accepted values: standalone, aci
 netplugin_mode: "docker" # Accepted values: docker, kubernetes
 fwd_mode: "bridge" #Accepted values: bridge , routing
+
+contiv_network_version: "v0.1-02-06-2016.14-42-05.UTC"
+contiv_network_tar_file: "netplugin-{{ contiv_network_version }}.tar.bz2"
+contiv_network_src_file: "https://github.com/contiv/netplugin/releases/download/{{ contiv_network_version }}/{{ contiv_network_tar_file }}"
+contiv_network_dest_file: "/tmp/{{ contiv_network_tar_file }}"
diff --git a/vendor/ansible/roles/contiv_network/tasks/main.yml b/vendor/ansible/roles/contiv_network/tasks/main.yml
index 93c6db9..7f2ebb6 100644
--- a/vendor/ansible/roles/contiv_network/tasks/main.yml
+++ b/vendor/ansible/roles/contiv_network/tasks/main.yml
@@ -20,20 +20,21 @@
 - name: download netmaster and netplugin
   get_url:
     validate_certs: "{{ validate_certs }}"
-    url: https://github.com/contiv/netplugin/releases/download/v0.1-01-28-2016.03-55-05.UTC/netplugin-v0.1-01-28-2016.03-55-05.UTC.tar.bz2
-    dest: /tmp/contivnet.tar.bz2
+    url: "{{ contiv_network_src_file }}"
+    dest: "{{ contiv_network_dest_file }}"
+  register: download_result
 
 - name: ensure netplugin directory exists
   file: path=/usr/bin/contiv/netplugin state=directory
 
 - name: install netmaster and netplugin
-  shell: tar vxjf /tmp/contivnet.tar.bz2
+  shell: tar vxjf {{ contiv_network_dest_file }}
   args:
     chdir: /usr/bin/contiv/netplugin
-  creates: netmaster
+  when: download_result | changed
 
 - name: create links for netplugin binaries
-  file: src=/usr/bin/contiv/netplugin/{{ item }} dest=/usr/bin/{{ item }} state=link
+  file: src=/usr/bin/contiv/netplugin/{{ item }} dest=/usr/bin/{{ item }} state=link force=yes
   with_items:
     - netctl
     - netmaster
diff --git a/vendor/ansible/roles/contiv_storage/defaults/main.yml b/vendor/ansible/roles/contiv_storage/defaults/main.yml
new file mode 100644
index 0000000..9c8989b
--- /dev/null
+++ b/vendor/ansible/roles/contiv_storage/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+
+# Role defaults for contiv_storage
+
+contiv_storage_version: "v0.0.0-12-14-2015.00-48-49.UTC"
+contiv_storage_tar_file: "volplugin-{{ contiv_storage_version }}.tar.bz2"
+contiv_storage_src_file: "https://github.com/contiv/volplugin/releases/download/{{ contiv_storage_version }}/{{ contiv_storage_tar_file }}"
+contiv_storage_dest_file: "/tmp/{{ contiv_storage_tar_file }}"
diff --git a/vendor/ansible/roles/contiv_storage/tasks/main.yml b/vendor/ansible/roles/contiv_storage/tasks/main.yml
index e780698..2aab7f6 100644
--- a/vendor/ansible/roles/contiv_storage/tasks/main.yml
+++ b/vendor/ansible/roles/contiv_storage/tasks/main.yml
@@ -4,14 +4,15 @@
 - name: download storage service binaries
   get_url:
     validate_certs: "{{ validate_certs }}"
-    url: https://github.com/contiv/volplugin/releases/download/v0.0.0-12-14-2015.00-48-49.UTC/volplugin-v0.0.0-12-14-2015.00-48-49.UTC.tar.bz2
-    dest: /tmp/contivvol.tar.bz2
+    url: "{{ contiv_storage_src_file }}"
+    dest: "{{ contiv_storage_dest_file }}"
+  register: download_result
 
 - name: install storage service
-  shell: tar vxjf /tmp/contivvol.tar.bz2
+  shell: tar vxjf {{ contiv_storage_dest_file }}
   args:
     chdir: /usr/bin/
-  creates: volmaster
+  when: download_result | changed
 
 - name: copy environment file for volmaster
   copy: src=volmaster dest=/etc/default/volmaster
diff --git a/vendor/ansible/roles/docker/defaults/main.yml b/vendor/ansible/roles/docker/defaults/main.yml
new file mode 100644
index 0000000..1d767ae
--- /dev/null
+++ b/vendor/ansible/roles/docker/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# Default values for docker role
+
+docker_api_port: 2385
+
+docker_version: 1.9.1
diff --git a/vendor/ansible/roles/docker/tasks/main.yml b/vendor/ansible/roles/docker/tasks/main.yml
index fd1b52c..9bd4adf 100644
--- a/vendor/ansible/roles/docker/tasks/main.yml
+++ b/vendor/ansible/roles/docker/tasks/main.yml
@@ -1,9 +1,28 @@
 ---
 # This role contains tasks for configuring and starting docker service
 
-# XXX: figure a way to download a specific docker version
-- name: install docker
-  shell: creates=/usr/bin/docker curl https://get.docker.com | bash
+- name: check docker version
+  shell: docker --version
+  register: docker_installed_version
+  ignore_errors: yes
+  tags:
+    - prebake-for-dev
+
+- name: install docker (debian)
+  shell: curl https://get.docker.com | sed 's/docker-engine/--force-yes docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}/' | bash
+  when: (ansible_os_family == "Debian") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*"))
+  tags:
+    - prebake-for-dev
+
+- name: remove docker (redhat)
+  yum: name=docker-engine state=absent
+  when: (ansible_os_family == "RedHat") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*"))
+  tags:
+    - prebake-for-dev
+
+- name: install docker (redhat)
+  shell: curl https://get.docker.com | sed 's/docker-engine/docker-engine-{{ docker_version }}/' | bash
+  when: (ansible_os_family == "RedHat") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*"))
   tags:
     - prebake-for-dev
 
@@ -35,7 +54,7 @@
 
 # tcp socket service requires docker service to be started after it
 - name: start docker tcp socket service
-  shell: sudo systemctl stop docker && sudo systemctl start docker-tcp.socket && sudo systemctl start docker
+  shell: sudo systemctl daemon-reload && sudo systemctl stop docker && sudo systemctl start docker-tcp.socket && sudo systemctl start docker
   when: (docker_tcp_socket | changed) or not docker_tcp_socket_state.stdout | match("Active.* active")
 
 - name: remove the docker key file, if any. It shall be regenerated by docker on restart
diff --git a/vendor/ansible/roles/docker/templates/docker-svc.j2 b/vendor/ansible/roles/docker/templates/docker-svc.j2
index f5e66f2..8511784 100644
--- a/vendor/ansible/roles/docker/templates/docker-svc.j2
+++ b/vendor/ansible/roles/docker/templates/docker-svc.j2
@@ -6,7 +6,7 @@ Requires=docker.socket
 
 [Service]
 Type=notify
-ExecStart=/usr/bin/docker daemon -H fd:// --cluster-store=etcd://localhost:{{ etcd_client_port1 }}
+ExecStart=/usr/bin/docker daemon -s overlay -H fd:// --cluster-store=etcd://localhost:{{ etcd_client_port1 }}
 MountFlags=slave
 LimitNOFILE=1048576
 LimitNPROC=1048576
diff --git a/vendor/ansible/roles/docker/vars/main.yml b/vendor/ansible/roles/docker/vars/main.yml
deleted file mode 100644
index d755c51..0000000
--- a/vendor/ansible/roles/docker/vars/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# role variable for the swarm service
-
-docker_api_port: 2385
diff --git a/vendor/ansible/roles/etcd/vars/main.yml b/vendor/ansible/roles/etcd/defaults/main.yml
similarity index 100%
rename from vendor/ansible/roles/etcd/vars/main.yml
rename to vendor/ansible/roles/etcd/defaults/main.yml
diff --git a/vendor/ansible/roles/serf/defaults/main.yml b/vendor/ansible/roles/serf/defaults/main.yml
new file mode 100644
index 0000000..37c5a09
--- /dev/null
+++ b/vendor/ansible/roles/serf/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+# role variable for the serf service
+
+serf_discovery_interface: "{{ control_interface }}"
diff --git a/vendor/ansible/roles/serf/tasks/main.yml b/vendor/ansible/roles/serf/tasks/main.yml
index 18d95dc..009f5eb 100644
--- a/vendor/ansible/roles/serf/tasks/main.yml
+++ b/vendor/ansible/roles/serf/tasks/main.yml
@@ -4,10 +4,8 @@
 - name: download serf binary
   get_url:
     validate_certs: "{{ validate_certs }}"
-    url: https://dl.bintray.com/mitchellh/serf/0.6.4_linux_amd64.zip
+    url: https://releases.hashicorp.com/serf/0.6.4/serf_0.6.4_linux_amd64.zip
     dest: /tmp/0.6.4_linux_amd64.zip
-  tags:
-    - prebake-for-dev
 
 - name: install serf
   unarchive:
@@ -15,20 +13,12 @@
     copy: no
     src: /tmp/0.6.4_linux_amd64.zip
     dest: /usr/bin
     creates: /usr/bin/serf
-  tags:
-    - prebake-for-dev
 
 - name: copy the serf start/stop script
   template: src=serf.j2 dest=/usr/bin/serf.sh mode=u=rwx,g=rx,o=rx
-  tags:
-    - prebake-for-dev
 
 - name: copy systemd units for serf
   copy: src=serf.service dest=/etc/systemd/system/serf.service
-  tags:
-    - prebake-for-dev
 
 - name: enable serf to be started on boot-up and start it as well
   service: name=serf state=started enabled=yes
-  tags:
-    - prebake-for-dev
diff --git a/vendor/ansible/roles/serf/vars/main.yml b/vendor/ansible/roles/serf/vars/main.yml
deleted file mode 100644
index 68535ff..0000000
--- a/vendor/ansible/roles/serf/vars/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# role variable for the serf service
-
-serf_discovery_interface: eth1
diff --git a/vendor/ansible/roles/swarm/vars/main.yml b/vendor/ansible/roles/swarm/defaults/main.yml
similarity index 68%
rename from vendor/ansible/roles/swarm/vars/main.yml
rename to vendor/ansible/roles/swarm/defaults/main.yml
index 1d12552..a5bcc84 100644
--- a/vendor/ansible/roles/swarm/vars/main.yml
+++ b/vendor/ansible/roles/swarm/defaults/main.yml
@@ -2,3 +2,4 @@
 # role variable for the swarm service
 #
 swarm_api_port: 2375
+swarm_bootstrap_node_addr: ""
diff --git a/vendor/ansible/roles/swarm/templates/swarm.j2 b/vendor/ansible/roles/swarm/templates/swarm.j2
index 1668185..77e6721 100644
--- a/vendor/ansible/roles/swarm/templates/swarm.j2
+++ b/vendor/ansible/roles/swarm/templates/swarm.j2
@@ -8,13 +8,12 @@ fi
 
 case $1 in
     start)
-        echo starting swarm as {{ run_as }} on {{ node_name }}[{{ node_addr }}]
-        if [[ "{{ run_as }}" == "master" ]]; then
-            /usr/bin/swarm join --advertise={{ node_addr }}:{{ docker_api_port }} etcd://{{ node_addr }}:{{ etcd_client_port1 }} &
-            /usr/bin/swarm manage -H tcp://{{ node_addr }}:{{ swarm_api_port }} etcd://{{ node_addr }}:{{ etcd_client_port1 }}
-        else
-            /usr/bin/swarm join --advertise={{ node_addr }}:{{ docker_api_port }} etcd://{{ node_addr }}:{{ etcd_client_port1 }}
-        fi
+        echo starting swarm on {{ node_name }}[{{ node_addr }}]
+        {% if swarm_bootstrap_node_addr == node_addr -%}
+        /usr/bin/swarm manage -H tcp://{{ node_addr }}:{{ swarm_api_port }} etcd://{{ node_addr }}:{{ etcd_client_port1 }} &
+        {% endif %}
+
+        /usr/bin/swarm join --advertise={{ node_addr }}:{{ docker_api_port }} etcd://{{ node_addr }}:{{ etcd_client_port1 }}
         ;;
 
     *)
diff --git a/vendor/ansible/roles/ucp/defaults/main.yml b/vendor/ansible/roles/ucp/defaults/main.yml
index f2dd8ee..c1266d7 100644
--- a/vendor/ansible/roles/ucp/defaults/main.yml
+++ b/vendor/ansible/roles/ucp/defaults/main.yml
@@ -7,3 +7,4 @@ ucp_remote_dir: "/tmp"
 ucp_instance_id_file: "ucp-instance-id"
 ucp_fingerprint_file: "ucp-fingerprint"
 ucp_fifo_file: "ucp-fifo"
+ucp_bootstrap_node_addr: ""
diff --git a/vendor/ansible/roles/ucp/templates/ucp.j2 b/vendor/ansible/roles/ucp/templates/ucp.j2
index f767f54..41f5789 100644
--- a/vendor/ansible/roles/ucp/templates/ucp.j2
+++ b/vendor/ansible/roles/ucp/templates/ucp.j2
@@ -10,42 +10,42 @@ case $1 in
     start)
         set -e
-        echo starting ucp as {{ run_as }} on {{ node_name }}[{{ node_addr }}]
+        echo starting ucp on {{ node_name }}[{{ node_addr }}]
 
-        if [[ "{{ run_as }}" == "master" ]]; then
-            out=$(/usr/bin/docker run --rm -t --name ucp \
-                -v /var/run/docker.sock:/var/run/docker.sock \
-                docker/ucp install --host-address={{ node_addr }} \
-                --image-version={{ ucp_version }})
-            echo ${out}
+        {% if ucp_bootstrap_node_addr == node_addr -%}
+        out=$(/usr/bin/docker run --rm -t --name ucp \
+            -v /var/run/docker.sock:/var/run/docker.sock \
+            docker/ucp install --host-address={{ node_addr }} \
+            --image-version={{ ucp_version }})
+        echo ${out}
 
-            # copy out the instance ID
-            instanceId=$(echo ${out} | egrep -o 'UCP instance ID: [a-zA-Z0-9:_]*' | \
-                awk --field-separator='UCP instance ID: ' '{print $2}')
-            echo instance-id: $instanceId
-            echo ${instanceId} > "{{ ucp_remote_dir }}"/"{{ ucp_instance_id_file }}"
+        # copy out the instance ID
+        instanceId=$(echo ${out} | egrep -o 'UCP instance ID: [a-zA-Z0-9:_]*' | \
+            awk --field-separator='UCP instance ID: ' '{print $2}')
+        echo instance-id: $instanceId
+        echo ${instanceId} > "{{ ucp_remote_dir }}"/"{{ ucp_instance_id_file }}"
 
-            # copy out the fingerprint.
-            # XXX: we store the output in variable first than redirecting
-            # the output directly to file as we wait on this file to be created. So
-            # redirecting the output let's that task move forward even before the
-            # file contents have been written.
-            # XXX: we need a way for this fingerprint to stick around wherever
-            # contiv/cluster service is running. May be we can save this file on a
-            # distributed filesystem
-            out=$(/usr/bin/docker run --rm -t --name ucp \
-                -v /var/run/docker.sock:/var/run/docker.sock \
-                docker/ucp fingerprint)
-            echo ${out} > "{{ ucp_remote_dir }}"/"{{ ucp_fingerprint_file }}"
-        else
-            /usr/bin/docker run --rm -t --name ucp \
-                -v /var/run/docker.sock:/var/run/docker.sock \
-                -e UCP_ADMIN_USER="admin" -e UCP_ADMIN_PASSWORD="orca" \
-                docker/ucp join --host-address={{ node_addr }} \
-                --image-version={{ ucp_version }} \
-                --url="https://{{ service_vip }}:443" \
-                --fingerprint=`cat "{{ ucp_remote_dir }}"/"{{ ucp_fingerprint_file }}"`
-        fi
+        # copy out the fingerprint.
+        # XXX: we store the output in variable first than redirecting
+        # the output directly to file as we wait on this file to be created. So
+        # redirecting the output let's that task move forward even before the
+        # file contents have been written.
+        # XXX: we need a way for this fingerprint to stick around wherever
+        # contiv/cluster service is running. May be we can save this file on a
+        # distributed filesystem
+        out=$(/usr/bin/docker run --rm -t --name ucp \
+            -v /var/run/docker.sock:/var/run/docker.sock \
+            docker/ucp fingerprint)
+        echo ${out} > "{{ ucp_remote_dir }}"/"{{ ucp_fingerprint_file }}"
+        {% else -%}
+        /usr/bin/docker run --rm -t --name ucp \
+            -v /var/run/docker.sock:/var/run/docker.sock \
+            -e UCP_ADMIN_USER="admin" -e UCP_ADMIN_PASSWORD="orca" \
+            docker/ucp join --host-address={{ node_addr }} \
+            --image-version={{ ucp_version }} \
+            --url="https://{{ service_vip }}:443" \
+            --fingerprint=`cat "{{ ucp_remote_dir }}"/"{{ ucp_fingerprint_file }}"`
+        {% endif %}
 
         # now just sleep to keep the service up
         mkfifo "{{ ucp_remote_dir }}"/"{{ ucp_fifo_file }}"
diff --git a/vendor/ansible/roles/vagrant/tasks/main.yml b/vendor/ansible/roles/vagrant/tasks/main.yml
index cecea58..0b9f65d 100644
--- a/vendor/ansible/roles/vagrant/tasks/main.yml
+++ b/vendor/ansible/roles/vagrant/tasks/main.yml
@@ -20,3 +20,7 @@
     group: vagrant
     mode: 0755
   when: vagrant_exist
+
+- name: add vagrant user to docker group
+  command: gpasswd -a vagrant docker
+  when: vagrant_exist
diff --git a/vendor/ansible/site.yml b/vendor/ansible/site.yml
index 18f8b51..c82e54c 100644
--- a/vendor/ansible/site.yml
+++ b/vendor/ansible/site.yml
@@ -12,7 +12,6 @@
   environment: '{{ env }}'
   roles:
     - { role: base }
-    - { role: serf }
     - { role: dev }
     - { role: test }
 
@@ -29,6 +28,16 @@
     - { role: ceph-osd, mon_group_name: volplugin-test, osd_group_name: volplugin-test }
     - { role: swarm, run_as: master }
 
+# cluster-node hosts corresponds to the hosts that shall be managed by cluster manager.
+# This host group shall provision a host with all required packages needed to make
+# the node ready to be managed by cluster-manager
+- hosts: cluster-node
+  sudo: true
+  environment: '{{ env }}'
+  roles:
+    - { role: base }
+    - { role: serf }
+
 # cluster-control hosts corresponds to the first machine in the cluster that is provisioned
 # to bootstrap the cluster by starting cluster manager and inventory database (collins)
 - hosts: cluster-control