Merge pull request #47 from mapuri/ansible
vendor the recent ansible repo
mapuri committed Feb 18, 2016
2 parents acbe606 + 61cfac2 commit b93389c
Showing 26 changed files with 165 additions and 96 deletions.
32 changes: 25 additions & 7 deletions Vagrantfile
@@ -49,6 +49,9 @@ ansible_extra_vars = {
"env" => host_env,
"service_vip" => "#{base_ip}252",
"validate_certs" => "no",
"control_interface" => "eth1",
"netplugin_if" => "eth2",
"docker_version" => "1.10.1",
}
ansible_extra_vars = ansible_extra_vars.merge(ceph_vars)

@@ -79,17 +82,21 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# use a private key from within the repo for demo environment. This is used for
# pushing configuration
config.ssh.private_key_path = "./management/src/demo/files/insecure_private_key"
num_nodes.times do |n|
(0..num_nodes-1).reverse_each do |n|
node_name = node_names[n]
node_addr = node_ips[n]
node_vars = {
"etcd_master_addr" => node_ips[0],
"etcd_master_name" => node_names[0],
"swarm_bootstrap_node_addr" => node_ips[0],
"ucp_bootstrap_node_addr" => node_ips[0],
}
config.vm.define node_name do |node|
node.vm.hostname = node_name
# create an interface for cluster (control) traffic
node.vm.network :private_network, ip: node_addr, virtualbox__intnet: "true"
# create an interface for cluster (data) traffic
node.vm.network :private_network, ip: "0.0.0.0", virtualbox__intnet: "true"
node.vm.provider "virtualbox" do |v|
# give enough ram for docker to run fine
v.customize ['modifyvm', :id, '--memory', "4096"]
@@ -99,7 +106,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# which are used by default by virtualbox
v.customize ['modifyvm', :id, '--nictype1', 'virtio']
v.customize ['modifyvm', :id, '--nictype2', 'virtio']
v.customize ['modifyvm', :id, '--nictype3', 'virtio']
v.customize ['modifyvm', :id, '--nicpromisc2', 'allow-all']
v.customize ['modifyvm', :id, '--nicpromisc3', 'allow-all']
# create disks for ceph
(0..1).each do |d|
disk_path = "disk-#{n}-#{d}"
@@ -118,6 +127,13 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
'--medium', vdi_disk_path]
end
end

# provision base packages needed for cluster management
if ansible_groups["cluster-node"] == nil then
ansible_groups["cluster-node"] = [ ]
end
ansible_groups["cluster-node"] << node_name

# The first vm simulates the first manually **configured** node
# in a cluster
if n == 0 then
@@ -134,19 +150,16 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

# add this node to cluster-control host group
ansible_groups["cluster-control"] = [node_name]
node.vm.provision "shell" do |s|
s.inline = shell_provision
end
end

if service_init
# Share anything in `shared` to '/shared' on the cluster hosts.
node.vm.synced_folder "shared", "/shared"

ansible_extra_vars = ansible_extra_vars.merge(node_vars)
if n == 0 then
if n <= 2 then
# if we are bringing up services as part of the cluster, then start
# master services on the first vm
# master services on the first three vms
if ansible_groups["service-master"] == nil then
ansible_groups["service-master"] = [ ]
end
@@ -162,13 +175,18 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
end

# Run the provisioner after all machines are up
if n == (num_nodes - 1) then
if n == 0 then
node.vm.provision 'ansible' do |ansible|
ansible.groups = ansible_groups
ansible.playbook = ansible_playbook
ansible.extra_vars = ansible_extra_vars
ansible.limit = 'all'
end
# run shell provisioner for first node to correctly mount dev
# binaries if needed
node.vm.provision "shell" do |s|
s.inline = shell_provision
end
end
end
end
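The loop rewrite and the provisioner move work together: Vagrant boots VMs in the order their `config.vm.define` blocks appear, so iterating the node indices in reverse makes node 0 the last machine to come up. Attaching the single Ansible provisioner to `n == 0` (instead of the old `n == (num_nodes - 1)`) therefore still means "run the playbook once, after every VM is up", and `ansible.limit = 'all'` targets the whole cluster from that one run. A minimal sketch of the pattern, with hypothetical node names and playbook path:

```ruby
# Minimal sketch: Vagrant boots VMs in definition order, so defining the
# nodes in reverse makes node 0 the last one brought up.
Vagrant.configure("2") do |config|
  nodes = ["node1", "node2", "node3"]
  (0..nodes.length - 1).reverse_each do |n|
    config.vm.define nodes[n] do |node|
      node.vm.hostname = nodes[n]
      # node 0 is defined (and therefore booted) last; running the Ansible
      # provisioner here with limit 'all' provisions the whole cluster once
      if n == 0
        node.vm.provision "ansible" do |ansible|
          ansible.playbook = "site.yml" # hypothetical playbook path
          ansible.limit = "all"
        end
      end
    end
  end
end
```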
4 changes: 4 additions & 0 deletions management/src/clusterm/manager/utils.go
@@ -3,6 +3,7 @@ package manager
import (
"fmt"

log "github.com/Sirupsen/logrus"
"github.com/contiv/cluster/management/src/inventory"
)

@@ -34,6 +35,7 @@ func (m *Manager) isMasterNode(name string) (bool, error) {
if n.cInfo == nil {
return false, nodeConfigNotExistsError(name)
}
log.Debugf("node: %q, group: %q", name, n.cInfo.GetGroup())
return n.cInfo.GetGroup() == ansibleMasterGroupName, nil
}

@@ -45,6 +47,7 @@ func (m *Manager) isWorkerNode(name string) (bool, error) {
if n.cInfo == nil {
return false, nodeConfigNotExistsError(name)
}
log.Debugf("node: %q, group: %q", name, n.cInfo.GetGroup())
return n.cInfo.GetGroup() == ansibleWorkerGroupName, nil
}

@@ -57,5 +60,6 @@ func (m *Manager) isDiscoveredAndAllocatedNode(name string) (bool, error) {
return false, nodeInventoryNotExistsError(name)
}
status, state := n.iInfo.GetStatus()
log.Debugf("node: %q, status: %q, state: %q", name, status, state)
return state == inventory.Discovered && status == inventory.Allocated, nil
}
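The added `log.Debugf` traces only appear when logrus runs at debug verbosity; its default level is `Info`. A minimal sketch of raising the level (how clusterm actually exposes this toggle is not shown in the diff):

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

func main() {
	// Debugf calls are suppressed at the default Info level; raise the
	// level to see the node/group/status traces added in this commit.
	log.SetLevel(log.DebugLevel)
	log.Debugf("node: %q, group: %q", "node1", "service-master")
}
```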
8 changes: 4 additions & 4 deletions management/src/systemtests/cli_test.go
@@ -112,9 +112,9 @@ func (s *CliTestSuite) TearDownSuite(c *C) {
func (s *CliTestSuite) nukeNodeInCollins(c *C, nodeName string) {
// Ignore errors here as asset might not exist.
out, err := s.tbn1.RunCommandWithOutput(fmt.Sprintf(`curl --basic -u blake:admin:first -d status="Decommissioned" -d reason="test" -X POST http://localhost:9000/api/asset/%s`, nodeName))
c.Logf("asset decommission result: %s. Output: %s", err, out)
c.Logf("asset decommission result: %v. Output: %s", err, out)
out, err = s.tbn1.RunCommandWithOutput(fmt.Sprintf(`curl --basic -u blake:admin:first -d reason=test -X DELETE http://localhost:9000/api/asset/%s`, nodeName))
c.Logf("asset deletion result: %s. Output: %s", err, out)
c.Logf("asset deletion result: %v. Output: %s", err, out)
}

func (s *CliTestSuite) SetUpTest(c *C) {
@@ -125,9 +125,9 @@ func (s *CliTestSuite) SetUpTest(c *C) {
//cleanup an existing dummy file, if any that our test ansible will create. Ignore error, if any.
file := dummyAnsibleFile
out, err := s.tbn1.RunCommandWithOutput(fmt.Sprintf("rm %s", file))
c.Logf("dummy file cleanup. Error: %s, Output: %s", err, out)
c.Logf("dummy file cleanup. Error: %v, Output: %s", err, out)
out, err = s.tbn2.RunCommandWithOutput(fmt.Sprintf("rm %s", file))
c.Logf("dummy file cleanup. Error: %s, Output: %s", err, out)
c.Logf("dummy file cleanup. Error: %v, Output: %s", err, out)

// XXX: we cleanup up assets from collins instead of restarting it to save test time.
for _, name := range validNodeNames {
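The `%s` → `%v` switch in these log lines matters because `err` is often nil here (the cleanup commands are allowed to fail or succeed silently): `fmt` renders a nil error as `<nil>` under `%v`, while `%s` on a nil value produces a `%!s(<nil>)` formatting artifact in the log. A small illustration:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	var err error // nil: the common case when the curl/rm commands succeed
	fmt.Printf("result: %s\n", err) // result: %!s(<nil>)
	fmt.Printf("result: %v\n", err) // result: <nil>

	err = errors.New("exit status 1")
	fmt.Printf("result: %v\n", err) // result: exit status 1
}
```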
1 change: 1 addition & 0 deletions vendor/ansible/Vagrantfile
@@ -23,6 +23,7 @@ ansible_tags = ENV["CONTIV_ANSIBLE_TAGS"] || "prebake-for-dev"
ansible_extra_vars = {
"env" => host_env,
"validate_certs" => "no",
"control_interface" => "eth1",
}

puts "Host environment: #{host_env}"
2 changes: 1 addition & 1 deletion vendor/ansible/roles/base/tasks/os_agnostic_tasks.yml
@@ -1,7 +1,7 @@
- name: download consul binary
get_url:
validate_certs: "{{ validate_certs }}"
url: https://dl.bintray.com/mitchellh/consul/0.5.2_linux_amd64.zip
url: https://releases.hashicorp.com/consul/0.5.2/consul_0.5.2_linux_amd64.zip
dest: /tmp/consul.zip

- name: install consul
11 changes: 11 additions & 0 deletions vendor/ansible/roles/contiv_cluster/defaults/main.yml
@@ -0,0 +1,11 @@
---
# role variable for the cluster manager service

collins_image: contiv/collins:01_25_2016
collins_host_port: 9000
collins_guest_port: 9000

contiv_cluster_version: "v0.0.0-01-05-2016.22-21-32.UTC"
contiv_cluster_tar_file: "cluster-{{ contiv_cluster_version }}.tar.bz2"
contiv_cluster_src_file: "https://github.com/contiv/cluster/releases/download/{{ contiv_cluster_version }}/{{ contiv_cluster_tar_file }}"
contiv_cluster_dest_file: "/tmp/{{ contiv_cluster_tar_file }}"
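Moving the release coordinates into `defaults/main.yml` means a deployment can pin a different clusterm build without touching the task file: role defaults sit at the lowest variable precedence, so any inventory, play, or extra var wins. For example, a hypothetical `group_vars/all.yml` entry is enough; the tar file name and download URL are derived from it:

```yaml
# Hypothetical override; URL and file names follow from the version string.
contiv_cluster_version: "v0.0.0-02-20-2016.10-00-00.UTC"
```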
9 changes: 5 additions & 4 deletions vendor/ansible/roles/contiv_cluster/tasks/main.yml
@@ -22,15 +22,16 @@
- name: download clusterm
get_url:
validate_certs: "{{ validate_certs }}"
url: https://github.com/contiv/cluster/releases/download/v0.0.0-01-05-2016.22-21-32.UTC/cluster-v0.0.0-01-05-2016.22-21-32.UTC.tar.bz2
dest: /tmp/cluster-v0.0.0-01-05-2016.22-21-32.UTC.tar.bz2
url: "{{ contiv_cluster_src_file }}"
dest: "{{ contiv_cluster_dest_file }}"
force: no
register: download_result

- name: install clusterm
shell: tar vxjf /tmp/cluster-v0.0.0-01-05-2016.22-21-32.UTC.tar.bz2
shell: tar vxjf {{ contiv_cluster_dest_file }}
args:
chdir: /usr/bin/
creates: clusterm
when: download_result | changed

- name: copy environment file for clusterm
copy: src=clusterm dest=/etc/default/clusterm
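The download/install pair above is a standard idempotent fetch-and-unpack pattern: `force: no` keeps an already-downloaded archive, `register` records whether `get_url` changed anything, `when: download_result | changed` (the pre-2.x test syntax used throughout this repo) guards the unpack, and `creates:` makes the `tar` step a no-op on reruns. A generic sketch with hypothetical names:

```yaml
- name: download a release tarball
  get_url:
    url: "https://example.com/releases/{{ app_version }}/app.tar.bz2"
    dest: /tmp/app.tar.bz2
    force: no              # keep the cached copy if it is already there
  register: download_result

- name: unpack into /usr/bin
  shell: tar vxjf /tmp/app.tar.bz2
  args:
    chdir: /usr/bin/
    creates: app           # skip if the binary is already unpacked
  when: download_result | changed
```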
6 changes: 0 additions & 6 deletions vendor/ansible/roles/contiv_cluster/vars/main.yml

This file was deleted.

5 changes: 5 additions & 0 deletions vendor/ansible/roles/contiv_network/defaults/main.yml
@@ -7,3 +7,8 @@
contiv_network_mode: "standalone" # Accepted values: standalone, aci
netplugin_mode: "docker" # Accepted values: docker, kubernetes
fwd_mode: "bridge" # Accepted values: bridge, routing

contiv_network_version: "v0.1-02-06-2016.14-42-05.UTC"
contiv_network_tar_file: "netplugin-{{ contiv_network_version }}.tar.bz2"
contiv_network_src_file: "https://github.com/contiv/netplugin/releases/download/{{ contiv_network_version }}/{{ contiv_network_tar_file }}"
contiv_network_dest_file: "/tmp/{{ contiv_network_tar_file }}"
11 changes: 6 additions & 5 deletions vendor/ansible/roles/contiv_network/tasks/main.yml
@@ -20,20 +20,21 @@
- name: download netmaster and netplugin
get_url:
validate_certs: "{{ validate_certs }}"
url: https://github.com/contiv/netplugin/releases/download/v0.1-01-28-2016.03-55-05.UTC/netplugin-v0.1-01-28-2016.03-55-05.UTC.tar.bz2
dest: /tmp/contivnet.tar.bz2
url: "{{ contiv_network_src_file }}"
dest: "{{ contiv_network_dest_file }}"
register: download_result

- name: ensure netplugin directory exists
file: path=/usr/bin/contiv/netplugin state=directory

- name: install netmaster and netplugin
shell: tar vxjf /tmp/contivnet.tar.bz2
shell: tar vxjf {{ contiv_network_dest_file }}
args:
chdir: /usr/bin/contiv/netplugin
creates: netmaster
when: download_result | changed

- name: create links for netplugin binaries
file: src=/usr/bin/contiv/netplugin/{{ item }} dest=/usr/bin/{{ item }} state=link
file: src=/usr/bin/contiv/netplugin/{{ item }} dest=/usr/bin/{{ item }} state=link force=yes
with_items:
- netctl
- netmaster
8 changes: 8 additions & 0 deletions vendor/ansible/roles/contiv_storage/defaults/main.yml
@@ -0,0 +1,8 @@
---

# Role defaults for contiv_storage

contiv_storage_version: "v0.0.0-12-14-2015.00-48-49.UTC"
contiv_storage_tar_file: "volplugin-{{ contiv_storage_version }}.tar.bz2"
contiv_storage_src_file: "https://github.com/contiv/volplugin/releases/download/{{ contiv_storage_version }}/{{ contiv_storage_tar_file }}"
contiv_storage_dest_file: "/tmp/{{ contiv_storage_tar_file }}"
9 changes: 5 additions & 4 deletions vendor/ansible/roles/contiv_storage/tasks/main.yml
@@ -4,14 +4,15 @@
- name: download storage service binaries
get_url:
validate_certs: "{{ validate_certs }}"
url: https://github.com/contiv/volplugin/releases/download/v0.0.0-12-14-2015.00-48-49.UTC/volplugin-v0.0.0-12-14-2015.00-48-49.UTC.tar.bz2
dest: /tmp/contivvol.tar.bz2
url: "{{ contiv_storage_src_file }}"
dest: "{{ contiv_storage_dest_file }}"
register: download_result

- name: install storage service
shell: tar vxjf /tmp/contivvol.tar.bz2
shell: tar vxjf {{ contiv_storage_dest_file }}
args:
chdir: /usr/bin/
creates: volmaster
when: download_result | changed

- name: copy environment file for volmaster
copy: src=volmaster dest=/etc/default/volmaster
6 changes: 6 additions & 0 deletions vendor/ansible/roles/docker/defaults/main.yml
@@ -0,0 +1,6 @@
---
# Default values for docker role

docker_api_port: 2385

docker_version: 1.9.1
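`docker_version: 1.9.1` is only the role default. The Vagrantfile change at the top of this commit injects `"docker_version" => "1.10.1"` through `ansible_extra_vars`, and extra vars outrank role defaults, so the demo cluster gets 1.10.1 while other consumers of the role keep 1.9.1. A hypothetical play-level override has the same effect:

```yaml
# Sketch: play vars (like extra vars) override the role default of 1.9.1.
- hosts: cluster-node
  vars:
    docker_version: "1.10.1"
  roles:
    - docker
```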
27 changes: 23 additions & 4 deletions vendor/ansible/roles/docker/tasks/main.yml
@@ -1,9 +1,28 @@
---
# This role contains tasks for configuring and starting docker service

# XXX: figure a way to download a specific docker version
- name: install docker
shell: creates=/usr/bin/docker curl https://get.docker.com | bash
- name: check docker version
shell: docker --version
register: docker_installed_version
ignore_errors: yes
tags:
- prebake-for-dev

- name: install docker (debian)
shell: curl https://get.docker.com | sed 's/docker-engine/--force-yes docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}/' | bash
when: (ansible_os_family == "Debian") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*"))
tags:
- prebake-for-dev

- name: remove docker (redhat)
yum: name=docker-engine state=absent
when: (ansible_os_family == "RedHat") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*"))
tags:
- prebake-for-dev

- name: install docker (redhat)
shell: curl https://get.docker.com | sed 's/docker-engine/docker-engine-{{ docker_version }}/' | bash
when: (ansible_os_family == "RedHat") and not (docker_installed_version.stdout | match("Docker version {{ docker_version }}, build.*"))
tags:
- prebake-for-dev
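The Debian branch patches get.docker.com's install script in flight: the `sed` rewrites its internal `docker-engine` package reference to `--force-yes docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}`, pinning an exact build such as `docker-engine=1.9.1-0~trusty`. For reference, a hedged sketch of the same pin done directly with the `apt` module, assuming Docker's apt repository were already configured:

```yaml
# Sketch only: equivalent version pin via the apt module instead of
# patching the get.docker.com script; assumes the docker apt repo exists.
- name: install pinned docker-engine (debian)
  apt:
    name: "docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}"
    state: present
    force: yes    # counterpart of --force-yes in the patched script
  when: ansible_os_family == "Debian"
```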

@@ -35,7 +54,7 @@

# tcp socket service requires docker service to be started after it
- name: start docker tcp socket service
shell: sudo systemctl stop docker && sudo systemctl start docker-tcp.socket && sudo systemctl start docker
shell: sudo systemctl daemon-reload && sudo systemctl stop docker && sudo systemctl start docker-tcp.socket && sudo systemctl start docker
when: (docker_tcp_socket | changed) or not docker_tcp_socket_state.stdout | match("Active.* active")

- name: remove the docker key file, if any. It shall be regenerated by docker on restart
2 changes: 1 addition & 1 deletion vendor/ansible/roles/docker/templates/docker-svc.j2
@@ -6,7 +6,7 @@ Requires=docker.socket

[Service]
Type=notify
ExecStart=/usr/bin/docker daemon -H fd:// --cluster-store=etcd://localhost:{{ etcd_client_port1 }}
ExecStart=/usr/bin/docker daemon -s overlay -H fd:// --cluster-store=etcd://localhost:{{ etcd_client_port1 }}
MountFlags=slave
LimitNOFILE=1048576
LimitNPROC=1048576
4 changes: 0 additions & 4 deletions vendor/ansible/roles/docker/vars/main.yml

This file was deleted.

File renamed without changes.
4 changes: 4 additions & 0 deletions vendor/ansible/roles/serf/defaults/main.yml
@@ -0,0 +1,4 @@
---
# role variable for the serf service

serf_discovery_interface: "{{ control_interface }}"
12 changes: 1 addition & 11 deletions vendor/ansible/roles/serf/tasks/main.yml
@@ -4,31 +4,21 @@
- name: download serf binary
get_url:
validate_certs: "{{ validate_certs }}"
url: https://dl.bintray.com/mitchellh/serf/0.6.4_linux_amd64.zip
url: https://releases.hashicorp.com/serf/0.6.4/serf_0.6.4_linux_amd64.zip
dest: /tmp/0.6.4_linux_amd64.zip
tags:
- prebake-for-dev

- name: install serf
unarchive:
copy: no
src: /tmp/0.6.4_linux_amd64.zip
dest: /usr/bin
creates: /usr/bin/serf
tags:
- prebake-for-dev

- name: copy the serf start/stop script
template: src=serf.j2 dest=/usr/bin/serf.sh mode=u=rwx,g=rx,o=rx
tags:
- prebake-for-dev

- name: copy systemd units for serf
copy: src=serf.service dest=/etc/systemd/system/serf.service
tags:
- prebake-for-dev

- name: enable serf to be started on boot-up and start it as well
service: name=serf state=started enabled=yes
tags:
- prebake-for-dev
4 changes: 0 additions & 4 deletions vendor/ansible/roles/serf/vars/main.yml

This file was deleted.

@@ -2,3 +2,4 @@
# role variable for the swarm service
#
swarm_api_port: 2375
swarm_bootstrap_node_addr: ""
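Defaulting `swarm_bootstrap_node_addr` to an empty string keeps the variable safe to evaluate when no bootstrap address is supplied; in this commit the Vagrantfile fills it per node with `node_ips[0]`. A purely hypothetical sketch (not the actual swarm role) of how a task might branch on it:

```yaml
# Hypothetical usage: treat the node whose address matches the injected
# bootstrap address as the swarm manager; skip when the default "" is left.
- name: start swarm manager on the bootstrap node
  debug:
    msg: "this node would bootstrap the swarm manager"
  when: swarm_bootstrap_node_addr != "" and swarm_bootstrap_node_addr == ansible_ssh_host
```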