From a0b6dcc8567aa533a05ea325409436a3acee77df Mon Sep 17 00:00:00 2001 From: root Date: Wed, 13 Oct 2021 17:47:20 +0800 Subject: [PATCH] =?UTF-8?q?=E6=9B=B4=E6=96=B0=20k8s=20version=20=E5=88=B0?= =?UTF-8?q?=201.22.2=EF=BC=8C=E9=80=82=E9=85=8D=20centos8,=20=E4=B8=8D?= =?UTF-8?q?=E5=85=BC=E5=AE=B9=20centos7,=20=E5=A2=9E=E5=8A=A0=20keepalived?= =?UTF-8?q?+haproxy=20=E9=AB=98=E5=8F=AF=E7=94=A8=E9=85=8D=E7=BD=AE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- README.md | 5 ++ ansible.hosts.ha.publicnetwork.tpl | 65 ++++++++++++++++ ansible.hosts.ha.tpl | 14 ++-- ansible.hosts.ha.vip.tpl | 75 +++++++++++++++++++ install_init/installAnsible.sh | 7 +- playbooks/k8s/k8s.yml | 10 +++ read-ansible-hosts.py | 2 +- roles/ha-loadbalance/.travis.yml | 29 +++++++ roles/ha-loadbalance/README.md | 38 ++++++++++ roles/ha-loadbalance/defaults/main.yml | 2 + roles/ha-loadbalance/handlers/main.yml | 2 + roles/ha-loadbalance/meta/main.yml | 53 +++++++++++++ .../tasks/keepalived-haproxy.yml | 60 +++++++++++++++ roles/ha-loadbalance/tasks/main.yml | 4 + .../templates/check_apiserver.sh.j2 | 12 +++ roles/ha-loadbalance/templates/haproxy.cfg.j2 | 51 +++++++++++++ .../ha-loadbalance/templates/haproxy.yaml.j2 | 26 +++++++ .../templates/keepalived.conf.j2 | 42 +++++++++++ .../templates/keepalived.yaml.j2 | 31 ++++++++ roles/ha-loadbalance/tests/inventory | 2 + roles/ha-loadbalance/tests/test.yml | 5 ++ roles/ha-loadbalance/vars/main.yml | 2 + roles/host-init/tasks/docker.yml | 7 ++ roles/host-init/tasks/installKubeadm.yml | 1 + roles/host-init/tasks/ipvs.yml | 2 +- roles/host-init/tasks/main.yml | 4 +- roles/host-init/tasks/update.yml | 3 + roles/host-init/tasks/yum.yml | 5 -- roles/k8s-masters/tasks/joinControlPlane.yml | 10 +++ .../templates/kubeadm-init.yaml.j2 | 10 +++ 30 files changed, 562 insertions(+), 17 deletions(-) create mode 100644 ansible.hosts.ha.publicnetwork.tpl create mode 100644 ansible.hosts.ha.vip.tpl create mode 100644 
roles/ha-loadbalance/.travis.yml create mode 100644 roles/ha-loadbalance/README.md create mode 100644 roles/ha-loadbalance/defaults/main.yml create mode 100644 roles/ha-loadbalance/handlers/main.yml create mode 100644 roles/ha-loadbalance/meta/main.yml create mode 100644 roles/ha-loadbalance/tasks/keepalived-haproxy.yml create mode 100644 roles/ha-loadbalance/tasks/main.yml create mode 100644 roles/ha-loadbalance/templates/check_apiserver.sh.j2 create mode 100644 roles/ha-loadbalance/templates/haproxy.cfg.j2 create mode 100644 roles/ha-loadbalance/templates/haproxy.yaml.j2 create mode 100644 roles/ha-loadbalance/templates/keepalived.conf.j2 create mode 100644 roles/ha-loadbalance/templates/keepalived.yaml.j2 create mode 100644 roles/ha-loadbalance/tests/inventory create mode 100644 roles/ha-loadbalance/tests/test.yml create mode 100644 roles/ha-loadbalance/vars/main.yml create mode 100644 roles/host-init/tasks/update.yml diff --git a/README.md b/README.md index 54df681..240be77 100644 --- a/README.md +++ b/README.md @@ -1 +1,6 @@ # ansible-k8s + +ansible-k8s version|os|k8s version +-----|-----|----- +v0.2.0|CentOS 8|1.22.2 +v0.1.0|CentOS 7|1.21.4 diff --git a/ansible.hosts.ha.publicnetwork.tpl b/ansible.hosts.ha.publicnetwork.tpl new file mode 100644 index 0000000..7a9e1be --- /dev/null +++ b/ansible.hosts.ha.publicnetwork.tpl @@ -0,0 +1,65 @@ +[k8sCluster:children] +masters +nodes +install +new_nodes + +# Set variables common for all k8s-cluster hosts +[k8sCluster:vars] +# SSH user, this user should allow ssh based auth without requiring a password +ansible_ssh_user=root +ansible_port=22 + +# If ansible_ssh_user is not root, ansible_become must be set to true +ansible_become=false + +# 是否更新操作系统及内核 +is_system_update=True + +# 默认节点不是公网节点 +public_network_node = True + +# 是否开启 flannel +flannel_enable=True + +# api server 域名 +master_vip="172.26.181.239" +master_vip_advertise_address="x.x.x.x" +node_domain=solarfs.k8s +install_domain=install.{{node_domain}} 
+api_server_domain="api-server.{{node_domain}}" +api_server_port="6443" + +# k8s 版本 +k8s_version=1.21.4 +# 定义外部镜像仓库 +registry_domain=docker.io +registry_repo="{{registry_domain}}" +kubeadm_registry_repo="registry.cn-hangzhou.aliyuncs.com" +coredns_image_repo="{{registry_repo}}/coredns" +coredns_image_tag="1.8.4" +flannel_image_repo="quay.io" +flannel_image_tag="v0.14.0" + +# subnet +service_subnet=10.96.0.0/12 +pod_subnet=10.128.0.0/16 + +# helm +helm_binary_md5=e4500993ba21e5e6bdfbc084b4342025 +helm_binary_url=https://pnode.solarfs.io/dn/file/{{helm_binary_md5}}/helm-v3.6.0-linux-amd64.tar.gz + +[install] +master1.solarfs.k8s + +[masters] +master1.solarfs.k8s ansible_host=172.26.181.236 advertise_address="x.x.x.x" +master2.solarfs.k8s ansible_host=172.26.181.237 advertise_address="x.x.x.x" +master3.solarfs.k8s ansible_host=172.26.181.238 advertise_address="x.x.x.x" + +[nodes] +node1.solarfs.k8s ansible_host=172.26.181.240 advertise_address="x.x.x.x" +node2.solarfs.k8s ansible_host=x.x.x.x advertise_address="x.x.x.x" + +[new_nodes] +node3.solarfs.k8s ansible_host=x.x.x.x advertise_address="x.x.x.x" diff --git a/ansible.hosts.ha.tpl b/ansible.hosts.ha.tpl index 7a9e1be..ededcd8 100644 --- a/ansible.hosts.ha.tpl +++ b/ansible.hosts.ha.tpl @@ -17,7 +17,7 @@ ansible_become=false is_system_update=True # 默认节点不是公网节点 -public_network_node = True +public_network_node = False # 是否开启 flannel flannel_enable=True @@ -53,13 +53,13 @@ helm_binary_url=https://pnode.solarfs.io/dn/file/{{helm_binary_md5}}/helm-v3.6.0 master1.solarfs.k8s [masters] -master1.solarfs.k8s ansible_host=172.26.181.236 advertise_address="x.x.x.x" -master2.solarfs.k8s ansible_host=172.26.181.237 advertise_address="x.x.x.x" -master3.solarfs.k8s ansible_host=172.26.181.238 advertise_address="x.x.x.x" +master1.solarfs.k8s ansible_host=172.26.181.236 +master2.solarfs.k8s ansible_host=172.26.181.237 +master3.solarfs.k8s ansible_host=172.26.181.238 [nodes] -node1.solarfs.k8s ansible_host=172.26.181.240 
advertise_address="x.x.x.x" -node2.solarfs.k8s ansible_host=x.x.x.x advertise_address="x.x.x.x" +node1.solarfs.k8s ansible_host=172.26.181.240 +node2.solarfs.k8s ansible_host=x.x.x.x [new_nodes] -node3.solarfs.k8s ansible_host=x.x.x.x advertise_address="x.x.x.x" +node3.solarfs.k8s ansible_host=x.x.x.x diff --git a/ansible.hosts.ha.vip.tpl b/ansible.hosts.ha.vip.tpl new file mode 100644 index 0000000..7b3cd66 --- /dev/null +++ b/ansible.hosts.ha.vip.tpl @@ -0,0 +1,75 @@ +[k8sCluster:children] +masters +nodes +install +new_nodes + +# Set variables common for all k8s-cluster hosts +[k8sCluster:vars] +# SSH user, this user should allow ssh based auth without requiring a password +ansible_ssh_user=root +#ansible_ssh_pass=123456 +ansible_port=22 + +# If ansible_ssh_user is not root, ansible_become must be set to true +ansible_become=false + +# 是否更新操作系统及内核 +is_system_update=True + +# 默认节点不是公网节点 +public_network_node = False + +# 是否开启 flannel +flannel_enable=True + +# k8s 版本 +k8s_version=1.22.2 +# 定义外部镜像仓库 +registry_domain=docker.io +registry_repo="{{registry_domain}}" +kubeadm_registry_repo="registry.cn-hangzhou.aliyuncs.com" +coredns_image_repo="{{registry_repo}}/coredns" +coredns_image_tag="1.8.6" +flannel_image_repo="quay.io" +flannel_image_tag="v0.14.0" + +# subnet +service_subnet=10.96.0.0/12 +pod_subnet=10.128.0.0/16 + +# api server +master_vip="172.16.92.250" +master_vip_advertise_address="172.16.92.250" +node_domain=solarfs.k8s +install_domain=install.{{node_domain}} +api_server_domain="api-server.{{node_domain}}" +api_server_src_port="6443" +api_server_port="8443" +haproxy_image="{{registry_repo}}/library/haproxy:2.1.4" +# keepalived +keepalived_haproxy_enabled=True +keepalived_image="{{registry_repo}}/osixia/keepalived:2.0.17" +# keepalived router id , 不同集群 id 不同 +keepalived_router_id=250 +keepalived_auth_pass=solarfs{{keepalived_router_id}} + +# helm +helm_binary_md5=e4500993ba21e5e6bdfbc084b4342025 
+helm_binary_url=https://pnode.solarfs.io/dn/file/{{helm_binary_md5}}/helm-v3.6.0-linux-amd64.tar.gz + +[install] +master1.solarfs.k8s + +[masters] +master1.solarfs.k8s ansible_host=172.16.188.11 +master2.solarfs.k8s ansible_host=172.16.94.181 +master3.solarfs.k8s ansible_host=172.16.241.26 + +[nodes] +logging1.solarfs.k8s ansible_host=172.16.13.77 +logging2.solarfs.k8s ansible_host=172.16.36.25 +logging3.solarfs.k8s ansible_host=172.16.115.194 + +[new_nodes] +#node3.solarfs.k8s ansible_host=x.x.x.x diff --git a/install_init/installAnsible.sh b/install_init/installAnsible.sh index f32693c..5ec7af8 100755 --- a/install_init/installAnsible.sh +++ b/install_init/installAnsible.sh @@ -5,7 +5,7 @@ BASE_DIR=$(cd `dirname $0` && pwd) cd $BASE_DIR . ../config.cfg - +. /etc/os-release # install ansible installAnsible(){ @@ -14,7 +14,10 @@ installAnsible(){ if [ "$is_offline" == "True" ];then yum --disablerepo=\* --enablerepo=offline-k8s* install -y ansible pyOpenSSL else - yum install -y ansible pyOpenSSL + yum install -y ansible + if [ "$VERSION_ID" == "7" ];then + yum install -y pyOpenSSL + fi fi # 配置/etc/ansible/ansible.cfg \cp -f ../ansible.cfg /etc/ansible/ansible.cfg diff --git a/playbooks/k8s/k8s.yml b/playbooks/k8s/k8s.yml index f6058f1..115c532 100644 --- a/playbooks/k8s/k8s.yml +++ b/playbooks/k8s/k8s.yml @@ -4,12 +4,22 @@ roles: - localhost-init +- hosts: 'masters[0]' + become: yes + roles: + - ha-loadbalance + - hosts: 'masters' # gather_facts: False become: yes roles: - k8s-masters +- hosts: 'masters' + become: yes + roles: + - ha-loadbalance + - hosts: 'masters[0]' become: yes roles: diff --git a/read-ansible-hosts.py b/read-ansible-hosts.py index b2a2524..310faa5 100755 --- a/read-ansible-hosts.py +++ b/read-ansible-hosts.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/python2 # _*_ coding:utf-8 _*_ __author__ = 'yhchen' diff --git a/roles/ha-loadbalance/.travis.yml b/roles/ha-loadbalance/.travis.yml new file mode 100644 index 0000000..36bbf62 --- 
/dev/null +++ b/roles/ha-loadbalance/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/roles/ha-loadbalance/README.md b/roles/ha-loadbalance/README.md new file mode 100644 index 0000000..225dd44 --- /dev/null +++ b/roles/ha-loadbalance/README.md @@ -0,0 +1,38 @@ +Role Name +========= + +A brief description of the role goes here. + +Requirements +------------ + +Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. + +Role Variables +-------------- + +A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. + +Dependencies +------------ + +A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 
+ +Example Playbook +---------------- + +Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: + + - hosts: servers + roles: + - { role: username.rolename, x: 42 } + +License +------- + +BSD + +Author Information +------------------ + +An optional section for the role authors to include contact information, or a website (HTML is not allowed). diff --git a/roles/ha-loadbalance/defaults/main.yml b/roles/ha-loadbalance/defaults/main.yml new file mode 100644 index 0000000..620899f --- /dev/null +++ b/roles/ha-loadbalance/defaults/main.yml @@ -0,0 +1,2 @@ +--- +# defaults file for ha-loadbalance \ No newline at end of file diff --git a/roles/ha-loadbalance/handlers/main.yml b/roles/ha-loadbalance/handlers/main.yml new file mode 100644 index 0000000..c7d1b0f --- /dev/null +++ b/roles/ha-loadbalance/handlers/main.yml @@ -0,0 +1,2 @@ +--- +# handlers file for ha-loadbalance \ No newline at end of file diff --git a/roles/ha-loadbalance/meta/main.yml b/roles/ha-loadbalance/meta/main.yml new file mode 100644 index 0000000..227ad9c --- /dev/null +++ b/roles/ha-loadbalance/meta/main.yml @@ -0,0 +1,53 @@ +galaxy_info: + author: your name + description: your role description + company: your company (optional) + + # If the issue tracker for your role is not on github, uncomment the + # next line and provide a value + # issue_tracker_url: http://example.com/issue/tracker + + # Choose a valid license ID from https://spdx.org - some suggested licenses: + # - BSD-3-Clause (default) + # - MIT + # - GPL-2.0-or-later + # - GPL-3.0-only + # - Apache-2.0 + # - CC-BY-4.0 + license: license (GPL-2.0-or-later, MIT, etc) + + min_ansible_version: 2.9 + + # If this a Container Enabled role, provide the minimum Ansible Container version. + # min_ansible_container_version: + + # + # Provide a list of supported platforms, and for each platform a list of versions. 
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'. + # To view available platforms and versions (or releases), visit: + # https://galaxy.ansible.com/api/v1/platforms/ + # + # platforms: + # - name: Fedora + # versions: + # - all + # - 25 + # - name: SomePlatform + # versions: + # - all + # - 1.0 + # - 7 + # - 99.99 + + galaxy_tags: [] + # List tags for your role here, one per line. A tag is a keyword that describes + # and categorizes the role. Users find roles by searching for tags. Be sure to + # remove the '[]' above, if you add tags to this list. + # + # NOTE: A tag is limited to a single word comprised of alphanumeric characters. + # Maximum 20 tags per role. + +dependencies: [] + # List your role dependencies here, one per line. Be sure to remove the '[]' above, + # if you add dependencies to this list. + \ No newline at end of file diff --git a/roles/ha-loadbalance/tasks/keepalived-haproxy.yml b/roles/ha-loadbalance/tasks/keepalived-haproxy.yml new file mode 100644 index 0000000..10e935c --- /dev/null +++ b/roles/ha-loadbalance/tasks/keepalived-haproxy.yml @@ -0,0 +1,60 @@ +--- +# copy keepalived conf +- name: Create /etc/keepalived directory + file: + path: /etc/keepalived + state: directory + tags: + - keepalived + - ha-loadbalance + +- name: Override keepalived.conf + template: src=keepalived.conf.j2 dest=/etc/keepalived/keepalived.conf owner=root group=root mode=644 backup=yes + tags: + - keepalived + - ha-loadbalance + +# copy check_apiserver.sh +- name: Override check_apiserver.sh + template: src=check_apiserver.sh.j2 dest=/etc/keepalived/check_apiserver.sh owner=root group=root mode=755 + tags: + - keepalived + - ha-loadbalance + +- name: Create /etc/kubernetes/manifests/ directory + file: + path: /etc/kubernetes/manifests/ + state: directory + tags: + - keepalived + - haproxy + - ha-loadbalance + +# copy static pod keepalived.yaml +- name: Override /etc/kubernetes/manifests/keepalived.yaml + template: 
src=keepalived.yaml.j2 dest=/etc/kubernetes/manifests/keepalived.yaml owner=root group=root mode=644 backup=yes + tags: + - keepalived + - ha-loadbalance + +# copy haproxy.cfg +- name: Create /etc/haproxy directory + file: + path: /etc/haproxy + state: directory + tags: + - haproxy + - ha-loadbalance + +- name: Override haproxy.cfg + template: src=haproxy.cfg.j2 dest=/etc/haproxy/haproxy.cfg owner=root group=root mode=644 backup=yes + tags: + - haproxy + - ha-loadbalance + +# copy static pod haproxy.yaml +- name: Override /etc/kubernetes/manifests/haproxy.yaml + template: src=haproxy.yaml.j2 dest=/etc/kubernetes/manifests/haproxy.yaml owner=root group=root mode=644 backup=yes + tags: + - keepalived + - ha-loadbalance diff --git a/roles/ha-loadbalance/tasks/main.yml b/roles/ha-loadbalance/tasks/main.yml new file mode 100644 index 0000000..75de039 --- /dev/null +++ b/roles/ha-loadbalance/tasks/main.yml @@ -0,0 +1,4 @@ +--- +# tasks file for ha-loadbalance +- include: keepalived-haproxy.yml + when: keepalived_haproxy_enabled | default(false) diff --git a/roles/ha-loadbalance/templates/check_apiserver.sh.j2 b/roles/ha-loadbalance/templates/check_apiserver.sh.j2 new file mode 100644 index 0000000..745bc97 --- /dev/null +++ b/roles/ha-loadbalance/templates/check_apiserver.sh.j2 @@ -0,0 +1,12 @@ +#!/bin/sh + +errorExit() { + echo "*** $*" 1>&2 + exit 1 +} + +curl --silent --max-time 2 --insecure https://localhost:{{api_server_port}}/ -o /dev/null || errorExit "Error GET https://localhost:{{api_server_port}}/" +if ip addr | grep -q {{master_vip}}; then + curl --silent --max-time 2 --insecure https://{{master_vip}}:{{api_server_port}}/ -o /dev/null || errorExit "Error GET https://{{master_vip}}:{{api_server_port}}/" +fi + diff --git a/roles/ha-loadbalance/templates/haproxy.cfg.j2 b/roles/ha-loadbalance/templates/haproxy.cfg.j2 new file mode 100644 index 0000000..84d94d7 --- /dev/null +++ b/roles/ha-loadbalance/templates/haproxy.cfg.j2 @@ -0,0 +1,51 @@ +# /etc/haproxy/haproxy.cfg
+#--------------------------------------------------------------------- +# Global settings +#--------------------------------------------------------------------- +global + log /dev/log local0 + log /dev/log local1 notice + daemon + +#--------------------------------------------------------------------- +# common defaults that all the 'listen' and 'backend' sections will +# use if not designated in their block +#--------------------------------------------------------------------- +defaults + mode http + log global + option httplog + option dontlognull + option http-server-close + option forwardfor except 127.0.0.0/8 + option redispatch + retries 1 + timeout http-request 10s + timeout queue 20s + timeout connect 5s + timeout client 20s + timeout server 20s + timeout http-keep-alive 10s + timeout check 10s + +#--------------------------------------------------------------------- +# apiserver frontend which proxys to the control plane nodes +#--------------------------------------------------------------------- +frontend apiserver + bind *:{{api_server_port}} + mode tcp + option tcplog + default_backend apiserver + +#--------------------------------------------------------------------- +# round robin balancing for apiserver +#--------------------------------------------------------------------- +backend apiserver + option httpchk GET /healthz + http-check expect status 200 + mode tcp + option ssl-hello-chk + balance roundrobin +{% for host in groups['masters'] %} + server {{ hostvars[host].inventory_hostname}} {{ hostvars[host].ansible_host }}:{{api_server_src_port}} check +{% endfor %} diff --git a/roles/ha-loadbalance/templates/haproxy.yaml.j2 b/roles/ha-loadbalance/templates/haproxy.yaml.j2 new file mode 100644 index 0000000..8ba5f15 --- /dev/null +++ b/roles/ha-loadbalance/templates/haproxy.yaml.j2 @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Pod +metadata: + name: haproxy + namespace: kube-system +spec: + containers: + - image: {{haproxy_image}} + name: haproxy + 
livenessProbe: + failureThreshold: 8 + httpGet: + host: localhost + path: /healthz + port: {{api_server_port}} + scheme: HTTPS + volumeMounts: + - mountPath: /usr/local/etc/haproxy/haproxy.cfg + name: haproxyconf + readOnly: true + hostNetwork: true + volumes: + - hostPath: + path: /etc/haproxy/haproxy.cfg + type: FileOrCreate + name: haproxyconf diff --git a/roles/ha-loadbalance/templates/keepalived.conf.j2 b/roles/ha-loadbalance/templates/keepalived.conf.j2 new file mode 100644 index 0000000..1a88458 --- /dev/null +++ b/roles/ha-loadbalance/templates/keepalived.conf.j2 @@ -0,0 +1,42 @@ +! /etc/keepalived/keepalived.conf +! Configuration File for keepalived +global_defs { + router_id LVS_DEVEL +} +vrrp_script check_apiserver { + script "/etc/keepalived/check_apiserver.sh" + interval 3 + weight -2 + fall 10 + rise 2 +} + +vrrp_instance master-vip { +{% if hostvars[ groups['masters'][0] ].inventory_hostname == inventory_hostname %} + state MASTER +{% else %} + state BACKUP +{% endif %} + interface {{LOCAL_ENNAME}} + virtual_router_id {{keepalived_router_id}} + priority 100 + authentication { + auth_type PASS + auth_pass {{keepalived_auth_pass}} + } + unicast_src_ip {{ansible_host}} # The IP address of this machine + unicast_peer { +{% for host in groups['masters'] %} + {% if hostvars[host].inventory_hostname != inventory_hostname %} {{hostvars[host].ansible_host}} + {% endif %} +{% endfor %} + } + + virtual_ipaddress { + {{master_vip}} + } + track_script { + check_apiserver + } +} + diff --git a/roles/ha-loadbalance/templates/keepalived.yaml.j2 b/roles/ha-loadbalance/templates/keepalived.yaml.j2 new file mode 100644 index 0000000..094f43f --- /dev/null +++ b/roles/ha-loadbalance/templates/keepalived.yaml.j2 @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + name: keepalived + namespace: kube-system +spec: + containers: + - image: {{ keepalived_image }} + name: keepalived + resources: {} + securityContext: + capabilities: + add: + - 
NET_ADMIN + - NET_BROADCAST + - NET_RAW + volumeMounts: + - mountPath: /usr/local/etc/keepalived/keepalived.conf + name: config + - mountPath: /etc/keepalived/check_apiserver.sh + name: check + hostNetwork: true + volumes: + - hostPath: + path: /etc/keepalived/keepalived.conf + name: config + - hostPath: + path: /etc/keepalived/check_apiserver.sh + name: check +status: {} diff --git a/roles/ha-loadbalance/tests/inventory b/roles/ha-loadbalance/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/roles/ha-loadbalance/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/roles/ha-loadbalance/tests/test.yml b/roles/ha-loadbalance/tests/test.yml new file mode 100644 index 0000000..38f6e5a --- /dev/null +++ b/roles/ha-loadbalance/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - ha-loadbalance \ No newline at end of file diff --git a/roles/ha-loadbalance/vars/main.yml b/roles/ha-loadbalance/vars/main.yml new file mode 100644 index 0000000..780c89f --- /dev/null +++ b/roles/ha-loadbalance/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for ha-loadbalance \ No newline at end of file diff --git a/roles/host-init/tasks/docker.yml b/roles/host-init/tasks/docker.yml index eeadf94..7b2132e 100644 --- a/roles/host-init/tasks/docker.yml +++ b/roles/host-init/tasks/docker.yml @@ -1,3 +1,10 @@ +- name: uninstall podman + yum: + name: + - runc + state: absent + autoremove: true + - name: Install docker ce yum: name: diff --git a/roles/host-init/tasks/installKubeadm.yml b/roles/host-init/tasks/installKubeadm.yml index a61b105..cb43c43 100644 --- a/roles/host-init/tasks/installKubeadm.yml +++ b/roles/host-init/tasks/installKubeadm.yml @@ -5,6 +5,7 @@ - kubeadm-{{k8s_version}} - kubectl-{{k8s_version}} - bash-completion + - iproute-tc tags: kubeadm - name: Kubectl completion bash diff --git a/roles/host-init/tasks/ipvs.yml b/roles/host-init/tasks/ipvs.yml index 88f0779..cddb188 100644 --- a/roles/host-init/tasks/ipvs.yml 
+++ b/roles/host-init/tasks/ipvs.yml @@ -17,6 +17,6 @@ - ip_vs_rr - ip_vs_wrr - ip_vs_sh - - nf_conntrack_ipv4 + - nf_conntrack tags: - ipvs diff --git a/roles/host-init/tasks/main.yml b/roles/host-init/tasks/main.yml index ce044f2..aa4f212 100644 --- a/roles/host-init/tasks/main.yml +++ b/roles/host-init/tasks/main.yml @@ -7,6 +7,7 @@ - include: yum.yml - include: selinux.yml - include: ulimit.yml +- include: ipvs.yml - include: sysctl.yml - include: swap.yml - include: centos_ssh.yml @@ -14,7 +15,8 @@ when: chronyd_install - include: hostname.yml - include: docker.yml -- include: ipvs.yml +- include: update.yml + when: is_system_update - include: installKubeadm.yml - include: apiServerDns.yml - include: resolv.yml diff --git a/roles/host-init/tasks/update.yml b/roles/host-init/tasks/update.yml new file mode 100644 index 0000000..93f8bf9 --- /dev/null +++ b/roles/host-init/tasks/update.yml @@ -0,0 +1,3 @@ +- name: yum update + command: yum update -y + tags: yum-update diff --git a/roles/host-init/tasks/yum.yml b/roles/host-init/tasks/yum.yml index aba8cbf..45f75b3 100644 --- a/roles/host-init/tasks/yum.yml +++ b/roles/host-init/tasks/yum.yml @@ -36,8 +36,3 @@ - iotop - dnsmasq tags: install-base-tools - -- name: yum update - command: yum update -y - tags: yum-update - when: is_system_update diff --git a/roles/k8s-masters/tasks/joinControlPlane.yml b/roles/k8s-masters/tasks/joinControlPlane.yml index c4a9f4e..f36afd8 100644 --- a/roles/k8s-masters/tasks/joinControlPlane.yml +++ b/roles/k8s-masters/tasks/joinControlPlane.yml @@ -35,6 +35,15 @@ when: check_ret.rc == 1 and hostvars[ groups['masters'][0] ].inventory_hostname != inventory_hostname and advertise_address is defined tags: joinControlPlane +- name: Resolve api server domain to master vip + lineinfile: + path: "/etc/hosts" + regexp: '.*{{ api_server_domain }}' + line: "{{ master_vip }} {{ api_server_domain }}" + state: present + tags: joinControlPlane + when: keepalived_haproxy_enabled is defined and 
keepalived_haproxy_enabled == True + - name: Resolve api server domain to local master lineinfile: path: "/etc/hosts" @@ -42,3 +51,4 @@ line: "{{ ansible_host }} {{ api_server_domain }}" state: present tags: joinControlPlane + when: keepalived_haproxy_enabled is not defined or keepalived_haproxy_enabled != True diff --git a/roles/k8s-masters/templates/kubeadm-init.yaml.j2 b/roles/k8s-masters/templates/kubeadm-init.yaml.j2 index b42c892..c3815ac 100644 --- a/roles/k8s-masters/templates/kubeadm-init.yaml.j2 +++ b/roles/k8s-masters/templates/kubeadm-init.yaml.j2 @@ -9,7 +9,11 @@ bootstrapTokens: - authentication kind: InitConfiguration localAPIEndpoint: +{% if advertise_address is defined %} advertiseAddress: {{ advertise_address }} +{% else %} + advertiseAddress: {{ ansible_host }} +{% endif %} bindPort: 6443 nodeRegistration: criSocket: /var/run/dockershim.sock @@ -21,12 +25,18 @@ apiServer: apiVersion: kubeadm.k8s.io/v1beta2 certificatesDir: /etc/kubernetes/pki clusterName: kubernetes +{% if api_server_domain is defined %} controlPlaneEndpoint: {{api_server_domain}}:{{api_server_port}} +{% endif %} controllerManager: {} dns: type: CoreDNS +{% if coredns_image_repo is defined %} imageRepository: {{coredns_image_repo}} +{% endif %} +{% if coredns_image_tag is defined %} imageTag: {{coredns_image_tag}} +{% endif %} etcd: local: dataDir: /var/lib/etcd