OpenShift single node can be installed with the installer, but many customers run into problems along the way, so let's try it out here.
This article has a prerequisite lab: creating a helper node. That utility machine acts as a jump host and simulates the proxy of a disconnected environment.
The installer's internal installation logic:
Video walkthrough
# switch to your install version
export BUILDNUMBER=4.11.6
pushd /data/ocp4/${BUILDNUMBER}
tar -xzf openshift-client-linux-${BUILDNUMBER}.tar.gz -C /usr/local/bin/
tar -xzf openshift-install-linux-${BUILDNUMBER}.tar.gz -C /usr/local/bin/
# tar -xzf oc-mirror.tar.gz -C /usr/local/bin/
# chmod +x /usr/local/bin/oc-mirror
install -m 755 /data/ocp4/clients/butane-amd64 /usr/local/bin/butane
install -m 755 /data/ocp4/clients/coreos-installer_amd64 /usr/local/bin/coreos-installer
popd
# create a user, and install the cluster as that user
useradd -m 3node
su - 3node
ssh-keygen
cat << EOF > ~/.ssh/config
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
EOF
chmod 600 ~/.ssh/config
cat << 'EOF' >> ~/.bashrc
export BASE_DIR='/home/3node/'
EOF
# export BASE_DIR='/home/3node/'
mkdir -p ${BASE_DIR}/data/{sno/disconnected,install}
# set some parameters of your cluster
NODE_SSH_KEY="$(cat ${BASE_DIR}/.ssh/id_rsa.pub)"
INSTALL_IMAGE_REGISTRY=quaylab.infra.redhat.ren:5443
PULL_SECRET='{"auths":{"registry.redhat.io": {"auth": "ZHVtbXk6ZHVtbXk=","email": "noemail@localhost"},"registry.ocp4.redhat.ren:5443": {"auth": "ZHVtbXk6ZHVtbXk=","email": "noemail@localhost"},"'${INSTALL_IMAGE_REGISTRY}'": {"auth": "'$( echo -n 'admin:shadowman' | openssl base64 )'","email": "noemail@localhost"}}}'
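The auth fields are just base64-encoded user:password pairs. A quick sanity check (a minimal sketch, assuming jq and base64 are available on the helper):
# the pull secret must be valid JSON
echo "$PULL_SECRET" | jq .
# the auth token for our registry is base64("admin:shadowman"); decode it to confirm
echo "$PULL_SECRET" | jq -r --arg r "$INSTALL_IMAGE_REGISTRY" '.auths[$r].auth' | base64 -d ; echo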
NTP_SERVER=192.168.7.11
HELP_SERVER=192.168.7.11
KVM_HOST=192.168.7.11
API_VIP=192.168.7.100
INGRESS_VIP=192.168.7.101
CLUSTER_PROVISION_IP=192.168.7.103
BOOTSTRAP_IP=192.168.7.12
# define node information for the single-node cluster
SNO_CLUSTER_NAME=acm-demo-one
SNO_BASE_DOMAIN=redhat.ren
# echo ${SNO_IF_MAC} > /data/sno/sno.mac
mkdir -p ${BASE_DIR}/data/install
cd ${BASE_DIR}/data/install
/bin/rm -rf *.ign .openshift_install_state.json auth bootstrap manifests master*[0-9] worker*[0-9]
cat << EOF > ${BASE_DIR}/data/install/install-config.yaml
apiVersion: v1
baseDomain: $SNO_BASE_DOMAIN
compute:
- name: worker
  replicas: 0
controlPlane:
  name: master
  replicas: 3
metadata:
  name: $SNO_CLUSTER_NAME
networking:
  # OVNKubernetes , OpenShiftSDN
  networkType: OVNKubernetes
  clusterNetwork:
  - cidr: 10.128.0.0/14
    hostPrefix: 23
  serviceNetwork:
  - 172.31.0.0/16
platform:
  none: {}
pullSecret: '${PULL_SECRET}'
sshKey: |
$( cat ${BASE_DIR}/.ssh/id_rsa.pub | sed 's/^/   /g' )
additionalTrustBundle: |
$( cat /etc/crts/redhat.ren.ca.crt | sed 's/^/   /g' )
imageContentSources:
- mirrors:
  - ${INSTALL_IMAGE_REGISTRY}/openshift/release-images
  source: quay.io/openshift-release-dev/ocp-release
- mirrors:
  - ${INSTALL_IMAGE_REGISTRY}/openshift/release
  source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
EOF
/bin/cp -f ${BASE_DIR}/data/install/install-config.yaml ${BASE_DIR}/data/install/install-config.yaml.bak
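Before feeding the file to the installer, it can be worth checking that it rendered as valid YAML. A minimal sketch, assuming python3 with PyYAML is installed on the helper:
python3 -c "import yaml; yaml.safe_load(open('${BASE_DIR}/data/install/install-config.yaml'))" && echo "install-config.yaml parses OK"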
openshift-install create manifests --dir=${BASE_DIR}/data/install
/bin/cp -f /data/ocp4/ansible-helper/files/* ${BASE_DIR}/data/install/openshift/
#############################################
# run the following as root at least once, if you have not already done so
# it will generate registry configuration
# copy image registry proxy related config
cd /data/ocp4
bash image.registries.conf.sh nexus.infra.redhat.ren:8083
/bin/cp -f /data/ocp4/image.registries.conf /etc/containers/registries.conf.d/
#############################################
/bin/cp -f /data/ocp4/99-worker-container-registries.yaml ${BASE_DIR}/data/install/openshift
/bin/cp -f /data/ocp4/99-master-container-registries.yaml ${BASE_DIR}/data/install/openshift
cd ${BASE_DIR}/data/install/
openshift-install --dir=${BASE_DIR}/data/install create ignition-configs
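This produces bootstrap.ign, master.ign and worker.ign (plus the auth/ directory). A quick hedged check that they were generated and are valid JSON:
ls -l ${BASE_DIR}/data/install/*.ign
jq '.ignition.version' ${BASE_DIR}/data/install/bootstrap.ign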
BOOTSTRAP_IP=192.168.7.22
MASTER_01_IP=192.168.7.23
MASTER_02_IP=192.168.7.24
MASTER_03_IP=192.168.7.25
BOOTSTRAP_HOSTNAME=bootstrap-demo
MASTER_01_HOSTNAME=master-01-demo
MASTER_02_HOSTNAME=master-02-demo
MASTER_03_HOSTNAME=master-03-demo
BOOTSTRAP_INTERFACE=enp1s0
MASTER_01_INTERFACE=enp1s0
MASTER_02_INTERFACE=enp1s0
MASTER_03_INTERFACE=enp1s0
BOOTSTRAP_DISK=/dev/vda
MASTER_01_DISK=/dev/vda
MASTER_02_DISK=/dev/vda
MASTER_03_DISK=/dev/vda
OCP_GW=192.168.7.11
OCP_NETMASK=255.255.255.0
OCP_NETMASK_S=24
OCP_DNS=192.168.7.11
# HTTP_PATH=http://192.168.7.11:8080/ignition
source /data/ocp4/acm.fn.sh
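acm.fn.sh provides the get_file_content_for_ignition helper used below; it reads a local file and hands back an Ignition storage.files entry through RET_VAL / RET_VAL_2. The repo's actual implementation may differ; a minimal hypothetical sketch of the idea:
# hypothetical sketch, not the repo's actual implementation
get_file_content_for_ignition_sketch() {
  local var_path=$1     # target path on the node, e.g. /opt/openshift/openshift/...
  local var_file=$2     # local source file
  local var_b64=$(base64 -w 0 "$var_file")
  # an Ignition v3 storage.files entry with the file inlined as a data URL
  RET_VAL_2=$(jq -nc --arg p "$var_path" \
    --arg c "data:text/plain;charset=utf-8;base64,${var_b64}" \
    '{path: $p, mode: 420, overwrite: true, contents: {source: $c}}')
}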
# we will create a user 'wzh' with password 'redhat', so on first boot we can log in directly from the console/ssh with a username and password
# this makes troubleshooting and exploration much easier
VAR_PWD_HASH="$(python3 -c 'import crypt,getpass; print(crypt.crypt("redhat"))')"
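Note that crypt.crypt() without an explicit salt uses the platform default method; if you want to force a SHA-512 hash explicitly, this optional variant also works:
# optional: explicitly request a sha512 password hash
VAR_PWD_HASH="$(python3 -c 'import crypt; print(crypt.crypt("redhat", crypt.mksalt(crypt.METHOD_SHA512)))')"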
cd ${BASE_DIR}/data/install/
/bin/cp -f /data/ocp4/rhcos-live.x86_64.iso bootstrap.iso
/bin/cp -f bootstrap.iso master01.iso
/bin/cp -f bootstrap.iso master02.iso
/bin/cp -f bootstrap.iso master03.iso
fn_static_ip() {
VAR_INTERFACE=$1
VAR_IP=$2
VAR_NETMASK_S=$3
VAR_GW=$4
VAR_HOSTNAME=$5
VAR_DNS=$6
cat << EOF > ${BASE_DIR}/data/sno/${VAR_INTERFACE}.nmconnection
[connection]
id=${VAR_INTERFACE}
type=ethernet
autoconnect-retries=1
interface-name=${VAR_INTERFACE}
multi-connect=1
permissions=
wait-device-timeout=60000
[ethernet]
mac-address-blacklist=
[ipv4]
address1=${VAR_IP}/${VAR_NETMASK_S},${VAR_GW}
dhcp-hostname=${VAR_HOSTNAME}
dhcp-timeout=90
dns=${VAR_DNS};
dns-search=
may-fail=false
method=manual
[ipv6]
addr-gen-mode=eui64
dhcp-hostname=${VAR_HOSTNAME}
dhcp-timeout=90
dns-search=
method=disabled
[proxy]
EOF
cat << EOF > ${BASE_DIR}/data/sno/static.hostname.bu
variant: openshift
version: 4.9.0
metadata:
  labels:
    machineconfiguration.openshift.io/role: master
  name: 99-zzz-master-static-hostname
storage:
  files:
    - path: /etc/hostname
      mode: 0644
      overwrite: true
      contents:
        inline: |
          ${VAR_HOSTNAME}
EOF
}
fn_static_ip $BOOTSTRAP_INTERFACE \
$BOOTSTRAP_IP \
$OCP_NETMASK_S \
$OCP_GW \
$BOOTSTRAP_HOSTNAME \
$OCP_DNS
butane ${BASE_DIR}/data/sno/static.hostname.bu > ${BASE_DIR}/data/sno/disconnected/99-zzz-master-static-hostname.yaml
get_file_content_for_ignition "/opt/openshift/openshift/99-zzz-master-static-hostname.yaml" "${BASE_DIR}/data/sno/disconnected/99-zzz-master-static-hostname.yaml"
VAR_99_master_master_static_hostname=$RET_VAL
VAR_99_master_master_static_hostname_2=$RET_VAL_2
cat ${BASE_DIR}/data/install/bootstrap.ign \
| jq --arg VAR "$VAR_PWD_HASH" --arg VAR_SSH "$NODE_SSH_KEY" '.passwd.users += [{ "name": "wzh", "system": true, "passwordHash": $VAR , "sshAuthorizedKeys": [ $VAR_SSH ], "groups": [ "adm", "wheel", "sudo", "systemd-journal" ] }]' \
| jq --argjson VAR "$VAR_99_master_master_static_hostname_2" '.storage.files += [$VAR] ' \
| jq -c . \
> ${BASE_DIR}/data/install/bootstrap-iso.ign
coreos-installer iso reset bootstrap.iso
coreos-installer iso customize -f bootstrap.iso \
--network-keyfile ${BASE_DIR}/data/sno/${BOOTSTRAP_INTERFACE}.nmconnection \
--dest-ignition ${BASE_DIR}/data/install/bootstrap-iso.ign \
--dest-device $BOOTSTRAP_DISK
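If you want to double-check what was embedded into the ISO before booting it, coreos-installer can print it back (a hedged spot check):
# show the ignition config and kernel arguments embedded in the customized ISO
coreos-installer iso ignition show bootstrap.iso | jq '.ignition.version'
coreos-installer iso kargs show bootstrap.iso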
fn_static_ip $MASTER_01_INTERFACE \
$MASTER_01_IP \
$OCP_NETMASK_S \
$OCP_GW \
$MASTER_01_HOSTNAME \
$OCP_DNS
butane ${BASE_DIR}/data/sno/static.hostname.bu > ${BASE_DIR}/data/sno/disconnected/99-zzz-master-static-hostname.yaml
get_file_content_for_ignition "/opt/openshift/openshift/99-zzz-master-static-hostname.yaml" "${BASE_DIR}/data/sno/disconnected/99-zzz-master-static-hostname.yaml"
VAR_99_master_master_static_hostname=$RET_VAL
VAR_99_master_master_static_hostname_2=$RET_VAL_2
cat ${BASE_DIR}/data/install/master.ign \
| jq --arg VAR "$VAR_PWD_HASH" --arg VAR_SSH "$NODE_SSH_KEY" '.passwd.users += [{ "name": "wzh", "system": true, "passwordHash": $VAR , "sshAuthorizedKeys": [ $VAR_SSH ], "groups": [ "adm", "wheel", "sudo", "systemd-journal" ] }]' \
| jq --argjson VAR "$VAR_99_master_master_static_hostname_2" '.storage.files += [$VAR] ' \
| jq -c . \
> ${BASE_DIR}/data/install/master-01-iso.ign
coreos-installer iso reset master01.iso
coreos-installer iso customize -f master01.iso \
--network-keyfile ${BASE_DIR}/data/sno/${MASTER_01_INTERFACE}.nmconnection \
--dest-ignition ${BASE_DIR}/data/install/master-01-iso.ign \
--dest-device $MASTER_01_DISK
fn_static_ip $MASTER_02_INTERFACE \
$MASTER_02_IP \
$OCP_NETMASK_S \
$OCP_GW \
$MASTER_02_HOSTNAME \
$OCP_DNS
butane ${BASE_DIR}/data/sno/static.hostname.bu > ${BASE_DIR}/data/sno/disconnected/99-zzz-master-static-hostname.yaml
get_file_content_for_ignition "/opt/openshift/openshift/99-zzz-master-static-hostname.yaml" "${BASE_DIR}/data/sno/disconnected/99-zzz-master-static-hostname.yaml"
VAR_99_master_master_static_hostname=$RET_VAL
VAR_99_master_master_static_hostname_2=$RET_VAL_2
cat ${BASE_DIR}/data/install/master.ign \
| jq --arg VAR "$VAR_PWD_HASH" --arg VAR_SSH "$NODE_SSH_KEY" '.passwd.users += [{ "name": "wzh", "system": true, "passwordHash": $VAR , "sshAuthorizedKeys": [ $VAR_SSH ], "groups": [ "adm", "wheel", "sudo", "systemd-journal" ] }]' \
| jq --argjson VAR "$VAR_99_master_master_static_hostname_2" '.storage.files += [$VAR] ' \
| jq -c . \
> ${BASE_DIR}/data/install/master-02-iso.ign
coreos-installer iso reset master02.iso
coreos-installer iso customize -f master02.iso \
--network-keyfile ${BASE_DIR}/data/sno/${MASTER_02_INTERFACE}.nmconnection \
--dest-ignition ${BASE_DIR}/data/install/master-02-iso.ign \
--dest-device $MASTER_02_DISK
fn_static_ip $MASTER_03_INTERFACE \
$MASTER_03_IP \
$OCP_NETMASK_S \
$OCP_GW \
$MASTER_03_HOSTNAME \
$OCP_DNS
butane ${BASE_DIR}/data/sno/static.hostname.bu > ${BASE_DIR}/data/sno/disconnected/99-zzz-master-static-hostname.yaml
get_file_content_for_ignition "/opt/openshift/openshift/99-zzz-master-static-hostname.yaml" "${BASE_DIR}/data/sno/disconnected/99-zzz-master-static-hostname.yaml"
VAR_99_master_master_static_hostname=$RET_VAL
VAR_99_master_master_static_hostname_2=$RET_VAL_2
cat ${BASE_DIR}/data/install/master.ign \
| jq --arg VAR "$VAR_PWD_HASH" --arg VAR_SSH "$NODE_SSH_KEY" '.passwd.users += [{ "name": "wzh", "system": true, "passwordHash": $VAR , "sshAuthorizedKeys": [ $VAR_SSH ], "groups": [ "adm", "wheel", "sudo", "systemd-journal" ] }]' \
| jq --argjson VAR "$VAR_99_master_master_static_hostname_2" '.storage.files += [$VAR] ' \
| jq -c . \
> ${BASE_DIR}/data/install/master-03-iso.ign
coreos-installer iso reset master03.iso
coreos-installer iso customize -f master03.iso \
--network-keyfile ${BASE_DIR}/data/sno/${MASTER_03_INTERFACE}.nmconnection \
--dest-ignition ${BASE_DIR}/data/install/master-03-iso.ign \
--dest-device $MASTER_03_DISK
# coreos-installer iso ignition embed master03.iso \
# --ignition-file ${BASE_DIR}/data/install/master-iso.ign
# coreos-installer iso customize -f master03.iso \
# --dest-device $MASTER_03_DISK
# coreos-installer iso kargs modify master03.iso \
# -a ip=$MASTER_03_IP::$OCP_GW:$OCP_NETMASK:$MASTER_03_HOSTNAME:$MASTER_03_INTERFACE:none \
# -a nameserver=$OCP_DNS \
# -a coreos.inst.install_dev=$MASTER_03_DISK
# coreos-installer iso customize -f sno.iso \
# --network-keyfile ${BASE_DIR}/data/sno/${SNO_IF}.nmconnection
# --dest-ignition /data/install/iso.ign \
# --dest-device $SNO_DISK \
# --live-ignition /data/sno/static.user.ign \
# --live-ignition /data/sno/disconnected/99-zzz-master-static-hostname.ign \
# --live-ignition /data/install/bootstrap-in-place-for-live-iso.ign
# coreos-installer iso ignition embed sno.iso \
# --config /data/sno/disconnected/99-zzz-master-static-hostname.ign
# coreos-installer iso ignition show sno.iso | jq .
create_lv() {
  var_vg=$1
  var_pool=$2
  var_lv=$3
  var_size=$4
  var_action=$5
  # always remove the existing lv; recreate a thin-provisioned one only when asked to
  lvremove -f $var_vg/$var_lv
  # lvcreate -y -L $var_size -n $var_lv $var_vg
  if [ "$var_action" == "recreate" ]; then
    lvcreate --type thin -n $var_lv -V $var_size --thinpool $var_vg/$var_pool
    wipefs --all --force /dev/$var_vg/$var_lv
  fi
}
# cleanup pass: destroy leftover VMs and remove their LVs (no "recreate" argument, so create_lv only removes here)
virsh destroy ocp4-acm-one-bootstrap
virsh undefine ocp4-acm-one-bootstrap
create_lv vgdata poolA lvacm-one-bootstrap 500G
create_lv vgdata poolA lvacm-one-bootstrap-data 500G
virsh destroy ocp4-acm-one-master-01
virsh undefine ocp4-acm-one-master-01
create_lv vgdata poolA lvacm-one-master-01 500G
create_lv vgdata poolA lvacm-one-master-01-data 500G
virsh destroy ocp4-acm-one-master-02
virsh undefine ocp4-acm-one-master-02
create_lv vgdata poolA lvacm-one-master-02 500G
create_lv vgdata poolA lvacm-one-master-02-data 500G
virsh destroy ocp4-acm-one-master-03
virsh undefine ocp4-acm-one-master-03
create_lv vgdata poolA lvacm-one-master-03 500G
create_lv vgdata poolA lvacm-one-master-03-data 500G
cat << EOF >> /etc/sysctl.d/99-wzh-sysctl.conf
vm.overcommit_memory = 1
EOF
sysctl --system
# create the virtual network for the lab
mkdir -p /data/kvm
cd /data/kvm
cat << 'EOF' > /data/kvm/bridge.sh
#!/usr/bin/env bash
PUB_CONN='eno1'
PUB_IP='172.21.6.103/24'
PUB_GW='172.21.6.254'
PUB_DNS='172.21.1.1'
nmcli con down "$PUB_CONN"
nmcli con delete "$PUB_CONN"
nmcli con down baremetal
nmcli con delete baremetal
# RHEL 8.1 appends the word "System" in front of the connection; delete it in case it exists
nmcli con down "System $PUB_CONN"
nmcli con delete "System $PUB_CONN"
nmcli connection add ifname baremetal type bridge con-name baremetal ipv4.method 'manual' \
ipv4.address "$PUB_IP" \
ipv4.gateway "$PUB_GW" \
ipv4.dns "$PUB_DNS"
nmcli con add type bridge-slave ifname "$PUB_CONN" master baremetal
nmcli con down "$PUB_CONN";pkill dhclient;dhclient baremetal
nmcli con up baremetal
EOF
bash /data/kvm/bridge.sh
nmcli con mod baremetal +ipv4.addresses "192.168.7.103/24"
nmcli con up baremetal
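A quick check that the bridge came up with both addresses (hedged; nmcli/ip output formats vary by release):
nmcli -g ipv4.addresses con show baremetal
ip addr show baremetal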
cat << EOF > /root/.ssh/config
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
EOF
pvcreate -y /dev/vdb
vgcreate vgdata /dev/vdb
# https://access.redhat.com/articles/766133
lvcreate -y -n poolA -L 500G vgdata
lvcreate -y -n poolA_meta -L 10G vgdata
lvconvert -y --thinpool vgdata/poolA --poolmetadata vgdata/poolA_meta
lvextend -l +100%FREE vgdata/poolA
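You can confirm the thin pool and its usage before carving LVs out of it (a hedged check):
# list the thin pool; data_percent shows how much of the pool is actually consumed
lvs -a -o lv_name,lv_size,data_percent,pool_lv vgdata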
mkdir -p /data/kvm/one/
for var_file in bootstrap master01 master02 master03
do
scp 3node@192.168.7.11:/home/3node/data/install/$var_file.iso /data/kvm/one/
done
create_lv() {
  var_vg=$1
  var_pool=$2
  var_lv=$3
  var_size=$4
  var_action=$5
  lvremove -f $var_vg/$var_lv
  # lvcreate -y -L $var_size -n $var_lv $var_vg
  if [ "$var_action" == "recreate" ]; then
    lvcreate --type thin -n $var_lv -V $var_size --thinpool $var_vg/$var_pool
    wipefs --all --force /dev/$var_vg/$var_lv
  fi
}
virsh destroy ocp4-acm-one-bootstrap
virsh undefine ocp4-acm-one-bootstrap
create_lv vgdata poolA lvacm-one-bootstrap 500G recreate
create_lv vgdata poolA lvacm-one-bootstrap-data 500G recreate
SNO_MEM=32
virt-install --name=ocp4-acm-one-bootstrap --vcpus=16 --ram=$(($SNO_MEM*1024)) \
--cpu=host-model \
--disk path=/dev/vgdata/lvacm-one-bootstrap,device=disk,bus=virtio,format=raw \
--disk path=/dev/vgdata/lvacm-one-bootstrap-data,device=disk,bus=virtio,format=raw \
--os-variant rhel8.3 --network bridge=baremetal,model=virtio \
--graphics vnc,port=59002 \
--boot menu=on --cdrom /data/kvm/one/bootstrap.iso
# --disk path=/dev/vgdata/lvacmhub-data,device=disk,bus=virtio,format=raw \
# --autoconsole text
# --graphics vnc,port=59002 \
# virsh console ocp4-acm-one-bootstrap
virsh destroy ocp4-acm-one-master-01
virsh undefine ocp4-acm-one-master-01
create_lv vgdata poolA lvacm-one-master-01 500G recreate
create_lv vgdata poolA lvacm-one-master-01-data 500G recreate
virt-install --name=ocp4-acm-one-master-01 --vcpus=16 --ram=$(($SNO_MEM*1024)) \
--cpu=host-model \
--disk path=/dev/vgdata/lvacm-one-master-01,device=disk,bus=virtio,format=raw \
--disk path=/dev/vgdata/lvacm-one-master-01-data,device=disk,bus=virtio,format=raw \
--os-variant rhel8.3 --network bridge=baremetal,model=virtio \
--graphics vnc,port=59003 \
--boot menu=on --cdrom /data/kvm/one/master01.iso
virsh destroy ocp4-acm-one-master-02
virsh undefine ocp4-acm-one-master-02
create_lv vgdata poolA lvacm-one-master-02 500G recreate
create_lv vgdata poolA lvacm-one-master-02-data 500G recreate
virt-install --name=ocp4-acm-one-master-02 --vcpus=16 --ram=$(($SNO_MEM*1024)) \
--cpu=host-model \
--disk path=/dev/vgdata/lvacm-one-master-02,device=disk,bus=virtio,format=raw \
--disk path=/dev/vgdata/lvacm-one-master-02-data,device=disk,bus=virtio,format=raw \
--os-variant rhel8.3 --network bridge=baremetal,model=virtio \
--graphics vnc,port=59004 \
--boot menu=on --cdrom /data/kvm/one/master02.iso
virsh destroy ocp4-acm-one-master-03
virsh undefine ocp4-acm-one-master-03
create_lv vgdata poolA lvacm-one-master-03 500G recreate
create_lv vgdata poolA lvacm-one-master-03-data 500G recreate
virt-install --name=ocp4-acm-one-master-03 --vcpus=16 --ram=$(($SNO_MEM*1024)) \
--cpu=host-model \
--disk path=/dev/vgdata/lvacm-one-master-03,device=disk,bus=virtio,format=raw \
--disk path=/dev/vgdata/lvacm-one-master-03-data,device=disk,bus=virtio,format=raw \
--os-variant rhel8.3 --network bridge=baremetal,model=virtio \
--graphics vnc,port=59005 \
--boot menu=on --cdrom /data/kvm/one/master03.iso
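The four VMs now install in parallel; you can keep an eye on them from the KVM host (hedged example):
virsh list --all
# attach to a text console if needed (exit with ctrl+])
# virsh console ocp4-acm-one-bootstrap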
cd ${BASE_DIR}/data/install
export KUBECONFIG=${BASE_DIR}/data/install/auth/kubeconfig
echo "export KUBECONFIG=${BASE_DIR}/data/install/auth/kubeconfig" >> ~/.bashrc
# oc completion bash | sudo tee /etc/bash_completion.d/openshift > /dev/null
cd ${BASE_DIR}/data/install
openshift-install wait-for bootstrap-complete --log-level debug
# ......
# INFO Waiting up to 30m0s (until 4:44PM) for bootstrapping to complete...
# DEBUG Bootstrap status: complete
# INFO It is now safe to remove the bootstrap resources
# DEBUG Time elapsed per stage:
# DEBUG Bootstrap Complete: 9m33s
# INFO Time elapsed: 9m33s
openshift-install wait-for install-complete --log-level debug
# INFO Install complete!
# INFO To access the cluster as the system:admin user when using 'oc', run
# INFO export KUBECONFIG=/home/3node/data/install/auth/kubeconfig
# INFO Access the OpenShift web-console here: https://console-openshift-console.apps.acm-demo-one.redhat.ren
# INFO Login to the console with user: "kubeadmin", and password: "UTVLE-NfUqA-PpDQb-SNXpp"
# DEBUG Time elapsed per stage:
# DEBUG Cluster Operators: 2m56s
# INFO Time elapsed: 2m56s
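As a sanity check after the installer reports success (hedged; exact output varies), confirm all nodes are Ready and the cluster operators are Available:
oc get node
oc get co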
# init setting for helper node
cat << EOF > ~/.ssh/config
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
EOF
chmod 600 ~/.ssh/config
# ssh core@*****
# sudo -i
# # change password for root
# echo 'redhat' | passwd --stdin root
# sed -i "s|^PasswordAuthentication no$|PasswordAuthentication yes|g" /etc/ssh/sshd_config
# sed -i "s|^PermitRootLogin no$|PermitRootLogin yes|g" /etc/ssh/sshd_config
# sed -i "s|^#ClientAliveInterval 180$|ClientAliveInterval 1800|g" /etc/ssh/sshd_config
# systemctl restart sshd
# # set env, so oc can be used
# cat << EOF >> ~/.bashrc
# export KUBECONFIG=/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/node-kubeconfigs/localhost.kubeconfig
# RET=`oc config use-context system:admin`
# EOF
cat > ${BASE_DIR}/data/install/crack.txt << EOF
echo redhat | sudo passwd --stdin root
sudo sed -i "s|^PasswordAuthentication no$|PasswordAuthentication yes|g" /etc/ssh/sshd_config
sudo sed -i "s|^PermitRootLogin no$|PermitRootLogin yes|g" /etc/ssh/sshd_config
sudo sed -i "s|^#ClientAliveInterval 180$|ClientAliveInterval 1800|g" /etc/ssh/sshd_config
sudo systemctl restart sshd
sudo sh -c 'echo "export KUBECONFIG=/etc/kubernetes/static-pod-resources/kube-apiserver-certs/secrets/node-kubeconfigs/localhost.kubeconfig" >> /root/.bashrc'
sudo sh -c 'echo "RET=\\\`oc config use-context system:admin\\\`" >> /root/.bashrc'
EOF
for i in 23 24 25
do
ssh core@192.168.7.$i < ${BASE_DIR}/data/install/crack.txt
done
# https://unix.stackexchange.com/questions/230084/send-the-password-through-stdin-in-ssh-copy-id
dnf install -y sshpass
for i in 23 24 25
do
sshpass -p 'redhat' ssh-copy-id root@192.168.7.$i
done
for i in 23 24 25
do
ssh root@192.168.7.$i poweroff
done
virsh start ocp4-acm-one-master-01
virsh start ocp4-acm-one-master-02
virsh start ocp4-acm-one-master-03
mkdir -p ~/.kube/bak/
var_date=$(date '+%Y-%m-%d-%H%M')
/bin/cp -f ${BASE_DIR}/data/install/auth/kubeconfig ~/.kube/bak/kubeconfig-$var_date
/bin/cp -f ${BASE_DIR}/data/install/auth/kubeadmin-password ~/.kube/bak/kubeadmin-password-$var_date
sed "s/admin/admin\/$SNO_CLUSTER_NAME/g" ${BASE_DIR}/data/install/auth/kubeconfig > /tmp/config.new
# https://medium.com/@jacobtomlinson/how-to-merge-kubernetes-kubectl-config-files-737b61bd517d
/bin/cp -f ~/.kube/config ~/.kube/config.bak && KUBECONFIG=~/.kube/config:/tmp/config.new kubectl config view --flatten > /tmp/config && /bin/mv -f /tmp/config ~/.kube/config
unset KUBECONFIG
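Verify the merged kubeconfig picked up the new context (a hedged check):
kubectl config get-contexts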
We have installed the single node. Next, we can add worker nodes to it, turning the single node cluster into a single-master cluster.
# first, let's pin ingress to the master
oc label node acm-demo-hub-master ocp-ingress-run="true"
oc patch ingresscontroller default -n openshift-ingress-operator --type=merge --patch='{"spec":{"nodePlacement":{"nodeSelector": {"matchLabels":{"ocp-ingress-run":"true"}}}}}'
# this is a test env, so we don't need multiple ingress replicas.
oc patch --namespace=openshift-ingress-operator --patch='{"spec": {"replicas": 1}}' --type=merge ingresscontroller/default
oc get -n openshift-ingress-operator ingresscontroller/default -o yaml
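To confirm the router pods actually landed on the labeled master (a hedged check):
oc get pod -n openshift-ingress -o wide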
# then we get the worker's ignition file, start the worker node, and add it to the cluster
oc extract -n openshift-machine-api secret/worker-user-data --keys=userData --to=- > /var/www/html/ignition/sno-worker.ign
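The extracted config is served from the helper's web server; it is worth confirming it is reachable and valid (hedged, assuming the httpd from the helper-node lab listens on 8080):
curl -s http://192.168.7.11:8080/ignition/sno-worker.ign | jq '.ignition.version'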
HELP_SERVER=192.168.7.11
# define node information for the worker node
SNO_IP=192.168.7.16
SNO_GW=192.168.7.11
SNO_NETMASK=255.255.255.0
SNO_HOSTNAME=acm-demo-hub-worker-01
SNO_IF=enp1s0
SNO_DNS=192.168.7.11
SNO_DISK=/dev/vda
SNO_MEM=16
BOOT_ARG=" ip=$SNO_IP::$SNO_GW:$SNO_NETMASK:$SNO_HOSTNAME:$SNO_IF:none nameserver=$SNO_DNS coreos.inst.install_dev=${SNO_DISK##*/} coreos.inst.ignition_url=http://$HELP_SERVER:8080/ignition/sno-worker.ign"
/bin/cp -f /data/ocp4/rhcos-live.x86_64.iso sno.iso
coreos-installer iso kargs modify -a "$BOOT_ARG" sno.iso
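You can print the karg line back to verify it before shipping the ISO (hedged check):
coreos-installer iso kargs show sno.iso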
# go to kvm host ( 103 )
scp root@192.168.7.11:/data/install/sno.iso /data/kvm/
virsh destroy ocp4-acm-hub-worker01
virsh undefine ocp4-acm-hub-worker01
create_lv() {
  var_vg=$1
  var_pool=$2
  var_lv=$3
  var_size=$4
  var_action=$5
  lvremove -f $var_vg/$var_lv
  # lvcreate -y -L $var_size -n $var_lv $var_vg
  if [ "$var_action" == "recreate" ]; then
    lvcreate --type thin -n $var_lv -V $var_size --thinpool $var_vg/$var_pool
    wipefs --all --force /dev/$var_vg/$var_lv
  fi
}
create_lv vgdata poolA lvacmhub-worker01 500G recreate
# create_lv vgdata poolA lvacmhub-worker01-data 500G remove
virt-install --name=ocp4-acm-hub-worker01 --vcpus=16 --ram=$(($SNO_MEM*1024)) \
--cpu=host-model \
--disk path=/dev/vgdata/lvacmhub-worker01,device=disk,bus=virtio,format=raw \
`# --disk path=/dev/vgdata/lvacmhub-data,device=disk,bus=virtio,format=raw` \
--os-variant rhel8.3 --network bridge=baremetal,model=virtio \
--graphics vnc,port=59003 \
--boot menu=on --cdrom /data/kvm/sno.iso
# after the node has booted twice (install from the live ISO, then first boot from disk),
# go back to the helper
oc get csr
oc get csr -ojson | jq -r '.items[] | select(.status == {} ) | .metadata.name' | xargs oc adm certificate approve
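New workers raise two rounds of CSRs (client certs first, then kubelet serving certs), so the approve command usually needs to run more than once; then watch the node join (hedged):
# re-run the approve loop until no Pending csr is left, then:
oc get csr
oc get node -o wide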