Deployment Scheme
Prerequisites
- 9 nodes (2 load balancers, 1 NFS storage, 3 Kubernetes masters, 3 Kubernetes workers)
- 6 subnets
- Internet access on every network
- Ubuntu 20.04 on every node
- HAProxy version 2.0.13-2ubuntu0.3
- Keepalived v2.0.19
- NFS Server version 4
- Docker version 19.03.14
- Kubernetes v1.20.11
Host List
# nano /etc/hosts
10.10.25.11 mb-master-01
10.10.25.12 mb-master-02
10.10.25.13 mb-master-03
10.10.25.14 mb-worker-01
10.10.25.15 mb-worker-02
10.10.25.16 mb-worker-03
10.10.25.17 mb-nfs
10.10.25.18 mb-lb-kube01
10.10.25.19 mb-lb-kube02
10.10.25.20 vip
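To avoid editing /etc/hosts by hand on every machine, the entries can be pushed from the admin host with a loop like the sketch below (hosts.txt is an illustrative local file containing the lines above; assumes SSH access to each node already works):
$ for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16 10.10.25.17 10.10.25.18 10.10.25.19;do cat hosts.txt | ssh $i "sudo tee -a /etc/hosts";done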
Update & Upgrade
On all nodes:
# apt update -y; apt upgrade -y
Key Host Checking
On all nodes, to automatically accept new host keys instead of answering the yes/no prompt:
# vim ~/.ssh/config
Add:
Host *
StrictHostKeyChecking accept-new
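The loops used throughout this guide assume passwordless SSH from the admin host to every node. If that is not set up yet, a minimal sketch (assuming an RSA key in ~/.ssh/id_rsa.pub) is:
$ ssh-keygen -t rsa    ## skip if a key pair already exists
$ for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16 10.10.25.17 10.10.25.18 10.10.25.19;do ssh-copy-id $i;done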
Setup Haproxy & Keepalived
## On all nodes
$ sudo sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf
$ echo "net.ipv4.ip_nonlocal_bind = 1" | sudo tee -a /etc/sysctl.conf
$ sudo sysctl -p
## OR
for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16 10.10.25.17 10.10.25.18 10.10.25.19;do ssh $i "sudo sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/' /etc/sysctl.conf";ssh $i "echo 'net.ipv4.ip_nonlocal_bind = 1' | sudo tee -a /etc/sysctl.conf";ssh $i "sudo sysctl -p";done
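As a quick check, read the two kernel parameters back on every node; both should report 1:
$ for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16 10.10.25.17 10.10.25.18 10.10.25.19;do ssh $i "sysctl net.ipv4.ip_forward net.ipv4.ip_nonlocal_bind";done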
## On mb-lb-kube01 and mb-lb-kube02
$ sudo apt install haproxy keepalived -y
$ sudo nano /etc/haproxy/haproxy.cfg
## add the following lines at the end of the file
frontend apiservers
    bind *:8443
    mode tcp
    option tcplog
    default_backend k8s_apiservers

backend k8s_apiservers
    mode tcp
    option tcplog
    option tcp-check
    option log-health-checks
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server cluster1-master-1 mb-master-01:6443 check
    server cluster1-master-2 mb-master-02:6443 check
    server cluster1-master-3 mb-master-03:6443 check
$ sudo systemctl restart haproxy
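Optionally, validate the configuration and confirm the frontend is listening on both load balancers:
$ sudo haproxy -c -f /etc/haproxy/haproxy.cfg    ## should print "Configuration file is valid"
$ sudo ss -tlnp | grep 8443                      ## HAProxy should be bound to port 8443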
Create the file /etc/keepalived/keepalived.conf
$ sudo nano /etc/keepalived/keepalived.conf
## in mb-lb-kube01
vrrp_script chk_haproxy {
    script "/usr/bin/killall -0 haproxy"
    interval 2
    weight 2
}

vrrp_instance LB_VIP {
    interface ens3
    state MASTER
    priority 101
    virtual_router_id 52
    authentication {
        auth_type PASS
        auth_pass P@ssw0rd
    }
    virtual_ipaddress {
        10.10.25.20
    }
    track_script {
        chk_haproxy
    }
}
## in mb-lb-kube02
vrrp_script chk_haproxy {
    script "/usr/bin/killall -0 haproxy"
    interval 2
    weight 2
}

vrrp_instance LB_VIP {
    interface ens3
    state BACKUP
    priority 100
    virtual_router_id 52
    authentication {
        auth_type PASS
        auth_pass P@ssw0rd
    }
    virtual_ipaddress {
        10.10.25.20
    }
    track_script {
        chk_haproxy
    }
}
$ sudo systemctl restart keepalived
$ sudo systemctl enable --now keepalived
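To verify failover, check that the VIP 10.10.25.20 sits on mb-lb-kube01 while it is MASTER; stopping haproxy there should move the VIP to mb-lb-kube02 (optional test):
$ ip -4 addr show ens3 | grep 10.10.25.20
$ sudo systemctl stop haproxy     ## the VIP should appear on mb-lb-kube02 within a few seconds
$ sudo systemctl start haproxy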
Setup NFS Server & Client
## On mb-nfs (10.10.25.17)
$ sudo apt update
$ sudo apt install nfs-kernel-server
$ sudo systemctl status nfs-server
$ sudo mkdir /mnt/kube_file
$ sudo chown nobody:nogroup /mnt/kube_file
$ sudo chmod -R 777 /mnt/kube_file
$ sudo nano /etc/exports
Add one of the following lines (restrict access to the cluster subnet, or allow any client):
/mnt/kube_file/ 10.10.25.0/24(rw,sync,no_subtree_check)
## or
/mnt/kube_file/ *(rw,sync,no_subtree_check)
$ sudo exportfs -a
$ sudo exportfs -rv
$ sudo systemctl restart nfs-kernel-server
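The active exports can be verified from the NFS server (showmount ships with the NFS packages):
$ sudo exportfs -v          ## list exports with their options
$ showmount -e localhost    ## list what clients will see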
## Test the client
$ sudo apt install nfs-common   ## install nfs-common on every worker node as well, since dynamic provisioning mounts the share from the workers
$ sudo mount -t nfs <ip nfs-server>:/mnt/kube_file /mnt
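If the mount succeeds it should be visible and writable; unmount afterwards, since Kubernetes mounts the share itself:
$ df -h /mnt                        ## should show the NFS export
$ touch /mnt/test-file && ls /mnt   ## write test (the export is rw)
$ sudo umount /mnt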
Setup Docker
## On all Kubernetes nodes (masters and workers), or use the loop below
$ sudo apt-get install -y apt-transport-https ca-certificates
$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
$ sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
$ sudo apt-get update
$ sudo apt install -y docker-ce=5:20.10.9~3-0~ubuntu-focal
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"storage-driver": "overlay2"
}
EOF
for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do ssh $i "sudo apt-get install -y apt-transport-https ca-certificates";ssh $i "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -";ssh $i "sudo add-apt-repository 'deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable'";ssh $i "sudo apt-get update";ssh $i "sudo apt install -y docker-ce=5:20.10.9~3-0~ubuntu-focal";done
$ for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do cat /etc/docker/daemon.json | ssh $i "sudo tee /etc/docker/daemon.json";done
$ for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do ssh $i "sudo systemctl enable --now docker";done
$ for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do ssh $i "sudo systemctl restart docker";done
$ for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do ssh $i "sudo systemctl status docker";done
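After the restart, confirm every node picked up the systemd cgroup driver from daemon.json, since kubelet is expected to use the same driver:
$ for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do ssh $i "sudo docker info --format '{{.CgroupDriver}}'";done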
Setup K8S
## On all Kubernetes nodes (masters and workers)
$ sudo swapoff -a
$ sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
$ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
$ echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
$ sudo apt-get update
$ sudo apt install -y kubeadm=1.20.11-00 kubelet=1.20.11-00 kubectl=1.20.11-00
Turn off automatic updates for the Kubernetes packages:
sudo apt-mark hold kubelet kubeadm kubectl
for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do ssh $i "sudo apt-mark hold kubelet kubeadm kubectl";done
for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do ssh $i "sudo systemctl enable --now kubelet";done
for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do ssh $i "sudo systemctl restart kubelet";done
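A quick version check across the nodes confirms the pinned packages were installed:
$ for i in 10.10.25.11 10.10.25.12 10.10.25.13 10.10.25.14 10.10.25.15 10.10.25.16;do ssh $i "kubeadm version -o short; kubectl version --client --short";done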
## Run only on one of the master nodes (mb-master-01 or mb-master-02)
$ sudo kubeadm init --kubernetes-version "1.20.11" --pod-network-cidr "192.168.0.0/16" --control-plane-endpoint "vip:8443" --upload-certs
------ LAST OUTPUT-------
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of the control-plane node running the following command on each as root:
kubeadm join 10.10.25.20:8443 --token 7okxhf.af5gnm29wjw90zti \
--discovery-token-ca-cert-hash sha256:c05b210bcc51272ac1720080ec39723b33df62fb7570969941c6f06be7e3d765 \
--control-plane --certificate-key 203e5c6fd0199c28ffe2cdeb3b3b8c73108f32e835ce6c9edc889b5bd10f018e
Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.10.25.20:8443 --token 7okxhf.af5gnm29wjw90zti \
--discovery-token-ca-cert-hash sha256:c05b210bcc51272ac1720080ec39723b33df62fb7570969941c6f06be7e3d765
-------LAST OUTPUT--------------
$ kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
$ kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
$ watch kubectl get pods -n calico-system
$ kubectl get pods -n calico-system   ## ensure all pods are Running and Ready
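Once Calico is up, check the cluster both directly and through the VIP; all joined nodes should eventually report Ready, and the API server should answer through HAProxy/keepalived (anonymous access to /version is allowed by the kubeadm defaults):
$ kubectl get nodes -o wide
$ curl -k https://10.10.25.20:8443/version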
Setup Dynamic Provisioning Storage NFS
$ git clone https://exxsyseng@bitbucket.org/exxsyseng/nfs-provisioning.git
## or create the manifests manually as below
$ mkdir nfs-provisioning && cd nfs-provisioning
$ nano namespaces.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: nfsc-provisioner
$ kubectl create -f namespaces.yaml
$ nano rbac.yaml
kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfsc-provisioner-sa
  namespace: nfsc-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfsc-provisioner-cr
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfsc-provisioner-crb
subjects:
  - kind: ServiceAccount
    name: nfsc-provisioner-sa
    namespace: nfsc-provisioner
roleRef:
  kind: ClusterRole
  name: nfsc-provisioner-cr
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfsc-provisioner-role
  namespace: nfsc-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfsc-provisioner-rolebinding
  namespace: nfsc-provisioner
subjects:
  - kind: ServiceAccount
    name: nfsc-provisioner-sa
    namespace: nfsc-provisioner
roleRef:
  kind: Role
  name: nfsc-provisioner-role
  apiGroup: rbac.authorization.k8s.io
$ kubectl create -f rbac.yaml
$ kubectl get clusterrole,clusterrolebinding,role,rolebinding |grep nfs
$ nano class.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfsc-storage-sc
provisioner: example.com/nfs
parameters:
  archiveOnDelete: "false"
$ kubectl create -f class.yaml
$ kubectl get storageclass
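Optionally, mark the new class as the cluster default so PVCs that omit storageClassName also use it:
$ kubectl patch storageclass nfsc-storage-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'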
$ nano deployment.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  namespace: nfsc-provisioner
spec:
  selector:
    matchLabels:
      app: nfs-client-provisioner
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfsc-provisioner-sa
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: example.com/nfs
            - name: NFS_SERVER
              value: 10.10.25.17
            - name: NFS_PATH
              value: /mnt/kube_file
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.10.25.17
            path: /mnt/kube_file
$ kubectl create -f deployment.yaml
$ kubectl get all -n nfsc-provisioner
$ kubectl describe pod nfs-client-provisioner-XXXXX -n nfsc-provisioner
$ nano pv-nfs.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv
spec:
  storageClassName: nfsc-storage-sc
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  capacity:
    storage: 5Gi
  nfs:
    path: /mnt/kube_file
    server: 10.10.25.17
$ kubectl create -f pv-nfs.yaml
$ nano pvc-nfs.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc1
  namespace: nfsc-provisioner
spec:
  storageClassName: nfsc-storage-sc
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 500Mi
$ kubectl create -f pvc-nfs.yaml
$ kubectl get pvc,pv -n nfsc-provisioner
$ nano pod_nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: nginx
  name: nfs-nginx
  namespace: nfsc-provisioner
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      volumes:
        - name: nginx-volume
          persistentVolumeClaim:
            claimName: pvc1
      containers:
        - image: nginx
          name: nginx
          volumeMounts:
            - name: nginx-volume
              mountPath: /usr/share/nginx/html
$ kubectl create -f pod_nginx.yaml
$ kubectl get all -n nfsc-provisioner
# or
$ kubectl get pod -n nfsc-provisioner
$ kubectl describe pod nfs-nginx-XXXX -n nfsc-provisioner
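As a final end-to-end check, write a file through the nginx pod and confirm it lands on the NFS export (the pod name is a placeholder, use the one shown by kubectl get pod):
$ kubectl exec -n nfsc-provisioner nfs-nginx-XXXX -- sh -c 'echo "hello from nfs" > /usr/share/nginx/html/index.html'
$ ssh 10.10.25.17 "ls -lR /mnt/kube_file"    ## the file should appear directly in the export or in a per-PVC subdirectory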