Prerequisite (all nodes)
# Refresh all packages on every node; -y keeps the run non-interactive so a
# scripted provisioning pass does not stall on the confirmation prompt.
yum update -y
Install CRI (Docker)
Install Docker from CentOS/RHEL repository
# Install the Docker engine shipped in the CentOS/RHEL repos and mark the
# service to come up at boot.
yum -y install docker
systemctl enable docker
Setup daemon
# Write the Docker daemon config recommended by the kubeadm setup docs:
# systemd cgroup driver (matches kubelet), bounded json-file logs, overlay2.
# NOTE: JSON does not allow "#" comments — the commented-out lines in the
# previous version made this file unparseable and dockerd would fail to start.
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
Restart docker
# Reload unit files and bounce Docker so the new daemon.json takes effect.
systemctl daemon-reload
systemctl restart docker.service
Installing kubeadm, kubelet and kubectl
# kubelet refuses to start with swap enabled: turn it off now...
swapoff -a
# ...and comment out any swap entries in /etc/fstab so it stays off after a
# reboot (pattern matches lines whose fields include a "swap" column).
sed -i '/\sswap\s/ s/^/#/' /etc/fstab
# Add the Kubernetes package repository.
# NOTE: the legacy Google-hosted repo (packages.cloud.google.com) was frozen
# and later shut down; the community-owned pkgs.k8s.io repos replace it and
# are versioned per minor release — bump the v1.29 path when upgrading.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF
Set SELinux in permissive mode (effectively disabling it)
# Put SELinux into permissive mode for this boot (kubeadm requires it until
# proper SELinux policies for the kubelet exist).
setenforce 0
# Persist across reboots. The pattern only rewrites an exact
# "SELINUX=enforcing" line, so an already-permissive or disabled config is
# deliberately left untouched.
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Install the kubeadm toolchain; --disableexcludes lifts the repo's
# "exclude=" guard for just this transaction.
yum install --disableexcludes=kubernetes -y kubelet kubeadm kubectl
# Start kubelet and keep it enabled at boot (it will crash-loop until
# kubeadm init/join provides a config — that is expected).
systemctl enable kubelet.service && systemctl start kubelet
# Stop and disable firewalld so node-to-node and pod traffic is not blocked
# (alternatively, open exactly the ports kubeadm's preflight checks list).
# Uses systemctl consistently instead of the legacy "service" wrapper used
# elsewhere being mixed in.
systemctl stop firewalld
systemctl disable firewalld
# Bridged traffic must traverse iptables and IPv4 forwarding must be on for
# kube-proxy and most CNI plugins. The bridge-nf-call-* keys only exist once
# br_netfilter is loaded, so load it now and persist both the module and the
# sysctl settings across reboots (the previous one-shot sysctl calls did not
# survive a reboot).
cat <<EOF > /etc/modules-load.d/k8s.conf
br_netfilter
EOF
modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
Master node
# Pre-pull the control-plane images, then initialise the control plane.
# 10.244.0.0/16 is the pod CIDR expected by the canal/flannel manifests below.
kubeadm config images pull
kubeadm init --pod-network-cidr=10.244.0.0/16
# Give the current (non-root) user a kubeconfig.
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"
# Root-only ALTERNATIVE (do not combine with the copy above — the previous
# unconditional export overrode the per-user config just created):
#   export KUBECONFIG=/etc/kubernetes/admin.conf
# Install the Canal (Calico policy + flannel networking) pod network add-on:
# RBAC manifest first, then the add-on itself.
canal_url=https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/canal
for manifest in rbac.yaml canal.yaml; do
  kubectl apply -f "$canal_url/$manifest"
done
To schedule pod on master node:
# Remove the default NoSchedule taint from the control-plane node so regular
# pods can be scheduled on it (single-node clusters only; the trailing "-"
# means "delete this taint").
kubectl taint nodes --all node-role.kubernetes.io/master-
Worker node
Use the exact join command printed by `kubeadm init` on the master (or regenerate it with `kubeadm token create --print-join-command`); the line below is an example:
# Join this worker to the cluster. NOTE(review): the endpoint, token and
# CA-cert hash below are examples from one specific cluster — use the exact
# command printed by "kubeadm init", or regenerate it on the master with
# "kubeadm token create --print-join-command".
kubeadm join 10.0.2.100:6443 --token 5ka4zo.890fyuaiq3xynote --discovery-token-ca-cert-hash sha256:b0669f92c142266627e130d1bd61d3996583eeeb7222181a40cd86e39a6fa598
Install Dashboard
# ServiceAccount for the dashboard admin user.
# NOTE: the previous version had lost its YAML indentation, which made
# "name"/"namespace" top-level keys instead of children of "metadata",
# so the manifest was invalid.
cat > admin-service-account.yaml <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
EOF
kubectl apply -f admin-service-account.yaml
# Bind the admin-user service account to the built-in cluster-admin role so
# its token grants full dashboard access.
# NOTE: re-indented — "roleRef" and "subjects" are top-level fields and their
# children must be nested; the flattened previous version was invalid YAML.
cat > cluster-role-binding.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system
EOF
kubectl apply -f cluster-role-binding.yaml
Get the bearer token used to sign in to the dashboard:
# Describe the admin-user token secret (the bearer token for dashboard login).
# awk does both the filtering and the column extraction in a single pass.
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | awk '/admin-user/ {print $1}')
create dashboard
# Deploy the Kubernetes Dashboard from the upstream manifest.
# NOTE(review): this "master"-branch URL is not version-pinned and may move or
# break as the dashboard project restructures — pin to a release tag instead.
kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml
Run kubectl proxy so that other machines can reach the dashboard:
# Serve the API (and dashboard) on this host's address for other machines.
# --accept-hosts is required: by default the proxy only accepts requests whose
# Host header is localhost/127.0.0.1 and answers everything else with
# "Forbidden", so remote browsers could never load the dashboard.
# WARNING: '^.*$' accepts any host — restrict this on untrusted networks.
kubectl proxy --address=10.0.2.100 --accept-hosts='^.*$' &
More tips
k8s create namespace:
# Namespace manifest (re-indented: "name" must be nested under "metadata";
# replace <namespace name> with the actual namespace before applying).
apiVersion: v1
kind: Namespace
metadata:
  name: <namespace name>
stateless deployment nginx
# Stateless nginx Deployment (re-indented — the flattened paste was invalid
# YAML: every nested field sat at column 0).
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  namespace: hoang-test
  name: nginx-deployment # name of the deployment
spec:
  selector:
    matchLabels:
      app: nginx # must match template.metadata.labels.app
  replicas: 2 # run 2 pods matching the template
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
expose service:
# Expose the deployment as a LoadBalancer service named nginx-service in the
# hoang-test namespace. NOTE(review): on bare metal the external IP will stay
# <pending> unless a load-balancer implementation (e.g. MetalLB) is installed.
kubectl expose deployment nginx-deployment --type=LoadBalancer --name=nginx-service -n hoang-test
or expose by apply yaml
# Equivalent Service manifest (re-indented — the flattened paste was invalid
# YAML).
apiVersion: v1
kind: Service
metadata:
  namespace: hoang-test
  name: nginx-service-x
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    app: nginx
  type: LoadBalancer # use type: NodePort to bind the port on every node
  externalTrafficPolicy: Cluster
Expose the dashboard with a NodePort service; access it at https://<node-ip>:32222
# NodePort Service exposing the dashboard on port 32222 of every node
# (re-indented — the flattened paste was invalid YAML).
kind: Service
apiVersion: v1
metadata:
  namespace: kube-system
  name: dashboard-service
  labels:
    hoang-app: k8s-dashboard-svc
spec:
  selector:
    k8s-app: kubernetes-dashboard
  ports:
  - protocol: TCP
    port: 8443
    targetPort: 8443
    nodePort: 32222
  type: NodePort
install helm
# Install the Helm client via the upstream installer script.
# NOTE(review): piping curl straight into bash executes unreviewed remote
# code — prefer downloading and inspecting the script first.
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
# Give Tiller (Helm v2's in-cluster component) a service account with
# cluster-admin rights. NOTE(review): Tiller was removed in Helm v3; this
# whole section applies to Helm v2 only.
kubectl create serviceaccount -n kube-system tiller
kubectl create clusterrolebinding tiller-binding --clusterrole=cluster-admin --serviceaccount kube-system:tiller
# run tiller with specific tiller account
helm init --service-account tiller
# Install the nginx ingress controller chart from the stable repo
# (Helm v2 assigns a random release name, e.g. "dining-bear" below).
helm install stable/nginx-ingress
The nginx-ingress controller has been installed.
It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl --namespace default get services -o wide -w dining-bear-nginx-ingress-controller'
An example Ingress that makes use of the controller:
# Example Ingress using the nginx controller (re-indented — the flattened
# paste was invalid YAML). extensions/v1beta1 is the API group current for
# the cluster version in these notes; newer clusters use networking.k8s.io/v1.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  annotations:
    kubernetes.io/ingress.class: nginx
  name: example
  namespace: foo
spec:
  rules:
  - host: www.example.com
    http:
      paths:
      - backend:
          serviceName: exampleService
          servicePort: 80
        path: /
  # This section is only required if TLS is to be enabled for the Ingress
  tls:
  - hosts:
    - www.example.com
    secretName: example-tls
If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided:
# TLS Secret referenced by the Ingress above (re-indented — the flattened
# paste was invalid YAML). Replace the placeholders with base64-encoded PEM.
apiVersion: v1
kind: Secret
metadata:
  name: example-tls
  namespace: foo
data:
  tls.crt: <base64 encoded cert>
  tls.key: <base64 encoded key>
type: kubernetes.io/tls
https://kubernetes.github.io/ingress-nginx/deploy/#bare-metal