
Kubernetes v1.18 multi-master deployment


I. Prepare the base environment

1. Set the hostname on each node (run each command on its corresponding node)

hostnamectl set-hostname k8s-master1
hostnamectl set-hostname k8s-master2
hostnamectl set-hostname k8s-master3
hostnamectl set-hostname k8s-worker1
hostnamectl set-hostname k8s-worker2
hostnamectl set-hostname k8s-worker3

# Run on all servers

cat >> /etc/hosts << EOF
192.168.3.101 k8s-master1
192.168.3.102 k8s-master2
192.168.3.103 k8s-master3
192.168.3.104 k8s-worker1
192.168.3.105 k8s-worker2
192.168.3.106 k8s-worker3
EOF
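
Optionally verify that the new names resolve correctly from any node:

# Optional: confirm the names resolve to the right addresses
ping -c 1 k8s-master2
ping -c 1 k8s-worker1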

yum -y install vim wget net-tools lrzsz

systemctl stop firewalld && systemctl disable firewalld
sed -i '/^SELINUX=/s/enforcing/disabled/' /etc/selinux/config 
setenforce 0
swapoff -a
sed -i '/swap/s/^/#/' /etc/fstab
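
A quick optional check that all three changes took effect:

# Optional: verify firewalld, SELinux, and swap state
systemctl is-active firewalld   # expect: inactive
getenforce                      # expect: Permissive (Disabled after a reboot)
free -m | grep -i swap          # expect: zeros in the Swap row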

II. Deploy the Docker environment

Run on all nodes (the workers need Docker as well)

yum install -y yum-utils device-mapper-persistent-data lvm2

wget -O /etc/yum.repos.d/aliyun.base.repo http://mirrors.aliyun.com/repo/Centos-7.repo

wget https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo 
yum -y install docker-ce-18.09.9-3.el7

# Start Docker and enable it at boot
systemctl start docker && systemctl enable docker
# Configure registry mirrors for faster image pulls
cat << EOF > /etc/docker/daemon.json
{
    "registry-mirrors": [
        "https://dockerhub.azk8s.cn",
        "https://hub-mirror.c.163.com"
    ]
}
EOF

# Reload the systemd daemon and restart Docker
systemctl daemon-reload && systemctl restart docker 
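
Optionally confirm that Docker restarted cleanly and picked up the mirrors (docker info lists them under "Registry Mirrors"):

# Optional: confirm the mirror configuration was applied
docker info | grep -A 2 'Registry Mirrors'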
cat << EOF >> /etc/sysctl.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

sysctl -p
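
If sysctl -p reports that these keys do not exist, the br_netfilter kernel module has not been loaded yet; load it, persist it across reboots, and re-apply:

# Load br_netfilter (required for the bridge sysctls above) and persist it
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl -p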

III. Deploy the Kubernetes cluster

Add the Aliyun Kubernetes YUM repository on all nodes

Run on all nodes

cat > /etc/yum.repos.d/kubernetes.repo << EOF 
[kubernetes] 
name=Kubernetes 
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
gpgcheck=0 
repo_gpgcheck=0 
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
EOF

Install kubeadm, kubelet, and kubectl on all nodes

The version is pinned here; change it if you need a different one.

yum install -y kubelet-1.18.3-0 kubeadm-1.18.3-0 kubectl-1.18.3-0
systemctl enable kubelet
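
Optionally confirm that every node ended up with the pinned versions:

# Optional: verify the installed versions
kubeadm version -o short
kubectl version --client --short
kubelet --version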

Initialize the master node

Run only on k8s-master1.

Because the default image registry k8s.gcr.io is unreachable from mainland China, the config below points at the Aliyun mirror registry. On success, kubeadm init prints the join commands in the last lines of its output.

# Generate a default config, then edit it to match the listing below
kubeadm config print init-defaults > /opt/kubeadm.conf
vim /opt/kubeadm.conf
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.3.101
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "192.168.3.101"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.18.3
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: "10.244.0.0/16"
scheduler: {}
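
Optionally, pre-pull the control-plane images with the same config so the init itself runs faster and registry problems surface early:

# Optional: pre-pull the control-plane images
kubeadm config images pull --config /opt/kubeadm.conf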

# Initialize the control plane
kubeadm init --config /opt/kubeadm.conf

Copy the certificates generated on master1 to master2 and master3

# Set up passwordless SSH from master1 to master2 and master3
ssh-keygen -t rsa
ssh-copy-id root@192.168.3.102
ssh-copy-id root@192.168.3.103
# Create the target directory on master2 and master3
mkdir -p  /etc/kubernetes/pki

vim /etc/kubernetes/pki/scp.sh
# cat scp.sh
#!/bin/bash
USER=root
CONTROL_PLANE_IPS="192.168.3.102 192.168.3.103"
for host in ${CONTROL_PLANE_IPS}; do
    scp /etc/kubernetes/pki/ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.key "${USER}"@$host:
    scp /etc/kubernetes/pki/sa.pub "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.crt "${USER}"@$host:
    scp /etc/kubernetes/pki/front-proxy-ca.key "${USER}"@$host:
    scp /etc/kubernetes/pki/etcd/ca.crt "${USER}"@$host:etcd-ca.crt
    scp /etc/kubernetes/pki/etcd/ca.key "${USER}"@$host:etcd-ca.key
    scp /etc/kubernetes/admin.conf "${USER}"@$host:
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
    ssh "${USER}"@$host "mv /${USER}/ca.crt /etc/kubernetes/pki/"
    ssh "${USER}"@$host "mv /${USER}/ca.key /etc/kubernetes/pki/"
    ssh "${USER}"@$host "mv /${USER}/sa.pub /etc/kubernetes/pki/"
    ssh "${USER}"@$host "mv /${USER}/sa.key /etc/kubernetes/pki/"
    ssh "${USER}"@$host "mv /${USER}/front-proxy-ca.crt /etc/kubernetes/pki/"
    ssh "${USER}"@$host "mv /${USER}/front-proxy-ca.key /etc/kubernetes/pki/"
    ssh "${USER}"@$host "mv /${USER}/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt"
    ssh "${USER}"@$host "mv /${USER}/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key"
    ssh "${USER}"@$host "mv /${USER}/admin.conf /etc/kubernetes/admin.conf"
done


chmod +x /etc/kubernetes/pki/scp.sh
cd /etc/kubernetes/pki/
./scp.sh
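
Optionally confirm the certificates landed where the join expects them:

# Optional: verify the copied files on master2 and master3
ssh root@192.168.3.102 'ls /etc/kubernetes/pki /etc/kubernetes/pki/etcd'
ssh root@192.168.3.103 'ls /etc/kubernetes/pki /etc/kubernetes/pki/etcd'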

On success, the last few lines of the kubeadm init output contain the join commands (the token and hash differ on every run; use the values from your own output). Nodes use these to join the cluster.

# Run on master2 and master3 to join them as control-plane nodes
 kubeadm join 192.168.3.101:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:ba35554715add44ece0fdcbf3eba7aa3f6de611865ded38ffe7d13f661e0c706 \
    --control-plane
# Run on each master node
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Run on the worker nodes
kubeadm join 192.168.3.101:6443 --token abcdef.0123456789abcdef     \
    --discovery-token-ca-cert-hash sha256:ba35554715add44ece0fdcbf3eba7aa3f6de611865ded38ffe7d13f661e0c706
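
The bootstrap token has a 24-hour TTL (set in the config above). If it has expired by the time a node joins, generate a fresh join command on master1:

# Run on master1 to print a new join command with a fresh token
kubeadm token create --print-join-command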

IV. Deploy the flannel network plugin

Install the flannel network plugin (the manifest used below is flannel, whose default Pod network 10.244.0.0/16 matches the podSubnet configured earlier).

Upload the manifest from your local machine

mkdir /opt/docker-flannel
cd /opt/docker-flannel
# Upload the network plugin manifest (rz comes from the lrzsz package installed earlier)
rz  kube-flannel.yaml
kubectl apply -f kube-flannel.yaml
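
If uploading with rz is inconvenient, the manifest can also be fetched directly; the URL below is the upstream flannel repository path and is an assumption here, so verify it matches the version you want:

# Alternative (URL assumed): download the manifest instead of uploading it
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f kube-flannel.yml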

Run the following commands to confirm that all Pods and nodes are up.

kubectl get nodes
kubectl get pod --all-namespaces
# or: kubectl get pod -A
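
The flannel and CoreDNS Pods take a minute or two to start, and nodes only report Ready once the network plugin is running. To watch progress:

# Watch kube-system Pods until everything shows Running
kubectl get pod -n kube-system -w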