Kubernetes Binary Deployment (High Availability)
1. Server Planning
| Role | IP | Components |
| --- | --- | --- |
| k8s-master1 | 192.168.31.63 | kube-apiserver, kube-controller-manager, kube-scheduler, etcd |
| k8s-master2 | 192.168.31.64 | kube-apiserver, kube-controller-manager, kube-scheduler |
| k8s-node1 | 192.168.31.65 | kubelet, kube-proxy, docker, etcd |
| k8s-node2 | 192.168.31.66 | kubelet, kube-proxy, docker, etcd |
| Load Balancer (Master) | 192.168.31.61, 192.168.31.60 (VIP) | Nginx L4 |
| Load Balancer (Backup) | 192.168.31.62 | Nginx L4 |
| Harbor (docker registry) | 192.168.31.70 | Harbor |
2. System Initialization
Run the following on every node.

Disable the firewall:
# systemctl stop firewalld
# systemctl disable firewalld

Disable SELinux:
# setenforce 0                                          # temporary
# sed -i 's/enforcing/disabled/' /etc/selinux/config    # permanent

Disable swap:
# swapoff -a        # temporary
# vim /etc/fstab    # permanent (comment out the swap entry)

Sync the system time:
# ntpdate time.windows.com

Add hosts entries:
# vim /etc/hosts
192.168.31.63 k8s-master1
192.168.31.64 k8s-master2
192.168.31.65 k8s-node1
192.168.31.66 k8s-node2

Set the hostname (run the matching command on each node):
hostnamectl set-hostname k8s-master1
hostnamectl set-hostname k8s-master2
hostnamectl set-hostname k8s-node1
hostnamectl set-hostname k8s-node2
3. Deploy the etcd Cluster
3.1 Generate etcd Certificates
mkdir -p TLS/etcd/ssl && cd TLS/etcd/ssl
ca-config.json
{ "signing": { "default": { "expiry": "87600h" }, "profiles": { "www": { "expiry": "87600h", "usages": [ "signing", "key encipherment", "server auth", "client auth" ] } } } }
ca-csr.json
{ "CN": "etcd CA", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "Beijing", "ST": "Beijing" } ] }
server-csr.json
{ "CN": "etcd", "hosts": [ "192.168.31.63", "192.168.31.64", "192.168.31.65", "192.168.31.66" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing" } ] }
Generate the CA certificate:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
This produces ca.pem and ca-key.pem.
Issue the server certificate from the CA:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
This produces server.csr, server.pem, and server-key.pem.
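As an optional sanity check (assuming openssl is installed), confirm the issued certificate's SANs cover every etcd member IP:

openssl x509 -in server.pem -noout -text | grep -A1 'Subject Alternative Name'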
3.2 Deploy etcd
Upload and extract the etcd binary package:
etcd.tar.gz
tar zxvf etcd.tar.gz
cd etcd
cp ../TLS/etcd/ssl/{ca,server,server-key}.pem ssl/
etcd.conf
The configuration differs per node (note the marked lines):
#[Member]
ETCD_NAME="etcd-1"                                      # change per node
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.31.63:2380"      # change per node
ETCD_LISTEN_CLIENT_URLS="https://192.168.31.63:2379"    # change per node

#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.31.63:2380"   # change per node
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.31.63:2379"         # change per node
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.31.63:2380,etcd-2=https://192.168.31.65:2380,etcd-3=https://192.168.31.66:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/opt/etcd/ssl/server.pem \
--key-file=/opt/etcd/ssl/server-key.pem \
--peer-cert-file=/opt/etcd/ssl/server.pem \
--peer-key-file=/opt/etcd/ssl/server-key.pem \
--trusted-ca-file=/opt/etcd/ssl/ca.pem \
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
Distribute the etcd directory and etcd.service to the other etcd nodes (a distribution sketch follows below).
Note: etcd.service must live in the directory systemd scans for unit files:
mv etcd.service /usr/lib/systemd/system
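A minimal distribution sketch, assuming root SSH access to the other two etcd members (remember to adjust ETCD_NAME and the IPs in etcd.conf on each node afterwards):

scp -r /opt/etcd root@192.168.31.65:/opt/
scp -r /opt/etcd root@192.168.31.66:/opt/
scp /usr/lib/systemd/system/etcd.service root@192.168.31.65:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/etcd.service root@192.168.31.66:/usr/lib/systemd/system/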
Start etcd:
# systemctl start etcd
# systemctl enable etcd
etcd health check:
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.31.63:2379,https://192.168.31.65:2379,https://192.168.31.66:2379" cluster-health
4. Deploy the Master Nodes
4.1 Generate the Certificates Needed by the Master Nodes
mkdir -p TLS/k8s/ssl && cd TLS/k8s/ssl
server-csr.json
{ "CN": "kubernetes", "hosts": [ "10.0.0.1", "127.0.0.1", "kubernetes", "kubernetes.default", "kubernetes.default.svc", "kubernetes.default.svc.cluster", "kubernetes.default.svc.cluster.local", "192.168.31.60", "192.168.31.61", "192.168.31.62", "192.168.31.63", "192.168.31.64", "192.168.31.65", "192.168.31.66", "192.168.31.70" ], "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "BeiJing", "ST": "BeiJing", "O": "k8s", "OU": "System" } ] }
ca-config.json
{ "signing": { "default": { "expiry": "87600h" }, "profiles": { "kubernetes": { "expiry": "87600h", "usages": [ "signing", "key encipherment", "server auth", "client auth" ] } } } }
ca-csr.json
{ "CN": "kubernetes", "key": { "algo": "rsa", "size": 2048 }, "names": [ { "C": "CN", "L": "Beijing", "ST": "Beijing", "O": "k8s", "OU": "System" } ] }
Generate the CA certificate:
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
Issue the server certificate from the CA:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
Issue the kube-proxy certificate from the CA:
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
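kube-proxy-csr.json is referenced above but not listed; a representative version, assuming the conventional system:kube-proxy CN and the same name fields as the other CSRs in this section:

{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}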
4.2 Deploy the Master Node Services
Upload and extract the binary package:
# tar zxvf k8s-master.tar.gz
# cd kubernetes
# cp TLS/k8s/ssl/*.pem ssl
# cp -rf kubernetes /opt
# cp kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system
kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure

[Install]
WantedBy=multi-user.target
kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--etcd-servers=https://192.168.31.63:2379,https://192.168.31.65:2379,https://192.168.31.66:2379 \
--bind-address=192.168.31.63 \
--secure-port=6443 \
--advertise-address=192.168.31.63 \
--allow-privileged=true \
--service-cluster-ip-range=10.0.0.0/24 \
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \
--authorization-mode=RBAC,Node \
--enable-bootstrap-token-auth=true \
--token-auth-file=/opt/kubernetes/cfg/token.csv \
--service-node-port-range=30000-32767 \
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect=true \
--master=127.0.0.1:8080 \
--address=127.0.0.1 \
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \
--service-cluster-ip-range=10.0.0.0/24 \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--experimental-cluster-signing-duration=87600h0m0s"
kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect \
--master=127.0.0.1:8080 \
--address=127.0.0.1"
token.csv
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"

Format: token,user,uid,group

Generate your own random token:
head -c 16 /dev/urandom | od -An -t x | tr -d ' '

Note: the token must match the one configured in bootstrap.kubeconfig on the nodes.
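A one-liner sketch to regenerate the token and rewrite token.csv in place (the path is the one set by --token-auth-file in kube-apiserver.conf):

TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "${TOKEN},kubelet-bootstrap,10001,\"system:node-bootstrapper\"" > /opt/kubernetes/cfg/token.csv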
Master node deployment directory layout:
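A sketch of the layout, assuming the paths used in the configs above:

/opt/kubernetes/
├── bin/    # kube-apiserver, kube-controller-manager, kube-scheduler
├── cfg/    # kube-apiserver.conf, kube-controller-manager.conf, kube-scheduler.conf, token.csv
├── logs/
└── ssl/    # ca.pem, ca-key.pem, server.pem, server-key.pem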
Start and enable the services:
# systemctl start kube-apiserver
# systemctl start kube-controller-manager
# systemctl start kube-scheduler
# systemctl enable kube-apiserver
# systemctl enable kube-controller-manager
# systemctl enable kube-scheduler
# systemctl status kube-apiserver
# systemctl status kube-controller-manager
# systemctl status kube-scheduler
4.3 Enable TLS Bootstrapping
Grant the kubelet-bootstrap user permission to request certificates:
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
5. Deploy the Worker Nodes
Distribute the kube-proxy certificates and ca.pem generated on the master to the worker nodes:
# cd TLS/k8s/ssl
# scp ca.pem kube-proxy*.pem root@192.168.31.65:/opt/kubernetes/ssl/
# (repeat for k8s-node2, 192.168.31.66)
k8s-node.tar.gz
# tar zxvf k8s-node.tar.gz
# mv kubernetes /opt
# cp kubelet.service kube-proxy.service /usr/lib/systemd/system
5.1 Node Service Configuration
kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target

[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
bootstrap.kubeconfig
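A representative bootstrap.kubeconfig, assuming the apiserver address updated in section 5.3 and the token from token.csv above:

apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://192.168.31.63:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubelet-bootstrap
  name: default
current-context: default
preferences: {}
users:
- name: kubelet-bootstrap
  user:
    token: c47ffb939f5ca36231d9e3121a252940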
kubelet.conf
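kubelet.conf holds KUBELET_OPTS (it is the EnvironmentFile of kubelet.service). A representative sketch, with the hostname override from section 5.3 and CNI enabled per section 5.5; the pause image is a placeholder to replace with a registry reachable from your nodes:

KUBELET_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--hostname-override=k8s-node1 \
--network-plugin=cni \
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
--config=/opt/kubernetes/cfg/kubelet-config.yml \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.example.com/pause-amd64:3.0"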
kubelet-config.yml
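A representative kubelet-config.yml; anonymous access is disabled and webhook authorization enabled, matching the note in section 5.7 (clusterDNS 10.0.0.2 is an assumption derived from the 10.0.0.0/24 service CIDR):

kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 0.0.0.0
port: 10250
readOnlyPort: 10255
cgroupDriver: cgroupfs
clusterDNS:
- 10.0.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
  anonymous:
    enabled: false
  webhook:
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
  mode: Webhook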
kube-proxy.conf
KUBE_PROXY_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--config=/opt/kubernetes/cfg/kube-proxy-config.yml"
kube-proxy-config.yml
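A representative kube-proxy-config.yml, with the hostnameOverride from section 5.3 (clusterCIDR is an assumption taken from --cluster-cidr in kube-controller-manager.conf):

kind: KubeProxyConfiguration
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
metricsBindAddress: 0.0.0.0:10249
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
hostnameOverride: k8s-node1
clusterCIDR: 10.244.0.0/16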
kube-proxy.kubeconfig
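A representative kube-proxy.kubeconfig, using the kube-proxy certificate pair distributed at the start of section 5 and the apiserver address from section 5.3:

apiVersion: v1
kind: Config
clusters:
- cluster:
    certificate-authority: /opt/kubernetes/ssl/ca.pem
    server: https://192.168.31.63:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kube-proxy
  name: default
current-context: default
preferences: {}
users:
- name: kube-proxy
  user:
    client-certificate: /opt/kubernetes/ssl/kube-proxy.pem
    client-key: /opt/kubernetes/ssl/kube-proxy-key.pem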
Node k8s directory layout:
5.2 Install Docker on the Nodes from Binaries
docker-18.09.6.tgz
tar zxvf docker-18.09.6.tgz
mv docker/* /usr/bin/
mkdir /etc/docker
mv daemon.json /etc/docker
mv docker.service /usr/lib/systemd/system
systemctl start docker; systemctl enable docker; systemctl status docker
/etc/docker/daemon.json
{ "registry-mirrors": ["https://yyk0qnca.mirror.aliyuncs.com"], "insecure-registries": ["192.168.31.70"] }
docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target

[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process

[Install]
WantedBy=multi-user.target
5.3 Start kubelet and kube-proxy
Update the apiserver address in these three files:
# grep 192 *
bootstrap.kubeconfig:    server: https://192.168.31.63:6443
kubelet.kubeconfig:    server: https://192.168.31.63:6443
kube-proxy.kubeconfig:    server: https://192.168.31.63:6443

Update the hostname in these two files:
# grep hostname *
kubelet.conf:--hostname-override=k8s-node1 \
kube-proxy-config.yml:hostnameOverride: k8s-node1

Then start the services:
systemctl start kubelet; systemctl enable kubelet; systemctl status kubelet
systemctl start kube-proxy; systemctl enable kube-proxy; systemctl status kube-proxy
5.4 Approve Certificate Issuance for the Nodes
Approve the pending CSR:
kubectl certificate approve node-csr-KPCHuash1oL_xrZWG2IvEC_urVByO5MOQE60QVbRh-U
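The CSR name above is specific to this environment; list the pending requests to find yours, then check that the node registered:

kubectl get csr
kubectl certificate approve <csr-name>
kubectl get node    # newly registered nodes appear here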
Because no network plugin has been deployed yet, the nodes show a NotReady status.
5.5 Deploy the CNI Plugins
mkdir -p /opt/cni/bin /etc/cni/net.d
tar zxvf cni-plugins-linux-amd64-v0.8.2.tgz -C /opt/cni/bin/
Make sure the kubelet configuration specifies CNI as the network plugin (--network-plugin=cni in kubelet.conf).
5.6 Deploy the Flannel Network Plugin from the Master
kubectl apply -f kube-flannel.yaml
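Once the flannel pods are running (assuming the manifest deploys them into kube-system), the nodes should flip to Ready:

kubectl get pods -n kube-system
kubectl get node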
5.7 Authorize the apiserver to Access the kubelet
The kubelet configuration disables anonymous access and requires authentication, so the apiserver needs explicit RBAC permission to reach the kubelet API (used by kubectl logs and kubectl exec):
kubectl apply -f apiserver-to-kubelet-rbac.yaml
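The manifest's content did not make it into this page; a representative apiserver-to-kubelet-rbac.yaml, assuming the binding targets the User "kubernetes" (the CN of the apiserver's kubelet client certificate generated in 4.1):

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: system:kube-apiserver-to-kubelet
rules:
  - apiGroups: [""]
    resources:
      - nodes/proxy
      - nodes/stats
      - nodes/log
      - nodes/spec
      - nodes/metrics
      - pods/log
    verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: system:kube-apiserver
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:kube-apiserver-to-kubelet
subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: kubernetes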
6. Master High Availability Deployment
6.1 Deploy the Second Master (k8s-master2)
scp -r /opt/kubernetes/ root@k8s-master2:/opt/
mkdir /opt/etcd        # run on k8s-master2
scp -r /opt/etcd/ssl root@k8s-master2:/opt/etcd
scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service root@k8s-master2:/usr/lib/systemd/system/
scp /usr/bin/kube* root@k8s-master2:/usr/bin
Edit the kube-apiserver config to use the new node's IP, then start the services:
[root@k8s-master2 cfg]# egrep 'advertise|bind' kube-apiserver.conf
--bind-address=192.168.31.64 \
--advertise-address=192.168.31.64 \

systemctl start kube-apiserver
systemctl start kube-controller-manager
systemctl start kube-scheduler
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler
systemctl status kube-apiserver
systemctl status kube-controller-manager
systemctl status kube-scheduler
6.2 Deploy the Load Balancers
Install nginx and keepalived on both loadbalance-master and loadbalance-backup.
Nginx reverse-proxies (L4) the kube-apiserver services of the two masters.
keepalived runs a health check to determine whether nginx is alive; if nginx dies on one node, the VIP 192.168.31.60 floats to the other node.
Install nginx and keepalived:
yum install -y nginx
yum install -y keepalived
/etc/nginx/nginx.conf
[root@loadbalancer1 keepalived]# cat /etc/nginx/nginx.conf
# For more information on configuration, see:
#   * Official English Documentation: http://nginx.org/en/docs/
#   * Official Russian Documentation: http://nginx.org/ru/docs/

user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;

# Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
include /usr/share/nginx/modules/*.conf;

events {
    worker_connections 1024;
}

stream {
    log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log /var/log/nginx/k8s-access.log main;

    upstream k8s-apiserver {
        server 192.168.31.63:6443;
        server 192.168.31.64:6443;
    }

    server {
        listen 6443;
        proxy_pass k8s-apiserver;
    }
}

http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log /var/log/nginx/access.log main;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    include /etc/nginx/conf.d/*.conf;

    server {
        listen 80 default_server;
        listen [::]:80 default_server;
        server_name _;
        root /usr/share/nginx/html;

        # Load configuration files for the default server block.
        include /etc/nginx/default.d/*.conf;

        location / {
        }

        error_page 404 /404.html;
        location = /40x.html {
        }

        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
        }
    }
}
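The keepalived side is not listed above; a minimal sketch for loadbalance-master, assuming the NIC is named ens33 and a check_nginx.sh helper script (both names are placeholders; the backup node uses state BACKUP and a lower priority such as 90):

! /etc/keepalived/keepalived.conf (loadbalance-master)
global_defs {
    router_id NGINX_MASTER
}

vrrp_script check_nginx {
    script "/etc/keepalived/check_nginx.sh"
}

vrrp_instance VI_1 {
    state MASTER
    interface ens33            # adjust to the actual NIC name
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.31.60/24       # the VIP from the server plan
    }
    track_script {
        check_nginx
    }
}

And the health-check script it references:

#!/bin/bash
# /etc/keepalived/check_nginx.sh -- exit non-zero when nginx is gone so
# keepalived marks this node unhealthy and the VIP fails over.
count=$(ps -ef | grep nginx | grep -cv grep)
if [ "$count" -eq 0 ]; then
    exit 1
else
    exit 0
fi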