Environment preparation
192.168.241.130 master
192.168.241.131 node1
192.168.241.132 node2
Set the permanent hostname on each machine, then log back in:
hostnamectl set-hostname master
hostnamectl set-hostname node1
hostnamectl set-hostname node2
Edit the /etc/hosts file to add the hostname-to-IP mappings:
$ vim /etc/hosts
192.168.241.130 master
192.168.241.131 node1
192.168.241.132 node2
Disable the firewall on every machine:
$ systemctl stop firewalld
$ systemctl disable firewalld
If a swap partition is enabled, kubelet will fail to start (this can be ignored by setting --fail-swap-on to false), so disable swap on every machine:
$ swapoff -a
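swapoff -a only disables swap until the next reboot. To keep it off permanently, a common approach is to comment out the swap entry in /etc/fstab, for example:
$ sed -ri 's/.*swap.*/#&/' /etc/fstab //comments out every line mentioning swap so it is not mounted at boot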
Disable SELinux, otherwise K8S may report Permission denied when mounting directories later:
$ setenforce 0
Edit the config file to make this permanent:
$ grep SELINUX /etc/selinux/config
SELINUX=disabled
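If the file still shows SELINUX=enforcing, a one-line sed can switch it:
$ sed -i 's/^SELINUX=enforcing$/SELINUX=disabled/' /etc/selinux/config //takes full effect after a reboot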
Sync the system time:
$ yum -y install ntpdate
$ ntpdate cn.pool.ntp.org
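ntpdate only syncs once; if you want the clocks to stay in sync, one option (an optional extra, not part of the original steps) is a periodic cron entry:
$ echo "*/30 * * * * /usr/sbin/ntpdate cn.pool.ntp.org >/dev/null 2>&1" >> /var/spool/cron/root //resync every 30 minutes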
Environment preparation is complete.
Deploying the etcd cluster
Binary package download: https://github.com/etcd-io/etcd/releases
Create the following directories to keep things organized later:
[root@master ~]# mkdir -p /root/k8s/k8s-cert //holds the k8s certificates
[root@master ~]# mkdir -p /root/k8s/etcd-cert //holds the etcd certificates
Upload the script to the /root/k8s/etcd-cert path (this is the complete script; below it is executed in parts):
[root@master ~]# cat /root/k8s/etcd-cert/etcd-cert.sh
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#-----------------------
cat > server-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "192.168.241.130",
    "192.168.241.131",
    "192.168.241.132"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
Download cfssl, the tool used to generate the etcd certificates.
Run the script:
[root@master ~]# bash /root/k8s/etcd-cert/cfssl.sh
curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson /usr/local/bin/cfssl-certinfo
Type cf and press Tab to check whether the installation succeeded.
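Alternatively, verify the binaries directly:
[root@master ~]# cfssl version //prints the version and revision if the install succeeded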
Run the following to generate the CA JSON files:
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "www": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "etcd CA",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing"
    }
  ]
}
EOF
[root@master etcd-cert]# ls /root/k8s/etcd-cert //check the json files
Then run:
[root@master etcd-cert]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca - //generate the root certificate
This produces the ca-key.pem and ca.pem files.
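To double-check the new root certificate (its subject, issuer, and expiry), you can inspect it with the cfssl-certinfo tool downloaded above:
[root@master etcd-cert]# cfssl-certinfo -cert ca.pem //shows subject, issuer and validity of the CA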
Generate the etcd server certificate:
cat > server-csr.json <<EOF
{
  "CN": "etcd",
  "hosts": [
    "192.168.241.130",
    "192.168.241.131",
    "192.168.241.132"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing"
    }
  ]
}
EOF
##Be sure to change the IPs to your own
[root@master etcd-cert]# ls
ca-config.json ca-csr.json ca.pem etcd-cert.sh server-csr.json server.pem
ca.csr ca-key.pem cfssl.sh server.csr server-key.pem
Then run:
[root@master etcd-cert]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
##A warning may appear here; it can be ignored
Certificate preparation is complete.
Next, install etcd.
[root@master ~]# mkdir /root/soft //holds the installation packages
etcd-v3.3.10-linux-amd64.tar.gz
Extract the archive:
[root@master soft]# tar -xf etcd-v3.3.10-linux-amd64.tar.gz
Create the etcd installation directories:
[root@master ~]# mkdir -p /opt/etcd/{cfg,bin,ssl}
Move the two extracted binaries into etcd's bin directory:
[root@master ~]# cd /root/soft/etcd-v3.3.10-linux-amd64
[root@master etcd-v3.3.10-linux-amd64]# mv etcd etcdctl /opt/etcd/bin/
Write the etcd setup script (/root/k8s/etcd.sh):
#!/bin/bash
# example: ./etcd.sh etcd01 192.168.241.130 etcd02=https://192.168.241.131:2380,etcd03=https://192.168.241.132:2380
ETCD_NAME=$1
ETCD_IP=$2
ETCD_CLUSTER=$3
WORK_DIR=/opt/etcd
cat <<EOF >$WORK_DIR/cfg/etcd
#[Member]
ETCD_NAME="${ETCD_NAME}"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_LISTEN_CLIENT_URLS="https://${ETCD_IP}:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://${ETCD_IP}:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://${ETCD_IP}:2379"
ETCD_INITIAL_CLUSTER="${ETCD_NAME}=https://${ETCD_IP}:2380,${ETCD_CLUSTER}"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
cat <<EOF >/usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=${WORK_DIR}/cfg/etcd
ExecStart=${WORK_DIR}/bin/etcd \
--name=\${ETCD_NAME} \
--data-dir=\${ETCD_DATA_DIR} \
--listen-peer-urls=\${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=\${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=\${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=\${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=\${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=\${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=${WORK_DIR}/ssl/server.pem \
--key-file=${WORK_DIR}/ssl/server-key.pem \
--peer-cert-file=${WORK_DIR}/ssl/server.pem \
--peer-key-file=${WORK_DIR}/ssl/server-key.pem \
--trusted-ca-file=${WORK_DIR}/ssl/ca.pem \
--peer-trusted-ca-file=${WORK_DIR}/ssl/ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
Make the script executable:
[root@master ~]# chmod +x /root/k8s/etcd.sh
Run the script:
[root@master k8s]# ./etcd.sh etcd01 192.168.241.130 etcd02=https://192.168.241.131:2380,etcd03=https://192.168.241.132:2380
##An error will be reported here, because the certificates are not yet in place
You can inspect the generated /opt/etcd/cfg/etcd and /usr/lib/systemd/system/etcd.service files.
Copy the certificates to /opt/etcd/ssl:
[root@master k8s]# cp /root/k8s/etcd-cert/{ca,server-key,server}.pem /opt/etcd/ssl/
Start etcd:
[root@master k8s]# systemctl start etcd
An error may appear because the other members are not up yet, but etcd on the master node is in the started state.
Watch the logs with tail -f /var/log/messages.
Copy the files to node1 and node2:
scp -r /opt/etcd/ 192.168.241.131:/opt/
scp -r /opt/etcd/ 192.168.241.132:/opt/
scp /usr/lib/systemd/system/etcd.service 192.168.241.131:/usr/lib/systemd/system
scp /usr/lib/systemd/system/etcd.service 192.168.241.132:/usr/lib/systemd/system
The etcd cluster cannot be started yet:
the IPs and the etcd names in the node1 and node2 config files must be modified first.
[root@master cfg]# vim /opt/etcd/cfg/etcd
#[Member]
ETCD_NAME="etcd01" //change per machine: etcd02 on node1, etcd03 on node2
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.241.130:2380" //change the IP
ETCD_LISTEN_CLIENT_URLS="https://192.168.241.130:2379" //change the IP
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.241.130:2380" //change the IP
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.241.130:2379" //change the IP
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.241.130:2380,etcd02=https://192.168.241.131:2380,etcd03=https://192.168.241.132:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
Run the following on all three machines:
systemctl daemon-reload
systemctl start etcd
ps -ef |grep etcd //check the process
Check the cluster status:
[root@master k8s]# /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.241.130:2379,https://192.168.241.131:2379,https://192.168.241.132:2379" cluster-health
member 951e78d92214d87b is healthy: got healthy result from https://192.168.241.132:2379
member a8d3d8b4942ac39e is healthy: got healthy result from https://192.168.241.130:2379
member c1e11c263eb397b7 is healthy: got healthy result from https://192.168.241.131:2379
cluster is healthy
This shows the etcd cluster was created successfully.
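Besides cluster-health, you can also list the members and see which one is the leader (same TLS flags as above):
[root@master k8s]# /opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.241.130:2379" member list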
Install Docker (omitted here).
The Kubernetes network model (CNI)
Container Network Interface (CNI)
Design requirements of the Kubernetes network model:
One IP per pod
Each pod has its own IP, and all containers inside a pod share that network (the same IP)
All containers can communicate with all other containers
All nodes can communicate with all containers
Deploying the flannel network
Steps:
1. Write the allocated subnet into etcd for flannel to use (a verification command follows this list):
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.241.130:2379,https://192.168.241.131:2379,https://192.168.241.132:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
2. Download the binary package: https://github.com/coreos/flannel/releases
3. Deploy and configure flannel
4. Manage flannel with systemd
5. Configure Docker to use the subnet flannel generates
6. Start flannel
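To confirm the subnet written in step 1, you can read the key back with the same TLS flags:
/opt/etcd/bin/etcdctl --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem --endpoints="https://192.168.241.130:2379,https://192.168.241.131:2379,https://192.168.241.132:2379" get /coreos.com/network/config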
Let's begin.
Write the flannel script:
[root@slave1 ~]# vim flannel.sh
#!/bin/bash
ETCD_ENDPOINTS=${1:-"http://127.0.0.1:2379"}
#generate the flannel options file
cat <<EOF >/opt/kubernetes/cfg/flanneld
FLANNEL_OPTIONS="--etcd-endpoints=${ETCD_ENDPOINTS} \
-etcd-cafile=/opt/etcd/ssl/ca.pem \
-etcd-certfile=/opt/etcd/ssl/server.pem \
-etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF
#generate the flanneld service unit
cat <<EOF >/usr/lib/systemd/system/flanneld.service
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
EnvironmentFile=/opt/kubernetes/cfg/flanneld
ExecStart=/opt/kubernetes/bin/flanneld --ip-masq \$FLANNEL_OPTIONS
ExecStartPost=/opt/kubernetes/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
#rewrite the docker service unit
cat <<EOF >/usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/run/flannel/subnet.env
ExecStart=/usr/bin/dockerd \$DOCKER_NETWORK_OPTIONS
ExecReload=/bin/kill -s HUP \$MAINPID
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TimeoutStartSec=0
Delegate=yes
KillMode=process
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable flanneld
systemctl restart flanneld
systemctl restart docker
This can be done on the master; I did it on the nodes.
node1
Create the flannel working directories:
[root@slave1 ~]# mkdir -p /opt/kubernetes/{bin,cfg,ssl}
Extract the package:
[root@slave1 ~]# tar -xf flannel-v0.10.0-linux-amd64.tar.gz
Move the extracted binaries into the installation directory:
[root@slave1 ~]# mv flanneld mk-docker-opts.sh /opt/kubernetes/bin/
Run the script (with no argument it defaults to http://127.0.0.1:2379, which works here because each node also runs a local etcd member listening on that address; otherwise pass the HTTPS endpoints explicitly):
[root@slave1 ~]# ./flannel.sh
Restart Docker:
[root@slave1 ~]# systemctl restart docker
Verify that the docker0 IP and the flannel IP are in the same subnet.
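For example (the exact subnets will differ on each node):
[root@slave1 ~]# cat /run/flannel/subnet.env //the subnet flannel allocated to this node
[root@slave1 ~]# ip addr show flannel.1 //flannel's vxlan interface
[root@slave1 ~]# ip addr show docker0 //docker0 should sit inside the same flannel subnet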
For node2, simply copy the files over:
[root@slave1 ~]# scp /usr/lib/systemd/system/{flanneld,docker}.service node2:/usr/lib/systemd/system
[root@slave1 ~]# scp -r /opt/kubernetes node2:/opt/
Start flannel:
[root@slave2 ~]# systemctl start flanneld
Restart Docker:
[root@slave2 ~]# systemctl daemon-reload
[root@slave2 ~]# systemctl restart docker
Verify that the docker0 IP and the flannel IP are in the same subnet.
Create a container on node1 and on node2 and test network connectivity (container to container, and host to container), as shown below.
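A minimal connectivity test, assuming the busybox image can be pulled:
[root@slave1 ~]# docker run -it busybox sh //on node1; note the container's 172.17.x.x address with ip addr
[root@slave2 ~]# docker run -it busybox sh //on node2
/ # ping <container IP from node1> //should succeed across hosts through the vxlan overlay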
Flannel deployment is complete.
Deploying the master components
1. kube-apiserver //must be deployed first
2. kube-controller-manager
3. kube-scheduler
Download: at the time of writing these binaries could not be downloaded from GitHub directly.
Deploying kube-apiserver
Write the apiserver script:
[root@master k8s]# vim apiserver.sh
#!/bin/bash
MASTER_ADDRESS=$1 # apiserver address
ETCD_SERVERS=$2 # etcd endpoints
cat <<EOF >/opt/kubernetes/cfg/kube-apiserver
KUBE_APISERVER_OPTS="--logtostderr=true \\
--v=4 \\
--etcd-servers=${ETCD_SERVERS} \\
--bind-address=${MASTER_ADDRESS} \\
--secure-port=6443 \\
--advertise-address=${MASTER_ADDRESS} \\
--allow-privileged=true \\
--service-cluster-ip-range=10.0.0.0/24 \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \\
--authorization-mode=RBAC,Node \\
--kubelet-https=true \\
--enable-bootstrap-token-auth \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--service-node-port-range=30000-50000 \\
--tls-cert-file=/opt/kubernetes/ssl/server.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \\
--client-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--etcd-cafile=/opt/etcd/ssl/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/server.pem \\
--etcd-keyfile=/opt/etcd/ssl/server-key.pem"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl restart kube-apiserver
Extract the package:
[root@master ~]# mkdir -p /opt/kubernetes/{bin,ssl,cfg} //create the kubernetes installation directories
[root@master soft]# tar -xf kubernetes-server-linux-amd64.tar.gz
Go to the /root/soft/kubernetes/server/bin directory and copy the following binaries:
[root@master bin]# cp kube-apiserver kube-controller-manager kube-scheduler /opt/kubernetes/bin/
Run the apiserver script:
[root@master k8s]# ./apiserver.sh 192.168.241.130 https://192.168.241.130:2379,https://192.168.241.131:2379,https://192.168.241.132:2379
To specify a log path:
[root@master cfg]# vim /opt/kubernetes/cfg/kube-apiserver
Change
KUBE_APISERVER_OPTS="--logtostderr=true
to
KUBE_APISERVER_OPTS="--logtostderr=false
and add
--log-dir=<log path>
Then restart kube-apiserver for the change to take effect (rerunning the script would regenerate the config file and overwrite this edit).
Generating the k8s certificates
Write the script:
vim /root/k8s/k8s-cert/k8s-cert.sh
cat > ca-config.json <<EOF
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "expiry": "87600h",
        "usages": [
          "signing",
          "key encipherment",
          "server auth",
          "client auth"
        ]
      }
    }
  }
}
EOF
cat > ca-csr.json <<EOF
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "Beijing",
      "ST": "Beijing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
#-----------------------
cat > server-csr.json <<EOF
{
  "CN": "kubernetes",
  "hosts": [
    "10.0.0.1",
    "127.0.0.1",
    "192.168.241.130",
    "192.168.241.131",
    "192.168.241.132",
    "192.168.241.133",
    "192.168.241.134",
    "192.168.241.135",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
#-----------------------
cat > admin-csr.json <<EOF
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
#-----------------------
cat > kube-proxy-csr.json <<EOF
{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "L": "BeiJing",
      "ST": "BeiJing",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
EOF
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
Run the script to generate the certificates and related files:
[root@master k8s-cert]# bash k8s-cert.sh
Copy the generated certificates into the k8s working directory:
[root@master k8s-cert]# cp ca.pem ca-key.pem server.pem server-key.pem /opt/kubernetes/ssl/
Next, generate the token file.
BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ') //the official command for generating a token
I use one that was generated earlier:
[root@master k8s-cert]# BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008
[root@master k8s-cert]# cat > token.csv <<EOF
>${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
>EOF
Put the generated token.csv file into the k8s working directory:
[root@master k8s-cert]# mv token.csv /opt/kubernetes/cfg
Start kube-apiserver:
[root@master ~]# systemctl start kube-apiserver
Note: if apiserver fails to start, you can troubleshoot by running it in the foreground:
[root@master ~]# source /opt/kubernetes/cfg/kube-apiserver
[root@master ~]# /opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
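Once it starts cleanly, a quick health check is possible. With these 1.12-era options the apiserver still serves an insecure local port 8080 by default (the controller-manager and scheduler scripts below rely on it via --master=...:8080), so:
[root@master ~]# curl http://127.0.0.1:8080/healthz //should print ok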
Deploying kube-controller-manager
Write the script:
[root@master k8s]# vim controller-manager.sh
#!/bin/bash
MASTER_ADDRESS=$1
cat <<EOF >/opt/kubernetes/cfg/kube-controller-manager
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect=true \\
--address=127.0.0.1 \\
--service-cluster-ip-range=10.0.0.0/24 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--root-ca-file=/opt/kubernetes/ssl/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \\
--experimental-cluster-signing-duration=87600h0m0s"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-controller-manager.service
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl restart kube-controller-manager
Run the script; only the local IP needs to be specified:
[root@master k8s]# ./controller-manager.sh 127.0.0.1
Deploying kube-scheduler
Write the script:
[root@master k8s]# vim scheduler.sh
#!/bin/bash
MASTER_ADDRESS=$1
cat <<EOF >/opt/kubernetes/cfg/kube-scheduler
KUBE_SCHEDULER_OPTS="--logtostderr=true \\
--v=4 \\
--master=${MASTER_ADDRESS}:8080 \\
--leader-elect"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-scheduler.service
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-scheduler
systemctl restart kube-scheduler
Run the script; only the local IP needs to be specified:
[root@master k8s]# ./scheduler.sh 127.0.0.1
Copy kubectl to /usr/bin/:
[root@master ~]# cp /root/soft/kubernetes/server/bin/kubectl /usr/bin/ //kubectl can now be used for management
Check the cluster status:
[root@master ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
scheduler Healthy ok
controller-manager Healthy ok
etcd-0 Healthy {"health":"true"}
etcd-2 Healthy {"health":"true"}
etcd-1 Healthy {"health":"true"}
[root@master ~]# kubectl api-resources //lists every resource together with its short name
Bind the kubelet-bootstrap user to the system cluster role:
[root@master ~]# kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
This grants just the minimal permission needed to issue certificates to kubelets.
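You can verify the binding afterwards:
[root@master ~]# kubectl get clusterrolebinding kubelet-bootstrap -o yaml //should show the user kubelet-bootstrap bound to the clusterrole system:node-bootstrapper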
Deploying the node components
Create the kubeconfig files.
Write the kubeconfig script:
[root@master k8s-cert]# vim kubeconfig.sh
APISERVER=$1
SSL_DIR=$2
BOOTSTRAP_TOKEN=0fb61c46f8991b718eb38d27b605b008
# Create the kubelet bootstrapping kubeconfig
export KUBE_APISERVER="https://$APISERVER:6443"
# Set cluster parameters
kubectl config set-cluster kubernetes \
--certificate-authority=$SSL_DIR/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=bootstrap.kubeconfig
# Set client authentication parameters
kubectl config set-credentials kubelet-bootstrap \
--token=${BOOTSTRAP_TOKEN} \
--kubeconfig=bootstrap.kubeconfig
# Set context parameters
kubectl config set-context default \
--cluster=kubernetes \
--user=kubelet-bootstrap \
--kubeconfig=bootstrap.kubeconfig
# Use the default context
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
#----------------------
# Create the kube-proxy kubeconfig file
kubectl config set-cluster kubernetes \
--certificate-authority=$SSL_DIR/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=$SSL_DIR/kube-proxy.pem \
--client-key=$SSL_DIR/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
Run the script:
[root@master k8s-cert]# bash kubeconfig.sh 192.168.241.130 /root/k8s/k8s-cert/
Copy the generated files to the nodes:
[root@master k8s-cert]# scp bootstrap.kubeconfig kube-proxy.kubeconfig node1:/opt/kubernetes/cfg/
[root@master k8s-cert]# scp bootstrap.kubeconfig kube-proxy.kubeconfig node2:/opt/kubernetes/cfg/
Deploy the kubelet and kube-proxy components (run these steps on the nodes).
Write the kubelet script:
[root@slave1 ~]# vim kubelet.sh
#!/bin/bash
NODE_ADDRESS=$1
DNS_SERVER_IP=${2:-"10.0.0.2"}
cat <<EOF >/opt/kubernetes/cfg/kubelet
KUBELET_OPTS="--logtostderr=true \\
--v=4 \\
--address=${NODE_ADDRESS} \\
--hostname-override=${NODE_ADDRESS} \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--experimental-bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.config \\
--cert-dir=/opt/kubernetes/ssl \\
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
EOF
cat <<EOF >/opt/kubernetes/cfg/kubelet.config
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${NODE_ADDRESS}
port: 10250
cgroupDriver: cgroupfs
clusterDNS:
- ${DNS_SERVER_IP}
clusterDomain: cluster.local.
failSwapOn: false
EOF
cat <<EOF >/usr/lib/systemd/system/kubelet.service
[Unit]
Description=Kubernetes Kubelet
After=docker.service
Requires=docker.service
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=on-failure
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
Run the script:
[root@slave1 ~]# bash kubelet.sh 192.168.241.131
Copy the kubelet and kube-proxy binaries extracted earlier on the master to the node:
[root@master bin]# scp /root/soft/kubernetes/server/bin/kubelet kube-proxy node1:/opt/kubernetes/bin/
Start kubelet:
[root@slave1 ~]# systemctl start kubelet
If the following error is reported:
error: failed to run Kubelet: cannot create certificate signing request: certificatesigningrequests.certificates.k8s.io is forbidden: User "kubelet-bootstrap" cannot create certificatesigningrequests.certificates.k8s.io at the cluster scope
Cause: the kubelet-bootstrap user does not have permission to create certificate signing requests, so that permission must be granted by binding the user to the role.
The fix is to run on the master: kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
(This fix is adapted from CSDN blogger lyzkks, CC 4.0 BY-SA: https://blog.csdn.net/sinat_35930259/article/details/79946298)
Almost all of the problems here come down to certificates; be careful.
View the kubelet's CSR request (run on the master):
[root@master ~]# kubectl get csr
Run the following to approve the request and let the node join the cluster:
[root@master ~]# kubectl certificate approve node-csr-c9xfnyCf7Vz1owhwI4yR5knoAxSKA8tNq-gB4C6KAtQ
Check the cluster:
[root@master ~]# kubectl get no
NAME STATUS ROLES AGE VERSION
192.168.241.131 Ready <none> 170m v1.12.1
Deploying kube-proxy
Write the kube-proxy script:
[root@slave1 ~]# vim proxy.sh
#!/bin/bash
NODE_ADDRESS=$1
cat <<EOF >/opt/kubernetes/cfg/kube-proxy
KUBE_PROXY_OPTS="--logtostderr=true \\
--v=4 \\
--hostname-override=${NODE_ADDRESS} \\
--cluster-cidr=10.0.0.0/24 \\
--proxy-mode=ipvs \\
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
EOF
cat <<EOF >/usr/lib/systemd/system/kube-proxy.service
[Unit]
Description=Kubernetes Proxy
After=network.target
[Service]
EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable kube-proxy
systemctl restart kube-proxy
Run the script:
[root@slave1 ~]# bash proxy.sh 192.168.241.131
Check the status:
[root@slave1 ~]# ps -ef |grep proxy
root 17178 1 0 14:42 ? 00:01:00 /opt/kubernetes/bin/kube-proxy --logtostderr=true --v=4 --hostname-override=192.168.241.131 --cluster-cidr=10.0.0.0/24 --proxy-mode=ipvs --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig
root 44837 13942 0 17:26 pts/1 00:00:00 grep --color=auto proxy
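Since kube-proxy runs in ipvs mode here, the virtual-server rules it programs can also be inspected, assuming the ipvsadm tool is installed:
[root@slave1 ~]# yum -y install ipvsadm //only needed for inspection
[root@slave1 ~]# ipvsadm -Ln //lists the ipvs rules, e.g. the kubernetes service 10.0.0.1:443 forwarding to the apiserver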
node1 is complete. Next deploy node2; just copy the files over and change the IPs:
[root@slave1 ~]# scp -r /opt/kubernetes node2:/opt/
[root@slave1 ~]# scp /usr/lib/systemd/system/{kubelet,kube-proxy}.service node2:/usr/lib/systemd/system/
Modify the configuration files (run on node2):
[root@slave2 ssl]# rm -rf /opt/kubernetes/ssl //remove the certificates copied from node1 so node2 requests its own
Edit these files (change the IPs to node2's IP):
[root@slave2 cfg]# vim /opt/kubernetes/cfg/kubelet
[root@slave2 cfg]# vim /opt/kubernetes/cfg/kubelet.config
[root@slave2 cfg]# vim /opt/kubernetes/cfg/kube-proxy
Start the services (on node2):
[root@slave2 cfg]# systemctl start kubelet
[root@slave2 cfg]# systemctl start kube-proxy //kube-proxy must be started here as well
On the master, run kubectl get csr to see the new request.
Approve it to add the node to the cluster:
[root@master ~]# kubectl certificate approve node-csr-aC8xr_qDkBseE1rmAcT1YS40jxsnhpALkZRCQ_Yj_UY
Check the cluster status on the master:
[root@master ~]# kubectl get no
NAME STATUS ROLES AGE VERSION
192.168.241.131 Ready <none> 3h5m v1.12.1
192.168.241.132 Ready <none> 166m v1.12.1
The binary deployment of a one-master, two-node k8s cluster is now complete.
Troubleshooting
Problem 1
#####kubectl cannot view logs and reports the following error
Error message:
[root@master yaml]# kubectl -n kube-system logs -f kubernetes-dashboard-fdccbc96-r5srr
error: You must be logged in to the server (the server has asked for the client to provide credentials ( pods/log kubernetes-dashboard-fdccbc96-r5srr))
Solution:
On the node, edit the kubelet.config file.
------------------append the following at the end of the file (authentication settings)
authentication:
  anonymous:
    enabled: true
----------------
# Then restart kubelet
systemctl restart kubelet
If it still does not take effect, also restart kube-apiserver on the master.
Problem 2
As described: some containers can be entered, but others report the following error:
kubectl exec traefik-ingress-controller-6gh2g -i -t -n kube-system -- /bin/bash
OCI runtime exec failed: exec failed: container_linux.go:348: starting container process caused "exec: \"/bin/bash\": stat /bin/bash: no such file or directory": unknown
command terminated with exit code 126
Solution:
Have you considered what is special about traefik-ingress-controller-6gh2g? The container image simply does not ship /bin/bash; it may only provide /bin/sh, or expose no shell interface at all.
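In that case try /bin/sh instead:
kubectl exec traefik-ingress-controller-6gh2g -i -t -n kube-system -- /bin/sh
If that fails too, the image ships no shell and you are limited to kubectl logs for inspection.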