Reference: https://blog.csdn.net/xznswd/article/details/127266051
Preparation: you need two or more servers. I only have two, so this walkthrough uses two servers, a and b.
Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
cat /etc/selinux/config
Disable the swap partition
sed -ri 's/.*swap.*/#&/' /etc/fstab
reboot
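After the reboot, a quick sanity check (standard CentOS 7 commands) confirms the changes stuck:
getenforce    # should print Disabled
free -m | grep -i swap    # all swap figures should be 0
systemctl is-active firewalld    # should print inactive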
Set the hostname (server a)
hostnamectl set-hostname master
bash
(server b)
hostnamectl set-hostname node1
bash
Add hosts entries on every node (configure on both a and b)
vi /etc/hosts
<server-a-ip> master
<server-b-ip> node1
Verify the nodes can ping each other
ping node1
ping master
Pass bridged IPv4 traffic to iptables chains (configure on both a and b)
[root@master ~]# cat > /etc/sysctl.d/k8s.conf << EOF
>
> net.bridge.bridge-nf-call-ip6tables = 1
>
> net.bridge.bridge-nf-call-iptables = 1
>
> EOF
[root@master ~]# sysctl --system    # reload so the settings take effect
* Applying /usr/lib/sysctl.d/00-system.conf ...
* Applying /usr/lib/sysctl.d/10-default-yama-scope.conf ...
kernel.yama.ptrace_scope = 0
* Applying /usr/lib/sysctl.d/50-default.conf ...
kernel.sysrq = 16
kernel.core_uses_pid = 1
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.conf.all.promote_secondaries = 1
fs.protected_hardlinks = 1
fs.protected_symlinks = 1
* Applying /etc/sysctl.d/99-sysctl.conf ...
* Applying /etc/sysctl.d/k8s.conf ...
* Applying /etc/sysctl.conf ...
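One caveat: the two net.bridge keys only exist while the br_netfilter kernel module is loaded, so if sysctl --system reports them as unknown keys, load the module and re-apply:
modprobe br_netfilter
lsmod | grep br_netfilter    # confirm the module is present
sysctl --system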
Set up time synchronization on every node (configure on both a and b)
[root@node2 ~]# yum install ntpdate -y    # install the time-sync tool
[root@master ~]# ntpdate time.windows.com    # sync the clock once
3 May 23:19:47 ntpdate[2980]: adjust time server 20.189.79.72 offset -0.005315 sec
[root@node1 ~]# ntpdate time.windows.com
3 May 23:19:51 ntpdate[2296]: adjust time server 20.189.79.72 offset -0.004672 sec
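ntpdate is a one-shot sync. Optionally, a cron entry keeps the clocks aligned over time (a sketch; adjust the interval to taste):
(crontab -l 2>/dev/null; echo "*/30 * * * * /usr/sbin/ntpdate time.windows.com") | crontab -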
Install ipset and ipvsadm on every node. (ipset is an extension to iptables that lets rules match whole sets of addresses at once, rather than the linear storage and filtering of ordinary iptables chains; ipvsadm is the tool for setting up, maintaining, and inspecting the virtual server table in the Linux kernel.)
[root@node2 ~]# yum -y install ipset ipvsadm    # install ipset and ipvsadm
[root@master ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF    # module-load script
> #!/bin/bash
> modprobe -- ip_vs
> modprobe -- ip_vs_rr
> modprobe -- ip_vs_wrr
> modprobe -- ip_vs_sh
> modprobe -- nf_conntrack_ipv4
> EOF
## Make the script executable, run it, and check the modules loaded:
[root@master ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
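The /etc/sysconfig/modules script is only executed at boot on some setups; to be safe on CentOS 7 you can also register the modules with systemd-modules-load (an optional sketch):
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
EOF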
Install Docker and Kubernetes on every node
For the k8s/Docker version compatibility table, see:
Reference: https://blog.51cto.com/u_16213315/9265584
## Get the Docker CE yum repo
[root@master ~]# wget http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo -O /etc/yum.repos.d/docker-ce.repo
--2022-05-03 23:22:49-- http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
Resolving mirrors.aliyun.com (mirrors.aliyun.com)... 119.96.65.198, 119.96.65.202, 119.96.64.238, ...
Connecting to mirrors.aliyun.com (mirrors.aliyun.com)|119.96.65.198|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 2081 (2.0K) [application/octet-stream]
Saving to: '/etc/yum.repos.d/docker-ce.repo'
100%[==========================================>] 2,081 --.-K/s in 0s
2022-05-03 23:22:50 (275 MB/s) - '/etc/yum.repos.d/docker-ce.repo' saved [2081/2081]
## Refresh the yum repos
[root@master ~]# yum clean all
Loaded plugins: fastestmirror
Cleaning repos: base docker-ce-stable extras updates
Cleaning up everything
Maybe you want: rm -rf /var/cache/yum, to also free up space taken by orphaned data from disabled or removed repos
Cleaning up list of fastest mirrors
[root@master ~]# yum makecache
Loaded plugins: fastestmirror
Determining fastest mirrors
* base: mirrors.aliyun.com
* extras: mirrors.aliyun.com
updates | 2.9 kB 00:00:00
(1/14): base/7/x86_64/group_gz | 153 kB 00:00:00
(2/14): base/7/x86_64/filelists_db | 7.2 MB 00:00:01
(3/14): docker-ce-stable/7/x86_64/updateinfo | 55 B 00:00:00
(4/14): base/7/x86_64/other_db | 2.6 MB 00:00:00
(5/14): docker-ce-stable/7/x86_64/filelists_db | 31 kB 00:00:00
(6/14): docker-ce-stable/7/x86_64/primary_db | 75 kB 00:00:00
(7/14): docker-ce-stable/7/x86_64/other_db | 123 kB 00:00:00
(8/14): extras/7/x86_64/primary_db | 246 kB 00:00:00
(9/14): extras/7/x86_64/other_db | 147 kB 00:00:00
(10/14): extras/7/x86_64/filelists_db | 277 kB 00:00:00
(11/14): base/7/x86_64/primary_db | 6.1 MB 00:00:02
(12/14): updates/7/x86_64/other_db | 1.0 MB 00:00:00
(13/14): updates/7/x86_64/filelists_db | 8.2 MB 00:00:04
(14/14): updates/7/x86_64/primary_db | 15 MB 00:00:06
Metadata cache created
[root@master ~]# yum -y update    # update packages
Install Docker (servers a and b). docker-ce and docker-ce-cli must be pinned together since their versions correspond; if you pin only one, yum will not fetch the matching version of the other but will pull the latest instead (a bit of a trap).
yum list docker-ce --showduplicates | sort -r
yum install docker-ce-20.10.5-3.el7 docker-ce-cli-20.10.5-3.el7
Enable start on boot
systemctl enable docker && systemctl start docker
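kubeadm expects the kubelet and Docker to agree on the cgroup driver, and Docker defaults to cgroupfs while recent kubelets default to systemd, which kubeadm's preflight will warn about. A minimal /etc/docker/daemon.json to align them (a sketch, assuming the default kubelet config):
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl daemon-reload && systemctl restart docker
docker info | grep -i cgroup    # should now report systemd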
Configure kubeadm, kubelet, and kubectl on all nodes
kubelet: runs on every node in the cluster and is responsible for starting Pods and containers
kubeadm: used to bootstrap the cluster
kubectl: the Kubernetes command-line tool; with it you can deploy and manage applications, inspect resources, and create, delete, and update components
Create the yum repo file
[root@node2 ~]# cat > kubernetes.repo << EOF
> [kubernetes]
> name=Kubernetes
> baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
> enabled=1
> gpgcheck=1
> repo_gpgcheck=1
> gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
> EOF
[root@node2 ~]# mv kubernetes.repo /etc/yum.repos.d/    # move the file into the yum repo directory
Install kubeadm, kubelet, and kubectl. Docker above is 20.10.x; the Kubernetes packages must match the --kubernetes-version passed to kubeadm init below, v1.23.0 in this walkthrough
[root@node1 etc]# yum install -y kubelet-1.23.0 kubeadm-1.23.0 kubectl-1.23.0
[root@node1 etc]# systemctl enable kubelet    # enable start on boot
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service
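Before initializing, you can confirm all three components landed on the same version:
kubeadm version -o short
kubelet --version
kubectl version --client --short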
Initialize the cluster with kubeadm (on the master machine only)
kubeadm init \
--apiserver-advertise-address=<server-a-ip> \
--image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.23.0 \
--service-cidr=10.96.0.0/12 \
--pod-network-cidr=10.244.0.0/16 \
--ignore-preflight-errors=all
Copy the k8s credential files
[root@master kubelet.service.d]# mkdir -p $HOME/.kube
[root@master kubelet.service.d]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master kubelet.service.d]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@master ~]# scp /etc/kubernetes/admin.conf root@node1:/root/
The authenticity of host 'node1 (192.168.200.120)' can't be established.
ECDSA key fingerprint is SHA256:ESmmyN7sUeSWqsTVabY6UOjt322FOm+q9O7lohc25VU.
ECDSA key fingerprint is MD5:78:4c:85:76:6e:29:02:5b:5c:44:bf:c3:6f:66:11:e4.
Check the nodes
[root@master kubelet.service.d]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady control-plane,master 4m15s v1.23.0
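The transcript skips the join step: node1 still has to join the cluster. kubeadm init prints a ready-made join command at the end of its output; run that on node1 (the values below are placeholders, yours come from your own init output):
kubeadm join <server-a-ip>:6443 --token <token> \
    --discovery-token-ca-cert-hash sha256:<hash>
# if you lost the printed command, regenerate it on the master:
kubeadm token create --print-join-command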
After node1 joins and its images are pulled successfully, both nodes report Ready:
[root@master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane,master 3d5h v1.23.0
node1 Ready <none> 3d5h v1.23.0
Install the Pod network (Calico)
[root@master flannel]# wget https://docs.projectcalico.org/v3.20/manifests/calico.yaml --no-check-certificate
[root@master flannel]# kubectl apply -f calico.yaml
configmap/calico-config unchanged
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org configured
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org configured
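The Calico pods can take a few minutes to pull images and start; you can watch them come up:
kubectl get pods -n kube-system -w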
## Verify the cluster and its components
[root@master flannel]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master Ready control-plane,master 11h v1.23.0
node1 Ready <none> 11h v1.23.0
node2 Ready <none> 11h v1.23.0
[root@master flannel]# kubectl get pods --namespace kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-7c845d499-n6kv4 1/1 Running 0 9m41s
calico-node-85rhs 1/1 Running 0 9m41s
calico-node-d4n4q 1/1 Running 0 9m41s
calico-node-zqjtn 1/1 Running 0 9m41s
coredns-6d8c4cb4d-l78gv 1/1 Running 0 12h
coredns-6d8c4cb4d-r6mvw 1/1 Running 0 12h
etcd-master 1/1 Running 0 12h
kube-apiserver-master 1/1 Running 0 12h
kube-controller-manager-master 1/1 Running 2 (11h ago) 12h
kube-proxy-9wbvj 1/1 Running 0 12h
kube-proxy-g72xh 1/1 Running 2 (11h ago) 11h
kube-proxy-w54v6 1/1 Running 0 11h
kube-scheduler-master 1/1 Running 1 (11h ago) 12h
[root@master flannel]# kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 12h
[root@master flannel]# kubectl get svc --namespace kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 12h
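A quick way to prove cluster DNS actually resolves is a throwaway busybox pod (busybox:1.28, since newer busybox images have known nslookup quirks):
kubectl run dns-test --image=busybox:1.28 --rm -it --restart=Never -- nslookup kubernetes.default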
Deploy the Dashboard
[root@master ~]# wget http://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
--2022-05-04 13:45:55-- http://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.110.133, 185.199.111.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:80... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml [following]
--2022-05-04 13:45:55-- https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 7543 (7.4K) [text/plain]
Saving to: 'recommended.yaml'
100%[==========================================>] 7,543 --.-K/s in 0s
2022-05-04 13:45:56 (28.6 MB/s) - 'recommended.yaml' saved [7543/7543]
By default the Dashboard is only reachable from inside the cluster; change the Service type to NodePort to expose it externally:
[root@master ~]# vi recommended.yaml
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30001    # add nodePort to pin the port; then open https://<NodeIP>:30001 (must be https; Firefox works well here)
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort
---
[root@master ~]# kubectl apply -f recommended.yaml    # be careful with the indentation in the YAML
namespace/kubernetes-dashboard unchanged
serviceaccount/kubernetes-dashboard unchanged
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
[root@master ~]# kubectl get pods,svc -n kube-system
NAME READY STATUS RESTARTS AGE
pod/calico-kube-controllers-7c845d499-n6kv4 1/1 Running 0 64m
pod/calico-node-85rhs 1/1 Running 0 64m
pod/calico-node-d4n4q 1/1 Running 0 64m
pod/calico-node-zqjtn 1/1 Running 0 64m
pod/coredns-6d8c4cb4d-l78gv 1/1 Running 0 12h
pod/coredns-6d8c4cb4d-r6mvw 1/1 Running 0 12h
pod/etcd-master 1/1 Running 0 12h
pod/kube-apiserver-master 1/1 Running 0 12h
pod/kube-controller-manager-master 1/1 Running 2 (12h ago) 12h
pod/kube-proxy-9wbvj 1/1 Running 0 12h
pod/kube-proxy-g72xh 1/1 Running 2 (12h ago) 12h
pod/kube-proxy-w54v6 1/1 Running 0 12h
pod/kube-scheduler-master 1/1 Running 1 (12h ago) 12h
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/kube-dns ClusterIP 10.96.0.10 <none> 53/UDP,53/TCP,9153/TCP 12h
[root@master ~]# kubectl get pods -n kubernetes-dashboard    # all should be Running
NAME READY STATUS RESTARTS AGE
dashboard-metrics-scraper-799d786dbf-djzvj 1/1 Running 0 4m37s
kubernetes-dashboard-6b6b86c4c5-p5s2r 1/1 Running 0 4m37s
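It is also worth confirming the Service really became a NodePort on 30001:
kubectl get svc -n kubernetes-dashboard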
Log in from a browser. The address must start with https; a plain http request just gets the error that the client sent an HTTP request to an HTTPS server.
[root@master ~]# kubectl create serviceaccount dashboard-admin -n kube-system    # create the service account
serviceaccount/dashboard-admin created
[root@master ~]# kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kube-system:dashboard-admin    # grant it cluster-admin
clusterrolebinding.rbac.authorization.k8s.io/dashboard-admin created
[root@master ~]# kubectl describe secrets -n kube-system $(kubectl -n kube-system get secret | awk '/dashboard-admin/{print $1}')    # fetch the token
Name: dashboard-admin-token-qdpxb
Namespace: kube-system
Labels: <none>
Annotations: kubernetes.io/service-account.name: dashboard-admin
kubernetes.io/service-account.uid: db6fba8a-b8c4-466d-88f5-8cc081520de4
Type: kubernetes.io/service-account-token
Data
====
ca.crt: 1099 bytes
namespace: 11 bytes
token:
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
Use the token from the output to log in to the Dashboard.
Build a local image from the Spring Boot project
FROM openjdk:8-jre
# Add the jar into the container. Local tar files are auto-extracted (compressed network resources are not), and ADD can fetch network resources, similar to wget
ADD citything-gateway-2.0.jar /work/docker-compose/gateway/citything-gateway-2.0.jar
# Same as ADD, but does not auto-extract archives and cannot fetch network resources
# COPY ./config/ /usr/local/config
# Set environment variables
# ENV JAVA_HOME=/usr/local/java/jdk-8
# ENV CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
# ENV PATH=$JAVA_HOME/bin:$PATH
# Switch the working directory, like cd
WORKDIR /work/docker-compose/gateway
# Expose the port
EXPOSE 9999
# Only one ENTRYPOINT or CMD takes effect; a later one overrides an earlier one
ENTRYPOINT ["java", "-jar", "citything-gateway-2.0.jar"]
Run the image build command in the directory that contains the Dockerfile (alongside compose.yml):
docker build -t <image-name>:<tag> .
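For the gateway image used later in this walkthrough, for example:
docker build -t citything-gateway:2.0 .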
Save the image to a tarball: docker save <image>:<tag> -o /path/<name>.tar (a tar archive, not a jar), then scp it to each server
Load the image on the target: docker load -i <path to image tarball>
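Putting the three steps together for this image (paths are illustrative):
docker save citything-gateway:2.0 -o /tmp/citything-gateway-2.0.tar
scp /tmp/citything-gateway-2.0.tar root@node1:/tmp/
# then on node1:
docker load -i /tmp/citything-gateway-2.0.tar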
Create the k8s manifest test.yaml (the citything namespace it references must exist first: kubectl create namespace citything)
apiVersion: apps/v1
kind: Deployment
metadata:
  name: citything-gateway
  namespace: citything
spec:
  selector:
    matchLabels:
      app: citything-gateway
  replicas: 1
  template:
    metadata:
      labels:
        app: citything-gateway
    spec:
      nodeSelector:
        name: "test"
      containers:
        - name: citything-gateway
          image: citything-gateway:2.0
          env:
            - name: TZ
              value: Asia/Shanghai
          imagePullPolicy: Never
          ports:
            - containerPort: 9999
---
apiVersion: v1
kind: Service
metadata:
  name: citything-gateway
  namespace: citything
spec:
  selector:
    app: citything-gateway
  ports:
    - protocol: TCP
      port: 9999
      targetPort: 9999
      nodePort: 30006
  type: NodePort
Label the node so the nodeSelector above matches
kubectl label nodes node1 name=test
Apply the manifest to run the Pod
kubectl apply -f test.yaml
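Finally, verify the Pod landed on the labeled node and the gateway answers on the NodePort (30006 from the Service above; the URL path depends on what your app serves):
kubectl get pods -n citything -o wide
kubectl get svc -n citything
curl http://<node-ip>:30006/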