每一次学习k8s的时候,都得手动搭建k8s集群,太麻烦,又太浪费时间。
所以就自己写了一个ansible的role,这样能节省不少的时间,有需要的也可以直接拿来使用
一、集群环境
操作系统 | 主机名 | IP地址 |
---|---|---|
CentOS7.6.1810 | master | 192.168.16.35 |
CentOS7.6.1810 | slave1 | 192.168.16.36 |
CentOS7.6.1810 | slave2 | 192.168.16.37 |
注:后期配置所有的repo源,registry仓库全部来自阿里云
二、配置主机
[root@master ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.16.35 master.example.com master
192.168.16.36 slave1.example.com slave1
192.168.16.37 slave2.example.com slave2
[root@master ~]# ssh-keygen
[root@master ~]# ssh-copy-id root@master
[root@master ~]# ssh-copy-id root@slave1
[root@master ~]# ssh-copy-id root@slave2
三、安装并配置ansible
[root@master ~]# yum -y install wget
[root@master ~]# wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
[root@master ~]# yum -y install ansible
[root@master ~]# vi /etc/ansible/ansible.cfg
inventory = /etc/ansible/hosts
remote_user = root
[privilege_escalation]
become=True
become_method=sudo
become_user=root
become_ask_pass=False
[root@master ansible]# vi hosts
[k8s-server]
master
[k8s-slave]
slave1
slave2
[root@master ansible]# ansible all -m ping
[WARNING]: Invalid characters were found in group names but not replaced, use
-vvvv to see details
master | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python"
},
"changed": false,
"ping": "pong"
}
slave2 | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python"
},
"changed": false,
"ping": "pong"
}
slave1 | SUCCESS => {
"ansible_facts": {
"discovered_interpreter_python": "/usr/bin/python"
},
"changed": false,
"ping": "pong"
}
ansible软件包来自epel仓库,需要提前配置好epel源;最后配置好hosts清单文件,进行验证。上面的WARNING是因为组名中使用了"-"(如k8s-server),Ansible建议组名使用下划线,这里不影响使用。
四、配置kubernetes role
1、查看role目录结构
[root@master ansible]# tree roles/
roles/
├── k8s-init-cluster
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── 99-prophet.conf
│ │ ├── chrony.conf
│ │ ├── daemon.json
│ │ ├── ipvs.modules.sh
│ │ ├── join_k8s_cluster.sh
│ │ ├── selinux.config
│ │ └── sysctl_kubernetes.sh
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── README.md
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ └── main.yml
└── k8s-init-cluster.tar.gz
2、配置vars/main.yml
[root@master ansible]# vim roles/k8s-init-cluster/vars/main.yml
#Define THE ServerIP of K8S
k8s_cluster_serverip: 192.168.16.35
#Define THE VERSION of K8S
k8s_cluster_version: v1.23.6
#Define THE CNI plug-in of K8S. The optional types are Calico and Flannel. Default is flannel
k8s_cluster_cni: flannel
#Define the service implementation of K8S. The optional types are ipvs and iptables. Default is ipvs
k8s_cluster_service_type: ipvs
这里可以配置masterIP地址,k8s版本,cni插件以及service网络所使用类型
work节点IP地址不需要配置,来自inventory文件
另外cni插件还没有做
3、查看files
[root@master ansible]# ll roles/k8s-init-cluster/files/
total 28
-rw-r--r--. 1 root root 184 Apr 27 00:47 99-prophet.conf
-rw-r--r--. 1 root root 109 Apr 27 01:08 chrony.conf
-rw-r--r--. 1 root root 112 Apr 27 01:15 daemon.json
-rw-r--r--. 1 root root 124 Apr 27 00:47 ipvs.modules.sh
-rw-rw-r--. 1 root root 357 Apr 27 12:01 join_k8s_cluster.sh
-rw-r--r--. 1 root root 546 Apr 27 00:47 selinux.config
-rw-r--r--. 1 root root 487 Apr 27 09:29 sysctl_kubernetes.sh
[root@master ansible]# cat roles/k8s-init-cluster/files/99-prophet.conf
[Journal]
Storage=persistent
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
SystemMaxUse=10G
SystemMaxFileSize=200M
MaxRetentionSec=2week
ForwardToSyslog=no
[root@master ansible]# cat roles/k8s-init-cluster/files/chrony.conf
server time1.aliyun.com iburst
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
logdir /var/log/chrony
[root@master ansible]# cat roles/k8s-init-cluster/files/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
}
}
[root@master ansible]# cat roles/k8s-init-cluster/files/ipvs.modules.sh
#!/bin/bash
# Load the kernel modules required for kube-proxy in IPVS mode:
# the ip_vs core, the rr/wrr/sh schedulers, and connection tracking.
for kmod in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4; do
  modprobe -- "$kmod"
done
[root@master ansible]# cat roles/k8s-init-cluster/files/join_k8s_cluster.sh
#!/bin/bash
# Build the "kubeadm join" command for worker nodes and write it to
# /opt/join_k8s_cluster.sh, which the playbook later runs on each slave.
# Must run on the control-plane node (reads /etc/kubernetes/pki/ca.crt).
set -euo pipefail

# Bootstrap token: first column of the first data row
# (`kubeadm token list` prints a header on row 1).
kube_token=$(kubeadm token list | awk 'NR==2 {print $1}')

# sha256 digest of the cluster CA public key, in the form expected by
# --discovery-token-ca-cert-hash.
kube_ca=$(openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
  | openssl rsa -pubin -outform der 2>/dev/null \
  | openssl dgst -sha256 -hex \
  | sed 's/^.* //')

# printf with quoted expansions; the original echoed unquoted variables,
# which relies on word splitting and breaks on empty/odd values.
printf 'kubeadm join %s:6443 --token %s --discovery-token-ca-cert-hash sha256:%s\n' \
  "$HOSTNAME" "$kube_token" "$kube_ca" > /opt/join_k8s_cluster.sh
[root@master ansible]# cat roles/k8s-init-cluster/files/selinux.config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
[root@master ansible]# cat roles/k8s-init-cluster/files/sysctl_kubernetes.sh
#!/bin/bash
# Load br_netfilter and persist the kernel parameters Kubernetes needs
# (bridged traffic visible to iptables, IP forwarding, conntrack and
# file-handle limits). The file under /etc/sysctl.d/ survives reboots.
modprobe br_netfilter
# NOTE(review): the settings below disable IPv6 entirely while also
# enabling bridge-nf-call-ip6tables — confirm that combination is intended.
# tcp_tw_recycle was removed in kernel 4.12; it still exists on the
# CentOS 7 (3.10) kernels this role targets.
cat > /etc/sysctl.d/kubernetes.conf << EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
# Apply the new settings immediately.
sysctl -p /etc/sysctl.d/kubernetes.conf
4、查看tasks/main.yml
[root@master ansible]# cat roles/k8s-init-cluster/tasks/main.yml
# Point name resolution at a public resolver so the mirror downloads work.
- name: 配置dns解析
  copy:
    content: "nameserver 114.114.114.114\n"
    dest: /etc/resolv.conf

# Keep a backup of the stock repo file, then fetch the Aliyun mirror config.
- name: 配置base阿里源
  copy:
    src: /etc/yum.repos.d/CentOS-Base.repo
    dest: /etc/yum.repos.d/CentOS-Base.repo.backup
    remote_src: true
- get_url:
    dest: /etc/yum.repos.d/centos.repo
    url: http://mirrors.aliyun.com/repo/Centos-7.repo

# EPEL, Docker CE and Kubernetes repos, all served from the Aliyun mirrors.
- name: 安装epel源和docker,k8s源
  get_url:
    dest: /etc/yum.repos.d/epel.repo
    url: http://mirrors.aliyun.com/repo/epel-7.repo
# Prerequisites for docker-ce and yum-config-manager, in one transaction.
- yum:
    state: present
    name:
      - yum-utils
      - device-mapper-persistent-data
      - lvm2
- shell: yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
- replace:
    path: /etc/yum.repos.d/docker-ce.repo
    regexp: 'download.docker.com'
    replace: 'mirrors.aliyun.com/docker-ce'
- yum_repository:
    file: kubernetes
    name: kubernetes
    description: kubernetes
    baseurl: https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled: true
    gpgcheck: true
    gpgkey: https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg

# Install everything in a single yum transaction instead of one task run
# per package. Duplicate entries (chrony, iptables) from the original
# list removed.
- name: 安装kubelet,kubeadm,kubectl,docker和必要的工具和软件
  yum:
    state: present
    name:
      - kubelet
      - kubeadm
      - kubectl
      - docker-ce
      - conntrack
      - chrony
      - ipvsadm
      - ipset
      - jq
      - iptables
      - curl
      - sysstat
      - libseccomp
      - wget
      - vim
      - net-tools
      - git
      - bash
      - iptables-services
- file:
    path: "{{ item }}"
    state: directory
  loop:
    - /etc/systemd/system/docker.service.d
    - /etc/docker
# systemd cgroup driver + json-file log rotation (see files/daemon.json).
- copy:
    src: daemon.json
    dest: /etc/docker/daemon.json

# Enable and start kubelet and docker.
- service:
    name: "{{ item }}"
    state: started
    enabled: true
  loop:
    - kubelet
    - docker

# kubeadm on CentOS 7 requires firewalld off and SELinux disabled.
- service:
    name: firewalld
    state: stopped
    enabled: false
- selinux:
    state: disabled

# Time sync against the Aliyun NTP server (see files/chrony.conf).
- name: 修改时区,并且同步时间服务器
  copy:
    src: chrony.conf
    dest: /etc/chrony.conf
- service:
    name: chronyd
    state: started
    enabled: true

# kubelet refuses to run with swap enabled: turn it off now and drop the
# swap entry from fstab so it stays off after a reboot.
- name: 关闭swap
  shell: swapoff -a
- lineinfile:
    path: /etc/fstab
    state: absent
    regexp: 'swap'

# IPVS kernel modules are only needed when kube-proxy runs in ipvs mode.
- name: 加载ipvs内核,开启ipvs
  script: ipvs.modules.sh
  when: "'ipvs' in k8s_cluster_service_type"
# bridge-nf-call-iptables / ip_forward are kubeadm preflight requirements
# in BOTH proxy modes, so this is no longer gated on ipvs (the original
# skipped it in iptables mode, which fails `kubeadm init` preflight).
- script: sysctl_kubernetes.sh
- systemd:
    name: iptables
    state: stopped
    enabled: false
  when: "'ipvs' in k8s_cluster_service_type"
- systemd:
    name: iptables
    state: started
    enabled: true
  when: "'iptables' in k8s_cluster_service_type"

# Persistent journald storage with the size limits from 99-prophet.conf.
- name: 设置rsyslogd和systemd journald
  file:
    path: "{{ item }}"
    state: directory
  loop:
    - /var/log/journal
    - /etc/systemd/journald.conf.d
- copy:
    src: 99-prophet.conf
    dest: /etc/systemd/journald.conf.d/99-prophet.conf
  notify: restart journald
# The unconditional systemd-journald restart that followed here was
# removed: the handler above already restarts journald, and only when the
# config actually changed, which keeps the role idempotent.

# Initialise the control plane. `kubeadm reset -f` first so the role can
# be re-run against a half-configured host.
- name: 安装k8s,master
  shell: kubeadm reset -f
- shell: kubeadm init --apiserver-advertise-address={{ k8s_cluster_serverip }} --image-repository registry.aliyuncs.com/google_containers --kubernetes-version {{ k8s_cluster_version }} --service-cidr=10.96.0.0/12 --pod-network-cidr=10.244.0.0/16
  when: "'k8s-server' in group_names"
- file:
    path: "/{{ ansible_user_id }}/.kube"
    state: directory
  when: "'k8s-server' in group_names"
- copy:
    src: /etc/kubernetes/admin.conf
    dest: "/{{ ansible_user_id }}/.kube/config"
    owner: "{{ ansible_user_id }}"
    group: "{{ ansible_user_id }}"
    remote_src: true
  when: "'k8s-server' in group_names"

# Generate the join command on the master (the master is also the Ansible
# controller, so /opt/join_k8s_cluster.sh ends up on the controller), then
# push and run it on every slave via the script module.
- name: 将node节点加入集群
  script: join_k8s_cluster.sh
  when: "'k8s-server' in group_names"
- script: /opt/join_k8s_cluster.sh
  when: "'k8s-slave' in group_names"
五、部署kubernetes集群
[root@master ansible]# vim k8s-init-cluster.yml
- hosts: all
roles:
- k8s-init-cluster
[root@master ansible]# ansible-playbook k8s-init-cluster.yml
master : ok=30 changed=10 unreachable=0 failed=0 skipped=2 rescued=0 ignored=0
slave1 : ok=27 changed=8 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0
slave2 : ok=27 changed=8 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0
Example Playbook 也已在README.md中指出
六、验证kubernetes集群
[root@master ansible]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
master NotReady control-plane,master 80s v1.23.6
slave1 NotReady <none> 62s v1.23.6
slave2 NotReady <none> 62s v1.23.6
集群状态为NotReady,是因为cni插件还未加载
可在主节点执行:
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml