K8S资源对象管理 、 服务与负载均衡 、 Ingress

kubernetes

资源控制器

daemonset 控制器

[root@master ~]# vim mynginx.yaml
---
# DaemonSet: runs exactly one copy of this Pod on every schedulable node
# (no replica count — the set of nodes determines the Pod count).
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: mynginx
spec:
  selector:
    matchLabels:
      myapp: nginx              # must match the Pod template labels below
  template:
    metadata:
      labels:
        myapp: nginx
    spec:
      containers:
      - name: nginxcluster
        image: 192.168.1.100:5000/myos:nginx   # image from the private registry
        stdin: false
        tty: false
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always

[root@master ~]# kubectl apply -f mynginx.yaml 
daemonset.apps/mynginx created
[root@master ~]# kubectl get pod -o wide
NAME            READY   STATUS    RESTARTS   AGE   IP            NODE
mynginx-77jtf   1/1     Running   0          6s    10.244.3.9    node-0001
mynginx-cwdzt   1/1     Running   0          6s    10.244.1.9    node-0003
mynginx-z2kl6   1/1     Running   0          6s    10.244.2.10   node-0002
[root@master ~]# 

污点与容忍

污点策略:NoSchedule、PreferNoSchedule、NoExecute

[root@master ~]# kubectl delete -f mynginx.yaml 
daemonset.apps "mynginx" deleted
[root@master ~]# kubectl describe nodes |grep -P "^Taints"
Taints:             node-role.kubernetes.io/master:NoSchedule
Taints:             <none>
Taints:             <none>
Taints:             <none>
[root@master ~]# kubectl taint node node-0001 k1=v1:NoSchedule
node/node-0001 tainted
[root@master ~]# kubectl apply -f mynginx.yaml 
daemonset.apps/mynginx created
[root@master ~]# kubectl get pods
NAME            READY   STATUS    RESTARTS   AGE
mynginx-f2rxh   1/1     Running   0          4s
mynginx-n7xsw   1/1     Running   0          4s
[root@master ~]# kubectl taint node node-0001 k1-
node/node-0001 untainted
[root@master ~]# kubectl get pods
NAME            READY   STATUS    RESTARTS   AGE
mynginx-f2rxh   1/1     Running   0          105s
mynginx-hp6f2   1/1     Running   0          2s
mynginx-n7xsw   1/1     Running   0          105s
[root@master ~]# 

驱逐容器

[root@master ~]# kubectl apply -f myapache.yaml 
deployment.apps/myapache created
[root@master ~]# kubectl scale deployment myapache --replicas=3
deployment.apps/myapache scaled
[root@master ~]# kubectl get pod -o wide
NAME                       READY   STATUS    RESTARTS   AGE     IP            NODE
myapache-7d689bf8f-xq7l6   1/1     Running   0          2m23s   10.244.3.11   node-0001
myapache-7d689bf8f-b4d5f   1/1     Running   0          9s      10.244.2.14   node-0002
myapache-7d689bf8f-mzcgw   1/1     Running   0          9s      10.244.1.13   node-0003
mynginx-hp6f2              1/1     Running   0          5m25s   10.244.3.10   node-0001
mynginx-f2rxh              1/1     Running   0          7m8s    10.244.2.11   node-0002
mynginx-4f7tl              1/1     Running   0          20s     10.244.1.12   node-0003
[root@master ~]# kubectl taint node node-0003 k1=v1:NoExecute
node/node-0003 tainted
[root@master ~]# kubectl get pod -o wide
NAME                       READY   STATUS    RESTARTS   AGE     IP            NODE
myapache-7d689bf8f-xq7l6   1/1     Running   0          2m23s   10.244.3.11   node-0001
myapache-7d689bf8f-b4d5f   1/1     Running   0          9s      10.244.2.14   node-0002
myapache-7d689bf8f-mzcgw   1/1     Running   0          9s      10.244.2.15   node-0002
mynginx-hp6f2              1/1     Running   0          5m25s   10.244.3.10   node-0001
mynginx-f2rxh              1/1     Running   0          7m8s    10.244.2.11   node-0002
[root@master ~]# kubectl taint node node-0003 k1-
node/node-0003 untainted
[root@master ~]# kubectl get pod -o wide
NAME                       READY   STATUS    RESTARTS   AGE     IP            NODE
myapache-7d689bf8f-xq7l6   1/1     Running   0          2m23s   10.244.3.11   node-0001
myapache-7d689bf8f-b4d5f   1/1     Running   0          9s      10.244.2.14   node-0002
myapache-7d689bf8f-mzcgw   1/1     Running   0          9s      10.244.2.15   node-0002
mynginx-hp6f2              1/1     Running   0          5m25s   10.244.3.10   node-0001
mynginx-f2rxh              1/1     Running   0          7m8s    10.244.2.11   node-0002
mynginx-9s9z4              1/1     Running   0          34s     10.244.1.14   node-0003
[root@master ~]# 

job/cronjob 控制器

job 资源文件

[root@master ~]# vim myjob.yaml
---
# Job: runs the Pod to completion once; the result is read from the Pod log.
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:
    spec:
      containers:
      - name: pi
        image: 192.168.1.100:5000/myos:v1804
        # Print pi to 2000 decimal places; output goes to the container log.
        command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: OnFailure      # Jobs may not use restartPolicy: Always
[root@master config]# kubectl apply -f myjob.yaml 
job.batch/pi created
[root@master config]# kubectl get job
NAME   COMPLETIONS   DURATION   AGE
pi     1/1           2s         7s
[root@master config]# kubectl get pod
NAME                     READY   STATUS      RESTARTS   AGE
pi-gvfwj                 0/1     Completed   0          15s
# 查看终端结果
[root@master config]# kubectl logs pi-gvfwj

cronjob 资源文件

[root@master ~]# vim mycronjob.yaml 
---
# CronJob (batch/v1beta1 on k8s v1.17): creates a new Job on a schedule.
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: cronjob-pi
spec:
  schedule: "*/1 * * * *"       # standard cron syntax: once per minute
  jobTemplate:                  # template for each Job this controller creates
    spec:
      template:
        spec:
          containers:
          - name: pi
            image: 192.168.1.100:5000/myos:v1804
            command: ["perl",  "-Mbignum=bpi", "-wle", "print bpi(2000)"]
          restartPolicy: OnFailure
[root@master ~]# kubectl apply -f mycronjob.yaml 
cronjob.batch/cronjob-pi created
[root@master ~]# kubectl get cronjobs.batch 
NAME         SCHEDULE           SUSPEND   ACTIVE        LAST SCHEDULE   AGE
cronjob-pi   */1 * * * *        False     0             <none>          10s
[root@master ~]# kubectl get pod
NAME                            READY     STATUS      RESTARTS          AGE
cronjob-pi-1595410620-vvztx     0/1       Completed   0                 62s

集群服务

服务图例

flowchart LR
subgraph K8S服务
  S1([service<br>clusterIP]);S2([service<br>headless]);S3([service<br>nodeport])
  S1 ---> P1[(apache<br>Pod)] & P2[(apache<br>Pod)] & P3[(apache<br>Pod)]
  U1((用户)) --> S1
  U1 -.-> S2 -.-> U1
  U1 -.-> P1 & P2 & P3
  S3 ==> P1 & P2 & P3
end
U2((用户)) ==> S3
classDef className fill:#f9f,stroke:#333,stroke-width:4px;
classDef Kubernetes fill:#ffffc0,color:#ff00ff
class U1,U2 className
class K8S服务 Kubernetes

ClusterIP服务

会变化的资源
[root@master ~]# kubectl apply -f myapache.yaml 
deployment.apps/myapache created
[root@master ~]# kubectl scale deployment myapache --replicas=2
deployment.apps/myapache scaled
[root@master ~]# kubectl get pod -o wide
NAME                       READY   STATUS    RESTARTS   AGE   IP            NODE
myapache-7d689bf8f-c268l   1/1     Running   0          13s   10.244.2.16   node-0002
myapache-7d689bf8f-4z225   1/1     Running   0          5s    10.244.1.15   node-0003
[root@master ~]# kubectl delete pod myapache-7d689bf8f-4z225 
pod "myapache-7d689bf8f-4z225" deleted
[root@master ~]# kubectl get pod -o wide
NAME                       READY   STATUS    RESTARTS   AGE   IP            NODE
myapache-7d689bf8f-c268l   1/1     Running   0          38s   10.244.2.16   node-0002
myapache-7d689bf8f-mccqv   1/1     Running   0          13s   10.244.3.12   node-0001
[root@master ~]# 
创建 ClusterIP 服务
[root@master ~]# vim clusterip.yaml 
---
# ClusterIP Service: a stable virtual IP that load-balances to matching Pods.
kind: Service
apiVersion: v1
metadata:
  name: myapache
spec:
  ports:
  - protocol: TCP
    port: 80           # port exposed on the service's cluster IP
    targetPort: 80     # port on the backend Pods
  selector:
    myapp: httpd      # label must match the one in the deploy resource file
  type: ClusterIP     # reachable only from inside the cluster
[root@master config]# kubectl apply -f clusterip.yaml 
service/myapache created
[root@master config]# kubectl get service
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.254.0.1       <none>        443/TCP   22h
myapache     ClusterIP   10.254.235.248   <none>        80/TCP    4s
访问服务

服务只有在集群内部才可以访问,创建 Pod,在Pod 中访问服务

[root@master ~]# kubectl apply -f mypod.yaml 
pod/mypod created
[root@master ~]# kubectl exec -it mypod -- /bin/bash
[root@mypod /]# curl http://10.254.235.248/info.php
<pre>
Array
(
    [REMOTE_ADDR] => 10.244.1.16
    [REQUEST_METHOD] => GET
    [HTTP_USER_AGENT] => curl/7.29.0
    [REQUEST_URI] => /info.php
)
php_host:     myapache-7d689bf8f-mccqv
1229
[root@mypod /]# curl http://10.254.235.248/info.php
<pre>
Array
(
    [REMOTE_ADDR] => 10.244.1.16
    [REQUEST_METHOD] => GET
    [HTTP_USER_AGENT] => curl/7.29.0
    [REQUEST_URI] => /info.php
)
php_host:     myapache-7d689bf8f-c268l
1229
[root@mypod /]# 

扩容集群节点,服务自动扩展

# 在master上执行扩容节点
[root@master ~]# kubectl scale deployment myapache --replicas=3
# 服务本质是LVS规则
[root@master ~]# ipvsadm -L -n
TCP  10.254.235.248:80 rr
  -> 10.244.1.17:80               Masq    1      0          0         
  -> 10.244.2.16:80               Masq    1      0          0         
  -> 10.244.3.12:80               Masq    1      0          0        
-----------------------------------------------------------------------------------------
# 在pod里访问
[root@pod-example /]# curl http://10.254.235.248/info.php
... ...
php_host:   myapache-7d689bf8f-lpt89
... ...
php_host:   myapache-7d689bf8f-mccqv
... ...
php_host:   myapache-7d689bf8f-c268l

nodeport 服务

[root@master ~]# vim mynodeport.yaml 
---
# NodePort Service: in addition to a ClusterIP, opens the same port on
# every node (auto-allocated here, e.g. 31410) for access from outside.
kind: Service
apiVersion: v1
metadata:
  name: mynodeport
spec:
  ports:
  - protocol: TCP
    port: 80           # port on the cluster IP
    targetPort: 80     # port on the backend Pods
  selector:
    myapp: httpd
  type: NodePort     # specify the service type
[root@master ~]# kubectl apply -f mynodeport.yaml 
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE
mynodeport   NodePort    10.254.105.233   <none>        80:31410/TCP   4s
#---------------------------所有node节点31410端口均可访问-----------------------------------
# 在跳板机上访问服务
[root@ecs-proxy ~]# curl http://192.168.1.31:31410/info.php
[root@ecs-proxy ~]# curl http://192.168.1.32:31410/info.php
[root@ecs-proxy ~]# curl http://192.168.1.33:31410/info.php

headless 服务

[root@master ~]# vim myheadless.yaml 
---
# Headless Service: no virtual IP; the cluster DNS name resolves directly
# to the IPs of all matching Pods.
kind: Service
apiVersion: v1
metadata:
  name: myheadless
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    myapp: httpd
  type: ClusterIP
  clusterIP: None      # newly added: makes the service headless
[root@master ~]# kubectl apply -f myheadless.yaml 
service/myheadless created
[root@master ~]# kubectl get service
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.254.0.1       <none>        443/TCP   22h
myapache     ClusterIP   10.254.235.248   <none>        80/TCP    7m52s
myheadless   ClusterIP   None             <none>        80/TCP    3s
#-----------------------------------进入pod查看解析结果------------------------------------
[root@master ~]# kubectl exec -it mypod -- /bin/bash
[root@mypod /]# yum install -y bind-utils
[root@mypod /]# host myheadless.default.svc.cluster.local
myheadless.default.svc.cluster.local has address 10.244.3.12
myheadless.default.svc.cluster.local has address 10.244.1.17
myheadless.default.svc.cluster.local has address 10.244.2.16

ingress

安装控制器

拷贝云盘 kubernetes/v1.17.6/ingress 文件夹到 master 上,导入镜像到私有仓库

[root@master ingress]# docker load -i ingress-nginx.tar.gz
[root@master ingress]# docker tag quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0 192.168.1.100:5000/nginx-ingress-controller:0.30.0
[root@master ingress]# docker push 192.168.1.100:5000/nginx-ingress-controller:0.30.0
[root@master ingress]# curl http://192.168.1.100:5000/v2/nginx-ingress-controller/tags/list
{"name":"nginx-ingress-controller","tags":["0.30.0"]}
[root@master ~]# vim ingress/mandatory.yaml 
221:  image: 192.168.1.100:5000/nginx-ingress-controller:0.30.0
[root@master ~]# kubectl apply -f ingress/mandatory.yaml 
[root@master ~]# kubectl -n ingress-nginx get pod
NAME                                      READY   STATUS    RESTARTS   AGE
nginx-ingress-controller-fc6766d7-ptppp   1/1     Running   0          47s
[root@master ingress]#
发布服务
[root@master ingress]# vim ingress-example.yaml 
---
# Ingress (extensions/v1beta1, k8s v1.17): exposes the myapache service
# over HTTP through the nginx ingress controller installed above.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: my-web
  annotations:
    kubernetes.io/ingress.class: "nginx"   # handled by the nginx controller
spec:
  backend:             # default backend: all requests go to this service
    serviceName: myapache
    servicePort: 80
[root@master ingress]# kubectl apply -f ingress-example.yaml
[root@master ingress]# kubectl get ingresses
NAME     HOSTS   ADDRESS        PORTS   AGE
my-web   *       192.168.1.33   80      3m2s
#----------------------- 在跳板机访问测试 -------------------------------------------------
[root@ecs-proxy ~]# curl http://192.168.1.33/info.php
<pre>
Array
(
    [REMOTE_ADDR] => 10.244.3.0
    [REQUEST_METHOD] => GET
    [HTTP_USER_AGENT] => curl/7.29.0
    [REQUEST_URI] => /info.php
)
php_host:   myapache-7d689bf8f-lpt89
1229
©著作权归作者所有,转载或内容合作请联系作者
  • 序言:七十年代末,一起剥皮案震惊了整个滨河市,随后出现的几起案子,更是在滨河造成了极大的恐慌,老刑警刘岩,带你破解...
    沈念sama阅读 203,271评论 5 476
  • 序言:滨河连续发生了三起死亡事件,死亡现场离奇诡异,居然都是意外死亡,警方通过查阅死者的电脑和手机,发现死者居然都...
    沈念sama阅读 85,275评论 2 380
  • 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
    开封第一讲书人阅读 150,151评论 0 336
  • 文/不坏的土叔 我叫张陵,是天一观的道长。 经常有香客问我,道长,这世上最难降的妖魔是什么? 我笑而不...
    开封第一讲书人阅读 54,550评论 1 273
  • 正文 为了忘掉前任,我火速办了婚礼,结果婚礼上,老公的妹妹穿的比我还像新娘。我一直安慰自己,他们只是感情好,可当我...
    茶点故事阅读 63,553评论 5 365
  • 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
    开封第一讲书人阅读 48,559评论 1 281
  • 那天,我揣着相机与录音,去河边找鬼。 笑死,一个胖子当着我的面吹牛,可吹牛的内容都是我干的。 我是一名探鬼主播,决...
    沈念sama阅读 37,924评论 3 395
  • 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
    开封第一讲书人阅读 36,580评论 0 257
  • 序言:老挝万荣一对情侣失踪,失踪者是张志新(化名)和其女友刘颖,没想到半个月后,有当地人在树林里发现了一具尸体,经...
    沈念sama阅读 40,826评论 1 297
  • 正文 独居荒郊野岭守林人离奇死亡,尸身上长有42处带血的脓包…… 初始之章·张勋 以下内容为张勋视角 年9月15日...
    茶点故事阅读 35,578评论 2 320
  • 正文 我和宋清朗相恋三年,在试婚纱的时候发现自己被绿了。 大学时的朋友给我发了我未婚夫和他白月光在一起吃饭的照片。...
    茶点故事阅读 37,661评论 1 329
  • 序言:一个原本活蹦乱跳的男人离奇死亡,死状恐怖,灵堂内的尸体忽然破棺而出,到底是诈尸还是另有隐情,我是刑警宁泽,带...
    沈念sama阅读 33,363评论 4 318
  • 正文 年R本政府宣布,位于F岛的核电站,受9级特大地震影响,放射性物质发生泄漏。R本人自食恶果不足惜,却给世界环境...
    茶点故事阅读 38,940评论 3 307
  • 文/蒙蒙 一、第九天 我趴在偏房一处隐蔽的房顶上张望。 院中可真热闹,春花似锦、人声如沸。这庄子的主人今日做“春日...
    开封第一讲书人阅读 29,926评论 0 19
  • 文/苍兰香墨 我抬头看了看天上的太阳。三九已至,却和暖如春,着一层夹袄步出监牢的瞬间,已是汗流浃背。 一阵脚步声响...
    开封第一讲书人阅读 31,156评论 1 259
  • 我被黑心中介骗来泰国打工, 没想到刚下飞机就差点儿被人妖公主榨干…… 1. 我叫王不留,地道东北人。 一个月前我还...
    沈念sama阅读 42,872评论 2 349
  • 正文 我出身青楼,却偏偏与公主长得像,于是被迫代替她去往敌国和亲。 传闻我的和亲对象是个残疾皇子,可洞房花烛夜当晚...
    茶点故事阅读 42,391评论 2 342

推荐阅读更多精彩内容