Deploying the Dashboard, Deploying Prometheus, and the HPA Cluster


Deploying the Dashboard

Copy the kubernetes/v1.17.6/dashboard directory from the cloud drive to the master node.

Push the images to the private registry

# push the dashboard image
[root@master dashboard]# docker load -i dashboard.tar.gz 
[root@master dashboard]# docker tag kubernetesui/dashboard:v2.0.0 192.168.1.100:5000/dashboard:v2.0.0
[root@master dashboard]# docker push 192.168.1.100:5000/dashboard:v2.0.0
# push the metrics-scraper image
[root@master dashboard]# docker load -i metrics-scraper.tar.gz 
[root@master dashboard]# docker tag kubernetesui/metrics-scraper:v1.0.4 192.168.1.100:5000/metrics-scraper:v1.0.4
[root@master dashboard]# docker push 192.168.1.100:5000/metrics-scraper:v1.0.4
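
Optionally, confirm that both images reached the registry before editing the manifests; this uses the same tag-list endpoint queried throughout the Prometheus section below (expected tags: v2.0.0 and v1.0.4):

curl http://192.168.1.100:5000/v2/dashboard/tags/list
curl http://192.168.1.100:5000/v2/metrics-scraper/tags/list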

Install and publish the service

[root@master dashboard]# vim recommended.yaml
          # change line 190 to
          image: 192.168.1.100:5000/dashboard:v2.0.0
          # change line 274 to
          image: 192.168.1.100:5000/metrics-scraper:v1.0.4
[root@master dashboard]# kubectl apply -f recommended.yaml
# ---------------------------------- verify --------------------------------------
[root@master dashboard]# kubectl -n kubernetes-dashboard get pod
NAME                                         READY   STATUS    RESTARTS   AGE
dashboard-metrics-scraper-57bf85fcc9-vsz74   1/1     Running   0          52s
kubernetes-dashboard-7b7f78bcf9-5k8vq        1/1     Running   0          52s
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)
dashboard-metrics-scraper   ClusterIP   10.254.76.85     <none>        8000/TCP
kubernetes-dashboard        ClusterIP   10.254.211.125   <none>        443/TCP
# --------------------------- expose the service externally ---------------------------
[root@master dashboard]# vim service.yaml
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      nodePort: 30443                # newly added
      targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort                     # newly added

[root@master dashboard]# kubectl apply -f service.yaml 
service/kubernetes-dashboard configured
[root@master dashboard]# kubectl -n kubernetes-dashboard get service
NAME                        TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.254.66.25     <none>        8000/TCP        2m6s
kubernetes-dashboard        NodePort    10.254.165.155   <none>        443:30443/TCP   2m6s
[root@master dashboard]#
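
Before binding a public IP, you can sanity-check the NodePort from the master (a sketch; 192.168.1.31 is one of the worker nodes in the deployment plan later in this article, and -k skips the self-signed certificate):

curl -k https://192.168.1.31:30443/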

On Huawei Cloud, bind an elastic public IP (EIP) to a node, then open https://<EIP>:30443/ in a browser.

Logging in with a token

[root@master dashboard]# kubectl apply -f admin-token.yaml
[root@master ~]# kubectl -n kubernetes-dashboard get secrets 
NAME                               TYPE                                  DATA   AGE
admin-user-token-bxjlz             kubernetes.io/service-account-token   3      23s
[root@master ~]# kubectl -n kubernetes-dashboard describe secrets admin-user-token-bxjlz
Name:         admin-user-token-bxjlz
... ...
ca.crt:     1025 bytes
namespace:  20 bytes
token:      the long string shown here is the authentication token you need

Log in with the token you obtained; the web UI is then accessible.
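
If you prefer not to copy the token out of the describe output by hand, a one-liner sketch (the secret name suffix is random, hence the lookup):

kubectl -n kubernetes-dashboard get secret \
  $(kubectl -n kubernetes-dashboard get secrets | awk '/admin-user-token/{print $1}') \
  -o jsonpath='{.data.token}' | base64 -d && echo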

Deploying Prometheus

Import the images

Import all images under kubernetes/v1.17.6/prometheus/images/ into the private registry.

Copy all the images to the images directory on the master node.

[root@master images]# for i in *.gz;do docker load -i ${i};done
[root@master images]# img="prom/node-exporter v1.0.0
quay.io/coreos/prometheus-config-reloader v0.35.1
quay.io/coreos/prometheus-operator v0.35.1
quay.io/coreos/kube-state-metrics v1.9.2
grafana/grafana 6.4.3
jimmidyson/configmap-reload v0.3.0
quay.io/prometheus/prometheus v2.11.0
quay.io/prometheus/alertmanager v0.18.0
quay.io/coreos/k8s-prometheus-adapter-amd64 v0.5.0
quay.io/coreos/kube-rbac-proxy v0.4.1"
[root@master images]# while read _f _v;do 
  docker tag ${_f}:${_v} 192.168.1.100:5000/${_f##*/}:${_v}
  docker push 192.168.1.100:5000/${_f##*/}:${_v}
  docker rmi ${_f}:${_v}
done <<<"${img}"
[root@master images]# curl http://192.168.1.100:5000/v2/_catalog
{"repositories":["alertmanager","configmap-reload","coredns","dashboard","etcd","flannel","grafana","k8s-prometheus-adapter-amd64","kube-apiserver","kube-controller-manager","kube-proxy","kube-rbac-proxy","kube-scheduler","kube-state-metrics","metrics-scraper","metrics-server","myos","nginx-ingress-controller","node-exporter","pause","prometheus","prometheus-config-reloader","prometheus-operator"]}
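
The retag loop above relies on bash parameter expansion to keep only the last path component of each upstream image name; a minimal illustration:

_f=quay.io/coreos/kube-state-metrics
echo ${_f##*/}     # prints kube-state-metrics: everything up to the last '/' is stripped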

Installing the operator

Copy the prometheus/setup directory to the master node.

[root@master prometheus]# curl http://192.168.1.100:5000/v2/configmap-reload/tags/list
{"name":"configmap-reload","tags":["v0.3.0"]}
[root@master prometheus]# curl http://192.168.1.100:5000/v2/prometheus-config-reloader/tags/list
{"name":"prometheus-config-reloader","tags":["v0.35.1"]}
[root@master prometheus]# curl http://192.168.1.100:5000/v2/prometheus-operator/tags/list
{"name":"prometheus-operator","tags":["v0.35.1"]}
[root@master prometheus]# vim setup/prometheus-operator-deployment.yaml
27:        - --config-reloader-image=192.168.1.100:5000/configmap-reload:v0.3.0
28:        - --prometheus-config-reloader=192.168.1.100:5000/prometheus-config-reloader:v0.35.1
29:        image: 192.168.1.100:5000/prometheus-operator:v0.35.1
# verify the installation
[root@master prometheus]# kubectl apply -f setup/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
prometheus-operator-75b4b59b74-72qhg   1/1     Running   0          47s
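
The operator also registers the monitoring CRDs that the remaining steps depend on; a quick check (a sketch, expecting entries such as prometheuses.monitoring.coreos.com and servicemonitors.monitoring.coreos.com):

kubectl get crd | grep monitoring.coreos.com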

Installing the Prometheus server

Copy the prometheus/prom-server directory to the master node.

[root@master prometheus]# curl http://192.168.1.100:5000/v2/prometheus/tags/list
{"name":"prometheus","tags":["v2.11.0"]}
[root@master prometheus]# vim prom-server/prometheus-prometheus.yaml
14:   baseImage: 192.168.1.100:5000/prometheus
34:   version: v2.11.0
[root@master prometheus]# kubectl apply -f prom-server/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
prometheus-k8s-0                       3/3     Running   1          45s
prometheus-k8s-1                       3/3     Running   1          45s

Installing prom-adapter

Copy the prometheus/prom-adapter directory to the master node.

[root@master prometheus]# curl http://192.168.1.100:5000/v2/k8s-prometheus-adapter-amd64/tags/list
{"name":"k8s-prometheus-adapter-amd64","tags":["v0.5.0"]}
[root@master prometheus]# vim prom-adapter/prometheus-adapter-deployment.yaml
28:      image: 192.168.1.100:5000/k8s-prometheus-adapter-amd64:v0.5.0
[root@master prometheus]# kubectl apply -f prom-adapter
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
prometheus-adapter-856854f9f6-knqtq    1/1     Running   0          6s
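
The adapter backs the resource-metrics API that the HPA section later relies on; once it is running and Prometheus is scraping the kubelets, the following should work (a sketch):

kubectl get apiservices v1beta1.metrics.k8s.io
kubectl top node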

Installing metrics-state

Copy the prometheus/metrics-state directory to the master node.

[root@master prometheus]# curl http://192.168.1.100:5000/v2/kube-state-metrics/tags/list
{"name":"kube-state-metrics","tags":["v1.9.2"]}
[root@master prometheus]# curl http://192.168.1.100:5000/v2/kube-rbac-proxy/tags/list
{"name":"kube-rbac-proxy","tags":["v0.4.1"]}
[root@master prometheus]# vim metrics-state/kube-state-metrics-deployment.yaml
24:         image: 192.168.1.100:5000/kube-rbac-proxy:v0.4.1
41:         image: 192.168.1.100:5000/kube-rbac-proxy:v0.4.1
58:         image: 192.168.1.100:5000/kube-state-metrics:v1.9.2
[root@master prometheus]# kubectl apply -f metrics-state/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
kube-state-metrics-5894f64799-krvn6    3/3     Running   0          4s

Installing node-exporter

Copy the prometheus/node-exporter directory to the master node.

[root@master prometheus]# curl http://192.168.1.100:5000/v2/node-exporter/tags/list
{"name":"node-exporter","tags":["v1.0.0"]}
[root@master prometheus]# curl http://192.168.1.100:5000/v2/kube-rbac-proxy/tags/list
{"name":"kube-rbac-proxy","tags":["v0.4.1"]}
[root@master prometheus]# vim node-exporter/node-exporter-daemonset.yaml
27:         image: 192.168.1.100:5000/node-exporter:v1.0.0
57:         image: 192.168.1.100:5000/kube-rbac-proxy:v0.4.1
[root@master prometheus]# kubectl apply -f node-exporter/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
node-exporter-7h4l9                    2/2     Running   0          7s
node-exporter-7vxmx                    2/2     Running   0          7s
node-exporter-mr6lw                    2/2     Running   0          7s
node-exporter-zg2j8                    2/2     Running   0          7s

Installing alertmanager

Copy the prometheus/alertmanager directory to the master node.

[root@master prometheus]# curl http://192.168.1.100:5000/v2/alertmanager/tags/list
{"name":"alertmanager","tags":["v0.18.0"]}
[root@master prometheus]# vim alertmanager/alertmanager-alertmanager.yaml
09:  baseImage: 192.168.1.100:5000/alertmanager
18:  version: v0.18.0
[root@master prometheus]# kubectl apply -f alertmanager/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
alertmanager-main-0                    2/2     Running   0          16s
alertmanager-main-1                    2/2     Running   0          16s
alertmanager-main-2                    2/2     Running   0          16s

Installing grafana

Copy the prometheus/grafana directory to the master node.

[root@master prometheus]# curl http://192.168.1.100:5000/v2/grafana/tags/list
{"name":"grafana","tags":["6.4.3"]}
[root@master prometheus]# vim grafana/grafana-deployment.yaml
19:     - image: 192.168.1.100:5000/grafana:6.4.3
[root@master prometheus]# kubectl apply -f grafana/
[root@master prometheus]# kubectl -n monitoring get pod
NAME                                   READY   STATUS    RESTARTS   AGE
grafana-647d948b69-d2hv9               1/1     Running   0          19s

Publishing the service

Grafana service

[root@master prometheus]# cp grafana/grafana-service.yaml ./
[root@master prometheus]# vim grafana-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: grafana
  name: grafana
  namespace: monitoring
spec:
  type: NodePort          # newly added
  ports:
  - name: http
    port: 3000
    nodePort: 30000       # newly added
    targetPort: http
  selector:
    app: grafana
[root@master prometheus]# kubectl apply -f grafana-service.yaml
[root@master prometheus]# kubectl -n monitoring get service
NAME                    TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)
grafana                 NodePort    10.254.79.49     <none>        3000:30000/TCP
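
Before opening a browser, you can confirm Grafana answers on the NodePort from inside the network (a sketch; 192.168.1.31 is one of the worker nodes, and /api/health is Grafana's built-in health endpoint):

curl -s http://192.168.1.31:30000/api/health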

Once the service is published, it can be accessed directly via a Huawei Cloud elastic public IP.

On the first login, Grafana's default username/password is admin/admin.

HPA Cluster

Cluster diagram

graph LR
U((user)) --> I1
  subgraph K8S_Cluster[K8S cluster]
     I1([ingress]) --> S1([service<br>clusterIP]) ---> POD1[(apache)] & POD2[(auto-scaled<br>apache)] & POD3[(auto-scaled<br>apache)]
  end
style POD1 color:#ffff00,fill:#0000ff
style U fill:#f9f,stroke:#333,stroke-width:4px;
classDef Kubernetes fill:#ffffc0,color:#ff00ff
class K8S_Cluster Kubernetes
classDef SER fill:#00ffff,color:#0000ff
class I1,S1 SER
classDef WEB color:#ffff00,fill:#11aaff
class POD2,POD3 WEB

Lab steps

[root@master ~]# vim myhpa.yaml 
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myweb
spec:
  selector:
    matchLabels:
      app: apache
  replicas: 1
  template:
    metadata:
      labels:
        app: apache
    spec:
      containers:
      - name: apache
        image: 192.168.1.100:5000/myos:httpd
        ports:
        - containerPort: 80
        resources:      # newly added
          requests:     # newly added
            cpu: 200m   # newly added
      restartPolicy: Always

---
apiVersion: v1
kind: Service
metadata:
  name: web-service
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    app: apache
  type: ClusterIP

---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: my-app
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  backend:
    serviceName: web-service
    servicePort: 80

---
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: myweb
spec:
  minReplicas: 1
  maxReplicas: 3
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: myweb
  targetCPUUtilizationPercentage: 50
[root@master ~]# kubectl apply -f myhpa.yaml
[root@master ~]# kubectl get hpa
NAME    REFERENCE          TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
myweb   Deployment/myweb   0%/50%    1         3         1          15m

When container CPU usage exceeds 50%, one more Pod is automatically added, and scaling continues step by step up to the maximum.

When CPU usage stays below 50%, one Pod is removed roughly every 300 s until the minimum is reached.

For access testing, you can use the info.php page provided in the image to increase the system load and then observe the status.

Access URL: http://ip.xx.xx.xx/info.php?id=1000000

The id parameter is the size of the computed result set: the larger the id, the more memory and CPU are used; setting it extremely large can easily hang the machine.
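
A minimal way to drive the autoscaler and watch it react (a sketch; run the two commands in separate terminals, keeping ip.xx.xx.xx as a placeholder for the ingress address):

kubectl get hpa myweb -w
while :; do curl -s "http://ip.xx.xx.xx/info.php?id=100000" > /dev/null; done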

Elastic Cloud in Practice

Elastic cloud architecture diagram

flowchart LR
U((USER)):::U;Kubernetes:::Kubernetes
subgraph Kubernetes
  I([ingress]):::I --> WS([nginx-service]):::S
  WS ---> WEB1[(nginx)] & WEB2[(nginx)] & WEB3[(nginx)]
  WEB1 & WEB2 & WEB3 --> PS([php-service]):::S 
  PS ---> PHP1[(PHP)]:::PHP1 & PHP2[(PHP)] & PHP3[(PHP)] & PHP4[(PHP)] & PHP5[(PHP)]
  WEB1 & WEB2 & WEB3 & PHP1 & PHP2 & PHP3 & PHP4 & PHP5 -.- NFS{{NFS Server}}:::NFS
end
classDef WEB color:#ff0000,fill:#99ff99
class WEB1,WEB2,WEB3 WEB
classDef PHP1 color:#ffff00,fill:#0000ff
classDef PHP color:#ffff00,fill:#11aaff
class PHP2,PHP3,PHP4,PHP5 PHP
classDef NFS color:#ffff00,fill:#aa99aa
classDef S fill:#ffaa00,color:#3333ff
classDef I fill:#00ffff,color:#0000ff
classDef U fill:#000000,color:#ffffff
classDef Kubernetes fill:#ffffc0,color:#ff00ff
U o--o I

Deployment plan

01. Cluster configuration
Hostname    IP address      Notes
master      192.168.1.21    K8S management node
node-0001   192.168.1.31    K8S compute node
node-0002   192.168.1.32    K8S compute node
node-0003   192.168.1.33    K8S compute node
node-0004   192.168.1.34    K8S compute node, added during scale-out
node-0005   192.168.1.35    K8S compute node, added during scale-out
registry    192.168.1.100   K8S image registry
02. Resource files
[root@master ~]# mkdir webapp
[root@master ~]# cd webapp/
[root@master webapp]# vim 01-pv.yaml 
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv-nfs
spec:
  volumeMode: Filesystem
  capacity:
    storage: 30Gi
  accessModes:
  - ReadWriteOnce
  - ReadOnlyMany
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 192.168.1.100
    path: /var/webroot

[root@master webapp]# vim 02-pvc.yaml 
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvc-nfs
spec:
  volumeMode: Filesystem
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 25Gi
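
The transcript does not show these two files being applied; if you want to confirm the claim binds before building the rest, a quick sketch (assuming the NFS export /var/webroot already exists on 192.168.1.100):

kubectl apply -f 01-pv.yaml -f 02-pvc.yaml
kubectl get pv,pvc          # pvc-nfs should show STATUS Bound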

[root@master webapp]# docker run -itd --name myphp 192.168.1.100:5000/myos:php-fpm 
f5147647288fcf87f308a726fade34c2bf2aa635e2b9c3f7ae867341af22e0a6
[root@master webapp]# docker cp myphp:/etc/php-fpm.d/www.conf /var/webconf/
[root@master webapp]# docker rm -f myphp
myphp
[root@master webapp]# vim /var/webconf/www.conf
12:      listen = 0.0.0.0:9000
24:      ; listen.allowed_clients = 127.0.0.1
[root@master webapp]# kubectl create configmap php-conf --from-file=/var/webconf/www.conf 
configmap/php-conf created
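# optional sanity check (a sketch, not part of the original transcript):
# confirm the edited listen address is present in the ConfigMap
kubectl describe configmap php-conf | grep listen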
[root@master webapp]# vim 04-phpdeploy.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: php-deploy
spec:
  selector:
    matchLabels:
      app: myphp
  replicas: 1
  template:
    metadata:
      labels:
        app: myphp
    spec:
      volumes:
      - name: php-conf
        configMap:
          name: php-conf
      - name: website
        persistentVolumeClaim:
          claimName: pvc-nfs
      containers:
      - name: php-fpm
        image: 192.168.1.100:5000/myos:php-fpm
        volumeMounts:
        - name: php-conf
          subPath: www.conf
          mountPath: /etc/php-fpm.d/www.conf
        - name: website
          mountPath: /usr/local/nginx/html
        ports:
        - protocol: TCP
          containerPort: 9000
        resources:
          requests:
            cpu: 200m
      restartPolicy: Always

[root@master webapp]# vim 05-phphpa.yaml 
---
kind: HorizontalPodAutoscaler
apiVersion: autoscaling/v1
metadata:
  name: myphp
spec:
  minReplicas: 1
  maxReplicas: 5
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: php-deploy
  targetCPUUtilizationPercentage: 50

[root@master webapp]# vim 06-phpservice.yaml 
---
apiVersion: v1
kind: Service
metadata:
  name: phpbackend
spec:
  ports:
  - protocol: TCP
    port: 9000
    targetPort: 9000
  selector:
    app: myphp
  type: ClusterIP

[root@master webapp]# docker run -itd --name mynginx 192.168.1.100:5000/myos:nginx
a8427c4972601eb85eb188a0f2a96d6d1d5b87947afdfced762bae37151188e5
[root@master webapp]# docker cp mynginx:/usr/local/nginx/conf/nginx.conf /var/webconf/
[root@master webapp]# docker rm -f mynginx 
mynginx
[root@master webapp]# vim /var/webconf/nginx.conf
... ...
        location ~ \.php$ {
            root           html;
            fastcgi_pass   phpbackend:9000;
            fastcgi_index  index.php;
            include        fastcgi.conf;
        }
... ...
[root@master webapp]# kubectl create configmap nginx-conf --from-file=/var/webconf/nginx.conf 
configmap/nginx-conf created
[root@master webapp]# vim 08-nginxdeploy.yaml
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: webcluster
spec:
  selector:
    matchLabels:
      app: mynginx
  replicas: 3
  template:
    metadata:
      labels:
        app: mynginx
    spec:
      volumes:
      - name: nginx-php
        configMap:
          name: nginx-conf
      - name: log-data
        hostPath:
          path: /var/log/weblog
          type: DirectoryOrCreate
      - name: website
        persistentVolumeClaim:
          claimName: pvc-nfs
      containers:
      - name: nginx
        image: 192.168.1.100:5000/myos:nginx
        volumeMounts:
        - name: nginx-php
          subPath: nginx.conf
          mountPath: /usr/local/nginx/conf/nginx.conf
        - name: log-data
          mountPath: /usr/local/nginx/logs
        - name: website
          mountPath: /usr/local/nginx/html
        ports:
        - protocol: TCP
          containerPort: 80
      restartPolicy: Always

[root@master webapp]# vim 09-nginxservice.yaml 
---
apiVersion: v1
kind: Service
metadata:
  name: webforeground
spec:
  ports:
  - protocol: TCP
    port: 80
    targetPort: 80
  selector:
    app: mynginx
  type: ClusterIP

[root@master webapp]# vim 10-ingress.yaml 
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: webcluster
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  backend:
    serviceName: webforeground
    servicePort: 80

[root@master webapp]#
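
A minimal sketch for bringing the whole stack up from this directory and checking it (assumes the ConfigMaps above were already created with kubectl create, and that the numbered YAML files are all present in the current directory):

kubectl apply -f .
kubectl get pods,svc,ingress,hpa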

For the complete version, see the webcluster.yaml resource file in the notes.
