This post uses an empty Elasticsearch instance to measure how much disk space creating an index actually takes up on the filesystem. Before actually creating anything, the plan is to:
- record a baseline before doing anything;
- create an index with Python;
- observe what changes at each step after the index is created;
- bring up another empty node and watch the data synchronize;
- summarize.
Record a baseline before doing anything
File layout
[root@es-master data]# tree /usr/local/elasticsearch-5.5.1/data/
/usr/local/elasticsearch-5.5.1/data/
└── nodes              # directory
    └── 0              # directory
        ├── node.lock  # file
        └── _state     # directory
            ├── global-16.st  # file
            └── node-17.st    # file
3 directories, 3 files
File sizes (bytes)
[root@es-master data]# find /usr/local/elasticsearch-5.5.1/data/ -type f | xargs du -lsb
0 /usr/local/elasticsearch-5.5.1/data/nodes/0/node.lock
71 /usr/local/elasticsearch-5.5.1/data/nodes/0/_state/node-17.st
140 /usr/local/elasticsearch-5.5.1/data/nodes/0/_state/global-16.st
Node information
[root@es-master data]# curl -X GET http://localhost:9200/_cluster/health?pretty
{
"cluster_name" : "HawkEye",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 1,
"number_of_data_nodes" : 1,
"active_primary_shards" : 0,
"active_shards" : 0,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
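The same check also works through the elasticsearch Python client that the rest of this post uses; a minimal sketch:

# A minimal sketch: read cluster health via the Python client rather
# than curl; the host IP matches the indexing script used below.
from elasticsearch import Elasticsearch

es = Elasticsearch(hosts=["192.168.1.132"])
health = es.cluster.health()                      # same JSON, returned as a dict
print(health["status"], health["active_shards"])  # green 0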
Shard and index statistics
[root@es-master data]# curl -X GET http://localhost:9200/_stats?pretty
{
"_shards" : {
"total" : 0,
"successful" : 0,
"failed" : 0
},
"_all" : {
"primaries" : { },
"total" : { }
},
"indices" : { }
}
Create an index with Python
_01_create_index.py
# -*- coding: utf-8 -*-
from elasticsearch import Elasticsearch

es = Elasticsearch(hosts=["192.168.1.132"])
es.index(
    index="website",
    doc_type="blog",
    id=1,
    body={
        "title": "My first blog entry",
        "text": "Just trying this out...",
        "date": "2014/01/01"
    }
)
# Run the Python file to actually create the index
[root@es-master ~]# python _01_create_index.py
# Check that the index was created successfully
[root@es-master data]# curl -XGET http://localhost:9200/website/blog/1?pretty
{
  "_index" : "website",
  "_type" : "blog",
  "_id" : "1",
  "_version" : 1,
  "found" : true,
  "_source" : {
    "date" : "2014/01/01",
    "text" : "Just trying this out...",
    "title" : "My first blog entry"
  }
}
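For completeness, the same lookup can be done from Python; a minimal sketch that reuses the client setup from _01_create_index.py:

# A minimal sketch: fetch the document back through the Python client
# instead of curl (same host as _01_create_index.py).
from elasticsearch import Elasticsearch

es = Elasticsearch(hosts=["192.168.1.132"])
doc = es.get(index="website", doc_type="blog", id=1)
print(doc["found"], doc["_source"]["title"])  # True My first blog entry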
Observe what changed at each step after the index was created
Elasticsearch application log
[2017-08-18T22:41:03,598][INFO ][o.e.c.m.MetaDataCreateIndexService] [node-1] [website] creating index, cause [auto(bulk api)], templates [], mappings []
[2017-08-18T22:41:04,490][INFO ][o.e.c.m.MetaDataMappingService] [node-1] [website/YhxMl-cOQUq6WCYH1sKjRg] create_mapping [blog]
File layout
[root@es-master data]# tree /usr/local/elasticsearch-5.5.1/data/
/usr/local/elasticsearch-5.5.1/data/
└── nodes
└── 0
├── indices
│ └── YhxMl-cOQUq6WCYH1sKjRg
│ ├── 0
│ │ ├── index
│ │ │ ├── segments_1
│ │ │ └── write.lock
│ │ ├── _state
│ │ │ └── state-0.st
│ │ └── translog
│ │ ├── translog-1.tlog
│ │ └── translog.ckp
│ ├── 1
│ │ ├── index
│ │ │ ├── segments_1
│ │ │ └── write.lock
│ │ ├── _state
│ │ │ └── state-0.st
│ │ └── translog
│ │ ├── translog-1.tlog
│ │ └── translog.ckp
│ ├── 2
│ │ ├── index
│ │ │ ├── segments_1
│ │ │ └── write.lock
│ │ ├── _state
│ │ │ └── state-0.st
│ │ └── translog
│ │ ├── translog-1.tlog
│ │ └── translog.ckp
│ ├── 3
│ │ ├── index
│ │ │ ├── _0.cfe
│ │ │ ├── _0.cfs
│ │ │ ├── _0.si
│ │ │ ├── segments_3
│ │ │ └── write.lock
│ │ ├── _state
│ │ │ └── state-0.st
│ │ └── translog
│ │ ├── translog-2.tlog
│ │ └── translog.ckp
│ ├── 4
│ │ ├── index
│ │ │ ├── segments_1
│ │ │ └── write.lock
│ │ ├── _state
│ │ │ └── state-0.st
│ │ └── translog
│ │ ├── translog-1.tlog
│ │ └── translog.ckp
│ └── _state
│ └── state-4.st
├── node.lock
└── _state
├── global-16.st
└── node-17.st
26 directories, 32 files
The index got the default five shards (directories 0 through 4). The document itself landed in shard 3, which is why only that shard has Lucene segment files (_0.cfs, _0.cfe, _0.si).
File sizes (bytes)
[root@es-master data]# find ./ -type f | xargs du -lsb
0 ./nodes/0/node.lock
71 ./nodes/0/_state/node-17.st
140 ./nodes/0/_state/global-16.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/write.lock
3620 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfs
405 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfe
375 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.si
256 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/segments_3
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog-2.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog-1.tlog
632 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/_state/state-4.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog-1.tlog
[root@es-master data]# echo `find ./ -type f | xargs du -lsb | awk '{print $1}'` | tr " " "+" | bc
7272 # total size on disk (bytes), roughly 7 KB
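The same total can be computed without the shell pipeline; a minimal Python sketch that walks the data directory:

# A minimal sketch: sum on-disk bytes the way the find|du|bc pipeline
# above does.
import os

DATA_DIR = "/usr/local/elasticsearch-5.5.1/data/"
total = sum(
    os.path.getsize(os.path.join(root, name))
    for root, _dirs, files in os.walk(DATA_DIR)
    for name in files
)
print(total)  # ~7272 bytes right after index creation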
Node information
[root@es-master data]# curl -X GET http://localhost:9200/_cluster/health?pretty
{
"cluster_name" : "HawkEye",
"status" : "yellow",
"timed_out" : false,
"number_of_nodes" : 1,
"number_of_data_nodes" : 1,
"active_primary_shards" : 5,
"active_shards" : 5,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 5,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 50.0
}
The cluster is now yellow: the five replica shards have no second node to live on, so they remain unassigned and only 50% of the shards are active.
Shard and index statistics
[root@es-master data]# curl -X GET http://localhost:9200/_stats/indexing,store?pretty
{
"_shards" : {
"total" : 10,
"successful" : 5,
"failed" : 0
},
"_all" : {
"primaries" : {
"store" : {
"size_in_bytes" : 5304,
"throttle_time_in_millis" : 0
},
"indexing" : {
"index_total" : 1,
"index_time_in_millis" : 90,
"index_current" : 0,
"index_failed" : 0,
"delete_total" : 0,
"delete_time_in_millis" : 0,
"delete_current" : 0,
"noop_update_total" : 0,
"is_throttled" : false,
"throttle_time_in_millis" : 0
}
},
"total" : {
"store" : {
"size_in_bytes" : 5304,
"throttle_time_in_millis" : 0
},
"indexing" : {
"index_total" : 1,
"index_time_in_millis" : 90,
"index_current" : 0,
"index_failed" : 0,
"delete_total" : 0,
"delete_time_in_millis" : 0,
"delete_current" : 0,
"noop_update_total" : 0,
"is_throttled" : false,
"throttle_time_in_millis" : 0
}
}
},
"indices" : {
"website" : {
"primaries" : {
"store" : {
"size_in_bytes" : 5304,
"throttle_time_in_millis" : 0
},
"indexing" : {
"index_total" : 1,
"index_time_in_millis" : 90,
"index_current" : 0,
"index_failed" : 0,
"delete_total" : 0,
"delete_time_in_millis" : 0,
"delete_current" : 0,
"noop_update_total" : 0,
"is_throttled" : false,
"throttle_time_in_millis" : 0
}
},
"total" : {
"store" : {
"size_in_bytes" : 5304,
"throttle_time_in_millis" : 0
},
"indexing" : {
"index_total" : 1,
"index_time_in_millis" : 90,
"index_current" : 0,
"index_failed" : 0,
"delete_total" : 0,
"delete_time_in_millis" : 0,
"delete_current" : 0,
"noop_update_total" : 0,
"is_throttled" : false,
"throttle_time_in_millis" : 0
}
}
}
}
}
Bring up another empty node and watch the data synchronize
Master node application log
[2017-08-18T23:38:13,202][INFO ][o.e.c.s.ClusterService ] [node-1] added {{y1FxQfR}{y1FxQfRnTpyKdtid8ISeLQ}{lJd8BNRhQ5ywFkjLhZ2bPQ}{192.168.1.104}{192.168.1.104:9300},}, reason: zen-disco-node-join[{y1FxQfR}{y1FxQfRnTpyKdtid8ISeLQ}{lJd8BNRhQ5ywFkjLhZ2bPQ}{192.168.1.104}{192.168.1.104:9300}]
[2017-08-18T23:38:14,038][WARN ][o.e.d.z.ElectMasterService] [node-1] value for setting "discovery.zen.minimum_master_nodes" is too low. This can result in data loss! Please set it to at least a quorum of master-eligible nodes (current value: [-1], total number of master-eligible nodes used for publishing in this round: [2])
[2017-08-18T23:38:16,294][INFO ][o.e.c.r.a.AllocationService] [node-1] Cluster health status changed from [YELLOW] to [GREEN] (reason: [shards started [[website][4]] ...]).
Slave node application log
[2017-08-18T23:38:38,303][INFO ][o.e.d.DiscoveryModule ] [y1FxQfR] using discovery type [zen]
[2017-08-18T23:38:40,023][INFO ][o.e.n.Node ] initialized
[2017-08-18T23:38:40,023][INFO ][o.e.n.Node ] [y1FxQfR] starting ...
[2017-08-18T23:38:40,420][INFO ][o.e.t.TransportService ] [y1FxQfR] publish_address {192.168.1.104:9300}, bound_addresses {[::]:9300}
[2017-08-18T23:38:40,428][INFO ][o.e.b.BootstrapChecks ] [y1FxQfR] bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks
[2017-08-18T23:38:43,810][INFO ][o.e.c.s.ClusterService ] [y1FxQfR] detected_master {node-1}{VlKvChmBR7aQhObWv_e4Kw}{cVyIHfX9RI6glUw3YsLEVg}{192.168.1.132}{192.168.1.132:9300}, added {{node-1}{VlKvChmBR7aQhObWv_e4Kw}{cVyIHfX9RI6glUw3YsLEVg}{192.168.1.132}{192.168.1.132:9300},}, reason: zen-disco-receive(from master [master {node-1}{VlKvChmBR7aQhObWv_e4Kw}{cVyIHfX9RI6glUw3YsLEVg}{192.168.1.132}{192.168.1.132:9300} committed version [26]])
[2017-08-18T23:38:44,653][INFO ][o.e.h.n.Netty4HttpServerTransport] [y1FxQfR] publish_address {192.168.1.104:9200}, bound_addresses {[::]:9200}
[2017-08-18T23:38:44,653][INFO ][o.e.n.Node ] [y1FxQfR] started
Slave node data directory structure
[root@es-slave01 data]# find ./ -type f | xargs du -lsb
0 ./nodes/0/node.lock
71 ./nodes/0/_state/node-6.st
140 ./nodes/0/_state/global-6.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/segments_2
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/segments_2
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog-1.tlog
747 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/_state/state-5.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/segments_2
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/write.lock
405 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfe
3620 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfs
375 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.si
256 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/segments_4
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/segments_2
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog-1.tlog
[root@es-slave01 data]# echo `find ./ -type f | xargs du -lsb | awk '{print $1}'` | tr " " "+" | bc
7387
The total changed (7387 vs. the master's earlier 7272 bytes), so let's look at the master's data directory again. The difference is exactly the index-level _state file, which grew from 632 to 747 bytes once the replica allocation was recorded in the index metadata.
Master node data directory structure
[root@es-master data]# find ./ -type f | xargs du -lsb
0 ./nodes/0/node.lock
71 ./nodes/0/_state/node-17.st
140 ./nodes/0/_state/global-16.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/write.lock
3620 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfs
405 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfe
375 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.si
256 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/segments_3
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog-2.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog-1.tlog
747 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/_state/state-9.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog-1.tlog
[root@es-master data]# echo `find ./ -type f | xargs du -lsb | awk '{print $1}'` | tr " " "+" | bc
7387
Once the second node joined, the two nodes held exactly the same data. That is not what I wanted: the data is simply mirrored, and if adding more machines only produced the same mirroring, it would be as if all my data sat on a single machine, gaining nothing but disaster recovery and faster reads at a steep storage cost.
So I brought up two more empty nodes and joined them to the cluster, then observed again: the storage pattern changed, and Elasticsearch spread the shards out evenly across the nodes.
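Rather than walking each node's filesystem, shard placement can also be read straight from the _cat/shards API; a minimal sketch through the same Python client (columns: index, shard, prirep, state, docs, store, ip, node):

# A minimal sketch: list where every shard of "website" lives;
# in the prirep column, p = primary and r = replica.
from elasticsearch import Elasticsearch

es = Elasticsearch(hosts=["192.168.1.132"])
print(es.cat.shards(index="website", v=True))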
es-master
[root@es-master data]# find ./ -type f | xargs du -lsb
0 ./nodes/0/node.lock
71 ./nodes/0/_state/node-17.st
140 ./nodes/0/_state/global-16.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/write.lock
3620 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfs
405 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfe
375 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.si
256 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/segments_3
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog-2.tlog
747 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/_state/state-13.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/segments_1
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog-1.tlog
es-slave01
[root@es-slave01 data]# find ./ -type f | xargs du -lsb
0 ./nodes/0/node.lock
71 ./nodes/0/_state/node-6.st
140 ./nodes/0/_state/global-6.st
747 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/_state/state-9.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/write.lock
405 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfe
3620 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.cfs
375 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/_0.si
256 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/index/segments_4
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/3/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/index/segments_2
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/_state/state-0.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/4/translog/translog-1.tlog
es-slave02
[root@es-slave02 data]# find ./ -type f | xargs du -lsb
0 ./nodes/0/node.lock
71 ./nodes/0/_state/node-10.st
140 ./nodes/0/_state/global-9.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/index/segments_2
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/_state/state-1.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/1/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/segments_2
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/_state/state-1.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog-1.tlog
747 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/_state/state-4.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/segments_2
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/_state/state-1.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog-1.tlog
es-slave03
[root@es-slave03 data]# find ./ -type f | xargs du -lsb
0 ./nodes/0/node.lock
71 ./nodes/0/_state/node-3.st
141 ./nodes/0/_state/global-3.st
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/index/segments_3
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/_state/state-1.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/2/translog/translog-1.tlog
0 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/write.lock
162 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/index/segments_3
134 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/_state/state-1.st
48 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog.ckp
43 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/0/translog/translog-1.tlog
747 ./nodes/0/indices/YhxMl-cOQUq6WCYH1sKjRg/_state/state-1.st
Summary
With four nodes, a careful look shows the shards are distributed like this:
es-master: 1 3 4
es-slave01: 3 4
es-slave02: 0 1 2
es-slave03: 2 0
With five nodes:
es-master: 0 1
es-slave01: 3 4
es-slave02: 2 4
es-slave03: 2 3
es-slave04: 0 1
With six nodes:
es-master: 0 1
es-slave01: 3 4
es-slave02: 2 4
es-slave03: 2 3
es-slave04: 1
es-slave05: 0
Taken together: the data always keeps at least one primary shard and one replica shard, and the two copies always end up on different machines.
Addendum (improving performance)
By default an index has five shards. Once the machine count reaches 5, every machine gets exactly one primary shard plus one replica shard. With ten machines, each machine holds just a single shard (it may be a primary or a replica, but only one copy per machine), so the load spreads out and performance should improve substantially in theory.
If that is still not enough, add 5 more machines and set number_of_replicas to 2 via /website/_settings (the default is one replica). That gives 5 primaries plus 5 × 2 = 10 replicas, i.e. 15 shards across 15 machines: 10 machines carry replica shards and 5 carry primaries. In theory performance rises again, and disaster recovery becomes more robust.
By the same logic, 20 machines can run with the replica count set to 3, 25 machines with 4, and so on.
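As a minimal sketch, raising the replica count from Python looks like this (the REST equivalent is a PUT to /website/_settings with the same body):

# A minimal sketch: bump number_of_replicas from the default 1 to 2,
# so 5 primaries + 10 replicas can fill 15 machines, one shard each.
from elasticsearch import Elasticsearch

es = Elasticsearch(hosts=["192.168.1.132"])
es.indices.put_settings(
    index="website",
    body={"number_of_replicas": 2}
)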