搭建ceph集群

cluster install 

ca-01 ---------- cm-01

                |---- co-01

                |---- co-02

On all nodes:

# deploy ceph-deploy

echo deb http://download.ceph.com/debian-infernalis/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list

wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -

sudo apt-get update && sudo apt-get install ceph-deploy -y

On the admin node:

# ssh key

ssh-keygen

ssh-copy-id ceph-bj-monitor-01

ssh-copy-id ceph-bj-osd-01

ssh-copy-id ceph-bj-osd-02

# edit ~/.ssh/config

Host ceph-bj-monitor-01

  Hostname ceph-bj-monitor-01

  User root

Host ceph-bj-osd-01

  Hostname ceph-bj-osd-01

  User root

Host ceph-bj-osd-02

  Hostname ceph-bj-osd-02

  User root

# local repo

export CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/debian-luminous/

export CEPH_DEPLOY_GPG_URL=https://mirrors.163.com/ceph/keys/release.asc

# deploy

ceph-deploy new ceph-bj-monitor-01

# check ceph.conf as follows:

[global]

fsid = 144cec2f-ceae-460c-84f7-5df374685e9d

mon_initial_members = ceph-bj-monitor-01

mon_host = 172.16.182.175

auth_cluster_required = cephx

auth_service_required = cephx

auth_client_required = cephx

public network = 172.16.182.0/24

osd pool default size = 2

# Especially note the two lines below if you are using ext4 on the OSDs; refer to http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/ for more

osd_max_object_name_len = 256

osd_max_object_namespace_len = 64

osd journal size = 1024

filestore xattr use omap = true

osd pool default min size = 1

osd pool default pg num = 333

osd pool default pgp num = 333

osd crush chooseleaf type = 1

# deploy continue, add --no-adjust-repos if you are behind a firewall or using a proxy

ceph-deploy install dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02 --no-adjust-repos

ceph-deploy mon create-initial

# mkdir in osds

# emulation with local disk, ignore this if extra hard disk placed

ssh ceph-bj-osd-01

rm -rf /var/local/osd1

sudo mkdir /var/local/osd1

chmod 777 /var/local/osd1

exit

# emulation with local disk, ignore this if extra hard disk placed

ssh ceph-bj-osd-02

rm -rf /var/local/osd2

sudo mkdir /var/local/osd2

chmod 777 /var/local/osd2

exit

ceph-deploy disk zap ceph-bj-osd-01:nvme0n1

ceph-deploy disk zap ceph-bj-osd-01:nvme1n1

ceph-deploy disk zap ceph-bj-osd-02:nvme0n1

ceph-deploy disk zap ceph-bj-osd-02:nvme1n1

ceph-deploy osd prepare ceph-bj-osd-01:/dev/nvme0n1:/dev/nvme1n1 ceph-bj-osd-02:/dev/nvme0n1:/dev/nvme1n1

ceph-deploy osd activate ceph-bj-osd-01:/dev/nvme0n1p1:/dev/nvme1n1p1 ceph-bj-osd-02:/dev/nvme0n1p1:/dev/nvme1n1p1

# distribute key

ceph-deploy admin dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02

sudo chmod +r /etc/ceph/ceph.client.admin.keyring

# uninstall

ceph-deploy uninstall dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02

ceph-deploy purgedata dcg-bj-02 ceph-bj-monitor-01 ceph-bj-osd-01 ceph-bj-osd-02

sudo rm -rf --one-file-system -- /var/lib/ceph

sudo rm -rf --one-file-system -- /etc/ceph/

rm -rf ./my_ceph

# check

ceph health

ceph -w

ceph osd tree

RBD (kernel + librbd)

# ceph -w

    cluster 44eb667d-b061-4ef0-900b-6a173559d702

    health HEALTH_OK

    monmap e1: 1 mons at {ceph-bj-monitor-01=172.16.182.175:6789/0}

            election epoch 4, quorum 0 ceph-bj-monitor-01

    osdmap e17: 2 osds: 2 up, 2 in

            flags sortbitwise,require_jewel_osds

      pgmap v2095: 64 pgs, 1 pools, 0 bytes data, 0 objects

            15476 MB used, 862 GB / 924 GB avail

                  64 active+clean

2018-10-25 03:14:06.925774  [INF] pgmap v2095: 64 pgs: 64 active+clean; 0 bytes data, 15476 MB used, 862 GB / 924 GB avail

# ceph osd tree

ID WEIGHT  TYPE NAME              UP/DOWN REWEIGHT PRIMARY-AFFINITY

-1 0.90309 root default

-2 0.45689    host ceph-bj-osd-01

0 0.45689        osd.0                up  1.00000          1.00000

-3 0.44620    host ceph-bj-osd-02

1 0.44620        osd.1                up  1.00000          1.00000

# rbd create test_image --size 10240

# rbd list

test_image

# rbd info test_image

rbd image 'test_image':

        size 10GiB in 2560 objects

        order 22 (4MiB objects)

        block_name_prefix: rbd_data.37196b8b4567

        format: 2

        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten

        flags:

issue happened when mapping to block

# rbd map test_image

rbd: sysfs write failed

RBD image feature set mismatch. Try disabling features unsupported by the kernel with "rbd feature disable".

In some cases useful info is found in syslog - try "dmesg | tail".

rbd: map failed: (6) No such device or address

# dmesg | tail

[351821.997061] usb 3-7: Manufacturer: Dell

[351821.997310] usb 3-7: ep 0x81 - rounding interval to 128 microframes, ep desc says 192 microframes

[351822.023940] input: Dell Dell USB Keyboard as /devices/pci0000:00/0000:00:14.0/usb3/3-7/3-7:1.0/0003:413C:2105.0003/input/input3

[351822.078560] hid-generic 0003:413C:2105.0003: input,hidraw2: USB HID v1.10 Keyboard [Dell Dell USB Keyboard] on usb-0000:00:14.0-7/input0

[351833.800882] usb 3-7: USB disconnect, device number 3

[1109182.054573] Key type ceph registered

[1109182.055604] libceph: loaded (mon/osd proto 15/24)

[1109182.059629] rbd: loaded

[1109682.929947] libceph: client14108 fsid 44eb667d-b061-4ef0-900b-6a173559d702

[1109682.932270] libceph: mon0 172.16.182.175:6789 session established

# rbd showmapped

# fdisk -l

Disk /dev/sda: 931.5 GiB, 1000204886016 bytes, 1953525168 sectors

Units: sectors of 1 * 512 = 512 bytes

Sector size (logical/physical): 512 bytes / 512 bytes

I/O size (minimum/optimal): 512 bytes / 512 bytes

Disklabel type: dos

Disk identifier: 0x67de61a9

Device    Boot      Start        End    Sectors  Size Id Type

/dev/sda1  *          2048 1951522815 1951520768 930.6G 83 Linux

/dev/sda2      1951524862 1953523711    1998850  976M  5 Extended

/dev/sda5      1951524864 1953523711    1998848  976M 82 Linux swap / Solaris

#

Resolve this issue by specifying the image features manually at creation time (limit the image to the `layering` feature, which the kernel RBD client supports):

# rbd create test_image2 --size 10G --image-format 2 --image-feature layering

# rbd ls

test_image

test_image2

# rbd map test_image2

/dev/rbd0

# rbd showmapped

id pool image      snap device

0  rbd  test_image2 -    /dev/rbd0

# fdisk -l

Disk /dev/sda: 931.5 GiB, 1000204886016 bytes, 1953525168 sectors

Units: sectors of 1 * 512 = 512 bytes

Sector size (logical/physical): 512 bytes / 512 bytes

I/O size (minimum/optimal): 512 bytes / 512 bytes

Disklabel type: dos

Disk identifier: 0x67de61a9

Device    Boot      Start        End    Sectors  Size Id Type

/dev/sda1  *          2048 1951522815 1951520768 930.6G 83 Linux

/dev/sda2      1951524862 1953523711    1998850  976M  5 Extended

/dev/sda5      1951524864 1953523711    1998848  976M 82 Linux swap / Solaris

Disk /dev/rbd0: 10 GiB, 10737418240 bytes, 20971520 sectors

Units: sectors of 1 * 512 = 512 bytes

Sector size (logical/physical): 512 bytes / 512 bytes

I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes

# ll /dev/rbd0

brw-rw---- 1 root disk 251, 0 Oct 24 19:33 /dev/rbd0

# rados mkpool pool

successfully created pool pool

# rados lspools

rbd

pool

# rbd create pool/image1 --size 1G --image-format 2 --image-feature layering

# rbd list

test_image

test_image2

# rbd list pool

image1

# rbd info pool/image1

rbd image 'image1':

        size 1GiB in 256 objects

        order 22 (4MiB objects)

        block_name_prefix: rbd_data.37276b8b4567

        format: 2

        features: layering

        flags:

# rbd create pool/image2 --size 1G --order 24 --image-format 2 --image-feature layering

rbd: --order is deprecated, use --object-size

# rbd list pool

image1

image2

# rbd info pool/image2

rbd image 'image2':

        size 1GiB in 64 objects

        order 24 (16MiB objects)

        block_name_prefix: rbd_data.372b6b8b4567

        format: 2

        features: layering

        flags:

# rbd rm pool/image2

Removing image: 100% complete...done.

snapshot

# rbd snap create pool/image1@image1_snap

# rbd snap list

rbd: image name was not specified

# rbd snap list pool/image1

SNAPID NAME        SIZE TIMESTAMP

    4 image1_snap 1GiB

# rbd ls pool -l

NAME              SIZE PARENT FMT PROT LOCK

image1            1GiB          2

image1@image1_snap 1GiB          2

# rbd info pool/image1@image1_snap

rbd image 'image1':

        size 1GiB in 256 objects

        order 22 (4MiB objects)

        block_name_prefix: rbd_data.37276b8b4567

        format: 2

        features: layering

        flags:

        protected: False

# rbd snap protect pool/image1@image1_snap

# rbd info pool/image1@image1_snap

rbd image 'image1':

        size 1GiB in 256 objects

        order 22 (4MiB objects)

        block_name_prefix: rbd_data.37276b8b4567

        format: 2

        features: layering

        flags:

        protected: True

# rbd clone pool/image1@image1_snap rbd/image2

# rbd ls rbd -l

NAME        SIZE PARENT                  FMT PROT LOCK

image2      1GiB pool/image1@image1_snap  2

test_image  10GiB                          2

test_image2 10GiB                          2

# rbd children pool/image1@image1_snap

rbd/image2

# rbd flatten rbd/image2

Image flatten: 100% complete...done.

# rbd ls rbd -l

NAME        SIZE PARENT FMT PROT LOCK

image2      1GiB          2

test_image  10GiB          2

test_image2 10GiB          2

export/import

# rbd export pool/image1 /tmp/image1_export

Exporting image: 100% complete...done.

# ls -alh /tmp/image1_export

-rw-r--r-- 1 root root 1.0G Oct 24 19:47 /tmp/image1_export

# rbd import /tmp/image1_export pool/image2 --image-format 2

Importing image: 100% complete...done.

# rbd ls pool -l

NAME              SIZE PARENT FMT PROT LOCK

image1            1GiB          2

image1@image1_snap 1GiB          2 yes

image2            1GiB          2

# rbd ls -l

NAME        SIZE PARENT FMT PROT LOCK

image2      1GiB          2

test_image  10GiB          2

test_image2 10GiB          2

最后编辑于
©著作权归作者所有,转载或内容合作请联系作者
  • 序言:七十年代末,一起剥皮案震惊了整个滨河市,随后出现的几起案子,更是在滨河造成了极大的恐慌,老刑警刘岩,带你破解...
    沈念sama阅读 200,667评论 5 472
  • 序言:滨河连续发生了三起死亡事件,死亡现场离奇诡异,居然都是意外死亡,警方通过查阅死者的电脑和手机,发现死者居然都...
    沈念sama阅读 84,361评论 2 377
  • 文/潘晓璐 我一进店门,熙熙楼的掌柜王于贵愁眉苦脸地迎上来,“玉大人,你说我怎么就摊上这事。” “怎么了?”我有些...
    开封第一讲书人阅读 147,700评论 0 333
  • 文/不坏的土叔 我叫张陵,是天一观的道长。 经常有香客问我,道长,这世上最难降的妖魔是什么? 我笑而不...
    开封第一讲书人阅读 54,027评论 1 272
  • 正文 为了忘掉前任,我火速办了婚礼,结果婚礼上,老公的妹妹穿的比我还像新娘。我一直安慰自己,他们只是感情好,可当我...
    茶点故事阅读 62,988评论 5 361
  • 文/花漫 我一把揭开白布。 她就那样静静地躺着,像睡着了一般。 火红的嫁衣衬着肌肤如雪。 梳的纹丝不乱的头发上,一...
    开封第一讲书人阅读 48,230评论 1 277
  • 那天,我揣着相机与录音,去河边找鬼。 笑死,一个胖子当着我的面吹牛,可吹牛的内容都是我干的。 我是一名探鬼主播,决...
    沈念sama阅读 37,705评论 3 393
  • 文/苍兰香墨 我猛地睁开眼,长吁一口气:“原来是场噩梦啊……” “哼!你这毒妇竟也来了?” 一声冷哼从身侧响起,我...
    开封第一讲书人阅读 36,366评论 0 255
  • 序言:老挝万荣一对情侣失踪,失踪者是张志新(化名)和其女友刘颖,没想到半个月后,有当地人在树林里发现了一具尸体,经...
    沈念sama阅读 40,496评论 1 294
  • 正文 独居荒郊野岭守林人离奇死亡,尸身上长有42处带血的脓包…… 初始之章·张勋 以下内容为张勋视角 年9月15日...
    茶点故事阅读 35,405评论 2 317
  • 正文 我和宋清朗相恋三年,在试婚纱的时候发现自己被绿了。 大学时的朋友给我发了我未婚夫和他白月光在一起吃饭的照片。...
    茶点故事阅读 37,453评论 1 329
  • 序言:一个原本活蹦乱跳的男人离奇死亡,死状恐怖,灵堂内的尸体忽然破棺而出,到底是诈尸还是另有隐情,我是刑警宁泽,带...
    沈念sama阅读 33,126评论 3 315
  • 正文 年R本政府宣布,位于F岛的核电站,受9级特大地震影响,放射性物质发生泄漏。R本人自食恶果不足惜,却给世界环境...
    茶点故事阅读 38,725评论 3 303
  • 文/蒙蒙 一、第九天 我趴在偏房一处隐蔽的房顶上张望。 院中可真热闹,春花似锦、人声如沸。这庄子的主人今日做“春日...
    开封第一讲书人阅读 29,803评论 0 19
  • 文/苍兰香墨 我抬头看了看天上的太阳。三九已至,却和暖如春,着一层夹袄步出监牢的瞬间,已是汗流浃背。 一阵脚步声响...
    开封第一讲书人阅读 31,015评论 1 255
  • 我被黑心中介骗来泰国打工, 没想到刚下飞机就差点儿被人妖公主榨干…… 1. 我叫王不留,地道东北人。 一个月前我还...
    沈念sama阅读 42,514评论 2 346
  • 正文 我出身青楼,却偏偏与公主长得像,于是被迫代替她去往敌国和亲。 传闻我的和亲对象是个残疾皇子,可洞房花烛夜当晚...
    茶点故事阅读 42,111评论 2 341

推荐阅读更多精彩内容

  • 随着国内劳动力成本增加,机器人行业即将爆发。今天来说说这个行业的销售技巧,希望自己的一点销售技巧能在对正从事这个行...
    一抹在抹阅读 196评论 0 0
  • 1 笑来老师最近因为语音泄露事情又引起媒体广泛关注。我对于事情本身不予置评,只说说我对笑来老师的一些看法。 2 最...
    冀云来了阅读 210评论 0 0
  • 六顶思考帽 蓝色(控制之帽,负责整理思考本身,就像乐队指挥,负责定义思考所指向的主题,确定焦点,负责小结,综述,和...
    梅利酱阅读 198评论 0 0
  • 认识一个人需要时间,需要条件,很多人呈现在你面前的是做出来的常态,说出来的是不负责任尽兴的好听话。
    丹青妙音阅读 71评论 0 0