Deploying a Single-Node Ceph Environment on CentOS 7

  • Configure the Ceph yum repository
    [Ceph]
    name=Ceph packages for $basearch
    baseurl=https://mirrors.aliyun.com/ceph/rpm-jewel/el7/$basearch
    enabled=1
    gpgcheck=1
    type=rpm-md
    gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    priority=1
    [Ceph-noarch]
    name=Ceph noarch packages
    baseurl=https://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch
    enabled=1
    gpgcheck=1
    type=rpm-md
    gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    priority=1
    [ceph-source]
    name=Ceph source packages
    baseurl=https://mirrors.aliyun.com/ceph/rpm-jewel/el7/SRPMS
    enabled=1
    gpgcheck=1
    type=rpm-md
    gpgkey=https://mirrors.aliyun.com/ceph/keys/release.asc
    priority=1
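
    The three sections above belong in a repo file under /etc/yum.repos.d/; a minimal sketch, assuming they were drafted in a local file named ceph.repo:
    sudo cp ceph.repo /etc/yum.repos.d/ceph.repo
    sudo yum clean all && sudo yum makecache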
    
  • Disable firewalld, NetworkManager, and SELinux
    systemctl stop firewalld
    systemctl disable firewalld
    systemctl stop NetworkManager
    systemctl disable NetworkManager
    sed -i '/^SELINUX=/cSELINUX=disabled' /etc/selinux/config # takes effect after the OS is rebooted
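
    To avoid waiting for a reboot, SELinux can also be dropped to permissive for the current session, and the services checked:
    setenforce 0                                   # effective immediately, until the next reboot
    getenforce                                     # should print Permissive
    systemctl is-active firewalld NetworkManager   # should print inactive for both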
    
  • Create a cephdeploy user on the server and configure passwordless sudo
    useradd -m cephdeploy
    passwd cephdeploy
    echo "cephdeploy ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d	/cephdeploy
    chmod 0440 /etc/sudoers.d/cephdeploy
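
    A quick check that passwordless sudo works for the new user:
    su - cephdeploy -c 'sudo -n true' && echo "passwordless sudo OK"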
    
  • Configure hostname resolution for the node
    echo "192.168.1.2 ceph" >> /etc/hosts
    
  • Configure passwordless ssh from the cephdeploy user to itself
    su - cephdeploy
    ssh-keygen
    # single node: copy the key to the local host
    ssh-copy-id ceph
    # for a multi-node cluster, loop over every node instead, e.g.:
    # for host in ceph1 ceph2 ceph3; do ssh-copy-id $host; done
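
    Verify that key-based login works without a password prompt:
    ssh ceph hostname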
    
  • Initialization
    yum install ceph-deploy
    su - cephdeploy
    mkdir $HOME/my-cluster
    cd $HOME/my-cluster
    ceph-deploy new ceph
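
    ceph-deploy new writes the initial cluster files into the working directory; check with:
    ls
    # expect ceph.conf, ceph.mon.keyring, and a ceph-deploy log file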
    
  • Edit the configuration file
    echo "osd crush chooseleaf type = 0" >> ceph.conf
    echo "osd pool default size = 1" >> ceph.conf
    echo "osd journal size = 100" >> ceph.conf
    # needed only for multi-node cluster deployments:
    #public network = 192.168.1.0/24
    #cluster network = 192.168.2.0/24
    #rgw_override_bucket_index_max_shards = 100
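
    After these edits, a single-node ceph.conf might look roughly like this (fsid and mon entries are the ones ceph-deploy new generated):
    [global]
    fsid = <generated-uuid>
    mon_initial_members = ceph
    mon_host = 192.168.1.2
    auth_cluster_required = cephx
    auth_service_required = cephx
    auth_client_required = cephx
    osd crush chooseleaf type = 0
    osd pool default size = 1
    osd journal size = 100
    public network = 192.168.1.0/24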
    
  • Install Ceph
    ceph-deploy install ceph
    # after installation, verify with: ceph --version
    # if this is not a fresh install and it fails, try purging the old Ceph data and reinstalling:
    #ceph-deploy purgedata ceph
    #ceph-deploy forgetkeys
    #ceph-deploy purge ceph
    # or run:
    #yum install *argparse* -y
    
  • Deploy the monitor
    ceph-deploy mon create-initial
    # if you see "/etc/init.d/ceph: line 15: /lib/lsb/init-functions: No such file or directory", run:
    #yum install redhat-lsb -y
    # warning: [WARNIN] neither `public_addr` nor `public_network` keys are defined for monitors
    # means public_network must be added to ceph.conf and the config pushed again:
    #ceph-deploy --overwrite-conf config push ceph
    # for a multi-node cluster, also push the generated keyrings to the other hosts
    ceph-deploy admin ceph
    Once configured, check the cluster health with ceph -s.
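
    On CentOS 7 the monitor also runs as a systemd unit named after the host, so it can be checked directly:
    sudo systemctl status ceph-mon@ceph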
    
  • Add OSDs
    1. Prepare two block devices (physical disks or LVM volumes); here LVM logical volumes backed by a loop device are used
    dd if=/dev/zero of=ceph-volumes.img bs=1M count=16384 oflag=direct
    sgdisk -g --clear ceph-volumes.img
    sudo vgcreate ceph-volumes $(sudo losetup --show -f ceph-volumes.img)
    sudo lvcreate -L8G -nceph0 ceph-volumes
    sudo lvcreate -L8G -nceph1 ceph-volumes
    sudo mkfs.xfs -f /dev/ceph-volumes/ceph0
    sudo mkfs.xfs -f /dev/ceph-volumes/ceph1
    sudo mkdir -p /srv/ceph/{osd0,osd1,mon0,mds0}
    sudo mount /dev/ceph-volumes/ceph0 /srv/ceph/osd0
    sudo mount /dev/ceph-volumes/ceph1 /srv/ceph/osd1 
    # this creates two logical volumes, ceph0 and ceph1, mounted at /srv/ceph/osd0 and /srv/ceph/osd1
    2. Prepare the two OSDs
    ceph-deploy osd prepare ceph:/srv/ceph/osd0
    ceph-deploy osd prepare ceph:/srv/ceph/osd1
    3. Activate the two OSDs
    ceph-deploy osd activate ceph:/srv/ceph/osd0
    ceph-deploy osd activate ceph:/srv/ceph/osd1
    # possible error:
    # RuntimeError: Failed to execute command: ceph-disk -v activate --mark-init upstart --mount /srv/ceph/osd0
    # [WARNIN] 2021-01-12 16:43:04.109256 7fbcdfb4aac0 -1 OSD::mkfs: ObjectStore::mkfs failed with error -13
    # [WARNIN] 2021-01-12 16:43:04.109310 7fbcdfb4aac0 -1  ** ERROR: error creating empty object store in /srv/ceph/osd0: (13) Permission denied

    # fix: run sudo chown -R ceph:ceph /srv/ceph/, then activate again
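
    Note that the loop device backing ceph-volumes.img does not persist across reboots; a sketch for re-attaching it so the OSD mounts come back:
    sudo losetup -f ceph-volumes.img
    sudo vgchange -ay ceph-volumes
    sudo mount /dev/ceph-volumes/ceph0 /srv/ceph/osd0
    sudo mount /dev/ceph-volumes/ceph1 /srv/ceph/osd1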
    
  • Verify
    ceph -s                                  # installation / cluster status
    ceph -w                                  # watch cluster health in real time
    ceph quorum_status --format json-pretty  # monitor quorum status
    ceph mon stat
    ceph osd stat
    ceph osd tree                            # shows the CRUSH tree
    ceph pg stat
    ceph auth list                           # cluster authentication keys
    
  • Create an object storage gateway (RGW)
    su - cephdeploy
    ceph-deploy rgw create ceph
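
    With jewel, ceph-deploy starts the gateway with civetweb listening on port 7480 by default; a quick check:
    curl http://localhost:7480
    # an XML ListAllMyBucketsResult response means the gateway is answering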
    
  • Adjust pg_num and pgp_num for the default RGW data pool
    ceph osd pool set default.rgw.buckets.data pg_num 128
    ceph osd pool set default.rgw.buckets.data pgp_num 128
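
    Confirm the new values took effect:
    ceph osd pool get default.rgw.buckets.data pg_num
    ceph osd pool get default.rgw.buckets.data pgp_num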
    
  • Create a Swift user
    radosgw-admin user create --subuser="admin:swift_user" --uid="swift_id" --display-name="swift_name" --key-type=swift --secret="swift_password" --access=full
    #admin:swift_user: the "user:subuser" pair used by the Swift interface
    #swift_id: the Swift user id
    #swift_name: the Swift display name
    #swift_password: the Swift secret key
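
    Inspect the created user and its Swift key (assuming the uid above):
    radosgw-admin user info --uid=swift_id
    # the secret appears in the "swift_keys" array of the JSON output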
    
  • Install the Swift client
    yum install python2-pip
    pip install python-swiftclient
    
  • Create a Swift container
    swift -A http://localhost:port/auth/v1 -Uadmin:swift_user -Kswift_password post container_name
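
    Substitute the gateway's real port for "port" (7480 is the jewel default); then verify the container exists:
    swift -A http://localhost:7480/auth/v1 -Uadmin:swift_user -Kswift_password list
    # container_name should appear in the listing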
    
  • Quick exercise
    # create a test file to use as object data
    echo {Test-data} > testfile.txt
    ceph osd pool create mytest 8
    # rados put takes an object name, the file holding the object data, and the pool name:
    # rados put {object-name} {file-path} --pool=mytest
    rados put test-object-1 testfile.txt --pool=mytest
    # verify the cluster stored the object
    rados -p mytest ls
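    # optional sanity check: read the object back and compare with the source file
    rados get test-object-1 testfile.out --pool=mytest
    diff testfile.txt testfile.out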
    # find the object location
    # ceph osd map {pool-name} {object-name}
    ceph osd map mytest test-object-1
    # ceph prints the object location, e.g.:
    # osdmap e537 pool 'mytest' (1) object 'test-object-1' -> pg 1.d1743484 (1.4) -> up [1,0] acting [1,0]
    # delete the test object
    rados rm test-object-1 --pool=mytest
    # delete the mytest pool (deletion requires the pool name twice plus a confirmation flag)
    ceph osd pool rm mytest mytest --yes-i-really-really-mean-it
    