nodes:
  - name: "172.24.234.128"
    devices: # specific devices to use for storage can be specified for each node
      - name: "vdb"
  - name: "172.24.234.147"
    devices:
      - name: "vdb"
  - name: "172.24.234.156"
    devices:
      - name: "vdb"
Note: the name under nodes: must match exactly what kubectl get node displays. If the nodes are listed by IP, use the IP; if they are listed by hostname, use the hostname.
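A quick way to confirm which form to use is to list the registered nodes first; the values in the NAME column are exactly what should appear under nodes: -> name: in the cluster manifest (a simple check, output will differ per cluster):

kubectl get node
# copy the NAME column values verbatim into the nodes: list above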
Accessing the ceph-dashboard
kubectl apply -f dashboard-external-https.yaml
Get the access port
kubectl get svc/rook-ceph-mgr-dashboard-external-https -n rook-ceph
NAME                                      TYPE       CLUSTER-IP    EXTERNAL-IP   PORT(S)          AGE
rook-ceph-mgr-dashboard-external-https    NodePort   10.43.117.2   <none>        8443:30519/TCP   53s
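With the NodePort known, the dashboard is reachable at https://<any-node-ip>:30519 (the port will differ per cluster). The login user is admin; the password is generated by Rook and stored in a secret. Assuming the default secret name rook-ceph-dashboard-password used by upstream Rook, it can be read like this:

# read the auto-generated dashboard password (default Rook secret name assumed)
kubectl -n rook-ceph get secret rook-ceph-dashboard-password \
  -o jsonpath="{['data']['password']}" | base64 --decode && echo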
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: rook-ceph
spec:
  failureDomain: host
  replicated:
    size: 3
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  clusterID: rook-ceph
  # Ceph pool into which the RBD image shall be created
  pool: replicapool

  # RBD image format. Defaults to "2".
  imageFormat: "2"

  # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only `layering` feature.
  imageFeatures: layering
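Once the pool and StorageClass are applied, any workload can request RBD-backed storage simply by referencing the StorageClass name. A minimal PVC sketch (the claim name and size are illustrative examples, not part of the original manifests):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc               # example name
spec:
  accessModes:
    - ReadWriteOnce           # RBD block volumes are normally mounted by a single node
  resources:
    requests:
      storage: 1Gi            # example size
  storageClassName: rook-ceph-block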
#!/usr/bin/env bash
DISK="/dev/sdb"

# Zap the disk to a fresh, usable state (zap-all is important, b/c MBR has to be clean)
# You will have to run this step for all disks.
sgdisk --zap-all $DISK
dd if=/dev/zero of="$DISK" bs=1M count=100 oflag=direct,dsync
# These steps only have to be run once on each node
# If rook sets up osds using ceph-volume, teardown leaves some devices mapped that lock the disks.
ls /dev/mapper/ceph-* | xargs -I% -- dmsetup remove %

# ceph-volume setup can leave ceph-<UUID> directories in /dev (unnecessary clutter)
rm -rf /dev/ceph-*

rm /var/lib/rook/ -rf
rm /var/lib/kubelet/plugins/ -rf
rm /var/lib/kubelet/plugins_registry/ -rf
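After zapping and cleaning up, it is worth confirming that the disk no longer carries filesystem or LVM signatures and that no ceph device-mapper entries remain before handing the node back to Rook. A quick check, using the same example device /dev/sdb as above:

# FSTYPE column should be empty and the disk should show no child partitions
lsblk -f /dev/sdb

# should print nothing if all ceph-* mappings were removed
ls /dev/mapper/ | grep ceph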