Linux:Filesystems



Filesystems


# List clients that have mounted exports from the local NFS server
showmount
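
From a client, the exports offered by a remote NFS server can be queried as well (using the example server address from the NFS section below):

# List exports offered by a remote NFS server
showmount -e 192.168.20.1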

NFS

Checks

# Show NFS server and client statistics
nfsstat

# Detailed RPC, NFS and network statistics
nfsstat -o all

# Every RPC "program" is bound to a specific NFS version. Use the NFS/CTDB logs together with the program ID to identify the failing component
rpcinfo -p
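
To probe whether a specific RPC program and version actually answers over a given transport (a quick check; the hostname is a placeholder):

# Probe NFS version 3 over TCP on a specific server
rpcinfo -T tcp nfs-server.example.com nfs 3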

Common

Exports

Use the file /etc/exports to define exports to clients.

# Create the folders before exporting them
mkdir -p /data/exports/customer1000/finance
mkdir -p /data/exports/customer1001/backup

NFSv3 example:

#////////////////////////////////////////////////////////////////////////////////////////////
# Customer1000
/data/exports/customer1000/finance 192.168.20.1(rw,no_root_squash,sync) 192.168.20.2(rw,sync)
#////////////////////////////////////////////////////////////////////////////////////////////
# Customer1001
/data/exports/customer1001/backup 192.168.30.1(rw,no_root_squash) 192.168.30.2(rw,no_root_squash,sync)
# Reload the NFS server to apply changes within /etc/exports
systemctl reload nfs-server
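
To apply and verify exports without reloading the whole service, exportfs can be used as well (standard NFS server tooling):

# Re-export all entries in /etc/exports
exportfs -ra

# Show the currently active exports and their options
exportfs -v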

Client mount

# Install NFS client (Ubuntu)
apt install nfs-common

# Install NFS client (RHEL)
yum install nfs-utils

# Mount the NFS share /data/exports/customer1000/finance from server 192.168.20.1 on the local path /mnt/nfs/
mount -v -t nfs 192.168.20.1:/data/exports/customer1000/finance /mnt/nfs/
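
For a persistent mount across reboots, an /etc/fstab entry can be added (a sketch using the same server and paths as above):

# /etc/fstab
192.168.20.1:/data/exports/customer1000/finance  /mnt/nfs  nfs  defaults,_netdev  0 0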

Optimizations

Change these values depending on your usage and the available resources on your server.

# /etc/sysctl.d/nfs-tuning.conf
net.core.rmem_max=1048576
net.core.rmem_default=1048576
net.core.wmem_max=1048576
net.core.wmem_default=1048576
net.ipv4.tcp_rmem=4096 1048576 134217728
net.ipv4.tcp_wmem=4096 1048576 134217728
vm.min_free_kbytes=8388608
# Reload above optimization
sysctl -p /etc/sysctl.d/nfs-tuning.conf
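
Client-side throughput can also be tuned with NFS mount options (a sketch; suitable rsize/wsize values depend on your network, server and workload):

# Mount with larger read/write block sizes
mount -v -t nfs -o rsize=1048576,wsize=1048576,hard 192.168.20.1:/data/exports/customer1000/finance /mnt/nfs/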


Raise the number of NFS threads

# /etc/sysconfig/nfs

# Number of nfs server processes to be started.
# The default is 8.
#RPCNFSDCOUNT=16
RPCNFSDCOUNT=128


Change the NFSD thread count on the fly

rpc.nfsd 64

# Check the number of threads
cat /proc/fs/nfsd/threads
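
To see how busy the NFS server threads actually are, the thread statistics can be inspected (the exact format of the "th" line varies per kernel version):

# Show NFS server thread statistics
grep th /proc/net/rpc/nfsd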

Ceph

Checks

# Display the running Ceph version
ceph -v

# Check the cluster's health and status
ceph -s

# Watch the cluster's health and status in real time
ceph -w

# Show detailed information about cluster health issues
ceph health detail

# List all daemons deployed by cephadm on the local host
cephadm ls

# Dump the cluster configuration database
ceph config dump

# List all services managed by the orchestrator (including OSDs)
ceph orch ls

# List all hosts, their labels and basic host resource information
ceph orch host ls --detail

# List available storage devices
ceph orch device ls

# List all Ceph daemons
ceph orch ps

# List Ceph daemons of a specific type
ceph orch ps --daemon_type=mgr

# Show the full specification and status of a specific service
ceph orch ls --service_name osd.all-available-devices --format yaml

# Re-check the status of a host
ceph cephadm check-host storage-3

# Dump the operations currently in flight on a Ceph daemon (here an MDS), via its admin socket
ceph daemon /var/run/ceph/ceph-mds.xxxxxxxx.vxokby.asok dump_ops_in_flight
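
To dig further into a single daemon listed by "ceph orch ps", its journal can be shown on the host running it (a sketch; "mgr.storage-1.abcdef" is a hypothetical daemon name taken from the orch ps output):

# Show the journal of a specific daemon on the local host
cephadm logs --name mgr.storage-1.abcdef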

OSDs

# List all pools
ceph osd lspools

# See the status of all OSDs
ceph osd stat

# List all OSDs
ceph osd tree

# List all OSDs and related information in detail
ceph osd df tree

PGs

# List all Placement Groups
ceph pg dump

# Check the status of Ceph PGs
ceph pg stat

Authentication

# List all created clients and their permissions
ceph auth ls

# List permissions for a specific client
ceph auth get client.cinder

Commands

# Enter the Ceph shell (single cluster)
cephadm shell

# Enter the Ceph shell for a specific cluster
sudo /usr/sbin/cephadm shell --fsid asdjwqe-asjd324-asdki321-821asd-asd241-asdn1234- -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin2.keyring

# Give node storage-4, which is already a cluster member, the _admin label
ceph orch host label add storage-4 _admin
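
The label can be removed again later if needed (same host and label as above):

# Remove the _admin label from storage-4
ceph orch host label rm storage-4 _admin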

Installation (Quincy)

Using Cephadm: https://docs.ceph.com/en/quincy/cephadm/install/

Cephadm

# Create a folder for the cephadm tool
mkdir cephadm
cd cephadm/

# Download cephadm (Quincy)
curl --silent --remote-name --location https://github.com/ceph/ceph/raw/quincy/src/cephadm/cephadm
chmod +x cephadm

# Output help
./cephadm -h

# Install cephadm (Quincy) release
./cephadm add-repo --release quincy
./cephadm install

# Check if cephadm is properly installed
which cephadm

Bootstrap

# Bootstrap node and install Ceph
cephadm bootstrap --mon-ip 192.168.100.11

# Check the status of the cluster
cephadm shell -- ceph -s
docker ps


## Optional
# Enter the Ceph shell (single cluster)
cephadm shell

# Exit the Ceph shell
exit

# Install common Ceph packages/tools 
cephadm install ceph-common

# Display the Ceph version
ceph -v

Add additional hosts

# On your bootstrapped node, create an SSH key for access to the other hosts.
ssh-keygen
cat .ssh/id_rsa.pub

# Add the newly generated key to the authorized_keys file of the relevant user on the other hosts.

# Copy the Ceph cluster's public key to the other nodes
ssh-copy-id -f -i /etc/ceph/ceph.pub root@storage-2
ssh-copy-id -f -i /etc/ceph/ceph.pub root@storage-3

# Add the other nodes to the cluster, and assign them the _admin label
ceph orch host add storage-2 10.4.20.2 _admin
ceph orch host add storage-3 10.4.20.3 _admin

Configuration

OSD creation

If you've installed ceph-osd on your host, this step will fail horribly with errors such as:

-1 bluestore(/var/lib/ceph/osd/ceph-1//block) _read_bdev_label failed to open /var/lib/ceph/osd/ceph-1//block: (13) Permission denied
-1 bdev(0x5571d5f69400 /var/lib/ceph/osd/ceph-1//block) open open got: (13) Permission denied
-1 OSD::mkfs: ObjectStore::mkfs failed with error (13) Permission denied
-1  ** ERROR: error creating empty object store in /var/lib/ceph/osd/ceph-0/: (13) Permission denied
 OSD, will rollback changes
# Configure all available storage to be used as OSD storage
ceph orch apply osd --all-available-devices

# Check for OSD problems
watch ceph -s
watch ceph osd tree
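
If a previously used disk is not picked up automatically, it may still contain old data and can be wiped so the orchestrator can reuse it (destructive; "storage-3" and "/dev/sdb" are placeholder values):

# Wipe a device so it becomes available as an OSD again
ceph orch device zap storage-3 /dev/sdb --force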

Delete pool

# Set ability to remove pools to true
ceph config set mon mon_allow_pool_delete true

# Remove the pool
ceph osd pool rm tester tester --yes-i-really-really-mean-it

# Set ability to remove pools to false
ceph config set mon mon_allow_pool_delete false
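
For reference, a throwaway pool like the one removed above can be created beforehand (a sketch; pool name and PG count are arbitrary):

# Create a replicated pool named tester with 32 placement groups
ceph osd pool create tester 32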

Upgrade

Make sure your cluster status is healthy first!

# Upgrade Ceph to a specific version
ceph orch upgrade start --ceph-version 17.2.0

# Check the status of the Ceph upgrade
ceph orch upgrade status

# Stop the Ceph upgrade
ceph orch upgrade stop

Ceph client

Via Kernel

Mount a Ceph filesystem share using the kernel, Cephx and 3 mon hosts:

# Install common Ceph package for your distribution
apt-get install ceph-common

# Create and fill the ceph.conf file (make sure it ends with a newline)
cat << 'EOF' >> /etc/ceph/ceph.conf
# minimal ceph.conf for 492f528f-90ae-49e0-b622-ae58b85e8cf0
[global]
        fsid = 492f528f-90ae-49e0-b622-ae58b85e8cf0
        mon_host = [v2:192.168.0.11:3300/0,v1:192.168.0.11:6789/0] [v2:192.168.0.12:3300/0,v1:192.168.0.12:6789/0] [v2:192.168.0.13:3300/0,v1:192.168.0.13:6789/0]
EOF

# Add the Cephx keyring for your user
cat << 'EOF' >> /etc/ceph/ceph.client.sofie.keyring
[client.sofie]
        key = AIAOIWmaskjhqweASKhqwekjhASD==
EOF

# Mount the Ceph share: specify the Ceph mons, the path of the share on the cluster, the local mount point, and the user to connect with
mount -t ceph 192.168.0.11:6789,192.168.0.12:6789,192.168.0.13:6789:/shares/mycustomer/asd8asd8-as8d83-df4mjvjdf /mnt/ceph/mylocalsharelocation -o name=sofie

Ceph-fuse

# Mount a Ceph filesystem using the ceph-fuse client
apt install ceph-fuse
mkdir myshare/

nano sofie.keyring
[client.sofie]
        key = AQCHc7tlvEUqOBasjdHASJD9Lma84nASDJqwe==


nano ceph.conf
[client]
        client quota = true
        mon host = 192.168.10.1:6789, 192.168.10.2:6789, 192.168.10.3:6789


sudo ceph-fuse ~/myshare \
--id=sofie \
--conf=./ceph.conf \
--keyring=./sofie.keyring \
--client-mountpoint=/volumes/_nogroup/6e99687f-asd2-47b0-8ba1-asduoiqwe/12398asnjd-0126-4cb3-9242-asduio1q23


# Debug mode for troubleshooting
sudo ceph-fuse ~/myshare \
--id=sofie \
--conf=./ceph.conf \
--keyring=./sofie.keyring \
--client-mountpoint=/volumes/_nogroup/6e99687f-asd2-47b0-8ba1-asduoiqwe/12398asnjd-0126-4cb3-9242-asduio1q23 -d -o debug
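
When finished, the FUSE mount can be released again (standard FUSE tooling):

# Unmount the ceph-fuse mount
fusermount -u ~/myshare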

RBD-NBD

# List available volumes within the openstackhdd pool
rbd ls openstackhdd

# List all available snapshots for object volume-asd9p12o3-90b2-1238-1209-as980d7213hs, which reside in pool openstackhdd
rbd snap ls openstackhdd/volume-asd9p12o3-90b2-1238-1209-as980d7213hs

# Map the volume-object to the local filesystem
rbd-nbd map openstackhdd/volume-asd9p12o3-90b2-1238-1209-as980d7213hs

# Map the volume-object as read-only to the local filesystem
rbd-nbd map --read-only openstackhdd/volume-asd9p12o3-90b2-1238-1209-as980d7213hs

# List currently mapped objects
rbd-nbd list-mapped

# Check what filesystem and partition the device contains
fdisk -l /dev/nbd1

# Mount the device to a local folder
mount /dev/nbd1p1 /mnt/storage

# Unmount the device from the local folder
umount /mnt/storage


# Two methods to unmap
# Unmap by device path
rbd-nbd unmap /dev/nbd1

# Unmap by image name
rbd-nbd unmap volume-asd9p12o3-90b2-1238-1209-as980d7213hs
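
To inspect a point-in-time snapshot without touching the live volume, a snapshot can be mapped read-only as well (a sketch; "mysnapshot" is a hypothetical snapshot name taken from "rbd snap ls"):

# Map a specific snapshot of the volume as read-only
rbd-nbd map --read-only openstackhdd/volume-asd9p12o3-90b2-1238-1209-as980d7213hs@mysnapshot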

Remove node

# Remove running daemons
ceph orch host drain storage-3

# Remove host from the cluster
ceph orch host rm storage-3

# On storage-3, reboot the node
shutdown -r now

Destroy node

Scorched earth
Only execute if you want to annihilate your node and/or cluster.

# Kill and destroy OSD 0
ceph osd down 0 && ceph osd destroy 0 --force

# Stop Ceph services
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mon.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@crash.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mgr.host-1.xmatqa.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mon.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@node-exporter.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@prometheus.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn.target

# Disable Ceph services
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mon.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@crash.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mgr.host-1.xmatqa.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mon.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@node-exporter.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@prometheus.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn.target

# Destroy everything (packages, containers, configuration)
ceph-deploy uninstall host-1
ceph-deploy purge host-1
rm -rf /var/lib/ceph

# Check for failed services
systemctl | grep ceph

# Reset them so they disable properly
systemctl reset-failed ceph-asd82asd-asd8-as92-a889-po89xc732cmn@prometheus.host-1.service

# Reboot the node
shutdown -r now

BTRFS

Using LVM

# Install LVM creation tools depending on your OS
yum install lvm2
apt install lvm2

# Check and note the disk you need
fdisk -l

# Create a partition on /dev/vdb and set its partition type to Linux LVM (8e)
echo -e "n\np\n1\n\n\nt\n8E\np\nw" | fdisk /dev/vdb
 
# Create the LVM physical volume, volume group and logical volume
pvcreate /dev/vdb1
vgcreate vdb_vg /dev/vdb1
lvcreate -l 100%FREE  -n btrfs vdb_vg
 
# Check the physical volumes and volume groups
pvs
vgs
 
# Create the BTRFS filesystem
mkfs.btrfs /dev/vdb_vg/btrfs
 
# Create a folder for the BTRFS mount
mkdir -p /mnt/btrfs1

# Mount the BTRFS filesystem
mount -t btrfs /dev/vdb_vg/btrfs /mnt/btrfs1/
 
# Modify fstab so the filesystem gets mounted automatically on boot
cat << 'EOF' >> /etc/fstab
/dev/mapper/vdb_vg-btrfs  /mnt/btrfs1    btrfs     defaults        0 0
EOF
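
After editing fstab, the entry can be tested without rebooting (standard mount and btrfs tooling):

# Mount everything listed in fstab and verify the BTRFS filesystem is visible
mount -a
btrfs filesystem show /mnt/btrfs1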