Linux:Filesystems

Filesystems


# List clients that have mounted exports from the local NFS server
showmount
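
A couple of related showmount invocations can be useful as well (the server address below is a placeholder):

# List exports offered by a remote NFS server
showmount -e 192.168.20.1

# List clients together with the directories they have mounted
showmount -a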

SMB/CIFS checks

# Show all Samba connections
smbstatus

# Show only the shares currently in use
smbstatus -S

# Show a brief overview
smbstatus -b

# Set the Samba (smbd) debug level to 1
smbcontrol smbd debug 1
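
To confirm the change, the current debug level can also be queried (not in the original notes, but standard smbcontrol):

# Query the current smbd debug level
smbcontrol smbd debuglevel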

NFS

Checks

# Show NFS client and server statistics
nfsstat

# Show detailed statistics for all facilities (RPC, NFS, net, etc.)
nfsstat -o all

# Each RPC "program" is registered with specific versions. Use NFS/CTDB logs together with the program number to identify the failing component
rpcinfo -p
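
To probe a single program/version instead of dumping the whole table, something like the following can be used (host and version are placeholders):

# Check whether NFS version 3 responds over TCP on a given host
rpcinfo -t 192.168.20.1 nfs 3

# The same check over UDP
rpcinfo -u 192.168.20.1 nfs 3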

Common

Exports

Use the file /etc/exports to define exports to clients.

# Create the folders before exporting them
mkdir -p /data/exports/customer1000/finance
mkdir -p /data/exports/customer1001/backup

NFSv3 example:

#////////////////////////////////////////////////////////////////////////////////////////////
# Customer1000
/data/exports/customer1000/finance 192.168.20.1(rw,no_root_squash,sync) 192.168.20.2(rw,sync)
#////////////////////////////////////////////////////////////////////////////////////////////
# Customer1001
/data/exports/customer1001/backup 192.168.30.1(rw,no_root_squash) 192.168.30.2(rw,no_root_squash,sync)
# Reload the NFS server to apply changes within /etc/exports
systemctl reload nfs-server
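
Optionally verify the active export table afterwards; exportfs can also re-export changes without touching the service:

# Show the currently exported directories and their options
exportfs -v

# Re-export everything defined in /etc/exports
exportfs -ra
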
Mount

# Install NFS client (Ubuntu)
apt install nfs-common

# Install NFS client (RHEL)
yum install nfs-utils

# Mount the NFS share /data/exports/customer1000/finance from server 192.168.20.1 onto the local path /mnt/nfs/
mount -v -t nfs 192.168.20.1:/data/exports/customer1000/finance /mnt/nfs/
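
To make this mount persistent across reboots, an /etc/fstab entry along these lines can be used (the options shown are a common baseline, adjust as needed):

# /etc/fstab
192.168.20.1:/data/exports/customer1000/finance  /mnt/nfs  nfs  defaults,_netdev  0 0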

Optimizations

Change these values depending on your usage and the available resources on your server.

# /etc/sysctl.d/nfs-tuning.conf
net.core.rmem_max=1048576
net.core.rmem_default=1048576
net.core.wmem_max=1048576
net.core.wmem_default=1048576
net.ipv4.tcp_rmem=4096 1048576 134217728
net.ipv4.tcp_wmem=4096 1048576 134217728
vm.min_free_kbytes=8388608
# Reload above optimization
sysctl -p /etc/sysctl.d/nfs-tuning.conf


Raise the number of NFS threads

# /etc/sysconfig/nfs

# Number of nfs server processes to be started.
# The default is 8.
#RPCNFSDCOUNT=16
RPCNFSDCOUNT=128


Change the number of NFS server threads on the fly

# Set the number of running nfsd threads to 64 immediately
rpc.nfsd 64

# Check the current number of nfsd threads
cat /proc/fs/nfsd/threads
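
On distributions that configure NFS through /etc/nfs.conf instead of /etc/sysconfig/nfs, the thread count can be set there (check which file your nfs-utils version actually reads):

# /etc/nfs.conf
[nfsd]
threads=128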

Ceph

Checks

# Display the running Ceph version
ceph -v

# Check the cluster's health and status
ceph -s

# Watch the cluster's health and status in real time
ceph -w

# Show detailed logs relating to cluster health
ceph health detail

# List all services managed by the Ceph orchestrator
ceph orch ls

# List available storage devices
ceph orch device ls

# Show the detailed specification and status of a specific service
ceph orch ls --service_name osd.all-available-devices --format yaml

# Re-check the status of a host
ceph cephadm check-host storage-3

OSDs

# List all pools
ceph osd lspools

# See the status of all OSDs
ceph osd stat

# List all OSDs
ceph osd tree

# List all OSDs and related information in detail
ceph osd df tree

PGs

# List all Placement Groups
ceph pg dump

# Check the status of Ceph PGs
ceph pg stat
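
When a specific PG is reported unhealthy, it can be inspected directly (the PG id below is a placeholder):

# Query the full state of a single placement group
ceph pg 2.1f query

# List PGs that are stuck (inactive, unclean or stale)
ceph pg dump_stuck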

Authentication

# List all created clients and their permissions
ceph auth ls

# List permissions for a specific client
ceph auth get client.cinder
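
Creating a client works roughly as follows; the client name, pool and capabilities below are placeholders:

# Create a client with read access to the monitors and read/write access to one pool
ceph auth get-or-create client.backup mon 'allow r' osd 'allow rw pool=backups'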

Commands

# Enter the Ceph shell (single cluster)
cephadm shell

Installation

Using Cephadm: https://docs.ceph.com/en/quincy/cephadm/install/

Cephadm

# Create a folder for the cephadm tool
mkdir cephadm
cd cephadm/

# Download cephadm (Quincy)
curl --silent --remote-name --location https://github.com/ceph/ceph/raw/quincy/src/cephadm/cephadm
chmod +x cephadm

# Output help
./cephadm -h

# Install cephadm (Quincy) release
./cephadm add-repo --release quincy
./cephadm install

# Check if cephadm is properly installed
which cephadm
Bootstrap

# Bootstrap the first node and install Ceph
cephadm bootstrap --mon-ip 192.168.100.11

# Check the status of the cluster
cephadm shell -- ceph -s

# List the running Ceph containers
docker ps


## Optional
# Enter the Ceph shell (single cluster)
cephadm shell

# Exit the Ceph shell
exit

# Install common Ceph packages/tools 
cephadm install ceph-common

# Display the Ceph version
ceph -v
Add additional hosts

# On your bootstrapped node, create an SSH key for access to the other hosts
ssh-keygen
cat .ssh/id_rsa.pub

# Add the newly generated key to the authorized_keys file for the relevant user, on the other hosts.

# Copy the Ceph cluster's public key to the other nodes
ssh-copy-id -f -i /etc/ceph/ceph.pub root@storage-2
ssh-copy-id -f -i /etc/ceph/ceph.pub root@storage-3

# Add the other nodes to the cluster and label them as _admin
ceph orch host add storage-2 10.4.20.2 _admin
ceph orch host add storage-3 10.4.20.3 _admin
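
The host list can then be verified with:

# List all hosts known to the orchestrator
ceph orch host ls
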
OSD creation

If you've installed ceph-osd on your host, this step will fail horribly with errors such as:

-1 bluestore(/var/lib/ceph/osd/ceph-1//block) _read_bdev_label failed to open /var/lib/ceph/osd/ceph-1//block: (13) Permission denied
-1 bdev(0x5571d5f69400 /var/lib/ceph/osd/ceph-1//block) open open got: (13) Permission denied
-1 OSD::mkfs: ObjectStore::mkfs failed with error (13) Permission denied
-1  ** ERROR: error creating empty object store in /var/lib/ceph/osd/ceph-0/: (13) Permission denied
 OSD, will rollback changes
# Configure all available storage to be used as OSD storage
ceph orch apply osd --all-available-devices
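
If you prefer to add individual devices instead of everything available, the orchestrator also accepts explicit host:device pairs (hostname and device below are placeholders):

# Create an OSD on one specific device of one host
ceph orch daemon add osd storage-2:/dev/sdb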

# Check for OSD problems
watch ceph -s
watch ceph osd tree

Commands

# Enter the Ceph shell for a specific cluster
sudo /usr/sbin/cephadm shell --fsid asdjwqe-asjd324-asdki321-821asd-asd241-asdn1234- -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin2.keyring

# Give node storage-4, which is already a cluster member, the admin tag
ceph orch host label add storage-4 _admin

# Mount a Ceph filesystem with 3 mon hosts, using a secretfile 
# The secretfile contains ONLY the secret/key
mount -t ceph 192.168.0.11,192.168.0.12,192.168.0.13:/shares/mycustomer/asd8asd8-as8d83-df4mjvjdf /mnt/ceph-storage -o name=customershare-28,secretfile=/etc/ceph/customer28-secretfile
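
A matching /etc/fstab entry for this mount might look as follows (same placeholder share path, client name and secretfile as above):

# /etc/fstab
192.168.0.11,192.168.0.12,192.168.0.13:/shares/mycustomer/asd8asd8-as8d83-df4mjvjdf  /mnt/ceph-storage  ceph  name=customershare-28,secretfile=/etc/ceph/customer28-secretfile,_netdev  0 0
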
Upgrade

Make sure your cluster status is healthy first!

# Upgrade Ceph to a specific version
ceph orch upgrade start --ceph-version 17.2.0

# Check the status of the Ceph upgrade
ceph orch upgrade status

# Stop the Ceph upgrade
ceph orch upgrade stop
RBD-NBD

# List available volumes within the openstackhdd pool
rbd ls openstackhdd

# List all available snapshots for object volume-asd9p12o3-90b2-1238-1209-as980d7213hs, which resides in the openstackhdd pool
rbd snap ls openstackhdd/volume-asd9p12o3-90b2-1238-1209-as980d7213hs
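
Assuming your rbd-nbd version supports the pool/image@snapshot notation, a snapshot can also be mapped directly (the snapshot name below is a placeholder; snapshot mappings are read-only):

# Map a specific snapshot of the volume
rbd-nbd map openstackhdd/volume-asd9p12o3-90b2-1238-1209-as980d7213hs@snap-before-migration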

# Map the volume object to a local block device (/dev/nbdX)
rbd-nbd map openstackhdd/volume-asd9p12o3-90b2-1238-1209-as980d7213hs

# Map the volume object read-only
rbd-nbd map --read-only openstackhdd/volume-asd9p12o3-90b2-1238-1209-as980d7213hs

# List currently mapped objects
rbd-nbd list-mapped

# Check what filesystem and partition the device contains
fdisk -l /dev/nbd1

# Mount the device to a local folder
mount /dev/nbd1p1 /mnt/storage

# Unmount the device from the local folder
umount /mnt/storage


# Two ways to unmap
# Unmap the mapped object by device path
rbd-nbd unmap /dev/nbd2

# Unmap the mapped object by image name
rbd-nbd unmap volume-asd9p12o3-90b2-1238-1209-as980d7213hs

Remove node

# Remove running daemons
ceph orch host drain storage-3

# Remove host from the cluster
ceph orch host rm storage-3

# On storage-3, reboot the node
shutdown -r now

Destroy node

Scorched earth
Only execute these commands if you want to annihilate your node and/or cluster.

# Kill and destroy OSD 0
ceph osd down 0 && ceph osd destroy 0 --force

# Stop Ceph services
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mon.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@crash.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mgr.host-1.xmatqa.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mon.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@node-exporter.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn@prometheus.host-1.service
systemctl stop ceph-asd82asd-asd8-as92-a889-po89xc732cmn.target

# Disable Ceph services
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mon.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@crash.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mgr.host-1.xmatqa.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@mon.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@node-exporter.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn@prometheus.host-1.service
systemctl disable ceph-asd82asd-asd8-as92-a889-po89xc732cmn.target

# Destroy everything (packages, containers, configuration)
ceph-deploy uninstall host-1
ceph-deploy purge host-1
rm -rf /var/lib/ceph

# Check for failed services
systemctl | grep ceph

# Reset them so they disable properly
systemctl reset-failed ceph-asd82asd-asd8-as92-a889-po89xc732cmn@prometheus.host-1.service

# reboot
shutdown -r now

BTRFS

Using LVM

# Install LVM creation tools depending on your OS
yum install lvm2
apt install lvm2

# Check and note the disk you need
fdisk -l

# Create a single partition on /dev/vdb and set its type to Linux LVM (8e)
echo -e "n\np\n1\n\n\nt\n8E\np\nw" | fdisk /dev/vdb
 
# Create the LVM physical volume, volume group and logical volume
pvcreate /dev/vdb1
vgcreate vdb_vg /dev/vdb1
lvcreate -l 100%FREE  -n btrfs vdb_vg
 
# Check the physical volumes and volume groups
pvs
vgs
 
# Create the BTRFS filesystem
mkfs.btrfs /dev/vdb_vg/btrfs
 
# Create a folder for the BTRFS mount
mkdir -p /mnt/btrfs1

# Mount the BTRFS filesystem
mount -t btrfs /dev/vdb_vg/btrfs /mnt/btrfs1/
 
# Modify fstab so the filesystem gets mounted automatically on boot
cat << 'EOF' >> /etc/fstab
/dev/mapper/vdb_vg-btrfs  /mnt/btrfs1    btrfs     defaults        0 0
EOF
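
To verify both the fstab entry and the filesystem afterwards, something along these lines can be run:

# Test the fstab entry without rebooting
umount /mnt/btrfs1 && mount -a

# Show the BTRFS filesystem and its devices
btrfs filesystem show /mnt/btrfs1
df -hT /mnt/btrfs1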