Adds microceph client configuration support for rbd_cache #203
name: Tests
on:
  - push
  - pull_request
jobs:
  build-microceph:
    name: Build microceph snap
    runs-on: ubuntu-22.04
    env:
      SNAPCRAFT_BUILD_ENVIRONMENT: "lxd"
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Clear FORWARD firewall rules
        run: tests/scripts/actionutils.sh cleaript
      - name: Install dependencies
        run: |
          tests/scripts/actionutils.sh setup_lxd
          sudo snap install snapcraft --classic
          snap list
      - name: Build snaps
        run: snapcraft
      - name: Upload snap artifact
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: snaps
          path: "*.snap"
          retention-days: 5
  unit-tests:
    name: Run unit tests
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Install dependencies
        run: |
          sudo add-apt-repository ppa:dqlite/dev
          sudo apt-get update
          sudo apt-get install -y libdqlite-dev golang-go
      - name: Run unit tests
        run: |
          cd microceph
          make check-unit
  single-system-tests:
    name: Single node with encryption
    runs-on: ubuntu-22.04
    needs: build-microceph
    steps:
      - name: Download snap
        uses: actions/download-artifact@v3
        with:
          name: snaps
          path: /home/runner
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Free disk
        run: tests/scripts/actionutils.sh free_runner_disk
      - name: Install and setup
        run: |
          tests/scripts/actionutils.sh install_microceph
          set -uex
          # Verify metadata.yaml
          meta=/var/snap/microceph/current/conf/metadata.yaml
          cat $meta
          grep -q ceph-version $meta
          # Verify health and auto crush rule
          sudo microceph.ceph health | grep -q "OSD count 0 < osd_pool_default_size 3"
          sudo microceph.ceph osd crush rule ls | grep -F microceph_auto_osd
      - name: Add OSD with failure
        run: |
          set -eux
          loop_file="$(sudo mktemp -p /mnt XXXX.img)"
          sudo truncate -s 1G "${loop_file}"
          loop_dev="$(sudo losetup --show -f "${loop_file}")"
          minor="${loop_dev##/dev/loop}"
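          # Give the loop device a disk-like node name so microceph will treat
          # it as a physical disk; block major 7 is the Linux loop driver.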
          sudo mknod -m 0660 "/dev/sdi21" b 7 "${minor}"
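          # The encrypted add below is expected to fail (no dm-crypt support
          # here); the check asserts that the failure is actually reported.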
          set +e
          rc=0
          sudo microceph disk add --wipe "/dev/sdi21" --encrypt || rc="$?"
          if [[ $rc -eq 0 ]] ; then echo "FDE should fail without dmcrypt: $rc"; exit 1; fi
      - name: Add OSDs
        run: tests/scripts/actionutils.sh add_encrypted_osds
      - name: Enable RGW
        run: tests/scripts/actionutils.sh enable_rgw
      - name: Run system tests
        run: |
          set -eux
          # Show ceph's status
          sudo microceph.ceph status
          # Ceph status expectations for a single node cluster
          test_single() {
              local status="$1"
              ( echo "$status" | grep -qF "mon: 1 daemons" ) || { echo fail ; return ; }
              ( echo "$status" | grep -qE "mgr: .*active, " ) || { echo fail ; return ; }
              ( echo "$status" | grep -qF "osd: 3 osds" ) || { echo fail ; return ; }
              ( echo "$status" | grep -qF "rgw: 1 daemon" ) || { echo fail ; return ; }
              echo ok
          }
          # Confirm ceph is healthy and services started
          res=$( test_single "$( sudo microceph.ceph status )" )
          [ "$res" = "ok" ] || { echo "single node status fails" ; exit 1 ; }
          # Check health after restart
          sudo snap stop microceph
          sudo snap start microceph
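          # Give services up to 16 x 15s (4 minutes) to settle after the restart.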
          for i in $(seq 1 16); do
              status=$( sudo microceph.ceph status )
              echo "$status"
              res=$( test_single "$status" )
              if [ "$res" = "ok" ] ; then
                  echo "Single tests pass"
                  break
              else
                  echo "Single tests fail, retry $i/16"
                  sleep 15
              fi
          done
          sleep 1
          pgrep ceph-osd || { echo "No ceph-osd process found" ; exit 1; }
      - name: Exercise RGW
        run: |
          set -eux
          sudo microceph.ceph status
          sudo systemctl status snap.microceph.rgw
          sudo microceph.radosgw-admin user create --uid=test --display-name=test
          sudo microceph.radosgw-admin key create --uid=test --key-type=s3 --access-key fooAccessKey --secret-key fooSecretKey
          sudo apt-get -qq install s3cmd
          echo hello-radosgw > ~/test.txt
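          # End-to-end S3 smoke test: make a bucket, upload the file with a
          # public ACL (-P), then fetch it anonymously over plain HTTP.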
          s3cmd --host localhost --host-bucket="localhost/%(bucket)" --access_key=fooAccessKey --secret_key=fooSecretKey --no-ssl mb s3://testbucket
          s3cmd --host localhost --host-bucket="localhost/%(bucket)" --access_key=fooAccessKey --secret_key=fooSecretKey --no-ssl put -P ~/test.txt s3://testbucket
          curl -s http://localhost/testbucket/test.txt | grep -F hello-radosgw
      - name: Test Cluster Config
        run: |
          set -eux
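          # Use the preferred source address of the default route as the cluster IP.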
          cip=$(ip -4 -j route | jq -r '.[] | select(.dst | contains("default")) | .prefsrc' | tr -d '[:space:]')
          # pre config set timestamp for service age
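          # ActiveEnterTimestampMonotonic increases whenever the unit (re)starts,
          # so comparing snapshots taken before and after detects a restart.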
          ts=$(sudo systemctl show --property ActiveEnterTimestampMonotonic snap.microceph.osd.service | cut -d= -f2)
          # set config
          sudo microceph cluster config set cluster_network $cip/8 --wait
          # post config set timestamp for service age
          ts2=$(sudo systemctl show --property ActiveEnterTimestampMonotonic snap.microceph.osd.service | cut -d= -f2)
          # Check config output
          output=$(sudo microceph cluster config get cluster_network | grep -cim1 'cluster_network')
          if [[ $output -lt 1 ]] ; then echo "config check failed: $output"; exit 1; fi
          # Check service restarted
          if [ $ts2 -le $ts ]; then echo "config check failed: TS1: $ts TS2: $ts2"; exit 1; fi
          # reset config
          sudo microceph cluster config reset cluster_network --wait
          # post config reset timestamp for service age
          ts3=$(sudo systemctl show --property ActiveEnterTimestampMonotonic snap.microceph.osd.service | cut -d= -f2)
          # Check service restarted
          if [ $ts3 -le $ts2 ]; then echo "config check failed: TS2: $ts2 TS3: $ts3"; exit 1; fi
  multi-node-tests:
    name: Multi node testing
    runs-on: ubuntu-22.04
    needs: build-microceph
    steps:
      - name: Download snap
        uses: actions/download-artifact@v3
        with:
          name: snaps
          path: /home/runner
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Clear FORWARD firewall rules
        run: tests/scripts/actionutils.sh cleaript
      - name: Free disk
        run: tests/scripts/actionutils.sh free_runner_disk
      - name: Install dependencies
        run: tests/scripts/actionutils.sh setup_lxd
      - name: Create containers with loopback devices
        run: tests/scripts/actionutils.sh create_containers
      - name: Install local microceph snap
        run: tests/scripts/actionutils.sh install_multinode
      - name: Bootstrap
        run: tests/scripts/actionutils.sh bootstrap_head
      - name: Setup cluster
        run: tests/scripts/actionutils.sh cluster_nodes
      - name: Add 2 OSDs
        run: |
          for c in node-wrk1 node-wrk2 ; do
              tests/scripts/actionutils.sh add_osd_to_node $c
          done
          lxc exec node-head -- sh -c "microceph.ceph -s"
      - name: Test failure domain scale up
        run: |
          set -uex
          # We still have the OSD-level failure domain
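          # (crush rule 1 = microceph_auto_osd, rule 2 = microceph_auto_host)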
          lxc exec node-head -- sh -c "sudo microceph.ceph config get mon osd_pool_default_crush_rule" | grep -Fx 1
          # Add a 3rd OSD, should switch to host failure domain
          tests/scripts/actionutils.sh add_osd_to_node node-head
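          # Poll until ceph reports more than 2 OSDs in (up to 8 x 2s).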
          for i in $(seq 1 8); do
              res=$( ( lxc exec node-head -- sh -c 'sudo microceph.ceph -s | grep -F osd: | sed -E "s/.* ([[:digit:]]*) in .*/\1/"' ) || true )
              if [[ $res -gt 2 ]] ; then
                  echo "Found >2 OSDs"
                  break
              else
                  echo -n '.'
                  sleep 2
              fi
          done
          sleep 1
          lxc exec node-head -- sh -c "sudo microceph.ceph -s"
          # Now default to host rule
          lxc exec node-head -- sh -c "sudo microceph.ceph config get mon osd_pool_default_crush_rule" | grep -Fx 2
      - name: Test 3 osds present
        run: |
          set -uex
          lxc exec node-head -- sh -c "microceph.ceph -s" | grep -E "osd: 3 osds: 3 up.*3 in"
      - name: Test crush rules
        run: |
          set -uex
          lxc exec node-head -- sh -c "microceph.ceph osd crush rule ls" | grep -F microceph_auto_host
          lxc exec node-head -- sh -c "microceph.ceph osd pool ls detail" | grep -F "crush_rule 2"
      - name: Add another OSD
        run: |
          tests/scripts/actionutils.sh add_osd_to_node node-wrk3
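          # Poll as above, this time waiting for the fourth OSD to report in.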
          for i in $(seq 1 8); do
              res=$( ( lxc exec node-head -- sh -c 'sudo microceph.ceph -s | grep -F osd: | sed -E "s/.* ([[:digit:]]*) in .*/\1/"' ) || true )
              if [[ $res -gt 3 ]] ; then
                  echo "Found >3 OSDs"
                  break
              else
                  echo -n '.'
                  sleep 2
              fi
          done
      - name: Remove OSD again
        run: |
          set -uex
          lxc exec node-wrk3 -- sh -c "microceph disk remove 3"
          lxc exec node-head -- sh -c "microceph.ceph -s" | grep -E "osd: 3 osds: 3 up.*3 in"
      - name: Test migrate services
        run: |
          set -uex
          lxc exec node-head -- sh -c "microceph cluster migrate node-wrk1 node-wrk3"
          sleep 2
          lxc exec node-head -- sh -c "microceph status" | grep -F -A 1 node-wrk1 | grep -E "^ Services: osd$"
          lxc exec node-head -- sh -c "microceph status" | grep -F -A 1 node-wrk3 | grep -E "^ Services: mds, mgr, mon$"