Skip to content

Commit

Permalink
[ceph-osd] Update directory-based OSD deployment for image changes
Browse files Browse the repository at this point in the history
Directory-based OSDs are failing to deploy because 'python' has
been replaced with 'python3' in the image. This change updates the
python commands to use python3 instead.

There is also a dependency on forego, which has been removed from
the image. This change also modifies the deployment so that it
doesn't depend on forego.

Ownership of the OSD keyring file has also been changed so that it
is owned by the 'ceph' user, and the ceph-osd process now uses
--setuser and --setgroup to run as the same user.

Change-Id: If825df283bca0b9f54406084ac4b8f958a69eab7
  • Loading branch information
st053q committed Mar 29, 2021
1 parent 1f52a1c commit 131ea21
Show file tree
Hide file tree
Showing 4 changed files with 15 additions and 13 deletions.
2 changes: 1 addition & 1 deletion ceph-osd/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,6 @@ apiVersion: v1
appVersion: v1.0.0
description: OpenStack-Helm Ceph OSD
name: ceph-osd
version: 0.1.19
version: 0.1.20
home: https://github.com/ceph/ceph
...
17 changes: 9 additions & 8 deletions ceph-osd/templates/bin/osd/_directory.sh.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -56,22 +56,17 @@ if [[ -n "$(find /var/lib/ceph/osd -type d -empty ! -name "lost+found")" ]]; th
fi
# create the folder and own it
mkdir -p "${OSD_PATH}"
chown "${CHOWN_OPT[@]}" ceph. "${OSD_PATH}"
echo "created folder ${OSD_PATH}"
# write the secret to the osd keyring file
ceph-authtool --create-keyring ${OSD_PATH%/}/keyring --name osd.${OSD_ID} --add-key ${OSD_SECRET}
chown -R "${CHOWN_OPT[@]}" ceph. "${OSD_PATH}"
OSD_KEYRING="${OSD_PATH%/}/keyring"
# init data directory
ceph-osd -i ${OSD_ID} --mkfs --osd-uuid ${UUID} --mkjournal --osd-journal ${OSD_JOURNAL} --setuser ceph --setgroup ceph
# add the osd to the crush map
crush_location
fi

# create the directory and an empty Procfile
mkdir -p /etc/forego/"${CLUSTER}"
echo "" > /etc/forego/"${CLUSTER}"/Procfile


for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
# NOTE(gagehugo): Writing the OSD_ID to tmp for logging
echo "${OSD_ID}" > /tmp/osd-id
Expand Down Expand Up @@ -99,7 +94,13 @@ for OSD_ID in $(ls /var/lib/ceph/osd | sed 's/.*-//'); do
fi

crush_location
echo "${CLUSTER}-${OSD_ID}: /usr/bin/ceph-osd --cluster ${CLUSTER} -f -i ${OSD_ID} --osd-journal ${OSD_JOURNAL} -k ${OSD_KEYRING}" | tee -a /etc/forego/"${CLUSTER}"/Procfile
done

exec /usr/local/bin/forego start -f /etc/forego/"${CLUSTER}"/Procfile
# Launch the directory-based OSD in the foreground, replacing this shell so
# ceph-osd becomes the container's main process and receives signals directly.
# NOTE(review): OSD_ID/OSD_JOURNAL/OSD_KEYRING carry the values from the last
# iteration of the loop above — this assumes exactly one directory-based OSD
# per container; confirm against the chart design.
#
# Record the PID before exec ($$ is preserved across exec).  The previous
# version wrote "$!" — the PID of the last background job, which does not
# exist here — and, because the line continuation after "-k ${OSD_KEYRING}"
# was missing, "--setuser ceph" was parsed as a separate (failing) command,
# so the OSD never dropped privileges.
echo $$ > /run/ceph-osd.pid

exec /usr/bin/ceph-osd \
  --cluster ${CLUSTER} \
  -f \
  -i ${OSD_ID} \
  --osd-journal ${OSD_JOURNAL} \
  -k ${OSD_KEYRING} \
  --setuser ceph \
  --setgroup disk
8 changes: 4 additions & 4 deletions ceph-osd/templates/bin/osd/ceph-disk/_common.sh.tpl
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,11 @@ export PS4='+${BASH_SOURCE:+$(basename ${BASH_SOURCE}):${LINENO}:}${FUNCNAME:+${
: "${OSD_JOURNAL_SIZE:=$(awk '/^osd_journal_size/{print $3}' ${CEPH_CONF}.template)}"
: "${OSD_WEIGHT:=1.0}"

eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
eval CRUSH_FAILURE_DOMAIN_TYPE=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain"]))')
eval CRUSH_FAILURE_DOMAIN_NAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_name"]))')
eval CRUSH_FAILURE_DOMAIN_BY_HOSTNAME=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["failure_domain_by_hostname"]))')
eval CRUSH_FAILURE_DOMAIN_FROM_HOSTNAME_MAP=$(cat /etc/ceph/storage.json | jq '.failure_domain_by_hostname_map."'$HOSTNAME'"')
eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')
eval DEVICE_CLASS=$(cat /etc/ceph/storage.json | python3 -c 'import sys, json; data = json.load(sys.stdin); print(json.dumps(data["device_class"]))')

if [[ $(ceph -v | egrep "octopus|nautilus|mimic|luminous" > /dev/null 2>&1; echo $?) -ne 0 ]]; then
echo "ERROR- need Luminous/Mimic/Nautilus/Octopus release"
Expand Down
1 change: 1 addition & 0 deletions releasenotes/notes/ceph-osd.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -20,4 +20,5 @@ ceph-osd:
- 0.1.17 Fix a bug with DB orphan volume removal
- 0.1.18 Uplift from Nautilus to Octopus release
- 0.1.19 Update rbac api version
- 0.1.20 Update directory-based OSD deployment for image changes
...

0 comments on commit 131ea21

Please sign in to comment.