feat(script): Avoid snap name collisions, always append counter to make them unique (#1)
parent ff963aa844
commit 917a71ced4
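The heart of the change: snapshot names now carry a numeric counter field, and if any proposed name already exists the whole set of names is regenerated with the next counter value. The sketch below illustrates that retry loop in isolation; it is not code from the script, and the dataset list, the existing array and the name_for helper are invented for illustration (the real script checks candidates against the output of `zfs list -t all -oname -H`).

#!/usr/bin/env bash
# Illustrative sketch only: retry name generation with an incrementing
# counter until none of the proposed names collides with an existing one.
datasets=('pool/ROOT' 'pool/home')                  # hypothetical datasets
existing=('pool/ROOT@snap:2024-01-01:1:op:inst')    # hypothetical existing snapshot

name_for () {                                       # hypothetical name builder
    local dataset="${1}" counter="${2}"
    printf -- '%s@snap:2024-01-01:%s:op:inst' "${dataset}" "${counter}"
}

planned=()
counter='0'
until [[ "${#planned[@]}" -gt '0' ]]; do
    counter="$(( counter + 1 ))"
    for dataset in "${datasets[@]}"; do
        planned+=("$(name_for "${dataset}" "${counter}")")
    done
    for name in "${planned[@]}"; do
        for existing_name in "${existing[@]}"; do
            if [[ "${name}" == "${existing_name}" ]]; then
                planned=()                          # collision: retry with next counter
                break 2
            fi
        done
    done
done

printf -- '%s\n' "${planned[@]}"

With the hypothetical data above, counter 1 collides on pool/ROOT, so the loop retries and settles on counter 2 for both datasets, mirroring how generate_snap_names drives test_snap_names_for_validity in the diff below.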
@@ -148,39 +148,6 @@ function write_pkg_list_oneline () {
     fi
 }
 
-function find_max_dataset_name_length () {
-    local longest_op_suffix op_suffix_string
-    longest_op_suffix='0'
-    for op_suffix in "${snap_op_installation_suffix}" "${snap_op_remove_suffix}" "${snap_op_upgrade_suffix}"; do
-        if [[ "${#op_suffix}" -gt "${longest_op_suffix}" ]]; then
-            longest_op_suffix="${#op_suffix}"
-        fi
-    done
-    op_suffix_string="$(head -c "${longest_op_suffix}" '/dev/zero' | tr '\0' '_')"
-
-    local longest_sev_suffix sev_suffix_string
-    longest_sev_suffix='0'
-    for sev_suffix in "${snaps_trivial_suffix}" "${snaps_important_suffix}"; do
-        if [[ "${#sev_suffix}" -gt "${longest_sev_suffix}" ]]; then
-            longest_sev_suffix="${#sev_suffix}"
-        fi
-    done
-    sev_suffix_string="$(head -c "${longest_sev_suffix}" '/dev/zero' | tr '\0' '_')"
-
-    local dataset_name_no_pkgs
-    max_dataset_name_length='0'
-    for dataset in "${snappable_datasets[@]}"; do
-        dataset_name_no_pkgs="${dataset}"'@'"${snap_name_prefix}${snap_field_separator}${date_string}${snap_field_separator}"'op:'"${op_suffix_string}${snap_field_separator}"'sev:'"${sev_suffix_string}${snap_field_separator}"'pkgs:'
-        if [[ "${#dataset_name_no_pkgs}" -gt "${max_dataset_name_length}" ]]; then
-            max_dataset_name_length="${#dataset_name_no_pkgs}"
-        fi
-    done
-
-    if [[ "${max_dataset_name_length}" -gt "${max_zfs_snapshot_name_length}" ]]; then
-        pprint 'warn' 'Snapshot name would exceed ZFS '"${max_zfs_snapshot_name_length}"' chars limit. Skipping snapshots ...' '0'
-    fi
-}
-
 function trim_single_remaining_package_name () {
     local pkg_name
     pkg_name="${shorter_pkg_list}"
@@ -201,7 +168,7 @@ function trim_single_remaining_package_name () {
 
 function trim_pkg_list_oneline () {
     local available_pkg_list_length
-    available_pkg_list_length="$((${max_zfs_snapshot_name_length} - ${max_dataset_name_length}))"
+    available_pkg_list_length="${1}"
     if [[ "${available_pkg_list_length}" -lt "${pkgs_list_max_length}" ]]; then
         # If we have fewer characters available before hitting the
         # ZFS internal maximum snapshot name length than the user
@@ -236,76 +203,111 @@ function trim_pkg_list_oneline () {
     trimmed_pkg_list_oneline="${shorter_pkg_list}"
 }
 
-function omit_duplicate_snaps () {
-    local existing_snaps
-    local -a unneeded_snaps
-    existing_snaps="$(zfs list -t all -oname -H)"
-
-    for planned_snap in "${planned_snaps[@]}"; do
-        if grep -Piq -- '^'"${planned_snap}"'$' <<<"${existing_snaps}"; then
-            unneeded_snaps+=("${planned_snap}")
-        else
-            needed_snaps+=("${planned_snap}")
-        fi
-    done
-
-    if [[ "${#unneeded_snaps[@]}" -gt '0' ]]; then
-        if [[ "${do_dry_run}" == 'true' ]]; then
-            pprint 'warn' 'Dry-run, ZFS snapshot skipped (same operation exists at '"${date_string}"'):'
-        else
-            pprint 'warn' 'ZFS snapshot skipped (same operation exists at '"${date_string}"'):'
-        fi
-        for unneeded_snap in "${unneeded_snaps[@]}"; do
-            pprint 'warn' ' '"${unneeded_snap}"
-        done
-    fi
+function test_snap_names_for_validity () {
+    local snap_counter max_dataset_name_length trimmed_pkg_list_oneline dataset_name_no_pkgs dataset_name_with_pkgs
+    snap_counter="${1}"
+    max_dataset_name_length='0'
+    for dataset in "${snappable_datasets[@]}"; do
+        # Begin building snapshot name
+        dataset_name_no_pkgs="${dataset}"'@'"${snap_name_prefix}${snap_field_separator}${date_string}"
+
+        # Append counter
+        dataset_name_no_pkgs="${dataset_name_no_pkgs}${snap_field_separator}${snap_counter}"
+
+        # Append operation, severity and packages fields
+        dataset_name_no_pkgs="${dataset_name_no_pkgs}${snap_field_separator}"'op:'"${conf_op_suffix}${snap_field_separator}"'sev:'"${severity}"
+
+        # Update the longest snapshot name seen so far. We add an automatic
+        # +6 to string length (or more exactly ${#snap_field_separator}+5)
+        # to account for the fact that by default the dataset will end in
+        # the separator string "${snap_field_separator}" plus 'pkgs:' for a
+        # total of 6 additional characters. If these additional characters
+        # cause us to reach or go over the ZFS dataset name length limit
+        # there's no point in attempting to add package names to snapshots.
+        # We calculate as if these additional characters existed and we add
+        # dataset names to our planned_snaps array as if they don't.
+        if [[ "$(( ${#dataset_name_no_pkgs}+${#snap_field_separator}+5 ))" -gt "${max_dataset_name_length}" ]]; then
+            max_dataset_name_length="$(( ${#dataset_name_no_pkgs}+${#snap_field_separator}+5 ))"
+        fi
+
+        planned_snaps+=("${dataset_name_no_pkgs}")
+    done
+
+    # Abort if this is longer than what ZFS allows
+    if [[ "${max_dataset_name_length}" -gt "${max_zfs_snapshot_name_length}" ]]; then
+        pprint 'err' 'Snapshot name would exceed ZFS '"${max_zfs_snapshot_name_length}"' chars limit. Aborting ...' '1'
+    fi
+
+    if [[ "${max_dataset_name_length}" -eq "${max_zfs_snapshot_name_length}" ]]; then
+        for planned_snap in "${planned_snaps[@]}"; do
+            if grep -Piq -- '^'"${planned_snap}"'$' <<<"${existing_snaps}"; then
+                # This snapshot name already exists. Unset array and break.
+                # Try again with next higher counter suffix.
+                unset planned_snaps[@]
+                break
+            fi
+        done
+        # If planned_snaps array still has members we take the snapshot
+        # names already generated. If not we return without array in which
+        # case this function will run again with the snapshot counter
+        # incremented by one. Maximum length seen across all snapshot names
+        # is exactly the ZFS snapshot character limit. We won't be able to
+        # add packages to snapshot names but they will all fit perfectly.
+        # This is good enough.
+        return
+    else
+        # We have enough room to add package names.
+        local available_pkg_list_length
+        available_pkg_list_length="${pkgs_list_max_length}"
+        if [[ "${max_dataset_name_length}" -gt $(( max_zfs_snapshot_name_length - pkgs_list_max_length )) ]]; then
+            available_pkg_list_length="$(( max_zfs_snapshot_name_length - max_dataset_name_length ))"
+        fi
+        trim_pkg_list_oneline "${available_pkg_list_length}"
+        for planned_snap_id in "${!planned_snaps[@]}"; do
+            planned_snaps["${planned_snap_id}"]="${planned_snaps[${planned_snap_id}]}${snap_field_separator}"'pkgs:'"${trimmed_pkg_list_oneline}"
+            if grep -Piq -- '^'"${planned_snaps[${planned_snap_id}]}"'$' <<<"${existing_snaps}"; then
+                # This snapshot name already exists. Unset array and break.
+                # Try again with next higher counter suffix.
+                unset planned_snaps[@]
+                break
+            fi
+        done
+    fi
 }
 
-function do_snaps () {
-    local snap_name snap_return_code
-    local -a planned_snaps
-    for snappable_dataset_id in "${!snappable_datasets[@]}"; do
-        snap_name="${snappable_datasets[${snappable_dataset_id}]}"'@'"${snap_name_prefix}${snap_field_separator}${date_string}${snap_field_separator}"'op:'"${conf_op_suffix}${snap_field_separator}"'sev:'"${severity}"
-        # If we have at least one pkg name character to append we do
-        # so now but if we're not even allowed to append a single
-        # character we might as well skip the 'pkgs' field
-        # altogether.
-        if [[ "${pkgs_list_max_length}" -ge '1' ]]; then
-            snap_name="${snap_name}${snap_field_separator}"'pkgs:'"${trimmed_pkg_list_oneline}"
-        fi
-        planned_snaps["${snappable_dataset_id}"]="${snap_name}"
-    done
-    local -a needed_snaps
-    omit_duplicate_snaps
-    if [[ "${#needed_snaps[@]}" -gt '0' ]]; then
-        if [[ "${do_dry_run}" == 'true' ]]; then
-            pprint 'info' 'Dry-run, pretending to atomically do ZFS snapshot:'
-            for needed_snap in "${needed_snaps[@]}"; do
-                pprint 'info' ' '"${needed_snap}"
-            done
-        else
-            zfs snapshot "${needed_snaps[@]}"
-            snap_return_code="${?}"
-            if [[ "${snap_return_code}" -eq '0' ]]; then
-                successfully_snapped_datasets=("${snappable_datasets[@]}")
-                pprint 'info' 'ZFS snapshot atomically done:'
-                for needed_snap in "${needed_snaps[@]}"; do
-                    pprint 'info' ' '"${needed_snap}"
-                done
-            else
-                pprint 'warn' 'ZFS snapshot failed:'
-                for needed_snap in "${needed_snaps[@]}"; do
-                    pprint 'warn' ' '"${needed_snap}"
-                done
-            fi
-        fi
-    else
-        if [[ "${do_dry_run}" == 'true' ]]; then
-            pprint 'warn' 'Dry-run, no ZFS snapshot left to do after accounting for identical operations at '"${date_string}"'.'
-        else
-            pprint 'warn' 'No ZFS snapshot left to do after accounting for identical operations at '"${date_string}"'.'
-        fi
-    fi
+function generate_snap_names () {
+    local snap_counter existing_snaps
+    snap_counter='0'
+    existing_snaps="$(zfs list -t all -oname -H)"
+    until [[ "${#planned_snaps[@]}" -gt '0' ]]; do
+        snap_counter="$(( snap_counter+1 ))"
+        test_snap_names_for_validity "${snap_counter}"
+    done
+}
+
+function do_snaps () {
+    local snap_return_code
+    if [[ "${do_dry_run}" == 'true' ]]; then
+        pprint 'info' 'Dry-run, pretending to atomically do ZFS snapshot:'
+        for planned_snap in "${planned_snaps[@]}"; do
+            pprint 'info' ' '"${planned_snap}"
+        done
+    else
+        zfs snapshot "${planned_snaps[@]}"
+        snap_return_code="${?}"
+        if [[ "${snap_return_code}" -eq '0' ]]; then
+            successfully_snapped_datasets=("${snappable_datasets[@]}")
+            pprint 'info' 'ZFS snapshot atomically done:'
+            for planned_snap in "${planned_snaps[@]}"; do
+                pprint 'info' ' '"${planned_snap}"
+            done
+        else
+            pprint 'warn' 'ZFS snapshot failed:'
+            for planned_snap in "${planned_snaps[@]}"; do
+                pprint 'warn' ' '"${planned_snap}"
+            done
+        fi
+    fi
 }
 
 function get_snaps_in_cur_sev () {
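A note on the length bookkeeping in the hunk above: the reserved headroom is the field separator plus the literal pkgs: label, i.e. ${#snap_field_separator} + 5 characters, which is 6 with a one-character separator. A tiny illustration, assuming a ':' separator (the real value comes from the script's configuration):

# Illustration only, assuming snap_field_separator=':' as in the comment above.
snap_field_separator=':'
printf -- 'reserved headroom: %d chars\n' "$(( ${#snap_field_separator} + 5 ))"   # 'pkgs:' is 5 chars
# -> reserved headroom: 6 chars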
@@ -392,12 +394,10 @@ function main () {
     local unabridged_pkg_list_oneline
     write_pkg_list_oneline
 
-    local date_string max_dataset_name_length
+    local date_string
+    local -a planned_snaps
     date_string="$($([[ "${snap_timezone}" ]] && printf -- 'export TZ='"${snap_timezone}"); date +"${snap_date_format}")"
-    find_max_dataset_name_length
-
-    local trimmed_pkg_list_oneline
-    trim_pkg_list_oneline
+    generate_snap_names
 
     local -a successfully_snapped_datasets
     do_snaps
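For reference, a snapshot name is now assembled from the dataset, the prefix, the date, the new counter, and the op, sev and (when it fits) pkgs fields, all joined by ${snap_field_separator}. The snippet below only shows the layout with invented values; the real prefix, separator, date format, op suffix and package list come from the script's configuration and helpers.

# Hypothetical values for illustration only.
dataset='rpool/ROOT/debian'
snap_name_prefix='apt-snap'
snap_field_separator=':'
date_string='2024-05-01T12.00.00'
snap_counter='2'
conf_op_suffix='inst'
severity='important'
trimmed_pkg_list_oneline='curl_vim'

printf -- '%s\n' "${dataset}@${snap_name_prefix}${snap_field_separator}${date_string}${snap_field_separator}${snap_counter}${snap_field_separator}op:${conf_op_suffix}${snap_field_separator}sev:${severity}${snap_field_separator}pkgs:${trimmed_pkg_list_oneline}"
# -> rpool/ROOT/debian@apt-snap:2024-05-01T12.00.00:2:op:inst:sev:important:pkgs:curl_vim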