2650 lines
81 KiB
Bash
Executable File
2650 lines
81 KiB
Bash
Executable File
#!/usr/bin/env bash

# shellcheck disable=SC2154

# Gitea organisation that all recipe/plugin repositories are cloned from.
# NOTE(review): already ends with a trailing slash — require_plugin appends
# another ("$GIT_URL/$PLUGIN.git"), producing "//"; git tolerates it.
GIT_URL="https://git.autonomic.zone/coop-cloud/"

# Catalogue of known recipes and their versions, served as JSON.
ABRA_APPS_URL="https://apps.coopcloud.tech"

# Local state directory; overridable from the environment.
ABRA_DIR="${ABRA_DIR:-$HOME/.abra}"

ABRA_VERSION="9.0.0"

# Where `abra app <app> backup` archives are written.
ABRA_BACKUP_DIR="${ABRA_BACKUP_DIR:-$ABRA_DIR/backups}"

# Vendored helper binaries (jq, yq, ...) are downloaded here.
ABRA_VENDOR_DIR="$ABRA_DIR/vendor"

# Cached local copy of $ABRA_APPS_URL.
ABRA_APPS_JSON="${ABRA_DIR}/apps.json"
|
|
|
|
#######################################
# Global help
#######################################

# WARNING: docopt.sh embeds a digest of this usage text (see `digest=` in the
# generated parser below) and slices it by byte offsets — any edit to DOC
# requires re-running `docopt.sh abra`, otherwise parsing aborts at startup.
# NOTE(review): "app <app> restore" is listed twice (once with
# (<service>|--all), once with <service> [<backup file>]) — likely only the
# first form is reachable; confirm before regenerating.
DOC="
The Co-op Cloud utility belt 🎩🐇

____ ____ _ _
/ ___|___ ___ _ __ / ___| | ___ _ _ __| |
| | / _ \ _____ / _ \| '_ \ | | | |/ _ \| | | |/ _' |
| |__| (_) |_____| (_) | |_) | | |___| | (_) | |_| | (_| |
\____\___/ \___/| .__/ \____|_|\___/ \__,_|\__,_|
|_|

Usage:
abra [options] app (list|ls) [--status] [--server=<server>] [--type=<type>]
abra [options] app new [--server=<server>] [--domain=<domain>] [--app-name=<app_name>] [--pass] [--secrets] <type>
abra [options] app <app> backup (<service>|--all)
abra [options] app <app> deploy [--update] [--force] [--fast] [--chaos] [<version>]
abra [options] app <app> check
abra [options] app <app> version
abra [options] app <app> config
abra [options] app <app> cp <src> <dst>
abra [options] app <app> logs [<service>]
abra [options] app <app> ps
abra [options] app <app> restore (<service>|--all)
abra [options] app <app> (rm|delete) [--volumes] [--secrets]
abra [options] app <app> restore <service> [<backup file>]
abra [options] app <app> run [--no-tty] [--user=<user>] <service> <args>...
abra [options] app <app> rollback [<version>]
abra [options] app <app> secret generate (<secret> <version>|--all) [<cmd>] [--pass]
abra [options] app <app> secret insert <secret> <version> <data> [--pass]
abra [options] app <app> secret (rm|delete) (<secret>|--all) [--pass]
abra [options] app <app> undeploy
abra [options] app <app> volume ls
abra [options] app <app> volume (rm|delete) (<volume>|--all)
abra [options] app <app> <command> [<args>...]
abra [options] recipe ls
abra [options] recipe create <recipe>
abra [options] recipe <recipe> release [--force] [--bump]
abra [options] recipe <recipe> versions
abra [options] server add <host> [<user>] [<port>]
abra [options] server new <provider> -- <args>
abra [options] server (list|ls)
abra [options] server <host> rm
abra [options] server <host> init
abra [options] server <host> apps [--status]
abra [options] upgrade [--dev]
abra [options] version
abra [options] doctor
abra [options] help [<subcommands>...]
abra [options]

Options:
-e, --env=<path> Environment variables to load
-h, --help Show this message and exit
-s, --stack=<stack> Name of the target stack
-C, --skip-check Don't verify app variables
-U, --skip-update Don't pull latest app definitions
-v, --verbose Show INFO messages
-d, --debug Show DEBUG messages
-b, --branch=<branch> Git branch to use while cloning app repos
-n, --no-prompt Don't prompt for input and run non-interactively

See 'abra help <subcommands>...' to read about a specific subcommand.
"
|
|
|
|
# docopt parser below, refresh this parser with `docopt.sh abra`
# shellcheck disable=2016,1075,2154
# NOTE(review): everything down to the matching "docopt parser above" marker
# is machine-generated by docopt.sh from $DOC — do not hand-edit; re-run
# `docopt.sh abra` instead. The function validates $DOC against the embedded
# digest, parses "$@" against the usage grammar (node_* functions) and prints
# shell code on stdout which the caller is expected to eval; the DOCOPT_PREFIX
# env var (set to "abra" by the caller) namespaces the resulting variables,
# e.g. abra___verbose, abra__app_ etc. Beware: it runs `set -e` as a side
# effect.
docopt() { parse() { if ${DOCOPT_DOC_CHECK:-true}; then local doc_hash
if doc_hash=$(printf "%s" "$DOC" | (sha256sum 2>/dev/null || shasum -a 256)); then
if [[ ${doc_hash:0:5} != "$digest" ]]; then
stderr "The current usage doc (${doc_hash:0:5}) does not match \
what the parser was generated with (${digest})
Run \`docopt.sh\` to refresh the parser."; _return 70; fi; fi; fi
local root_idx=$1; shift; argv=("$@"); parsed_params=(); parsed_values=()
left=(); testdepth=0; local arg; while [[ ${#argv[@]} -gt 0 ]]; do
if [[ ${argv[0]} = "--" ]]; then for arg in "${argv[@]}"; do
parsed_params+=('a'); parsed_values+=("$arg"); done; break
elif [[ ${argv[0]} = --* ]]; then parse_long
elif [[ ${argv[0]} = -* && ${argv[0]} != "-" ]]; then parse_shorts
elif ${DOCOPT_OPTIONS_FIRST:-false}; then for arg in "${argv[@]}"; do
parsed_params+=('a'); parsed_values+=("$arg"); done; break; else
parsed_params+=('a'); parsed_values+=("${argv[0]}"); argv=("${argv[@]:1}"); fi
done; local idx; if ${DOCOPT_ADD_HELP:-true}; then
for idx in "${parsed_params[@]}"; do [[ $idx = 'a' ]] && continue
if [[ ${shorts[$idx]} = "-h" || ${longs[$idx]} = "--help" ]]; then
stdout "$trimmed_doc"; _return 0; fi; done; fi
if [[ ${DOCOPT_PROGRAM_VERSION:-false} != 'false' ]]; then
for idx in "${parsed_params[@]}"; do [[ $idx = 'a' ]] && continue
if [[ ${longs[$idx]} = "--version" ]]; then stdout "$DOCOPT_PROGRAM_VERSION"
_return 0; fi; done; fi; local i=0; while [[ $i -lt ${#parsed_params[@]} ]]; do
left+=("$i"); ((i++)) || true; done
if ! required "$root_idx" || [ ${#left[@]} -gt 0 ]; then error; fi; return 0; }
parse_shorts() { local token=${argv[0]}; local value; argv=("${argv[@]:1}")
[[ $token = -* && $token != --* ]] || _return 88; local remaining=${token#-}
while [[ -n $remaining ]]; do local short="-${remaining:0:1}"
remaining="${remaining:1}"; local i=0; local similar=(); local match=false
for o in "${shorts[@]}"; do if [[ $o = "$short" ]]; then similar+=("$short")
[[ $match = false ]] && match=$i; fi; ((i++)) || true; done
if [[ ${#similar[@]} -gt 1 ]]; then
error "${short} is specified ambiguously ${#similar[@]} times"
elif [[ ${#similar[@]} -lt 1 ]]; then match=${#shorts[@]}; value=true
shorts+=("$short"); longs+=(''); argcounts+=(0); else value=false
if [[ ${argcounts[$match]} -ne 0 ]]; then if [[ $remaining = '' ]]; then
if [[ ${#argv[@]} -eq 0 || ${argv[0]} = '--' ]]; then
error "${short} requires argument"; fi; value=${argv[0]}; argv=("${argv[@]:1}")
else value=$remaining; remaining=''; fi; fi; if [[ $value = false ]]; then
value=true; fi; fi; parsed_params+=("$match"); parsed_values+=("$value"); done
}; parse_long() { local token=${argv[0]}; local long=${token%%=*}
local value=${token#*=}; local argcount; argv=("${argv[@]:1}")
[[ $token = --* ]] || _return 88; if [[ $token = *=* ]]; then eq='='; else eq=''
value=false; fi; local i=0; local similar=(); local match=false
for o in "${longs[@]}"; do if [[ $o = "$long" ]]; then similar+=("$long")
[[ $match = false ]] && match=$i; fi; ((i++)) || true; done
if [[ $match = false ]]; then i=0; for o in "${longs[@]}"; do
if [[ $o = $long* ]]; then similar+=("$long"); [[ $match = false ]] && match=$i
fi; ((i++)) || true; done; fi; if [[ ${#similar[@]} -gt 1 ]]; then
error "${long} is not a unique prefix: ${similar[*]}?"
elif [[ ${#similar[@]} -lt 1 ]]; then
[[ $eq = '=' ]] && argcount=1 || argcount=0; match=${#shorts[@]}
[[ $argcount -eq 0 ]] && value=true; shorts+=(''); longs+=("$long")
argcounts+=("$argcount"); else if [[ ${argcounts[$match]} -eq 0 ]]; then
if [[ $value != false ]]; then
error "${longs[$match]} must not have an argument"; fi
elif [[ $value = false ]]; then
if [[ ${#argv[@]} -eq 0 || ${argv[0]} = '--' ]]; then
error "${long} requires argument"; fi; value=${argv[0]}; argv=("${argv[@]:1}")
fi; if [[ $value = false ]]; then value=true; fi; fi; parsed_params+=("$match")
parsed_values+=("$value"); }; required() { local initial_left=("${left[@]}")
local node_idx; ((testdepth++)) || true; for node_idx in "$@"; do
if ! "node_$node_idx"; then left=("${initial_left[@]}"); ((testdepth--)) || true
return 1; fi; done; if [[ $((--testdepth)) -eq 0 ]]; then
left=("${initial_left[@]}"); for node_idx in "$@"; do "node_$node_idx"; done; fi
return 0; }; either() { local initial_left=("${left[@]}"); local best_match_idx
local match_count; local node_idx; ((testdepth++)) || true
for node_idx in "$@"; do if "node_$node_idx"; then
if [[ -z $match_count || ${#left[@]} -lt $match_count ]]; then
best_match_idx=$node_idx; match_count=${#left[@]}; fi; fi
left=("${initial_left[@]}"); done; ((testdepth--)) || true
if [[ -n $best_match_idx ]]; then "node_$best_match_idx"; return 0; fi
left=("${initial_left[@]}"); return 1; }; optional() { local node_idx
for node_idx in "$@"; do "node_$node_idx"; done; return 0; }; oneormore() {
local i=0; local prev=${#left[@]}; while "node_$1"; do ((i++)) || true
[[ $prev -eq ${#left[@]} ]] && break; prev=${#left[@]}; done
if [[ $i -ge 1 ]]; then return 0; fi; return 1; }; _command() { local i
local name=${2:-$1}; for i in "${!left[@]}"; do local l=${left[$i]}
if [[ ${parsed_params[$l]} = 'a' ]]; then
if [[ ${parsed_values[$l]} != "$name" ]]; then return 1; fi
left=("${left[@]:0:$i}" "${left[@]:((i+1))}")
[[ $testdepth -gt 0 ]] && return 0; if [[ $3 = true ]]; then
eval "((var_$1++)) || true"; else eval "var_$1=true"; fi; return 0; fi; done
return 1; }; switch() { local i; for i in "${!left[@]}"; do local l=${left[$i]}
if [[ ${parsed_params[$l]} = "$2" ]]; then
left=("${left[@]:0:$i}" "${left[@]:((i+1))}")
[[ $testdepth -gt 0 ]] && return 0; if [[ $3 = true ]]; then
eval "((var_$1++))" || true; else eval "var_$1=true"; fi; return 0; fi; done
return 1; }; value() { local i; for i in "${!left[@]}"; do local l=${left[$i]}
if [[ ${parsed_params[$l]} = "$2" ]]; then
left=("${left[@]:0:$i}" "${left[@]:((i+1))}")
[[ $testdepth -gt 0 ]] && return 0; local value
value=$(printf -- "%q" "${parsed_values[$l]}"); if [[ $3 = true ]]; then
eval "var_$1+=($value)"; else eval "var_$1=$value"; fi; return 0; fi; done
return 1; }; stdout() { printf -- "cat <<'EOM'\n%s\nEOM\n" "$1"; }; stderr() {
printf -- "cat <<'EOM' >&2\n%s\nEOM\n" "$1"; }; error() {
[[ -n $1 ]] && stderr "$1"; stderr "$usage"; _return 1; }; _return() {
printf -- "exit %d\n" "$1"; exit "$1"; }; set -e; trimmed_doc=${DOC:1:2779}
usage=${DOC:368:1842}; digest=7cc59
shorts=(-s -U -C -h -d -v -b -n -e '' '' '' '' '' '' '' '' '' '' '' '' '' '' '' '' '')
longs=(--stack --skip-update --skip-check --help --debug --verbose --branch --no-prompt --env --status --server --type --domain --app-name --pass --secrets --all --update --force --fast --chaos --volumes --no-tty --user --bump --dev)
argcounts=(1 0 0 0 0 0 1 0 1 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0); node_0(){
value __stack 0; }; node_1(){ switch __skip_update 1; }; node_2(){
switch __skip_check 2; }; node_3(){ switch __help 3; }; node_4(){
switch __debug 4; }; node_5(){ switch __verbose 5; }; node_6(){ value __branch 6
}; node_7(){ switch __no_prompt 7; }; node_8(){ value __env 8; }; node_9(){
switch __status 9; }; node_10(){ value __server 10; }; node_11(){
value __type 11; }; node_12(){ value __domain 12; }; node_13(){
value __app_name 13; }; node_14(){ switch __pass 14; }; node_15(){
switch __secrets 15; }; node_16(){ switch __all 16; }; node_17(){
switch __update 17; }; node_18(){ switch __force 18; }; node_19(){
switch __fast 19; }; node_20(){ switch __chaos 20; }; node_21(){
switch __volumes 21; }; node_22(){ switch __no_tty 22; }; node_23(){
value __user 23; }; node_24(){ switch __bump 24; }; node_25(){ switch __dev 25
}; node_26(){ value _type_ a; }; node_27(){ value _app_ a; }; node_28(){
value _service_ a; }; node_29(){ value _version_ a; }; node_30(){ value _src_ a
}; node_31(){ value _dst_ a; }; node_32(){ value _backup_file_ a; }; node_33(){
value _args_ a true; }; node_34(){ value _secret_ a; }; node_35(){ value _cmd_ a
}; node_36(){ value _data_ a; }; node_37(){ value _volume_ a; }; node_38(){
value _command_ a; }; node_39(){ value _recipe_ a; }; node_40(){ value _host_ a
}; node_41(){ value _user_ a; }; node_42(){ value _port_ a; }; node_43(){
value _provider_ a; }; node_44(){ value _subcommands_ a true; }; node_45(){
_command app; }; node_46(){ _command list; }; node_47(){ _command ls; }
node_48(){ _command new; }; node_49(){ _command backup; }; node_50(){
_command deploy; }; node_51(){ _command check; }; node_52(){ _command version; }
node_53(){ _command config; }; node_54(){ _command cp; }; node_55(){
_command logs; }; node_56(){ _command ps; }; node_57(){ _command restore; }
node_58(){ _command rm; }; node_59(){ _command delete; }; node_60(){
_command run; }; node_61(){ _command rollback; }; node_62(){ _command secret; }
node_63(){ _command generate; }; node_64(){ _command insert; }; node_65(){
_command undeploy; }; node_66(){ _command volume; }; node_67(){ _command recipe
}; node_68(){ _command create; }; node_69(){ _command release; }; node_70(){
_command versions; }; node_71(){ _command server; }; node_72(){ _command add; }
node_73(){ _command __ --; }; node_74(){ _command init; }; node_75(){
_command apps; }; node_76(){ _command upgrade; }; node_77(){ _command doctor; }
node_78(){ _command help; }; node_79(){ optional 0 1 2 3 4 5 6 7 8; }
node_80(){ optional 79; }; node_81(){ either 46 47; }; node_82(){ required 81; }
node_83(){ optional 9; }; node_84(){ optional 10; }; node_85(){ optional 11; }
node_86(){ required 80 45 82 83 84 85; }; node_87(){ optional 12; }; node_88(){
optional 13; }; node_89(){ optional 14; }; node_90(){ optional 15; }; node_91(){
required 80 45 48 84 87 88 89 90 26; }; node_92(){ either 28 16; }; node_93(){
required 92; }; node_94(){ required 80 45 27 49 93; }; node_95(){ optional 17; }
node_96(){ optional 18; }; node_97(){ optional 19; }; node_98(){ optional 20; }
node_99(){ optional 29; }; node_100(){ required 80 45 27 50 95 96 97 98 99; }
node_101(){ required 80 45 27 51; }; node_102(){ required 80 45 27 52; }
node_103(){ required 80 45 27 53; }; node_104(){ required 80 45 27 54 30 31; }
node_105(){ optional 28; }; node_106(){ required 80 45 27 55 105; }; node_107(){
required 80 45 27 56; }; node_108(){ required 80 45 27 57 93; }; node_109(){
either 58 59; }; node_110(){ required 109; }; node_111(){ optional 21; }
node_112(){ required 80 45 27 110 111 90; }; node_113(){ optional 32; }
node_114(){ required 80 45 27 57 28 113; }; node_115(){ optional 22; }
node_116(){ optional 23; }; node_117(){ oneormore 33; }; node_118(){
required 80 45 27 60 115 116 28 117; }; node_119(){ required 80 45 27 61 99; }
node_120(){ required 34 29; }; node_121(){ either 120 16; }; node_122(){
required 121; }; node_123(){ optional 35; }; node_124(){
required 80 45 27 62 63 122 123 89; }; node_125(){
required 80 45 27 62 64 34 29 36 89; }; node_126(){ either 34 16; }; node_127(){
required 126; }; node_128(){ required 80 45 27 62 110 127 89; }; node_129(){
required 80 45 27 65; }; node_130(){ required 80 45 27 66 47; }; node_131(){
either 37 16; }; node_132(){ required 131; }; node_133(){
required 80 45 27 66 110 132; }; node_134(){ optional 117; }; node_135(){
required 80 45 27 38 134; }; node_136(){ required 80 67 47; }; node_137(){
required 80 67 68 39; }; node_138(){ optional 24; }; node_139(){
required 80 67 39 69 96 138; }; node_140(){ required 80 67 39 70; }; node_141(){
optional 41; }; node_142(){ optional 42; }; node_143(){
required 80 71 72 40 141 142; }; node_144(){ required 80 71 48 43 73 33; }
node_145(){ required 80 71 82; }; node_146(){ required 80 71 40 58; }
node_147(){ required 80 71 40 74; }; node_148(){ required 80 71 40 75 83; }
node_149(){ optional 25; }; node_150(){ required 80 76 149; }; node_151(){
required 80 52; }; node_152(){ required 80 77; }; node_153(){ oneormore 44; }
node_154(){ optional 153; }; node_155(){ required 80 78 154; }; node_156(){
required 80; }; node_157(){
either 86 91 94 100 101 102 103 104 106 107 108 112 114 118 119 124 125 128 129 130 133 135 136 137 139 140 143 144 145 146 147 148 150 151 152 155 156
}; node_158(){ required 157; }; cat <<<' docopt_exit() {
[[ -n $1 ]] && printf "%s\n" "$1" >&2; printf "%s\n" "${DOC:368:1842}" >&2
exit 1; }'; unset var___stack var___skip_update var___skip_check var___help \
var___debug var___verbose var___branch var___no_prompt var___env var___status \
var___server var___type var___domain var___app_name var___pass var___secrets \
var___all var___update var___force var___fast var___chaos var___volumes \
var___no_tty var___user var___bump var___dev var__type_ var__app_ \
var__service_ var__version_ var__src_ var__dst_ var__backup_file_ var__args_ \
var__secret_ var__cmd_ var__data_ var__volume_ var__command_ var__recipe_ \
var__host_ var__user_ var__port_ var__provider_ var__subcommands_ var_app \
var_list var_ls var_new var_backup var_deploy var_check var_version var_config \
var_cp var_logs var_ps var_restore var_rm var_delete var_run var_rollback \
var_secret var_generate var_insert var_undeploy var_volume var_recipe \
var_create var_release var_versions var_server var_add var___ var_init \
var_apps var_upgrade var_doctor var_help; parse 158 "$@"
local prefix=${DOCOPT_PREFIX:-''}; unset "${prefix}__stack" \
"${prefix}__skip_update" "${prefix}__skip_check" "${prefix}__help" \
"${prefix}__debug" "${prefix}__verbose" "${prefix}__branch" \
"${prefix}__no_prompt" "${prefix}__env" "${prefix}__status" \
"${prefix}__server" "${prefix}__type" "${prefix}__domain" \
"${prefix}__app_name" "${prefix}__pass" "${prefix}__secrets" "${prefix}__all" \
"${prefix}__update" "${prefix}__force" "${prefix}__fast" "${prefix}__chaos" \
"${prefix}__volumes" "${prefix}__no_tty" "${prefix}__user" "${prefix}__bump" \
"${prefix}__dev" "${prefix}_type_" "${prefix}_app_" "${prefix}_service_" \
"${prefix}_version_" "${prefix}_src_" "${prefix}_dst_" \
"${prefix}_backup_file_" "${prefix}_args_" "${prefix}_secret_" \
"${prefix}_cmd_" "${prefix}_data_" "${prefix}_volume_" "${prefix}_command_" \
"${prefix}_recipe_" "${prefix}_host_" "${prefix}_user_" "${prefix}_port_" \
"${prefix}_provider_" "${prefix}_subcommands_" "${prefix}app" "${prefix}list" \
"${prefix}ls" "${prefix}new" "${prefix}backup" "${prefix}deploy" \
"${prefix}check" "${prefix}version" "${prefix}config" "${prefix}cp" \
"${prefix}logs" "${prefix}ps" "${prefix}restore" "${prefix}rm" \
"${prefix}delete" "${prefix}run" "${prefix}rollback" "${prefix}secret" \
"${prefix}generate" "${prefix}insert" "${prefix}undeploy" "${prefix}volume" \
"${prefix}recipe" "${prefix}create" "${prefix}release" "${prefix}versions" \
"${prefix}server" "${prefix}add" "${prefix}__" "${prefix}init" "${prefix}apps" \
"${prefix}upgrade" "${prefix}doctor" "${prefix}help"
eval "${prefix}"'__stack=${var___stack:-}'
eval "${prefix}"'__skip_update=${var___skip_update:-false}'
eval "${prefix}"'__skip_check=${var___skip_check:-false}'
eval "${prefix}"'__help=${var___help:-false}'
eval "${prefix}"'__debug=${var___debug:-false}'
eval "${prefix}"'__verbose=${var___verbose:-false}'
eval "${prefix}"'__branch=${var___branch:-}'
eval "${prefix}"'__no_prompt=${var___no_prompt:-false}'
eval "${prefix}"'__env=${var___env:-}'
eval "${prefix}"'__status=${var___status:-false}'
eval "${prefix}"'__server=${var___server:-}'
eval "${prefix}"'__type=${var___type:-}'
eval "${prefix}"'__domain=${var___domain:-}'
eval "${prefix}"'__app_name=${var___app_name:-}'
eval "${prefix}"'__pass=${var___pass:-false}'
eval "${prefix}"'__secrets=${var___secrets:-false}'
eval "${prefix}"'__all=${var___all:-false}'
eval "${prefix}"'__update=${var___update:-false}'
eval "${prefix}"'__force=${var___force:-false}'
eval "${prefix}"'__fast=${var___fast:-false}'
eval "${prefix}"'__chaos=${var___chaos:-false}'
eval "${prefix}"'__volumes=${var___volumes:-false}'
eval "${prefix}"'__no_tty=${var___no_tty:-false}'
eval "${prefix}"'__user=${var___user:-}'
eval "${prefix}"'__bump=${var___bump:-false}'
eval "${prefix}"'__dev=${var___dev:-false}'
eval "${prefix}"'_type_=${var__type_:-}'; eval "${prefix}"'_app_=${var__app_:-}'
eval "${prefix}"'_service_=${var__service_:-}'
eval "${prefix}"'_version_=${var__version_:-}'
eval "${prefix}"'_src_=${var__src_:-}'; eval "${prefix}"'_dst_=${var__dst_:-}'
eval "${prefix}"'_backup_file_=${var__backup_file_:-}'
if declare -p var__args_ >/dev/null 2>&1; then
eval "${prefix}"'_args_=("${var__args_[@]}")'; else eval "${prefix}"'_args_=()'
fi; eval "${prefix}"'_secret_=${var__secret_:-}'
eval "${prefix}"'_cmd_=${var__cmd_:-}'; eval "${prefix}"'_data_=${var__data_:-}'
eval "${prefix}"'_volume_=${var__volume_:-}'
eval "${prefix}"'_command_=${var__command_:-}'
eval "${prefix}"'_recipe_=${var__recipe_:-}'
eval "${prefix}"'_host_=${var__host_:-}'
eval "${prefix}"'_user_=${var__user_:-}'
eval "${prefix}"'_port_=${var__port_:-}'
eval "${prefix}"'_provider_=${var__provider_:-}'
if declare -p var__subcommands_ >/dev/null 2>&1; then
eval "${prefix}"'_subcommands_=("${var__subcommands_[@]}")'; else
eval "${prefix}"'_subcommands_=()'; fi; eval "${prefix}"'app=${var_app:-false}'
eval "${prefix}"'list=${var_list:-false}'; eval "${prefix}"'ls=${var_ls:-false}'
eval "${prefix}"'new=${var_new:-false}'
eval "${prefix}"'backup=${var_backup:-false}'
eval "${prefix}"'deploy=${var_deploy:-false}'
eval "${prefix}"'check=${var_check:-false}'
eval "${prefix}"'version=${var_version:-false}'
eval "${prefix}"'config=${var_config:-false}'
eval "${prefix}"'cp=${var_cp:-false}'; eval "${prefix}"'logs=${var_logs:-false}'
eval "${prefix}"'ps=${var_ps:-false}'
eval "${prefix}"'restore=${var_restore:-false}'
eval "${prefix}"'rm=${var_rm:-false}'
eval "${prefix}"'delete=${var_delete:-false}'
eval "${prefix}"'run=${var_run:-false}'
eval "${prefix}"'rollback=${var_rollback:-false}'
eval "${prefix}"'secret=${var_secret:-false}'
eval "${prefix}"'generate=${var_generate:-false}'
eval "${prefix}"'insert=${var_insert:-false}'
eval "${prefix}"'undeploy=${var_undeploy:-false}'
eval "${prefix}"'volume=${var_volume:-false}'
eval "${prefix}"'recipe=${var_recipe:-false}'
eval "${prefix}"'create=${var_create:-false}'
eval "${prefix}"'release=${var_release:-false}'
eval "${prefix}"'versions=${var_versions:-false}'
eval "${prefix}"'server=${var_server:-false}'
eval "${prefix}"'add=${var_add:-false}'; eval "${prefix}"'__=${var___:-false}'
eval "${prefix}"'init=${var_init:-false}'
eval "${prefix}"'apps=${var_apps:-false}'
eval "${prefix}"'upgrade=${var_upgrade:-false}'
eval "${prefix}"'doctor=${var_doctor:-false}'
eval "${prefix}"'help=${var_help:-false}'; local docopt_i=1
[[ $BASH_VERSION =~ ^4.3 ]] && docopt_i=2; for ((;docopt_i>0;docopt_i--)); do
declare -p "${prefix}__stack" "${prefix}__skip_update" "${prefix}__skip_check" \
"${prefix}__help" "${prefix}__debug" "${prefix}__verbose" "${prefix}__branch" \
"${prefix}__no_prompt" "${prefix}__env" "${prefix}__status" \
"${prefix}__server" "${prefix}__type" "${prefix}__domain" \
"${prefix}__app_name" "${prefix}__pass" "${prefix}__secrets" "${prefix}__all" \
"${prefix}__update" "${prefix}__force" "${prefix}__fast" "${prefix}__chaos" \
"${prefix}__volumes" "${prefix}__no_tty" "${prefix}__user" "${prefix}__bump" \
"${prefix}__dev" "${prefix}_type_" "${prefix}_app_" "${prefix}_service_" \
"${prefix}_version_" "${prefix}_src_" "${prefix}_dst_" \
"${prefix}_backup_file_" "${prefix}_args_" "${prefix}_secret_" \
"${prefix}_cmd_" "${prefix}_data_" "${prefix}_volume_" "${prefix}_command_" \
"${prefix}_recipe_" "${prefix}_host_" "${prefix}_user_" "${prefix}_port_" \
"${prefix}_provider_" "${prefix}_subcommands_" "${prefix}app" "${prefix}list" \
"${prefix}ls" "${prefix}new" "${prefix}backup" "${prefix}deploy" \
"${prefix}check" "${prefix}version" "${prefix}config" "${prefix}cp" \
"${prefix}logs" "${prefix}ps" "${prefix}restore" "${prefix}rm" \
"${prefix}delete" "${prefix}run" "${prefix}rollback" "${prefix}secret" \
"${prefix}generate" "${prefix}insert" "${prefix}undeploy" "${prefix}volume" \
"${prefix}recipe" "${prefix}create" "${prefix}release" "${prefix}versions" \
"${prefix}server" "${prefix}add" "${prefix}__" "${prefix}init" "${prefix}apps" \
"${prefix}upgrade" "${prefix}doctor" "${prefix}help"; done; }
# docopt parser above, complete command for generating this parser is `docopt.sh abra`
|
|
|
|
# Name this script was invoked as, for user-facing messages (parameter
# expansion instead of spawning basename).
PROGRAM_NAME=${0##*/}
|
|
|
|
#######################################
|
|
# Helpers
|
|
#######################################
|
|
|
|
###### Utility functions
|
|
|
|
error() {
  # Print a red "ERROR:" message and abort the whole program with status 1.
  # Diagnostics go to stderr so they can't corrupt output that callers
  # capture from stdout (see silence()/unsilence() workarounds elsewhere).
  echo "$(tput setaf 1)ERROR: $*$(tput sgr0)" >&2
  exit 1
}
|
|
|
|
warning() {
  # Print a yellow "WARNING:" message to stderr (not stdout, so captured
  # command output stays clean) and keep going.
  echo "$(tput setaf 3)WARNING: $*$(tput sgr0)" >&2
}
|
|
|
|
success() {
  # Print a green "SUCCESS:" message on stdout.
  local tint reset
  tint=$(tput setaf 2) || tint=""
  reset=$(tput sgr0) || reset=""
  echo "${tint}SUCCESS: $*${reset}"
}
|
|
|
|
info() {
  # Print a blue "INFO:" message, but only when --verbose (-v) or
  # --debug (-d) was given. Sent to stderr so verbose runs don't pollute
  # stdout that other code captures.
  if [ "$abra___verbose" = "false" ] && [ "$abra___debug" = "false" ]; then
    return
  fi
  echo "$(tput setaf 4)INFO: $*$(tput sgr0)" >&2
}
|
|
|
|
debug() {
  # Print a magenta "DEBUG:" message, only when --debug (-d) was given.
  # Sent to stderr so debug runs don't pollute captured stdout.
  if [ "$abra___debug" = "false" ]; then
    return
  fi
  echo "$(tput setaf 13)DEBUG: $*$(tput sgr0)" >&2
}
|
|
|
|
# 3wc: temporarily disable debug and verbose

silence() {
  # Mute debug/verbose output until unsilence() is called; handy when the
  # raw stdout of an abra subcommand needs to be captured. The previous
  # flag values are stashed in _abra___debug/_abra___verbose globals.
  _abra___debug="$abra___debug"
  _abra___verbose="$abra___verbose"
  abra___debug="false"
  abra___verbose="false"
}
|
|
|
|
unsilence() {
  # Undo silence(): restore the stashed debug/verbose flag values.
  abra___debug="$_abra___debug"
  abra___verbose="$_abra___verbose"
}
|
|
|
|
###### Default settings

# Stack file passed to `docker stack deploy`; honour a caller-supplied
# value, default to compose.yml otherwise.
COMPOSE_FILE="${COMPOSE_FILE:-compose.yml}"
|
|
|
|
###### Safety checks
|
|
|
|
require_bash_4() {
  # We're using things like `mapfile` which require bash 4+.
  # Compare the major version numerically via BASH_VERSINFO: the previous
  # string comparison ([[ $BASH_VERSION > 4.0 ]]) would lexically reject
  # bash 10+, and it spawned `bash -c`, testing whatever bash is first in
  # PATH instead of the interpreter actually running this script.
  if (( BASH_VERSINFO[0] < 4 )); then
    error "bash version '$BASH_VERSION' is too old, 4 or newer required"
  fi
}
|
|
|
|
require_binary() {
  # Abort unless $1 resolves to something runnable (binary, builtin or
  # function).
  type "$1" > /dev/null 2>&1 || error "'$1' program is not installed"
}
|
|
|
|
require_abra_dir() {
  # Create the abra state directory (and parents) if missing; idempotent.
  # `--` guards against an $ABRA_DIR that starts with a dash.
  mkdir -p -- "$ABRA_DIR"
}
|
|
|
|
require_vendor_dir() {
  # Create the vendored-binaries directory (and parents) if missing;
  # idempotent. `--` guards against a path starting with a dash.
  mkdir -p -- "$ABRA_VENDOR_DIR"
}
|
|
|
|
require_consent_for_update() {
  # Deploying over a running app changes server state; demand the --update
  # flag (which flips CONSENT_TO_UPDATE away from "false") before going on.
  if [ "$CONSENT_TO_UPDATE" != "false" ]; then
    return
  fi
  error "A new app state will be deployed! Please use --update to consent"
}
|
|
|
|
require_docker_version() {
  # Verify every configured server (docker context) runs a recent enough
  # Docker daemon; aborts on the first server that is too old.
  get_servers

  MIN_DOCKER_VERSION=19

  # Also check the local "default" context.
  SERVERS+=("default")

  for SERVER in "${SERVERS[@]}"; do
    SERVER="${SERVER##*/}" # basename
    # Empty $host means the context doesn't exist / isn't reachable; such
    # servers are silently skipped.
    host=$(docker context inspect "$SERVER" -f "{{.Endpoints.docker.Host}}" 2>/dev/null)
    if [[ -n "$host" ]]; then
      # NOTE(review): the 2>/dev/null here applies to `cut`, not to `docker
      # version` — if docker fails its stderr still leaks and
      # $major_version is empty, which the numeric -lt below treats as 0
      # (i.e. "too old"); confirm whether that is intended.
      major_version=$(DOCKER_CONTEXT="$SERVER" docker version --format "{{.Server.Version}}" | cut -d'.' -f1 2>/dev/null)
      if [[ "$major_version" -lt "$MIN_DOCKER_VERSION" ]]; then
        error "This tool requires Docker v${MIN_DOCKER_VERSION} or greater. Please upgrade your Docker installation on $SERVER"
        # NOTE(review): unreachable — error() already exits 1.
        exit 1
      else
        debug "Docker version on $SERVER is sufficient (v${major_version})"
      fi
    fi
  done
}
|
|
|
|
require_valid_yaml() {
  # Parse the given file with the vendored yq and abort on syntax errors.
  # $YQ (exported by vendor_binary via require_yq) is a file path, so it is
  # quoted — unquoted it would word-split on paths containing spaces
  # (e.g. a $HOME with a space in it).
  "$YQ" e "$1" > /dev/null || error "Invalid YAML '$1'"
}
|
|
|
|
###### Download and update data
|
|
|
|
require_apps_json() {
  # Ensure we have the latest copy of apps.json

  # --skip-update / -U short-circuits all network access.
  if [ "$abra___skip_update" = "true" ]; then
    return
  fi

  if [ -f "$ABRA_APPS_JSON" ]; then
    # Compare the server's Last-Modified header with the local file's
    # change time and only re-download when the remote copy is newer.
    modified=$(curl --silent --head "$ABRA_APPS_URL" | \
      awk '/^last-modified/{print tolower($0)}' | \
      sed 's/^last-modified: //I')
    remote_ctime=$(date --date="$modified" +%s)
    local_ctime=$(stat -c %Z "$ABRA_APPS_JSON")
    # NOTE(review): `date --date=` and `stat -c %Z` are GNU-isms — this
    # branch presumably fails on BSD/macOS userlands; also, if curl fails,
    # $modified is empty and `date` errors out. Confirm supported platforms.

    if [ "$local_ctime" -lt "$remote_ctime" ]; then
      info "Downloading new apps.json"
      curl -sLo "$ABRA_APPS_JSON" "$ABRA_APPS_URL"
    else
      debug "No apps.json update needed"
    fi
  else
    info "Downloading apps.json"
    curl -sLo "$ABRA_APPS_JSON" "$ABRA_APPS_URL"
  fi
}
|
|
|
|
require_plugin() {
  # Clone the named plugin repository into $ABRA_DIR/plugins/<plugin>.
  # Only called when the plugin is known to be missing locally.
  PLUGIN="$1"

  # --branch / -b overrides the branch that gets cloned.
  BRANCH="${abra___branch:-master}"

  warning "The $PLUGIN plugin was not found, fetching via Git"

  mkdir -p "$ABRA_DIR/plugins"

  if [[ "$BRANCH" != "master" ]]; then
    git_extra_args="--branch $BRANCH"
  fi

  # NOTE(review): $GIT_URL already ends with "/", so this URL contains "//"
  # (git tolerates it) — require_app uses "$GIT_URL$APP" without the extra
  # slash; consider making the two consistent.
  # $git_extra_args is deliberately unquoted so "--branch X" splits into
  # two arguments.
  # shellcheck disable=SC2086
  if ! git clone ${git_extra_args:-} "$GIT_URL/$PLUGIN.git" "$ABRA_DIR/plugins/$PLUGIN" > /dev/null 2>&1 ; then
    error "Could not retrieve the $PLUGIN plugin, does it exist?"
  fi

  # An empty `git branch --list` means the default branch ("master") did
  # not exist in the remote; fall back to "main".
  if [[ $(cd "$ABRA_DIR/plugins/$PLUGIN" && git branch --list | wc -l) == "0" ]]; then
    debug "Failed to clone default branch, guessing alternative is 'main'"
    (cd "$ABRA_DIR/plugins/$PLUGIN" && git checkout main > /dev/null 2>&1)
  fi

  success "Fetched the $PLUGIN plugin via Git"
}
|
|
|
|
require_app (){
  # Clone the recipe/app-type repository into $ABRA_DIR/apps/<type>.
  # Only called when the app type is known to be missing locally.
  APP="$1"
  APP_DIR="$ABRA_DIR/apps/$APP"

  # --branch / -b overrides the branch that gets cloned.
  BRANCH="${abra___branch:-master}"

  warning "The app type '$APP' was not found, fetching via Git"

  if [[ "$BRANCH" != "master" ]]; then
    git_extra_args="--branch $BRANCH"
  fi

  # $git_extra_args is deliberately unquoted so "--branch X" splits into
  # two arguments ($GIT_URL already ends with "/").
  # shellcheck disable=SC2086
  if ! git clone ${git_extra_args:-} "$GIT_URL$APP.git" "$ABRA_DIR/apps/$APP" > /dev/null 2>&1 ; then
    error "Could not retrieve app type '$APP', this app type doesn't exist?"
  fi

  # NOTE(review): this cd is NOT in a subshell, so the script's working
  # directory changes here for everything that follows; also, checking out
  # main/master straight after a --branch clone appears to discard the
  # branch the user asked for — confirm both are intended.
  cd "$APP_DIR" && checkout_main_or_master

  # An empty branch list means the default branch didn't exist; fall back
  # to "main".
  if [[ $(cd "$ABRA_DIR/apps/$APP" && git branch --list | wc -l) == "0" ]]; then
    debug "Failed to clone default branch, guessing alternative is 'main'"
    (cd "$ABRA_DIR/apps/$APP" && git checkout main)
  fi

  success "Fetched app configuration via Git"
}
|
|
|
|
require_app_version() {
  # Ensure $ABRA_DIR/apps/<type> exists and, unless --chaos was given or no
  # version was requested, is checked out at the given version (a git tag).
  # Side effect: leaves the script's working directory inside $APP_DIR.
  APP="$1"
  VERSION="$2"
  APP_DIR="$ABRA_DIR/apps/$APP"

  debug "Checking for type '$APP'"

  if [ ! -d "$APP_DIR" ]; then
    require_app "$APP"
  fi

  debug "Using $APP_DIR"
  cd "$APP_DIR" || error "Can't find app dir '$APP_DIR'"

  # Only hit the network when the wanted tag isn't already known locally.
  # NOTE(review): with an empty $VERSION, `grep -q ""` matches anything, so
  # the fetch is skipped — harmless here since empty versions fall through
  # to the warning below, but worth knowing.
  if ! git tag -l | grep -q "$VERSION"; then
    git fetch -q --all
  fi

  if [ -z "$VERSION" ]; then
    warning "No version specified, dangerously using latest git 😨"
  else
    if [ "$abra___chaos" = "false" ]; then
      git checkout -q "$VERSION" || error "Can't find version $VERSION"
    else
      # --chaos: deploy whatever is currently checked out.
      warning "Chaos deploy specified, dangerously using latest git 😨"
    fi
  fi
}
|
|
|
|
vendor_binary() {
  # Download a pinned release binary into $ABRA_VENDOR_DIR and export an
  # upper-cased variable pointing at it (e.g. yq -> $YQ).
  #
  # Arguments:
  #   $1 - repository URL, e.g. https://github.com/mikefarah/yq
  #   $2 - release tag, e.g. v4.9.6
  #   $3 - release asset file name, e.g. yq_linux_amd64
  require_vendor_dir

  local REPO="$1"
  local VERSION="$2"
  local FILE="$3"
  local BINARY="${REPO##*/}"
  local RELEASE_URL="$REPO/releases/download/${VERSION}/${FILE}"

  # Make the path to the binary available as a similarly-named variable, e.g.
  # yq -> $YQ
  export "${BINARY^^}=$ABRA_VENDOR_DIR/$BINARY"

  # Use $ABRA_VENDOR_DIR consistently instead of re-deriving the same path
  # from $ABRA_DIR.
  if [ -f "$ABRA_VENDOR_DIR/$BINARY" ]; then
    debug "$BINARY is already vendored"
    return
  fi

  # The asset names above are x86_64-only; on any other arch we can't pick
  # a download automatically.
  case $(uname -m) in
    x86_64)
      warning "Attempting to download the $BINARY binary from $RELEASE_URL into $ABRA_VENDOR_DIR"
      ;;
    *)
      error "Unable to automatically vendor $BINARY, you'll have to manually manage this.\n
Please see $REPO and place the $BINARY binary in $ABRA_VENDOR_DIR."
      ;;
  esac

  # Abort on download failure instead of chmod-ing an empty/partial file
  # and reporting success.
  if ! curl -sLo "$ABRA_VENDOR_DIR/$BINARY" "$RELEASE_URL"; then
    error "Unable to download $BINARY from $RELEASE_URL"
  fi
  chmod +x "$ABRA_VENDOR_DIR/$BINARY"
  success "$BINARY is now vendored ☮"
}
|
|
|
|
# Make sure a jq binary is vendored; afterwards $JQ points at it.
require_jq() {
  vendor_binary "https://github.com/stedolan/jq" "jq-1.6" "jq-linux64"
}
|
|
|
|
# Make sure a yq binary is vendored; afterwards $YQ points at it.
require_yq() {
  vendor_binary "https://github.com/mikefarah/yq" "v4.9.6" "yq_linux_amd64"
}
|
|
|
|
# Check out the repo's default branch: try 'main' first, then fall back to
# 'master'. All git chatter is suppressed; the exit status is that of the
# last attempted checkout.
checkout_main_or_master() {
  if ! git checkout main > /dev/null 2>&1; then
    git checkout master > /dev/null 2>&1
  fi
}
|
|
|
|
# Generate a random alphanumeric password.
# Arguments: $1 - desired password length
# Outputs: the password on stdout (pwgen adds a trailing newline; the
#          /dev/urandom fallback does not)
pwgen_native() {
  # Fix: the pwgen branch used to read a $length variable from the caller's
  # scope while the fallback used "$1"; use the argument consistently.
  local length="$1"

  if type pwgen > /dev/null 2>&1; then
    pwgen -s "$length" 1
    return
  fi

  tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c "$length"
}
|
|
|
|
# Generate a passphrase-style secret: use pwqgen when installed, otherwise
# join three random dictionary words, keeping alphanumerics only.
# NOTE(review): the fallback assumes /usr/share/dict/words exists; if it is
# missing, shuf fails and the result is an empty string — confirm.
pwqgen_native() {
  if type pwqgen > /dev/null 2>&1; then
    pwqgen
    return
  fi

  # tr -dc strips punctuation and the newlines between the words
  shuf -n 3 /usr/share/dict/words | tr -dc 'a-zA-Z0-9' | tr -d '\n'
}
|
|
|
|
# FIXME 3wc: update or remove
|
|
if [ -z "$ABRA_ENV" ] && [ -f .env ] && type direnv > /dev/null 2>&1 && ! direnv status | grep -q 'Found RC allowed true'; then
|
|
error "direnv is blocked, run direnv allow"
|
|
fi
|
|
|
|
###### Parse apps.json
|
|
|
|
# Load every recipe name (the top-level keys of apps.json), sorted, into
# the RECIPES array.
get_recipes() {
  require_jq

  readarray -t RECIPES < <($JQ -r ". | keys | .[]" "$ABRA_APPS_JSON" | sort)
}
|
|
|
|
# Populate RECIPE_VERSIONS with the sorted version keys of recipe $1 from
# apps.json; an unknown recipe yields an empty array.
# Arguments: $1 - recipe name (required; errors via ${1?} when missing)
# NOTE(review): `declare -a` inside a function creates a function-local
# array, so in the unknown-recipe branch callers may still see a previous
# (or unset) RECIPE_VERSIONS — confirm this is intended.
get_recipe_versions() {
  require_jq

  recipe="${1?Recipe not set}"

  recipe_json=$($JQ ".\"${recipe}\"" "$ABRA_APPS_JSON")

  if [ "$recipe_json" = "null" ]; then
    declare -a RECIPE_VERSIONS
  else
    mapfile -t RECIPE_VERSIONS < <(echo "$recipe_json" | $JQ -r ".versions | keys | .[]" - | sort)
  fi
}
|
|
|
|
# Choose the newest entry of the (already sorted) RECIPE_VERSIONS array and
# store it in VERSION; an empty array yields VERSION="".
get_recipe_version_latest() {
  if (( ${#RECIPE_VERSIONS[@]} == 0 )); then
    VERSION=""
    info "No versions found"
    return
  fi

  VERSION="${RECIPE_VERSIONS[-1]}"

  # With --chaos the chosen version is only informational
  if [ "$abra___chaos" = "true" ]; then
    info "Not choosing a version and instead deploying from latest commit"
  else
    info "Chose version $VERSION"
  fi
}
|
|
|
|
###### Run-time loading
|
|
|
|
# Source recipe-provided hooks (e.g. abra_backup_* / abra_restore_*
# functions): first an abra.sh in the current directory, then the app
# type's abra.sh under $APP_DIR.
load_abra_sh() {
  if [ -f abra.sh ]; then
    # shellcheck disable=SC1091
    source abra.sh
    debug "Loading ./abra.sh"
  fi

  if [ -f "$APP_DIR/abra.sh" ]; then
    debug "Loading $APP_DIR/abra.sh"
    # shellcheck disable=SC1090,SC1091
    source "$APP_DIR/abra.sh"
  fi
}
|
|
|
|
###### FIXME 3wc: name this section
|
|
|
|
# Print deployed vs to-be-deployed versions for every service declared in
# the app's compose file(s), and decide what to do next: bail out when
# nothing changed, demand consent for updates, or (with IS_VERSION_CHECK)
# just report and exit.
# Globals (read): STACK_NAME, APP_DIR, COMPOSE_FILE, YQ, IS_VERSION_CHECK,
#                 abra___update, abra___force, abra___chaos
output_version_summary() {
  echo " Versions:"

  CONSENT_TO_UPDATE=$abra___update
  FORCE_DEPLOY=$abra___force
  CHAOS_DEPLOY=$abra___chaos

  # Fix: these three are scalar flags; they were declared `local -a`
  # (arrays), which only worked by accident via element 0.
  local IS_AN_UPDATE="false"
  local UNABLE_TO_DETECT="false"
  local UNDEPLOYED_STATE="false"
  local -a CHECKED_SERVICES # array

  # Fix: anchor the match (-x) so a stack merely containing $STACK_NAME as
  # a substring doesn't count as deployed
  if ! docker stack ls --format "{{ .Name }}" | grep -qx "$STACK_NAME"; then
    UNDEPLOYED_STATE="true"
  fi

  IFS=':' read -ra COMPOSE_FILES <<< "$COMPOSE_FILE"
  for COMPOSE in "${COMPOSE_FILES[@]}"; do
    require_valid_yaml "$APP_DIR/$COMPOSE"
    SERVICES=$($YQ e '.services | keys | .[]' "${APP_DIR}/${COMPOSE}")

    for SERVICE in $SERVICES; do
      # A service can appear in several compose files; inspect it only once
      if [[ ${CHECKED_SERVICES[*]} =~ ${SERVICE} ]]; then
        debug "already inspected ${STACK_NAME}_${SERVICE} for versions, skipping..."
        continue
      fi

      # The deployed version is stored as a "<tag>-<digest>" service label
      filter="{{index .Spec.Labels \"coop-cloud.$STACK_NAME.$SERVICE.version\" }}"
      label=$(docker service inspect -f "$filter" "${STACK_NAME}_${SERVICE}" 2>/dev/null)

      live_version=$(echo "$label" | cut -d- -f1)
      live_digest=$(echo "$label" | cut -d- -f2)

      if [ -n "$live_version" ] && [ -n "$live_digest" ]; then
        # Compare against the version label declared in the compose file
        service_data=$($YQ e ".services.${SERVICE}" "${APP_DIR}/${COMPOSE}")
        service_image=$(echo "$service_data" | $YQ e ".image" - | cut -d':' -f1)
        service_version=$(echo "$service_data" | $YQ e ".deploy.labels[] | select(. == \"coop*\")" - | cut -d'=' -f2)
        service_tag="${service_version%-*}"
        service_digest="${service_version##*-}"

        echo " ${STACK_NAME}_${SERVICE} (${service_image}):"
        echo " deployed: $(tput setaf 2)$live_version ($live_digest)$(tput sgr0)"

        if [[ -z "$IS_VERSION_CHECK" ]] || [[ "$IS_VERSION_CHECK" != "true" ]]; then
          if [ "$live_version" != "$service_tag" ] || [ "$live_digest" != "$service_digest" ]; then
            IS_AN_UPDATE="true"
          fi
          if [ "$abra___chaos" = "true" ]; then
            echo " to be deployed: $(tput setaf 1)$service_tag ($service_digest) (+ latest git)$(tput sgr0)"
          else
            echo " to be deployed: $(tput setaf 1)$service_tag ($service_digest)$(tput sgr0)"
          fi
        fi
      else
        if [[ $UNDEPLOYED_STATE == "true" ]]; then
          image=$($YQ e ".services.${SERVICE}.image" "${APP_DIR}/${COMPOSE}" | cut -d':' -f1)
          echo " ${STACK_NAME}_${SERVICE} (${image}):"
          echo " undeployed!"
        else
          warning "Unable to detect deployed version of ${STACK_NAME}_${SERVICE}"
          UNABLE_TO_DETECT="true"
        fi
      fi
      CHECKED_SERVICES+=("$SERVICE")
    done
  done

  # `abra app <app> version` only wants the report above
  if [[ -n "$IS_VERSION_CHECK" ]] && [[ "$IS_VERSION_CHECK" == "true" ]]; then
    debug "Detected version check (without deploy), bailing out..."
    exit 0
  fi

  if [[ $IS_AN_UPDATE == "true" ]]; then
    require_consent_for_update
  else
    # Nothing changed and no override flags: stop the deploy here
    if [[ $UNABLE_TO_DETECT == "false" ]] && \
       [[ $UNDEPLOYED_STATE == "false" ]] && \
       [[ $FORCE_DEPLOY == "false" ]] && \
       [[ $CHAOS_DEPLOY = "false" ]]; then
      success "Nothing to deploy, you're on latest (use --force to re-deploy anyway)"
      exit 0
    fi
  fi
}
|
|
|
|
# Poll every container of the stack until each service is either healthy or
# has no healthcheck configured; error out on an unhealthy container or
# after ~$TIMEOUT seconds.
# Globals (read): STACK_NAME; uses the vendored $JQ binary.
ensure_stack_deployed() {
  # Fix: this function used a bare `jq` for the first inspect but the
  # vendored $JQ later on; vendor jq up front and use $JQ consistently so a
  # host without jq installed still works.
  require_jq

  local -a HEALTHY # mapping
  local -a MISSING # mapping

  TIMEOUT=60
  idx=0

  IFS=' ' read -r -a SERVICES <<< "$(docker stack services "${STACK_NAME}" --format "{{.ID}}" | tr '\n' ' ')"
  debug "Considering the following service IDs: ${SERVICES[*]} for ${STACK_NAME} deployment"

  # Loop until every service is accounted for as healthy or checkless
  while [ ! $(( ${#HEALTHY[@]} + ${#MISSING[@]} )) -eq ${#SERVICES[@]} ]; do
    for service in $(docker ps -f "name=$STACK_NAME" -q); do
      debug "Polling $service for deployment status"

      healthcheck=$(docker inspect --format "{{ json .State }}" "$service" | $JQ "try(.Health.Status // \"missing\")")
      name=$(docker inspect --format '{{ index .Config.Labels "com.docker.swarm.service.name" }}' "$service")

      if [[ ${MISSING[*]} =~ ${name} ]] || [[ ${HEALTHY[*]} =~ ${name} ]]; then
        debug "$name already marked as missing healthcheck / healthy status"
        continue
      fi

      # No healthcheck configured: count it as "done"
      if [[ "$healthcheck" == "\"missing\"" ]] && [[ ! "${MISSING[*]}" =~ $name ]]; then
        MISSING+=("$name")
        debug "No healthcheck configured for $name"
        continue
      fi

      if [[ "$healthcheck" == "\"healthy\"" ]] && [[ ! "${HEALTHY[*]}" =~ $name ]]; then
        HEALTHY+=("$name")
        debug "Marking $name with healthy status"
        continue
      fi

      # Unhealthy: surface the last healthcheck log entry, then abort
      if [[ "$healthcheck" == \""unhealthy"\" ]]; then
        logs=$(docker inspect --format "{{ json .State.Health.Log }}" "$service")
        exitcode="$(echo "$logs" | $JQ '.[-1] | .ExitCode')"
        warning "Healthcheck for new instance of $name is failing (exit code: $exitcode)"
        warning "$(echo "$logs" | $JQ -r '.[-1] | .Output')"
        error "healthcheck for $name is failing, this deployment did not succeed :("
      fi
    done

    idx=$(("$idx" + 1))
    if [[ $idx -eq "$TIMEOUT" ]]; then
      error "Waiting for healthy status timed out, this deployment did not succeed :("
    fi

    sleep 1
    debug "Deploying: $(( ${#HEALTHY[@]} + ${#MISSING[@]} ))/${#SERVICES[@]} (timeout: $idx/$TIMEOUT)"
  done
}
|
|
|
|
# Poll $1 with silent HEAD requests every 3 seconds until it responds
# successfully, giving up after 10 attempts.
ensure_domain_deployed() {
  local domain=$1

  warning "Waiting for $domain to come up..."

  idx=1
  while ! curl --output /dev/null --silent --head --fail "$domain"; do
    debug "Polled $domain $idx time(s) already"
    sleep 3
    idx=$((idx + 1))
    if (( idx > 10 )); then
      error "$domain still isn't up, check status by running \"abra app ${STACK_NAME} ps\""
    fi
  done
}
|
|
|
|
# Fill the SERVERS array with every entry (dotfiles included) under
# $ABRA_DIR/servers/; nullglob makes an empty directory yield an empty array.
get_servers() {
  shopt -s nullglob dotglob
  # shellcheck disable=SC2206
  SERVERS=($ABRA_DIR/servers/*)
  shopt -u nullglob dotglob
}
|
|
|
|
# Collect every "SECRET...VERSION..." line from the app's env file into the
# PASSWORDS array, one entry per matching line (empty array when none).
get_app_secrets() {
  # FIXME 3wc: requires bash 4, use for loop instead
  readarray -t PASSWORDS < <(grep "^SECRET.*VERSION.*" "$ENV_FILE")
}
|
|
|
|
# Resolve $abra__app_ to exactly one "<app>.env" under $ABRA_DIR/servers/,
# setting ENV_FILE, SERVER (the server directory name) and exporting
# STACK_NAME (app name with dots replaced by underscores).
load_instance() {
  APP="$abra__app_"

  # load all files matching "$APP.env" into ENV_FILES array
  mapfile -t ENV_FILES < <(find -L "$ABRA_DIR/servers/" -name "$APP.env")
  # FIXME 3wc: requires bash 4, use for loop instead

  # Exactly one match is required; `error` presumably exits, so the
  # trailing `return`s are belt-and-braces — TODO confirm.
  case "${#ENV_FILES[@]}" in
    1 ) ;;
    0 ) error "Can't find app '$APP'"; return;;
    * ) error "Found $APP in multiple servers: ${ENV_FILES[*]}"; return;;
  esac

  ENV_FILE="${ENV_FILES[0]}"
  debug "ENV_FILE=$ENV_FILE"

  if [ ! -f "$ENV_FILE" ]; then
    error "Can't open ENV_FILE '$ENV_FILE'"
  fi

  # split up the path by "/"
  IFS='/' read -r -a PARTS <<< "$ENV_FILE"

  # the second-to-last path component is the server directory name
  SERVER="${PARTS[-2]}"

  export STACK_NAME="${APP//./_}"
  debug "STACK_NAME=${STACK_NAME}"
}
|
|
|
|
# Source the instance's env file (exporting every variable via set -a),
# then derive APP_DIR from $TYPE and point DOCKER_CONTEXT at the server.
load_instance_env() {
  # 3wc: using set -a means we don't need `export` in the env files
  set -a
  # shellcheck disable=SC1090
  source "$ENV_FILE"
  set +a

  debug "Loaded variables from $ENV_FILE"

  # Every instance env file must declare its recipe TYPE
  if [ -z "$TYPE" ]; then
    error "TYPE not set, maybe $ENV_FILE is using an old format?"
  fi

  APP_DIR="$ABRA_DIR/apps/$TYPE"
  export DOCKER_CONTEXT="$SERVER"
  info "DOCKER_CONTEXT=$DOCKER_CONTEXT"

  export DOMAIN
}
|
|
|
|
# Determine the active Docker context when $DOCKER_CONTEXT isn't already
# set, by picking the '*'-marked row from `docker context ls`.
load_context() {
  if [ -n "$DOCKER_CONTEXT" ]; then
    return
  fi

  warning "\$DOCKER_CONTEXT not set, (slowly) looking it up"
  # make sure grep doesn't parse this, we want a literal '*'
  # shellcheck disable=SC2063
  DOCKER_CONTEXT=$(docker context ls | grep '*' | cut -d' ' -f1)
}
|
|
|
|
# Ask the user to confirm before continuing. Anything other than y/Y exits
# the whole program; --no-prompt skips the question entirely.
prompt_confirm() {
  [ "$abra___no_prompt" = "true" ] && return

  local choice
  read -rp "Continue? [y/N]? " choice

  if [[ "$choice" == [yY] ]]; then
    return
  fi

  exit
}
|
|
|
|
# Parse one "SECRET_<NAME>_VERSION=vN ..." env line into the globals that
# sub_app_secret_generate reads (abra__secret_, abra__version_ and an
# optional abra__length_), then generate that secret.
# Arguments: $1 - the raw SECRET...VERSION line
parse_secret() {
  SECRET="$1"

  if [[ "$SECRET" == *"length"* ]]; then
    # Extract the trailing number of the length annotation
    # shellcheck disable=SC2001
    abra__length_="$(echo "$SECRET" | sed -e 's/.*[^0-9]\([0-9]\+\)[^0-9]*$/\1/')"
  else
    # Note(decentral1se): unset this so that a length value from another secret
    # definition does not get passed on to another secret generation flow
    unset abra__length_
  fi

  abra__secret_="${SECRET%_VERSION=*}" # strip _VERSION=v1
  abra__secret_="${abra__secret_#SECRET_}" # strip SECRET_
  abra__secret_="${abra__secret_,,}" # lowercase

  # pull the vN version token out of the line
  abra__version_="$(echo "$SECRET" | sed -n 's/.*\(v[0-9]\).*/\1/p')"

  if [[ -n "$abra__length_" ]]; then
    echo "Generating $abra__secret_, version: $abra__version_, length: $abra__length_"
  else
    echo "Generating $abra__secret_, version: $abra__version_"
  fi

  sub_app_secret_generate
}
|
|
|
|
# Tail logs of every service in stack $1 concurrently; the EXIT trap kills
# the background `docker service logs` jobs when the user interrupts.
stack_logs (){
  # Note(decentral1se): see https://github.com/moby/moby/issues/31458#issuecomment-617871046
  STACK="$1"

  services=$(docker stack services "${STACK}" --format "{{.ID}}")

  # shellcheck disable=SC2154
  trap 'jobs=$(jobs -p) && test -n "$jobs" && kill $jobs' EXIT

  # one background log follower per service ID
  for item in ${services//\\n/$'\n'}; do
    docker service logs -f -t --tail 10 "$item" &
  done

  # block forever; Ctrl-C fires the trap above
  sleep infinity
}
|
|
|
|
# Generate every SECRET...VERSION entry found in the app's env file, one
# parse_secret/sub_app_secret_generate round-trip per line.
auto_gen_secrets (){
  get_app_secrets

  (( ${#PASSWORDS[@]} > 0 )) || error "No secrets found in $ENV_FILE"

  local entry
  for entry in "${PASSWORDS[@]}"; do
    parse_secret "$entry"
  done
}
|
|
|
|
#######################################
|
|
# abra app ..
|
|
#######################################
|
|
|
|
###### .. app ls
|
|
|
|
# `app ls` shares its help text with `app list`.
help_app_ls (){
  help_app_list
}
|
|
|
|
# `app ls` is an alias for `app list`.
sub_app_ls (){
  sub_app_list
}
|
|
|
|
# Usage text for `abra app (list|ls)`.
help_app_list (){
  echo "abra [options] app (list|ls) [--status] [--server=<server>] [--type=<type>]

List your exciting apps.

OPTIONS
--status Show whether apps are deployed (warning! slow!)
--server=<server> Only show apps on a specific server
--type=<type> Only show apps of the given type

POWERED BY (for --status)
docker stack ls"
}
|
|
|
|
# List every app env file (optionally filtered by --server / --type) as a
# DOMAIN/TYPE/SERVER table; with --status, each server's `docker stack ls`
# output is consulted to mark apps deployed / inactive / unknown.
sub_app_list (){
  SERVER="$abra___server"
  if [ -z "$SERVER" ]; then
    SERVER='*'
  fi

  shopt -s nullglob dotglob
  # shellcheck disable=SC2206
  ENV_FILES=($ABRA_DIR/servers/$SERVER/*.env)
  shopt -u nullglob dotglob

  # non-empty STATUS means --status was given
  STATUS="$( [[ $abra___status == "true" ]] && echo "Y" )"

  if [ -n "$STATUS" ]; then
    if [ "$SERVER" = "*" ]; then
      get_servers
    else
      SERVERS=( "$SERVER" )
    fi

    local -a DEPLOYED_APPS # array
    local -a CHECKED_SERVERS # array

    warning "Loading status from ${#SERVERS[@]} server(s), patience advised.."

    # Query each server once, collecting "<server>~<stack>" entries
    for SERVER in "${SERVERS[@]}"; do
      SERVER="${SERVER##*/}" # basename
      mapfile -t SERVER_APPS < <(DOCKER_CONTEXT="$SERVER" docker stack ls --format '{{ .Name }}' 2>/dev/null)
      # add $SERVER~ to the start of each DEPLOYED_APPS
      DEPLOYED_APPS+=("${SERVER_APPS[@]/#/$SERVER~}")
    done
  fi

  # FIXME 3wc: doesn't take into account --type filtering
  printf "%s lovely apps:\n\n" "${#ENV_FILES[@]}"

  for i in "${!ENV_FILES[@]}"; do
    # Output header inside the loop, so it's included in the pipe to `column`
    if [ "$i" == 0 ]; then
      printf " DOMAIN\tTYPE\tSERVER%s%s\n" "${STATUS:+	}" "${STATUS:+STATUS}"
      printf " --\t--\t--%s\n" "${STATUS:+	--}"
    fi

    local ENV_FILE="${ENV_FILES[$i]}" APP_STACK_NAME

    IFS='/' read -r -a PARTS <<< "$ENV_FILE"

    FILE="${PARTS[-1]}"
    SERVER="${PARTS[-2]}"
    DOMAIN="${FILE%.env}"

    # Read TYPE and STACK_NAME from the env file without polluting this shell
    set -a
    # shellcheck disable=SC1090
    TYPE="$(source "$ENV_FILE" && echo "$TYPE")"
    # shellcheck disable=SC1090
    APP_STACK_NAME="$(source "$ENV_FILE" && echo "$STACK_NAME")"
    set +a

    if [ "$abra___type" != "" ] && [ "$abra___type" != "$TYPE" ]; then
      continue
    fi

    # Fall back to the domain-derived stack name when not set explicitly
    if [ -z "$APP_STACK_NAME" ]; then
      APP_STACK_NAME="${DOMAIN//./_}"
    fi
    if [ -n "$STATUS" ]; then
      APP_STATUS=$( printf '%s\n' "${DEPLOYED_APPS[@]}" | grep -qP "^${SERVER}~${APP_STACK_NAME}$" && echo "deployed" || echo "inactive")
      # "inactive" on an unreachable/unknown context really means "unknown"
      if [[ "$APP_STATUS" == "inactive" ]] ; then
        if [[ ${CHECKED_SERVERS[*]} =~ ${SERVER} ]]; then
          APP_STATUS="unknown"
        else
          if ! docker context inspect "$SERVER" > /dev/null 2>&1; then
            APP_STATUS="unknown"
          fi
          CHECKED_SERVERS+=("$SERVER")
        fi
      fi
    fi
    printf " %s\t%s\t%s%s\n" "$DOMAIN" "$TYPE" "$SERVER" "${STATUS:+	}${APP_STATUS}"
  done | column -s'	' -t
  # Align table `-t` based on tab characters -s`^V<Tab>`
}
|
|
|
|
###### .. app new
|
|
# Usage text for `abra app new`.
help_app_new (){
  echo "abra [options] app new [--app-name=<app_name>] [--server=<server>] [--domain=<domain>] [--pass] [--secrets] <type>

Create a new app of <type> (e.g. wordpress or custom-html).

OPTIONS
--server=<server> Specify which server to use (default: prompt)
--domain=<domain> Set the domain name (default: prompt)
--app-name=<app-name> Set the app name (default: prompt)
--secrets Auto-generate secrets (default: no)
--pass Store generated secrets in pass (default: no)"
}
|
|
|
|
# Create a new app instance of type $abra__type_: pick a server, domain and
# app name (prompting for anything not given as an option), copy the
# recipe's .env.sample into place, substitute the domain, and optionally
# auto-generate the secrets.
sub_app_new (){
  shopt -s extglob

  require_abra_dir
  get_servers
  require_apps_json

  # decentral1se: we are overloading the use of the word "app" in the
  # command-line interface to mean two things -- in the code, we differentiate
  # between them as $APP ("an instance of an app") and $TYPE ("a kind of app")
  TYPE=$abra__type_

  SERVER=$abra___server
  DOMAIN=$abra___domain
  APP_NAME=$abra___app_name

  get_recipe_versions "$TYPE"
  get_recipe_version_latest

  require_app_version "$TYPE" "$VERSION"

  # No --server: interactive selection from the known servers
  if [ -z "$SERVER" ]; then
    echo "Where would you like to put $TYPE?"

    select SERVER_ITEM in "${SERVERS[@]##*/}"; do
      if [ 1 -le "$REPLY" ] && [ "$REPLY" -le ${#SERVERS[@]} ]; then
        SERVER="$SERVER_ITEM"
        success "Selected server ${SERVER}"
        break
      fi
    done
  fi
  SERVER="$ABRA_DIR/servers/$SERVER"

  if [ ! -d "$SERVER" ]; then
    error "Server '$SERVER' not found"
  fi

  APP_DIR="$ABRA_DIR/apps/$TYPE"

  if [ -z "$DOMAIN" ]; then
    read -rp "Domain name: " DOMAIN
  fi

  if [ -z "$APP_NAME" ]; then
    # e.g.:
    # TYPE=custom-html, DOMAIN=foo.bar-baz.com
    # -> custom_html_foo_bar_baz_com
    # NOTE(review): ${TYPE/-/_} only replaces the FIRST hyphen; a type with
    # two hyphens keeps the second one — confirm whether //-/_ was intended.
    DEFAULT_NAME="${TYPE/-/_}_${DOMAIN//+([.-])/_}"
    # truncate to 45 chars (see below)
    DEFAULT_NAME="${DEFAULT_NAME:0:45}"
    # and remove trailing _
    DEFAULT_NAME="${DEFAULT_NAME%%_}"
    read -rp "App name [$DEFAULT_NAME]: " APP_NAME
    if [ -z "$APP_NAME" ]; then
      APP_NAME="$DEFAULT_NAME"
    fi
  fi

  if [ ${#APP_NAME} -gt 45 ]; then
    # 3wc: Docker won't create secret names > 64 characters -- setting a
    # 45-character limit here is enough for all our secrets so far.
    error "$APP_NAME cannot be longer than 45 characters in length"
  fi

  ENV_FILE="$SERVER/$APP_NAME.env"

  if [ -f "$ENV_FILE" ]; then
    error "$ENV_FILE already exists"
  fi

  # Instantiate the sample config, pointing it at the chosen domain
  cp "$APP_DIR/.env.sample" "$ENV_FILE"
  sed -i "s/$TYPE\.example\.com/$DOMAIN/g" "$ENV_FILE"
  sed -i "s/example\.com/$DOMAIN/g" "$ENV_FILE"

  abra__app_="$APP_NAME"

  get_app_secrets

  if [ "$abra___secrets" == "true" ]; then
    if [ "${#PASSWORDS[@]}" -eq 0 ]; then
      warning "--secrets provided but no secrets found"
    fi
    load_instance
    load_instance_env
    auto_gen_secrets
  fi

  echo "$(tput setaf 4)Your new '$TYPE' has been created!$(tput sgr0)"
  echo " $(tput setaf 3)Please customise the configuration defaults:"
  echo " abra app $APP_NAME config$(tput sgr0)"
  echo " $(tput setaf 2)Then you can deploy it:"
  echo " abra app $APP_NAME deploy$(tput sgr0)"
}
|
|
|
|
###### .. app backup
|
|
# Dispatch to the recipe-provided backup hook: `abra_backup` for --all,
# or `abra_backup_<service>` for a single service.
sub_app_backup (){
  # Add _<service> if it's defined
  FUNCTION="abra_backup${abra__service_:+_}$abra__service_"

  if ! type "$FUNCTION" > /dev/null 2>&1; then
    error "'$TYPE' doesn't know how to do ${abra__service_}${abra__service_:+ }backups."\
      "See $GIT_URL$TYPE/issues/"
  fi

  # Fix: honour the configurable $ABRA_BACKUP_DIR — this previously
  # hard-coded $ABRA_DIR/backups, which the _abra_backup_* helpers
  # (writing to $ABRA_BACKUP_DIR) don't necessarily use.
  mkdir -p "$ABRA_BACKUP_DIR"

  $FUNCTION
}
|
|
|
|
###### .. app restore
|
|
# Dispatch to the recipe-provided `abra_restore_<service>` hook, forwarding
# the optional backup file argument.
sub_app_restore (){
  FUNCTION="abra_restore_$abra__service_"

  type "$FUNCTION" > /dev/null 2>&1 ||
    error "'$TYPE' doesn't know how to restore '${abra__service_}' backups."\
      "See $GIT_URL$TYPE/issues/"

  $FUNCTION "$abra__backup_file_"
}
|
|
|
|
###### backup utility functions
|
|
|
|
# Usage: _abra_backup_dir service:/path/to/src
|
|
# Usage: _abra_backup_dir service:/path/to/src
# Stream a copy of the given service path (via sub_app_cp writing to
# stdout) into a dated .tar.gz under $ABRA_BACKUP_DIR.
_abra_backup_dir() {
  {
    # set up the globals sub_app_cp reads; dst "-" means "write to stdout"
    abra__src_="$1"
    abra__dst_="-"
  }

  # shellcheck disable=SC2154
  FILENAME="$ABRA_BACKUP_DIR/${abra__app_}_$(basename "$1")_$(date +%F).tar.gz"

  debug "Copying '$1' to '$FILENAME'"

  silence
  sub_app_cp | gzip > "$FILENAME"
  success "Backed up '$1' to $FILENAME"
  unsilence
}
|
|
|
|
# Shared set-up for database backups: read the DB password out of the
# service's Docker secret and compute the dated .sql.gz output filename.
# Arguments: $1 - service name; $4 - secret name (default db_root_password)
_abra_backup_db_prep() {
  # shellcheck disable=SC2034
  abra__service_="$1"
  # 3wc: necessary because $abra__service_ won't be set if we're coming from
  # `abra_backup`, i.e. `abra app ... backup --all`

  # What's the name of the Docker secret? Default to db_root_password
  DB_PASSWORD_NAME=${4:-db_root_password}

  debug "Looking up secret '$DB_PASSWORD_NAME'"
  silence
  DB_PASSWORD="$(sub_app_run cat "/run/secrets/$DB_PASSWORD_NAME")"
  unsilence

  # 3wc: strip newline \r from variable
  DB_PASSWORD="${DB_PASSWORD//$'\015'}"

  # shellcheck disable=SC2154
  FILENAME="$ABRA_BACKUP_DIR/${abra__app_}_$(date +%F).sql.gz"
}
|
|
|
|
# usage: _abra_backup_postgres <service> <database> [<user> <secret name>]
|
|
# usage: _abra_backup_postgres <service> <database> [<user> <secret name>]
# Dump a PostgreSQL database from the service container into a dated
# .sql.gz under $ABRA_BACKUP_DIR.
# NOTE(review): PGPASSWORD is assigned but not exported before sub_app_run;
# confirm pg_dump inside the container actually receives it.
_abra_backup_postgres() {
  _abra_backup_db_prep "$@"

  debug "Running pg_dump to '$FILENAME'"

  silence
  # shellcheck disable=SC2034
  PGPASSWORD="$DB_PASSWORD"
  sub_app_run pg_dump -U "${3:-postgres}" "$2" | gzip > "$FILENAME"
  unsilence

  success "Backed up '$abra__service_:$2' to '$FILENAME'"
}
|
|
|
|
# usage: _abra_backup_mysql <service> <database> [<user> <secret name>]
# Dump a MySQL database (as root, password from the Docker secret) into a
# dated .sql.gz under $ABRA_BACKUP_DIR.
_abra_backup_mysql() {
  _abra_backup_db_prep "$@"

  silence
  # shellcheck disable=SC2086
  sub_app_run mysqldump -u root -p"${DB_PASSWORD}" "$2" | gzip > "$FILENAME"
  unsilence

  success "Backed up '$abra__service_:$2' to $FILENAME"
}
|
|
|
|
###### .. app deploy
|
|
# Usage text for `abra app <app> deploy`.
help_app_deploy (){
  echo "abra [options] app <app> deploy [--update] [--force] [--fast]

Deploy app <app> to the configured server.

OPTIONS
--update Consent to deploying an updated app version
--force Force a deployment regardless of state
--fast Run deployment without various safety checks
--chaos Deploy straight from latest Git version (potentially chaotic!)

POWERED BY
docker stack deploy -c compose.yml <app>"
}
|
|
|
|
# Deploy the app's stack: resolve the recipe version (explicit <version>,
# "dev", or latest), print a deployment overview, confirm, run
# `docker stack deploy`, then (unless --fast) poll health and the domain.
sub_app_deploy (){
  require_yq

  # --fast skips the version diff, the healthcheck poll and the domain poll
  if [ "$abra___fast" = "true" ]; then
    SKIP_VERSION_CHECK=true
    NO_DOMAIN_POLL=true
    NO_STATE_POLL=true
  else
    SKIP_VERSION_CHECK=false
    NO_DOMAIN_POLL=false
    NO_STATE_POLL=false
  fi

  # "dev" bypasses version resolution entirely
  if [ ! "$abra__version_" = "dev" ]; then
    get_recipe_versions "$TYPE"

    if [ -n "$abra__version_" ]; then
      VERSION="$abra__version_"
      # NUL-delimited exact match against the known version list
      if ! printf '%s\0' "${RECIPE_VERSIONS[@]}" | grep -Fqxz -- "$VERSION"; then
        error "'$VERSION' doesn't appear to be a valid version of $TYPE"
      fi
      info "Chose version $VERSION"
    else
      get_recipe_version_latest
    fi
  fi

  require_app_version "$TYPE" "$VERSION"

  echo "Deployment overview:"
  echo " Server: $(tput setaf 4)${SERVER}$(tput sgr0)"

  # Single compose file vs colon-separated list of files
  if [ "${COMPOSE_FILE/:/}" == "${COMPOSE_FILE}" ]; then
    echo " Compose: $(tput setaf 3)${APP_DIR}/${COMPOSE_FILE}$(tput sgr0)"
  else
    echo " Compose: $(tput setaf 3)${APP_DIR}/"
    IFS=':' read -ra COMPOSE_FILES <<< "$COMPOSE_FILE"
    for COMPOSE in "${COMPOSE_FILES[@]}"; do
      echo " - ${COMPOSE}"
    done
    tput sgr0
  fi

  if [ -n "$DOMAIN" ]; then
    echo " Domain: $(tput setaf 2)${DOMAIN}$(tput sgr0)"
  fi

  echo " Stack: $(tput setaf 3)${STACK_NAME}$(tput sgr0)"

  if [ "$SKIP_VERSION_CHECK" = "false" ]; then
    output_version_summary
  fi

  prompt_confirm

  APP=$(basename "$APP_DIR")

  (
    (cd "$APP_DIR" || error "\$APP_DIR '$APP_DIR' not found")
    # Expand "a.yml:b.yml" into "-c a.yml -c b.yml"
    # shellcheck disable=SC2086
    if (cd "$APP_DIR" && docker stack deploy -c ${COMPOSE_FILE//:/ -c } "$STACK_NAME"); then
      if [ "$abra___fast" = "true" ]; then
        success "Something happened! Hope it was good 🙏"
        exit 0
      fi
      if [[ $NO_STATE_POLL == "false" ]]; then
        ensure_stack_deployed "$STACK_NAME"
      fi
      if [ -n "$DOMAIN" ]; then
        if [[ $NO_DOMAIN_POLL == "false" ]]; then
          ensure_domain_deployed "https://${DOMAIN}"
        fi
        success "Yay! App should be available at https://${DOMAIN}"
      else
        success "Yay! That worked. No \$DOMAIN defined, check status by running \"abra app ${STACK_NAME} ps\""
      fi
    else
      error "Oh no! Something went wrong 😕 Check errors above"
    fi
  )
}
|
|
|
|
###### .. app <app> undeploy
|
|
# Usage text for `abra app <app> undeploy`.
help_app_undeploy (){
  echo "abra [options] app <app> undeploy

Opposite of \`app <app> deploy\`; deactivate an app without deleting anything. If
you want to completely delete an app, then you're looking for \`app <app> rm\`.

POWERED BY
docker stack rm <app>"
}
|
|
|
|
# Remove the app's stack from the swarm; configuration, volumes and
# secrets are all kept (see `app <app> rm` for a hard delete).
sub_app_undeploy (){
  warning "About to un-deploy $STACK_NAME from $SERVER"

  # Fix: anchor the match (-x). A plain substring grep would consider the
  # stack deployed whenever another stack's name merely contains it.
  if ! docker stack ls --format "{{ .Name }}" | grep -qx "$STACK_NAME"; then
    error "$STACK_NAME is already undeployed, nothing to do"
  fi

  docker stack rm "$STACK_NAME"
}
|
|
|
|
###### .. app config
|
|
# Usage text for `abra app <app> config`.
help_app_config (){
  echo "abra [options] app <app> config

Open the app configuration in \$EDITOR."
}
|
|
|
|
# Open the app's env file in $EDITOR, interactively choosing an available
# editor when $EDITOR is unset.
sub_app_config (){
  if [ -z "$EDITOR" ]; then
    warning "\$EDITOR not set; which text editor would you like to use?"

    EDITORS_ALL=(vi vim nano pico emacs)
    declare -a EDITORS_AVAILABLE

    # Offer only the editors actually installed on this machine
    for EDITOR in "${EDITORS_ALL[@]}"; do
      if type "$EDITOR" > /dev/null 2>&1; then
        EDITORS_AVAILABLE+=("$EDITOR")
      fi
    done

    if [ ${#EDITORS_AVAILABLE[@]} = 0 ]; then
      error "No text editors found! Are you using a magnetised needle? 🤪"
    fi

    select EDITOR in "${EDITORS_AVAILABLE[@]}"; do
      if [ 1 -le "$REPLY" ] && [ "$REPLY" -le ${#EDITORS_AVAILABLE[@]} ]; then
        # Fix: removed a stray `SERVER="$EDITOR"` here (copy-paste leftover
        # from the server-selection prompt) which clobbered the global
        # $SERVER; `select` already assigned EDITOR.
        success "Using '${EDITOR}'; Add 'export EDITOR=${EDITOR}' to your ~/.bashrc to set as default"
        break
      fi
    done
  fi

  $EDITOR "$ENV_FILE"
}
|
|
|
|
###### .. app version
|
|
# Usage text for `abra app <app> version`.
help_app_version (){
  echo "abra [options] app <app> version

Show versions of the app that are currently deployed"
}
|
|
|
|
# Report deployed versions only: IS_VERSION_CHECK makes
# output_version_summary print the report and exit instead of proceeding
# towards a deploy.
sub_app_version (){
  require_yq

  IS_VERSION_CHECK="true"

  printf '%s\n' "Version overview:"
  output_version_summary
}
|
|
|
|
###### .. app check
|
|
# Usage text for `abra app <app> check`.
help_app_check (){
  echo "abra [options] app <app> check

Make sure that all an app's required variables are set."
}
|
|
|
|
# Compare variable names in the instance env file against the recipe's
# .env.sample and error on any sample variable the instance is missing.
# Skipped entirely with --skip-check.
# NOTE(review): `cut -d' ' -f2` implies env lines look like "export VAR=..."
# — confirm against the actual env-file format.
sub_app_check (){
  if [ "$abra___skip_check" = "true" ]; then
    return 0
  fi

  APP_ENV=$(grep -v '^#' "$ENV_FILE" | cut -d' ' -f2 | cut -d'=' -f1 | sort -u)
  STACK_ENV=$(grep -v '^#' "$APP_DIR/.env.sample" | cut -d' ' -f2 | cut -d'=' -f1 | sort -u)

  debug "APP_ENV: $APP_ENV"

  debug "STACK_ENV: $STACK_ENV"

  # comm -23 keeps only lines unique to STACK_ENV, i.e. variables present
  # in the sample but absent from the instance env file
  MISSING_VARS=$(comm -23 <(echo "$STACK_ENV") <(echo "$APP_ENV"))

  if [ -z "$MISSING_VARS" ]; then
    success "Yay! All the necessary basic variables are defined"
    return 0
  fi

  error "Found missing variables: $MISSING_VARS"
}
|
|
|
|
###### .. app ps
|
|
# Usage text for `abra app <app> ps`.
help_app_ps (){
  echo "abra [options] app <app> ps

Show <app>'s running containers.

POWERED BY
docker stack ps <app>"
}
|
|
|
|
# Thin wrapper: show the app stack's tasks via `docker stack ps`.
sub_app_ps (){
  docker stack ps "${STACK_NAME}"
}
|
|
|
|
###### .. app delete
|
|
# `app rm` shares its help text with `app delete`.
help_app_rm (){
  help_app_delete
}
|
|
|
|
# `app rm` is an alias for `app delete`.
sub_app_rm (){
  sub_app_delete
}
|
|
|
|
# Usage text for `abra app <app> (rm|delete)`.
help_app_delete (){
  echo "abra [options] app <app> (rm|delete)

Delete <app> completely (\"hard delete\"). All local configuration,
volumes and secrets can be removed with this command.

OPTIONS
--volumes Delete all storage volumes
--secrets Delete all secrets

POWERED BY
docker volume ls / docker volume rm
docker secret ls / docker secret rm
"
}
|
|
|
|
# Hard-delete the app: remove its env file and optionally (--volumes /
# --secrets) its storage volumes and Docker secrets, confirming each
# destructive step.
sub_app_delete (){
  warning "About to delete $ENV_FILE"
  prompt_confirm

  rm "$ENV_FILE"

  if [ "$abra___volumes" = "true" ]; then
    # --all makes sub_app_volume_delete match every stack volume
    abra___all="true"
    sub_app_volume_delete
  fi

  if [ "$abra___secrets" = "true" ]; then
    secrets="$(docker secret ls --filter "name=${STACK_NAME}" --quiet)"

    # shellcheck disable=SC2086
    warning "SCARY: About to remove all secrets associated with ${STACK_NAME}: $(echo $secrets | tr -d '\n')"

    prompt_confirm

    # Fix: $secrets is a newline-separated list of IDs; quoting the whole
    # list used to pass it to `docker secret rm` as a single bogus
    # argument. Remove them one by one instead. (Also dropped a duplicated
    # nested check of the same --secrets condition.)
    for secret in $secrets; do
      docker secret rm "$secret"
    done
  fi
}
|
|
|
|
###### .. app secret insert
|
|
# Usage text for `abra app <app> secret insert`.
help_app_secret_insert (){
  echo "abra [options] app <app> secret insert <secret> <version> <data> [--pass]

Store <data> as a Docker secret called <secret>_<version>.

OPTIONS
--pass Save the secret in \`pass\` as well

POWERED BY
docker secret insert"
}
|
|
# Create the Docker secret ${STACK_NAME}_<secret>_<version> with the given
# data, optionally mirroring the value into `pass`.
# Globals (read): abra__secret_, abra__version_, abra__data_, abra___pass,
#                 STACK_NAME, DOCKER_CONTEXT
sub_app_secret_insert() {
  SECRET="$abra__secret_"
  VERSION="$abra__version_"
  PW="$abra__data_"
  STORE_WITH_PASS="$abra___pass"

  if [ -z "$SECRET" ] || [ -z "$VERSION" ] || [ -z "$PW" ]; then
    error "Required arguments missing"
  fi

  # Fix (SC2059): the secret data was previously used as the printf FORMAT
  # string, which corrupted any password containing '%' or backslashes;
  # pass it through '%s' instead. Exit if the secret wasn't created.
  if ! printf '%s' "$PW" | docker secret create "${STACK_NAME}_${SECRET}_${VERSION}" - > /dev/null; then
    exit 1
  fi

  if [ "$STORE_WITH_PASS" == "true" ] && type pass > /dev/null 2>&1; then
    echo "$PW" | pass insert "hosts/$DOCKER_CONTEXT/${STACK_NAME}/${SECRET}" -m > /dev/null
    success "pass: hosts/$DOCKER_CONTEXT/${STACK_NAME}/${SECRET}"
  fi
}
|
|
|
|
###### .. app secret delete
|
|
# `secret rm` shares its help text with `secret delete`.
help_app_secret_rm (){
  help_app_secret_delete
}
|
|
|
|
# `secret rm` is an alias for `secret delete`.
sub_app_secret_rm(){
  sub_app_secret_delete
}
|
|
|
|
# Usage text for `abra app <app> secret (delete|rm)`.
help_app_secret_delete (){
  echo "abra [options] app <app> secret (delete|rm) (<secret>|--all) [--pass]

Remove <app>'s Docker secret <secret>.

OPTIONS
--pass Remove secret(s) from \`pass\` as well
--all Delete all secrets for <app>

POWERED BY
docker secret rm
docker secret ls (for --all)"
}
|
|
|
|
# Delete the app's Docker secret(s), and optionally the matching `pass`
# entries. With --all, $abra__secret_ is blank so the name filter matches
# every secret belonging to the stack.
sub_app_secret_delete(){
  # if --all is provided then $abra__secret_ will be blank and this will work
  # auto-magically
  NAMES=$(docker secret ls --filter "name=${STACK_NAME}_${abra__secret_}" --format "{{.Name}}")

  if [ -z "$NAMES" ]; then
    error "Could not find any secrets under ${STACK_NAME}_${abra__secret_}"
  fi

  warning "About to delete $(echo "$NAMES" | paste -d "")"
  prompt_confirm

  for NAME in ${NAMES}; do
    docker secret rm "$NAME" > /dev/null

    # as above, no need to test for --all, cos if abra__secret_ is blank it'll
    # Just Work anyway
    if [ "$abra___pass" == "true" ] && type pass > /dev/null 2>&1; then
      pass rm -r "hosts/$DOCKER_CONTEXT/${STACK_NAME}/${abra__secret_}" > /dev/null \
        && success "pass rm'd: hosts/$DOCKER_CONTEXT/${STACK_NAME}/${abra__secret_}"
    fi
  done
}
|
|
|
|
###### .. app secret generate
|
|
# Usage text for `abra app <app> secret generate`.
help_app_secret_generate (){
  echo "abra [options] app <app> secret generate (<secret> <version>|--all) [<cmd>] [--pass]

Generate <secret>_<version> for <app> and store as a Docker secret.

OPTIONS
<secret> Generate a single secret
<version> Specify secret version (for single secret)
--all Auto-generate all secrets
<cmd> Run <cmd> to generate secret (default: pwqgen)
--pass Save generated secrets in \`pass\`

POWERED BY
docker secret insert"
}
|
|
|
|
# Generate one secret (or, with --all, every secret via auto_gen_secrets)
# and insert it as a Docker secret. A length annotation selects the
# fixed-length generator, otherwise a passphrase is produced.
# NOTE(review): the debug line below prints the uppercase globals
# $SECRET/$VERSION rather than the locals parsed here — confirm intent.
sub_app_secret_generate(){
  local secret="$abra__secret_"
  local version="$abra__version_"
  local length="$abra__length_"
  local msg_already_outputted=${msg_already_outputted:-"false"}

  # Show the "save your passwords" warning only once per run
  if [ "$msg_already_outputted" == "false" ]; then
    warning "These generated secrets are now stored as encrypted data on your server"
    warning "Please take a moment to make sure you have saved a copy of the passwords"
    warning "Abra is not able to show the password values in plain text again"
    warning "See https://docs.cloud.autonomic.zone/secrets/ for more on secrets"
    msg_already_outputted="true"
  fi

  if [ "$abra___all" == "true" ]; then
    # Note(decentral1se): we need to reset the flag here to avoid the infinite
    # recursion of auto_gen_secrets which calls this function itself
    abra___all="false"

    auto_gen_secrets
    return
  fi

  # length given -> fixed-length password; otherwise a word-based passphrase
  if [[ -n "$length" ]]; then
    abra__cmd_="pwgen_native $length"
  else
    abra__cmd_=pwqgen_native
  fi

  PWGEN=${abra__cmd_}
  debug "SECRET: $SECRET, VERSION $VERSION, PW $PWGEN, ALL $abra___all"

  if [ -z "$secret" ] || [ -z "$version" ] && [ "$abra___all" == "false" ]; then
    error "Required arguments missing"
  fi

  PW=$($PWGEN)

  success "Password: $PW"

  # TODO 3wc: this is a little janky, might be better to make a
  # util_secret_insert function which this and sub_secret_insert can call
  abra__data_="$PW"

  sub_app_secret_insert
}
|
|
|
|
###### .. app volume
|
|
|
|
###### .. app volume ls
|
|
# Alias: `volume ls` shares its help text with `volume list`.
help_app_volume_ls (){
  help_app_volume_list
}
|
|
|
|
# Alias: `volume ls` delegates to `volume list`.
sub_app_volume_ls(){
  sub_app_volume_list
}
|
|
|
|
help_app_volume_list() {
  # Usage text for `abra app <app> volume (ls|list)`.
  cat <<'EOF'
abra [options] app <app> volume (ls|list)

Show all volumes associated with <app>.

POWERED BY
  docker volume ls
EOF
}
|
|
|
|
sub_app_volume_list(){
  # List every Docker volume whose name starts with this app's stack name.
  docker volume ls --filter "name=${STACK_NAME}"
}
|
|
|
|
###### .. app volume rm
|
|
# Alias: `volume rm` shares its help text with `volume delete`.
help_app_volume_rm (){
  help_app_volume_delete
}
|
|
|
|
# Alias: `volume rm` delegates to `volume delete`.
sub_app_volume_rm(){
  sub_app_volume_delete
}
|
|
|
|
help_app_volume_delete() {
  # Usage text for `abra app <app> volume (delete|rm)`.
  cat <<'EOF'
abra [options] app <app> volume (delete|rm) (<volume>|--all)

Remove <app>'s Docker volume <volume>, or all volumes with --all.

OPTIONS
  --pass  Remove volume(s) from `pass` as well
  --all   Delete all volumes for <app>

POWERED BY
  docker volume rm
  docker volume ls # for --all
EOF
}
|
|
|
|
# Delete one volume (or, with --all, every volume) belonging to this stack,
# after a confirmation prompt.
sub_app_volume_delete(){
  # if --all is provided then $abra__volume_ will be blank and this will work
  # auto-magically
  NAMES=$(docker volume ls --filter "name=${STACK_NAME}_${abra__volume_}" --format "{{.Name}}")

  if [ -z "$NAMES" ]; then
    error "Could not find any volumes under ${STACK_NAME}_${abra__volume_}"
  fi

  # Join the one-name-per-line list into a single space-separated list for the
  # prompt. Fixed: the previous `paste -d ""` (no -s) was a no-op and left the
  # names on separate lines.
  warning "About to delete volume(s) $(echo "$NAMES" | paste -sd " " -)"
  prompt_confirm

  # $NAMES is intentionally unquoted: iterate one volume name per word.
  for NAME in ${NAMES}; do
    docker volume rm "$NAME" > /dev/null
  done
}
|
|
|
|
###### .. app run
|
|
help_app_run() {
  # Usage text for `abra app <app> run`.
  cat <<'EOF'
abra [options] app <app> run [--no-tty] [--user=<user>] <service> <args>...

Run <args>... (often something like 'bash' or 'sh') in <app>'s <service>
container.

OPTIONS
  --no-tty       Don't allocate a TTY; sometimes running `mysql` enjoys this
  --user=<user>  Run as the UNIX user <user>, e.g. for running Wordpress-CLI
                 as www-data

EXAMPLES
  abra wordpress_foo_bar run app bash

POWERED BY
  CONTAINER_ID=$(docker container ls -f ...)
  docker exec $CONTAINER_ID ...
EOF
}
|
|
|
|
# Exec a command ($@) inside the first running container of <service>.
sub_app_run(){
  # Fixed: RUN_USER is a global; reset it so a --user value from a previous
  # invocation in the same shell can't leak into this run.
  RUN_USER=""
  if [ -n "$abra___user" ]; then
    RUN_USER="-u $abra___user"
  fi

  # -i only when --no-tty, otherwise allocate a pseudo-TTY too.
  if [ "$abra___no_tty" = "true" ]; then
    ARGS="-i"
  else
    ARGS="-it"
  fi

  # Resolve the first container whose name matches <stack>_<service>.
  CONTAINER=$(docker container ls --format "table {{.ID}},{{.Names}}" \
    | grep "${STACK_NAME}_${abra__service_}" | head -n1 | cut -d',' -f1)

  if [ -z "$CONTAINER" ]; then
    error "Can't find a container for ${STACK_NAME}_${abra__service_}"
    exit
  fi

  debug "Using container ID ${CONTAINER}"

  # 3wc: we want the "splitting" that shellcheck warns us about, so that -u and
  # $RUN_USER aren't treated as a single argument:
  # shellcheck disable=SC2086
  docker exec $RUN_USER $ARGS "$CONTAINER" "$@"

  return
}
|
|
|
|
###### .. app rollback
|
|
help_app_rollback() {
  # Usage text for `abra app <app> rollback`.
  cat <<'EOF'
abra [options] app <app> rollback [<version>]

Roll back a deployed app to a previous version.

You can specify a particular <version>; see `abra recipe <recipe> version` for
the list of options.

Otherwise, we'll roll back to the second-most-recent available version.

EXAMPLES
  abra app wordpress rollback

POWERED BY
  abra app <app> deploy <version> --update
EOF
}
|
|
|
|
# Roll back to <version>, or to the release before the latest one when no
# version is given. Delegates the actual work to `app deploy --update`.
sub_app_rollback(){
  version="${abra__version_}"

  # Populates the RECIPE_VERSIONS array (ordered oldest -> newest).
  get_recipe_versions "$TYPE"
  if [ "${#RECIPE_VERSIONS[@]}" -lt 2 ]; then
    error "Can't roll back; need 2 versions, ${#RECIPE_VERSIONS[@]} available"
  fi

  if [ -z "$version" ]; then
    # Second-to-last entry, i.e. the release just before the latest.
    version="${RECIPE_VERSIONS[-2]}"
    info "Guessed version $version"
  fi

  # FIXME 3wc: check if $version is actually older than what's deployed

  # Hand over to `abra app <app> deploy <version> --update`.
  abra__version_="$version"
  abra___update="true"

  sub_app_deploy
}
|
|
|
|
###### .. app logs
|
|
help_app_logs() {
  # Usage text for `abra app <app> logs`.
  cat <<'EOF'
abra [options] app <app> logs [<service>]

Show logs for <app>.

OPTIONS
  <service>  Only show logs for a specific service (default: combine all
             services)

EXAMPLES
  abra wordpress_foo_bar logs app

POWERED BY
  docker service logs
EOF
}
|
|
|
|
# Tail logs for one service, or for the whole stack when no service is given.
sub_app_logs (){
  SERVICE="${abra__service_}"

  if [ -z "$SERVICE" ]; then
    # No service specified: stream combined logs for every stack service.
    stack_logs "${STACK_NAME}"
    return
  fi

  # Drop <service> so "$@" holds only extra `docker service logs` flags.
  shift

  if [ $# -eq 0 ]; then
    # No extra flags: use sensible live-tail defaults.
    LOGS_ARGS="\
    --follow \
    --tail 20 \
    --no-trunc \
    --details \
    --timestamps"
  else
    # shellcheck disable=SC2124
    LOGS_ARGS=$@
  fi

  # LOGS_ARGS intentionally unquoted so multiple flags split into words.
  # shellcheck disable=SC2086
  docker service logs "${STACK_NAME}_${SERVICE}" $LOGS_ARGS
}
|
|
|
|
###### .. app cp
|
|
help_app_cp() {
  # Usage text for `abra app <app> cp`.
  cat <<'EOF'
abra [options] app <app> cp <src> <dst>

Copy files to or from a running container.

One of <src> or <dst> must have the format <service>:<path>.

Copying multiple files is possible using `tar`, see EXAMPLES.

If <dst> is a file then it will be over-written, if it is a folder then <src>
will be copied into it.

EXAMPLES
  abra app customhtml_foo_bar_com cp index.html app:/usr/share/nginx/html/
  tar cf - wp-content | abra app wordpress_bar_bat_com cp - app:/var/www/html/

POWERED BY
  CONTAINER_ID=$(docker container ls -f ...)
  docker cp $CONTAINER_ID:<src> <dst>
  docker cp $CONTAINER_ID:<dst> <src>
EOF
}
|
|
|
|
# Copy files between the host and a running service container, wrapping
# `docker cp`. One of <src>/<dst> must be "<service>:<path>".
sub_app_cp() {
  SOURCE="${abra__src_}"
  DEST="${abra__dst_}"

  # Get the service name from either SOURCE or DEST
  SERVICE=$(echo "$SOURCE" | grep -o '^[^:]\+:' || echo "$DEST" | grep -o '^[^:]\+:')
  SERVICE=$(echo "$SERVICE" | tr -d ':')

  if [ -z "$SERVICE" ]; then
    echo "Usage: $PROGRAM_NAME cp SERVICE:SRC_PATH DEST_PATH"
    echo "       $PROGRAM_NAME cp SRC_PATH SERVICE:DEST_PATH"
    echo ""
    error "Can't find SERVICE in either SRC or DEST"
  fi

  # Fixed: pick only the FIRST matching container (consistent with
  # sub_app_run); a multi-replica service previously produced a multi-line
  # $CONTAINER and a broken `docker cp` invocation.
  CONTAINER=$(docker container ls --format "table {{.ID}},{{.Names}}" \
    | grep "${STACK_NAME}_${SERVICE}" | head -n1 | cut -d',' -f1)

  if [ -z "$CONTAINER" ]; then
    error "Can't find a container for ${STACK_NAME}_${SERVICE}"
    exit
  fi

  debug "Using container ID ${CONTAINER}"

  # Replace $SERVICE with $CONTAINER in the original args
  CP_ARGS=$(echo "$SOURCE $DEST" | sed "s/$SERVICE:/$CONTAINER:/")
  # FIXME 3wc: this might cause problems for filenames with spaces..

  # shellcheck disable=SC2086
  docker cp ${CP_ARGS}
}
|
|
|
|
#######################################
|
|
# abra recipe ..
|
|
#######################################
|
|
|
|
###### .. recipe create
|
|
help_recipe_create() {
  # Usage text for `abra recipe create`.
  printf '%s\n' "abra [options] recipe create <recipe>

Create a new application recipe called <recipe>."
}
|
|
|
|
# Scaffold a new recipe by cloning the upstream "example" template and
# filling in its placeholders.
sub_recipe_create() {
  recipe="$abra__recipe_"
  # NOTE(review): despite the "kebab" name this converts hyphens to
  # underscores (snake_case); presumably what the template's
  # ${REPO_NAME_KEBAB} placeholder expects — TODO confirm against the
  # example repo.
  recipe_kebab="${recipe//-/_}"

  recipe_dir="$ABRA_DIR/apps/$recipe"

  if [ -d "$recipe_dir" ]; then
    error "Recipe '$recipe' already exists"
  fi

  cd "$ABRA_DIR/apps/" || error "Can't find '$ABRA_DIR/apps/'"

  git clone -q "${GIT_URL}example" "$recipe_dir"

  cd "$recipe_dir" || error "Failed to create directory '$recipe_dir'"

  # Strip the template's git history and CI config; this is a fresh recipe.
  rm -rf .git .gitea .drone.yml

  # Substitute the template placeholders in-place (GNU sed -i).
  sed -i "s/\${REPO_NAME}/$recipe/g" README.md
  sed -i "s/\${REPO_NAME_TITLE}/$recipe/g" README.md
  sed -i "s/\${REPO_NAME_KEBAB}/$recipe_kebab/g" .env.sample

  success "New recipe created in '$recipe_dir', happy hacking! 👌"
}
|
|
|
|
###### .. recipe ls
|
|
# Alias: `recipe ls` shares its help text with `recipe list`.
help_recipe_ls (){
  help_recipe_list
}
|
|
|
|
# Alias: `recipe ls` delegates to `recipe list`.
sub_recipe_ls() {
  sub_recipe_list
}
|
|
|
|
help_recipe_list() {
  # Usage text for `abra recipe (list|ls)`.
  printf '%s\n' "abra [options] recipe (list|ls)

List all available recipes."
}
|
|
|
|
# Print the recipe catalogue, one recipe per line.
sub_recipe_list() {
  # Ensure the apps.json catalogue exists, then populate the RECIPES array.
  require_apps_json
  get_recipes

  printf "%s delicious recipes:\n" "${#RECIPES[@]}"
  printf '%s\n' "${RECIPES[@]}"
}
|
|
|
|
###### .. recipe <recipe> versions
|
|
help_recipe_versions() {
  # Usage text for `abra recipe <recipe> versions`.
  # Fixed: usage line previously omitted the required <recipe> argument.
  echo "abra [options] recipe <recipe> versions

Show all available versions of <recipe>."
}
|
|
|
|
# Print every known version of <recipe> plus, per version, each service's
# image, tag and digest from the apps.json catalogue.
sub_recipe_versions() {
  require_apps_json
  get_recipe_versions "$abra__recipe_"

  # Fixed: keep user data out of the printf FORMAT string (SC2059); a recipe
  # name containing '%' previously corrupted the output.
  printf '%s thrilling versions of %s:\n' "${#RECIPE_VERSIONS[@]}" "$abra__recipe_"

  for version in "${RECIPE_VERSIONS[@]}"; do
    recipe_version_data=$($JQ -r ".\"${abra__recipe_}\".versions.\"${version}\"" "$ABRA_APPS_JSON")
    # Service names are the keys of the per-version object.
    mapfile -t services < <(echo "$recipe_version_data" | $JQ -r ". | keys | .[]" -)
    printf '%s:\n' "$version"
    for service in "${services[@]}"; do
      image=$(echo "$recipe_version_data" | $JQ -r ".$service.image" -)
      tag=$(echo "$recipe_version_data" | $JQ -r ".$service.tag" -)
      digest=$(echo "$recipe_version_data" | $JQ -r ".$service.digest" -)
      printf ' - %s (%s:%s, %s)\n' "$service" "$image" "$tag" "$digest"
    done
  done
}
|
|
|
|
###### .. recipe <recipe> release
|
|
help_recipe_release() {
  # Usage text for `abra recipe <recipe> release`.
  cat <<'EOF'
abra [options] recipe <recipe> release [--force] [--bump]

(For recipe maintainers)

Make sure the service labels and git tags for <recipe> are in sync with the
specified image tags.

Run this after you or comrade `renovate-bot` have bumped the version of
any of the images in <recipe>.

OPTIONS
  --force  Over-write existing tag; use this if you have a git tag for the
           recipe version already, to make sure labels are in sync.
  --bump   Make an n+1 release (packager specific changes to recipe)

POWERED BY
  skopeo inspect docker://image:tag
  git commit
  git tag
EOF
}
|
|
|
|
# Sync coop-cloud version labels in the compose files with the image tags,
# compute the next release version and (interactively) commit/tag/push it.
sub_recipe_release() {
  require_apps_json
  require_binary skopeo
  require_yq

  recipe="$abra__recipe_"
  force="$abra___force"
  bump="$abra___bump"
  recipe_dir="$ABRA_DIR/apps/$recipe"

  if [ "$bump" = "true" ] && [ "$force" = "true" ]; then
    error "--bump and --force don't play nicely together"
  fi

  cd "$recipe_dir" || error "Can't find recipe dir '$recipe_dir'"

  # Populates RECIPE_VERSIONS (ordered oldest -> newest).
  get_recipe_versions "$recipe"

  if [ "${#RECIPE_VERSIONS[@]}" -gt 0 ]; then
    latest_version="${RECIPE_VERSIONS[-1]}"
    if [ "$force" = "true" ]; then
      # Re-use the existing tag's annotation when re-tagging.
      latest_version_message=$(git tag -l "$latest_version" --format='%(contents)')
    else
      latest_version_message=$(git show -s --format=%s)
    fi
    info "Latest available version: '$latest_version'"
    info "Latest version message: '$latest_version_message'"
  else
    latest_version=""
    latest_version_message="Initial tagged release"
    info "No previous releases found"
    if [ "$bump" = "true" ]; then
      error "--bump can't do its work when there are no existing release versions"
    fi
  fi

  current_tag=$(git tag --points-at HEAD)
  if [ "$force" = "false" ] && [ -n "$current_tag" ] && [ "$bump" = "false" ]; then
    success "$recipe is already on $current_tag, no release needed"
  fi

  if [ "$(git rev-parse --abbrev-ref --symbolic-full-name HEAD)" = "HEAD" ]; then
    warning "It looks like $recipe_dir is in 'detached HEAD' state"
    if [ "$abra___no_prompt" = "false" ]; then
      # Fixed: the answer was previously read into $REPLY but tested via the
      # unrelated (stale/unset) $choice variable, so the prompt was ignored.
      read -rp "Check out main/master branch first? [Y/n] " choice
      if [ "${choice,,}" != "n" ]; then
        checkout_main_or_master
      fi
    else
      checkout_main_or_master
    fi
  fi

  mapfile -t extra_compose_files < <(ls -- compose.*.yml 2> /dev/null || true)

  compose_files=("compose.yml" "${extra_compose_files[@]}")

  new_version="false"

  for compose_file in "${compose_files[@]}"; do
    if [ "$bump" = "true" ]; then
      continue # skip trying to upgrade labels for --bump logic
    fi

    mapfile -t services < <($YQ e -N '.services | keys | .[]' "$compose_file" | sort -u)

    if [ "$compose_file" = "compose.yml" ] && ! printf '%s\0' "${services[@]}" | grep -Fqxz -- "app"; then
      # shellcheck disable=SC2016
      warning 'No `app` service found; which one should we use for the version number?'
      select service_item in "${services[@]##*/}"; do
        if [ 1 -le "$REPLY" ] && [ "$REPLY" -le ${#services[@]} ]; then
          main_service="$service_item"
          success "Selected ${service_item} as main service"
          break
        fi
      done
    else
      main_service="app"
    fi

    for service in "${services[@]}"; do
      # 3wc: skip the "app" service unless we're in compose.yml; this service is
      # often repeated in other compose.*.yml files to extend options, but we only
      # want to add the deploy.label in one definition
      # TODO 3wc: make this smarter, what if a separate compose file extends
      # other services too?
      if [ "$compose_file" != "compose.yml" ] && [ "$service" = "app" ]; then
        debug "Skipping '$service'"
        continue
      fi
      debug "Processing '$service'"

      service_image=$($YQ e ".services.$service.image" "$compose_file")
      service_tag="${service_image##*:}"

      if [ -n "$latest_version" ]; then
        latest_data=$($JQ ".\"$recipe\".versions.\"$latest_version\".\"$service\"" "$ABRA_APPS_JSON")
        latest_tag="$(echo "$latest_data" | $JQ -r ".tag" -)"
      fi

      if [ -z "$latest_version" ] || [ "$force" = "true" ] || [ "$service_tag" != "$latest_tag" ]; then
        if [ "$service" = "$main_service" ]; then
          new_version="$service_tag"
        fi
        info "Fetching $service_image metadata from Docker Hub"
        service_data=$(skopeo inspect "docker://$service_image")
        service_digest=$(echo "$service_data" | $JQ -r '.Digest' | cut -d':' -f2 | cut -c-8)

        label="coop-cloud.\${STACK_NAME}.$service.version=${service_tag}-${service_digest}"

        debug "Replacing version label on $service with $label"

        # delete old label, if one exists
        $YQ eval -i "del(.services.$service.deploy.labels.[] | select(. == \"coop*\"))" "$compose_file"
        # add new label
        $YQ eval -i ".services.$service.deploy.labels += [\"$label\"]" "$compose_file"
      else
        debug "no updates for '$service_image'"
      fi
    done
  done

  if [ "$new_version" = "false" ]; then
    # $main_service tag hasn't changed, just bump release
    if echo "$latest_version" | grep -q '_'; then
      latest_version_minor="${latest_version##*_}"
    else
      latest_version_minor=0
    fi
    new_version_minor="$((latest_version_minor + 1))"
    new_version="${latest_version%_*}_$new_version_minor"
  fi

  debug "Calculated new version $new_version"

  if [ -n "$latest_version" ] && [ "$force" = "false" ] && [ "$new_version" = "$latest_version" ]; then
    error "Hmm, something went wrong generating a new version number.."
  fi

  success "All compose files updated; new version is $new_version"

  if [ "$abra___no_prompt" = "false" ] && [ "$bump" = "false" ]; then
    read -rp "Commit your changes to git? [y/N]? " choice

    if [ "${choice,,}" != "y" ]; then
      return
    fi
  fi

  if [ "$abra___no_prompt" = "false" ] && [ "$bump" = "false" ]; then
    git commit -avem "Version $new_version; sync labels" || exit
  else
    git commit -am "Version $new_version; sync labels" || true
  fi

  if [ "$abra___no_prompt" = "false" ]; then
    read -rp "Tag this as \`$new_version\`? [y/N]? " choice

    if [ "${choice,,}" != "y" ]; then
      return
    fi
  fi

  if [ "$force" = "true" ]; then
    git tag -d "$new_version" || true
    git push origin --delete "$new_version" || true
    debug "Deleted local tag and remote tag if present"
  fi

  if [ "$abra___no_prompt" = "false" ]; then
    git tag -aem "$latest_version_message" "$new_version"
  else
    git tag -am "$latest_version_message" "$new_version" || true
  fi

  if [ "$abra___no_prompt" = "false" ]; then
    read -rp "Git push this new tag? [y/N]? " choice

    if [ "${choice,,}" = "y" ]; then
      git push && git push --tags
    fi
  else
    git push && git push --tags
  fi
}
|
|
|
|
#######################################
|
|
# abra server ..
|
|
#######################################
|
|
|
|
###### .. server ls
|
|
# Alias: `server ls` shares its help text with `server list`.
help_server_ls (){
  help_server_list
}
|
|
|
|
# Alias: `server ls` delegates to `server list`.
sub_server_ls() {
  sub_server_list
}
|
|
|
|
help_server_list() {
  # Usage text for `abra server (list|ls)`.
  printf '%s\n' "abra [options] server (list|ls)

List locally-defined servers."
}
|
|
|
|
# Print a NAME/CONNECTION table of all locally-defined servers.
sub_server_list() {
  get_servers

  warning "Loading status from ${#SERVERS[@]} server(s), patience advised.."
  printf "%s servers:\n\n" "${#SERVERS[@]}"

  # Fixed: this was `local -a idx=0`, which made idx an ARRAY; `idx+=1` then
  # appended elements while "$idx" (element 0) stayed 0 forever, so the
  # header row was reprinted for every server.
  local idx=0
  for SERVER in "${SERVERS[@]}"; do
    # Print the header once, before the first row.
    if [[ "$idx" == 0 ]]; then
      printf " NAME\tCONNECTION\n"
      printf " --\t--\t\n"
    fi

    name="${SERVER##*/}"
    host=$(docker context inspect "$name" -f "{{.Endpoints.docker.Host}}" 2>/dev/null)
    printf " %s\t%s\n" "$name" "${host:-UNKNOWN}"

    idx=$((idx + 1))
  done | column -s' ' -t
}
|
|
|
|
###### .. server init
|
|
help_server_init() {
  # Usage text for `abra server init`.
  cat <<'EOF'
abra [options] server init

Set up a server for Docker swarm joy. This initialisation explicitly chooses
for the "single host swarm" mode which uses the default IPv4 address as the
advertising address. This can be re-configured later for more advanced use
cases.

POWERED BY
  docker swarm init
  docker network create ...
EOF
}
|
|
|
|
# Initialise a single-host Docker swarm on <host> and create the shared
# "proxy" overlay network.
sub_server_init() {
  # All docker calls below operate against this host's context.
  export DOCKER_CONTEXT="${abra__host_}"

  load_context

  # Note(decentral1se): it sucks to use Google DNS but it seems like a reliable method
  # for determining the default IPv4 address especially nowadays
  # when there are often multiple internal addresses assigned to eth0
  # NOTE(review): assumes the source address is field 7 of `ip route get`
  # output — true for typical iproute2 formatting; verify on unusual setups.
  default_ipv4="$(ip route get 8.8.8.8 | head -1 | awk '{print $7}')"

  if [ "$abra___debug" = "true" ]; then
    DOCKER_ENDPOINT=$(docker context inspect "$DOCKER_CONTEXT" -f "{{.Endpoints.docker.Host}}" 2>/dev/null)
    debug "Connecting to $DOCKER_CONTEXT via SSH ($DOCKER_ENDPOINT)"
  fi

  # `|| true`: re-running init against an already-initialised swarm/network
  # must not abort the script.
  docker swarm init --advertise-addr "$default_ipv4" || true
  docker network create --driver=overlay proxy --scope swarm || true
}
|
|
|
|
###### .. server add
|
|
help_server_add() {
  # Usage text for `abra server add`.
  cat <<'EOF'
abra [options] server add <host> [<user>] [<port>]

Add a server, reachable on <host>.

OPTIONS
  <user>, <port>  SSH connection details

POWERED BY
  docker context create ...
EOF
}
|
|
|
|
# Register <host> as a Docker context (over SSH) and create its local
# server directory under ~/.abra/servers/.
sub_server_add() {
  require_abra_dir

  HOST="$abra__host_"
  USERNAME="$abra__user_"
  PORT="$abra__port_"

  # Build optional ":<port>" / "<user>@" fragments for the SSH URL.
  if [ -n "$PORT" ]; then
    PORT=":$PORT"
  fi

  if [ -n "$USERNAME" ]; then
    USERNAME="$USERNAME@"
  fi

  # `|| true`: the context may already exist; don't abort in that case.
  docker context create "$HOST" \
    --docker "host=ssh://$USERNAME$HOST$PORT" \
    || true

  mkdir -p "$ABRA_DIR/servers/$HOST"
}
|
|
|
|
###### .. server new
|
|
help_server_new() {
  # Usage text for `abra server new`.
  cat <<'EOF'
abra [options] server new <provider> -- "<args>"

Use a provider plugin to create an actual new server resource (VPS or
otherwise) which can then be used to house a new Co-op Cloud installation.

OPTIONS
  <provider>  Provider plugin for creating new server (choices: hetzner)
EOF
}
|
|
|
|
# Create a new server via a provider plugin (currently only "hetzner"),
# downloading the plugin first if it isn't installed.
sub_server_new() {
  require_abra_dir
  require_jq

  PROVIDER="$abra__provider_"

  if [ "$PROVIDER" != "hetzner" ]; then
    error "Unknown provider plugin 'abra-${PROVIDER}'"
  fi

  if [ ! -d "$ABRA_DIR/plugins/abra-$PROVIDER" ]; then
    require_plugin "abra-$PROVIDER"
  fi

  # Plugin runs in this shell; $abra__args_ intentionally unquoted so the
  # plugin receives them as separate positional arguments.
  # shellcheck disable=SC1090
  # shellcheck disable=SC2086
  source "$ABRA_DIR/plugins/abra-$PROVIDER/abra-$PROVIDER" $abra__args_
}
|
|
|
|
###### .. server delete
|
|
# Alias: `server rm` shares its help text with `server delete`.
help_server_rm (){
  help_server_delete
}
|
|
|
|
# Alias: `server rm` delegates to `server delete`.
sub_server_rm() {
  sub_server_delete
}
|
|
|
|
help_server_delete() {
  # Usage text for `abra server <host> delete`.
  cat <<'EOF'
abra [options] server <host> delete

Remove server <host>

POWERED BY
  docker context rm ...
EOF
}
|
|
|
|
sub_server_delete() {
  # Drop the Docker context for <host>; the ~/.abra/servers/<host> directory
  # is deliberately left in place.
  docker context rm "$abra__host_"
}
|
|
|
|
###### .. server <host> apps
|
|
help_server_apps (){
  # Usage text for `abra server <host> apps`.
  # Fixed: the inline-code span around the alias command was missing its
  # closing backtick.
  echo "abra [options] server <host> apps [--status]

Alias for \`abra app ls --server=<host>\`.

OPTIONS
  --status  Show whether apps are deployed (warning! slow!)

POWERED BY (for --status)
  docker stack ls"
}
|
|
|
|
# `server <host> apps` is an alias for `app ls --server=<host>`.
sub_server_apps() {
  abra___server="$abra__host_"
  sub_app_list
}
|
|
|
|
#######################################
|
|
# Misc commands
|
|
#######################################
|
|
|
|
###### .. upgrade
|
|
help_upgrade() {
  # Usage text for `abra upgrade`.
  cat <<'EOF'
abra [options] upgrade [--dev]

Upgrade abra itself, using the online installer script.

OPTIONS
  --dev  Upgrade to the latest development version (HEAD)
EOF
}
|
|
|
|
# Re-run the project's online installer to upgrade abra in place.
# NOTE(review): this pipes a remote script straight into bash; acceptable
# here only because the installer is the project's own endpoint.
sub_upgrade() {
  if [[ "$abra___dev" == "true" ]]; then
    curl https://install.abra.coopcloud.tech | bash -s -- --dev
  else
    curl https://install.abra.coopcloud.tech | bash
  fi
}
|
|
|
|
###### .. version
|
|
help_version() {
  # Usage text for `abra version`.
  printf '%s\n' "abra [options] version

Show the installed version of abra."
}
|
|
|
|
# Print the abra version; when running from a symlinked git checkout, append
# that checkout's short commit digest (e.g. "9.0.0-abc1234").
sub_version() {
  if [ -L "$0" ] && [ -e "$0" ]; then
    # $0 is a symlink into a source checkout; resolve it and ask git.
    ABRA_SRC=$(readlink "$0")
    ABRA_DIGEST=$(cd "${ABRA_SRC%/*}" && git rev-parse --short HEAD)
  fi
  # ${ABRA_DIGEST:+-} inserts the "-" separator only when a digest was found.
  echo "$ABRA_VERSION${ABRA_DIGEST:+-}${ABRA_DIGEST}"
}
|
|
|
|
###### .. doctor
|
|
help_doctor() {
  # Usage text for `abra doctor`.
  printf '%s\n' "abra [options] doctor

Help diagnose setup issues."
}
|
|
|
|
# Basic health check: currently only verifies the local Docker version.
sub_doctor() {
  require_docker_version
  success "Hurrah! Everything is in working order!"
}
|
|
|
|
###### .. help
|
|
help_help() {
  # Help about help. Yes, really.
  printf '%s\n' "HEEEEEELP! 😱"
}
|
|
|
|
# Dispatch `abra help <words>...` to the matching help_<words_joined_by__>
# function, or print the global DOC / a list of available topics.
sub_help() {
  # Join the parsed subcommand words with "_" to form a help_* name.
  SUBCOMMAND=$(IFS="_"; echo "${abra__subcommands_[*]}")
  if [ -z "$SUBCOMMAND" ]; then
    # Bare `abra help`: show the global usage text.
    printf "%s" "$DOC"
    exit
  fi
  HELP_CMD="help_${SUBCOMMAND}"
  if type "$HELP_CMD" > /dev/null 2>&1; then
    "$HELP_CMD"
  else
    # Build a suggestion list from all defined help_* functions
    # (`declare -Ff` prints "declare -f <name>"; field 3 is the name).
    HELP_COMMANDS=$(declare -Ff | grep 'help_' | cut -d' ' -f3 | sed 's/_/ /g')
    error "No help found for '$abra__subcommands_'

Try one of these:
${HELP_COMMANDS//help /}"
  fi
}
|
|
|
|
#######################################
|
|
# cheeky docker aliases
|
|
#######################################
|
|
|
|
###### .. stack <args>...
|
|
# Pass-through alias: `abra stack <args>...` -> `docker stack <args>...`.
sub_stack() {
  # Fixed: "$@" (quoted) preserves arguments containing whitespace; the
  # previous unquoted $@ word-split them.
  docker stack "$@"
}
|
|
|
|
###### .. volume <args>...
|
|
# Pass-through alias: `abra volume <args>...` -> `docker volume <args>...`.
sub_volume() {
  # Fixed: "$@" (quoted) preserves arguments containing whitespace; the
  # previous unquoted $@ word-split them.
  docker volume "$@"
}
|
|
|
|
###### .. network <args>...
|
|
# Pass-through alias: `abra network <args>...` -> `docker network <args>...`.
sub_network() {
  # Fixed: "$@" (quoted) preserves arguments containing whitespace; the
  # previous unquoted $@ word-split them.
  docker network "$@"
}
|
|
|
|
#######################################
|
|
# Main
|
|
#######################################
|
|
|
|
# Main entry point: parse the command line with docopt, load app/server
# context, then dispatch to the most specific matching sub_* function.
abra() {
  require_bash_4

  # Provide a no-op tput so colour helpers don't fail on minimal systems.
  if ! type tput > /dev/null 2>&1; then
    tput() {
      echo -n
    }
  fi

  # docopt writes parsed options/arguments into abra_-prefixed variables.
  DOCOPT_PREFIX=abra_
  DOCOPT_ADD_HELP=false
  eval "$(docopt "$@")"

  # --stack <stack>
  STACK_NAME=$abra___stack

  # --env <env>
  if [ -n "$abra___env" ]; then
    # `set -a` exports everything sourced from the env file.
    set -a
    # shellcheck disable=SC1090
    source "$abra___env" || error "Unable to load env from '$abra___env'"
    set +a
  fi

  # An <app> argument implies per-app context and env must be loaded.
  if [ -n "$abra__app_" ]; then
    load_instance
    load_instance_env
    require_apps_json
  fi

  load_abra_sh

  # Search for sub_* functions, and check if any of them matches enabled
  # arguments (i.e. is a command and is specified). The `awk / sort` sorts by
  # the number of occurrences of '_' in the function name, to ensure that
  # `abra app <app> version` will be matched before `abra version`.
  SUBCOMMANDS=$(declare -Ff | grep 'sub_' | cut -d' ' -f3 | awk '{ print gsub("_","&"), $0 }' | sort -n -r | cut -d" " -f2-)
  for SUBCOMMAND in $SUBCOMMANDS; do
    IFS='_' read -r -a PARTS <<< "$SUBCOMMAND"
    for PART in "${PARTS[@]:1}"; do
      # TODO 3wc: probably a better way to check if a variable is defined..
      VAR=$(eval "echo \$abra_$PART")
      if [ ! "$VAR" == "true" ]; then
        # One word of this candidate isn't enabled; try the next candidate.
        continue 2
      fi
    done
    # All words matched; remember the command as "word1_word2_...".
    abra__command_=$(IFS="_"; echo "${PARTS[*]:1}")
    break
  done

  # --help / -h: show global DOC or the matched command's help text.
  if [ "$abra___help" = "true" ]; then
    if [ -z "$abra__command_" ]; then
      # shellcheck disable=SC2059
      printf "$DOC"
      exit
    elif type "help_${abra__command_}" > /dev/null 2>&1; then
      "help_${abra__command_}"
      exit
    else
      error "No help for '$abra__command_'"
    fi
  fi

  # Use abra__command_ in case `command` is provided (i.e. `volume` or `stack`)
  CMD="sub_${abra__command_}"
  if type "$CMD" > /dev/null 2>&1; then
    # shellcheck disable=SC2086,SC2048
    "$CMD" ${abra__args_[*]}
  else
    # No matching subcommand: print docopt usage and exit non-zero.
    docopt_exit
  fi
}
|
|
|
|
abra "$@"
|