Compare commits


1 Commit

Author  SHA1        Message                               Date
3wc     08d937104e  Possible fix to invalid length "20"?  2021-05-04 08:10:54 +02:00
21 changed files with 2192 additions and 882 deletions


@ -18,60 +18,32 @@ steps:
commands:
- bats tests
- name: publish image
image: plugins/docker
settings:
auto_tag: true
username: thecoopcloud
password:
from_secret: thecoopcloud_password
repo: thecoopcloud/abra
tags: latest
depends_on:
- run shellcheck
- run flake8
- run unit tests
when:
event:
exclude:
- pull_request
- name: collect code coverage
failure: ignore # until we fix this
image: decentral1se/docker-dind-bats-kcov
commands:
- kcov . bats tests || true
- name: trigger downstream builds
image: plugins/downstream
- name: send code coverage report to codecov
failure: ignore # until we fix this
image: plugins/codecov
settings:
server: https://drone.autonomic.zone
token:
from_secret: decentral1se_token
fork: true
repositories:
- coop-cloud/drone-abra
depends_on:
- run shellcheck
- run flake8
- run unit tests
- publish image
when:
event:
exclude:
- pull_request
from_secret: codecov_token
required: true
- name: notify on failure
image: plugins/matrix
- name: notify rocket chat
image: plugins/slack
settings:
homeserver: https://matrix.autonomic.zone
roomid: "IFazIpLtxiScqbHqoa:autonomic.zone"
userid: "@autono-bot:autonomic.zone"
accesstoken:
from_secret: autono_bot_access_token
depends_on:
- run shellcheck
- run flake8
- run unit tests
- publish image
- trigger downstream builds
webhook:
from_secret: rc_builds_url
username: comradebritney
channel: "internal.builds"
template: "{{repo.owner}}/{{repo.name}} build failed: {{build.link}}"
when:
status:
- failure
trigger:
branch:
- main

.gitignore

@ -1,5 +1,2 @@
*.json
*.pyc
/.venv
__pycache__
coverage/


@ -9,49 +9,9 @@
# abra x.x.x (UNRELEASED)
- Add `--bump` to `deploy` command to allow packagers to make minor package related releases ([#173](https://git.autonomic.zone/coop-cloud/abra/issues/173))
- Drop `--skip-version-check`/`--no-domain-poll`/`--no-state-poll` in favour of `--fast` ([#169](https://git.autonomic.zone/coop-cloud/abra/issues/169))
- Move `abra` image under the new `thecoopcloud/...` namespace ([#1](https://git.autonomic.zone/coop-cloud/auto-apps-json/issues/1))
- Add a `--output` flag to the `app-json.py` app generator for the CI environment ([#2](https://git.autonomic.zone/coop-cloud/auto-apps-json/issues/2))
- Support logging in as new `thecoopcloud` Docker account via `skopeo` when generating new `apps.json` ([7482362af1](https://git.autonomic.zone/coop-cloud/abra/commit/7482362af1d01cc02828abd45b1222fa643d1f80))
- App deployment checks are somewhat more reliable (see [#193](https://git.autonomic.zone/coop-cloud/abra/issues/193) for remaining work) ([#165](https://git.autonomic.zone/coop-cloud/abra/issues/165))
# abra 9.0.0 (2021-06-10)
- Add Docker image for `abra` ([64d578cf91](https://git.autonomic.zone/coop-cloud/abra/commit/64d578cf914bd2bad378ea4ef375747d10b33191))
- Support unattended mode for recipe releasing ([3759bcd641](https://git.autonomic.zone/coop-cloud/abra/commit/3759bcd641cf60611c13927e83425e773d2bb629))
- Add Renovate bot configuration script ([9fadc430a7](https://git.autonomic.zone/coop-cloud/abra/commit/9fadc430a7bb2d554c0ee26c0f9b6c51dc5b0475))
- Add release automation via [drone-abra](https://git.autonomic.zone/coop-cloud/drone-abra) ([#56](https://git.autonomic.zone/coop-cloud/organising/issues/56))
- Move `apps.json` generation to [auto-apps-json](https://git.autonomic.zone/coop-cloud/auto-apps-json) ([#125](https://git.autonomic.zone/coop-cloud/abra/issues/125))
- Add Github mirroring script ([4ef433312d](https://git.autonomic.zone/coop-cloud/abra/commit/4ef433312dd0b0ace91b3c285f82f3973093d92d))
- Add `--chaos` flag to deploy (always choose latest Git commit) ([#178](https://git.autonomic.zone/coop-cloud/abra/issues/178))
# abra 8.0.1 (2021-05-31)
- Fix help for `... app ... volume ls` ([efad71c470](https://git.autonomic.zone/coop-cloud/abra/commits/branch/main))
- Only output secrets warnings once ([#143](https://git.autonomic.zone/coop-cloud/abra/issues/143))
- Migrate `abra` installation script to `coopcloud.tech` domain ([#150](https://git.autonomic.zone/coop-cloud/abra/issues/150))
- Add `--no-state-poll` to avoid success/failure forecasting on deployment ([#165](https://git.autonomic.zone/coop-cloud/abra/issues/165))
# abra 8.0.0 (2021-05-30)
- Fix secret length generation ([f537417](https://git.autonomic.zone/coop-cloud/abra/commit/1b85bf3d37280e9632c315d759c0f2d09c039fef))
- Fix checking out new apps ([#164](https://git.autonomic.zone/coop-cloud/abra/issues/164))
- Give up if YAML is invalid ([#154](https://git.autonomic.zone/coop-cloud/abra/issues/154))
- Switch from wget to cURL ([fc0caaa](https://git.autonomic.zone/coop-cloud/abra/commit/fc0caaa))
- Add Bash completion for `recipe ..` ([8c93d1a](https://git.autonomic.zone/coop-cloud/abra/commit/8c93d1a))
- Tweak README parsing in `app-json.py` ([b14219b](https://git.autonomic.zone/coop-cloud/abra/commit/b14219b))
- Add fallback names to `app.json` ([#157](https://git.autonomic.zone/coop-cloud/abra/issues/157))
- Remove duplicate message ([#155](https://git.autonomic.zone/coop-cloud/abra/issues/155))
- Add `deploy --fast` ([a7f7c96](https://git.autonomic.zone/coop-cloud/abra/commit/a7f7c96))
- Add `app .. volume` commands, fix volume deletion with `app .. delete --volumes` ([#161](https://git.autonomic.zone/coop-cloud/abra/issues/161))
# abra 0.7.4 (2021-05-10)
- Sort `apps.json` when publishing ([39a7fc0](https://git.autonomic.zone/coop-cloud/abra/commit/39a7fc04fb5df1a6d78b84f51838530ab3eb76db))
- Fix publishing of rating for new apps ([0e28af9](https://git.autonomic.zone/coop-cloud/abra/commit/0e28af9eb1af6c6da705b4614ddd173c60576629))
- Detect compose filenames in `n+1` release generation ([ffc569e](https://git.autonomic.zone/coop-cloud/abra/commit/ffc569e275df7ca784a4db1a3331e17975fd8c87))
- Fix secret generation when specifying length ([3a353f4](https://git.autonomic.zone/coop-cloud/abra/commit/3a353f4062baccde2c9f175b03afb2db6d462ae4))
# abra 0.7.3 (2021-04-28)


@ -1,33 +0,0 @@
FROM alpine:latest
RUN apk add --upgrade --no-cache \
bash \
curl \
git \
grep \
openssh-client \
py3-requests \
skopeo \
util-linux
RUN mkdir -p ~/.local/bin
RUN mkdir -p ~/.abra/apps
RUN mkdir -p ~/.abra/vendor
RUN mkdir -p ~/.ssh/
RUN ssh-keyscan -p 2222 git.autonomic.zone > ~/.ssh/known_hosts
RUN curl -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 --output ~/.abra/vendor/jq
RUN chmod +x ~/.abra/vendor/jq
RUN curl -L https://github.com/mikefarah/yq/releases/download/v4.9.3/yq_linux_amd64 --output ~/.abra/vendor/yq
RUN chmod +x ~/.abra/vendor/yq
# Note(decentral1se): it is fine to always use the development branch because
# our Drone CI docker auto-tagger will publish official release tags and
# otherwise give us the latest abra on the latest tag
RUN curl https://install.abra.coopcloud.tech | bash -s -- --dev
COPY bin/* /root/.local/bin/
ENTRYPOINT ["/root/.local/bin/abra"]


@ -1,10 +1,11 @@
# abra
[![Build Status](https://drone.autonomic.zone/api/badges/coop-cloud/abra/status.svg)](https://drone.autonomic.zone/coop-cloud/abra)
[![codecov](https://codecov.io/gh/Autonomic-Cooperative/abra/branch/main/graph/badge.svg?token=aX3I5NMRsj)](undefined)
> https://coopcloud.tech
The Co-op Cloud utility belt 🎩🐇
The cooperative cloud utility belt 🎩🐇
`abra` is a command-line tool for managing your own [Co-op Cloud](https://coopcloud.tech). It can provision new servers, create applications, deploy them, run backup and restore operations and a whole lot of other things. It is the go-to tool for day-to-day operations when managing a Co-op Cloud instance.
@ -27,35 +28,19 @@ See [CHANGELOG.md](./CHANGELOG.md).
## Install
Requirements:
- `pwqgen` (optional)
- `pwgen` (optional)
- `curl`
- `docker`
- `bash` >= 4
Install the latest stable release:
```sh
curl https://install.abra.coopcloud.tech | bash
curl https://install.abra.autonomic.zone | bash
```
or the bleeding-edge development version:
```sh
curl https://install.abra.coopcloud.tech | bash -s -- --dev
curl https://install.abra.autonomic.zone | bash -s -- --dev
```
The source for this script is [here](./deploy/install.abra.coopcloud.tech/installer).
## Container
An [image](https://hub.docker.com/r/thecoopcloud/abra) is also provided.
```
docker run thecoopcloud/abra app ls
```
The source for this script is [here](./deploy/install.abra.autonomic.zone/installer).
## Update
@ -68,22 +53,41 @@ To update the development version, run `abra upgrade --dev`.
It's written in Bash version 4 or greater!
Install it via `curl https://install.abra.coopcloud.tech | bash -s -- --dev`, then you can hack on the source in `~/.abra/src`.
Install it via `curl https://install.abra.autonomic.zone | bash -s -- --dev`, then you can hack on the source in `~/.abra/src`.
The command-line interface is generated via [docopt](http://docopt.org/). If you add arguments then you need to run `make docopt` to regenerate the parser.
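For example, adding a new option typically looks like the sketch below (the `--dry-run` flag is a hypothetical placeholder; the `abra___<flag>` variable naming follows the generated parser, where e.g. `--force` becomes `$abra___force`):
```sh
# 1. Add the flag to the Usage section at the top of ./abra, e.g.:
#    abra [options] app <app> deploy [--update] [--force] [--dry-run] [<version>]

# 2. Regenerate the docopt parser:
make docopt

# 3. Read the generated variable in the relevant subcommand:
if [ "$abra___dry_run" = "true" ]; then
  info "Dry run requested, not deploying anything"
fi
```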
Please remember to update the [CHANGELOG](./CHANGELOG.md) when you make a change.
## Generating a new apps.json
You'll need to install the following requirements:
- [requests](https://docs.python-requests.org/en/master/) (`apt install python3-requests` / `pip install requests`)
- [skopeo](https://github.com/containers/skopeo) (check [the install docs](https://github.com/containers/skopeo/blob/master/install.md))
- [jq](https://stedolan.github.io/jq/tutorial/) (`sudo apt-get install jq` or see [the install docs](https://stedolan.github.io/jq/download/))
- [yq](https://mikefarah.gitbook.io/yq/) (see [the install docs](https://mikefarah.gitbook.io/yq/#install))
Then run `./bin/app-json.py` ([source](./bin/app-json.py)) and it will spit out the JSON file into [deploy/apps.coopcloud.tech/apps.json](./deploy/apps.coopcloud.tech/apps.json).
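For example, a minimal run might look like this (a sketch assuming the generator variant with the `--output` flag; `SKOPEO_USER`/`SKOPEO_PASSWORD` are optional and only read by `skopeo_login()` to avoid registry rate limits):
```sh
# optional: log in to the registry via skopeo to dodge rate limits
export SKOPEO_USER=<docker-hub-user>
export SKOPEO_PASSWORD=<docker-hub-password>

# generate the catalogue and write it where the deploy step expects it
./bin/app-json.py --output deploy/apps.coopcloud.tech/apps.json
```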
## Releasing
### `abra`
> [install.abra.coopcloud.tech](https://install.abra.coopcloud.tech)
> [install.abra.autonomic.zone](https://install.abra.autonomic.zone)
- Change the `x.x.x` header in [CHANGELOG.md](./CHANGELOG.md) to reflect new version and mark date
- Update the version in [abra](./abra)
- Update the version in [deploy/install.abra.coopcloud.tech/installer](./deploy/install.abra.coopcloud.tech/installer)
- Update the version in [deploy/install.abra.autonomic.zone/installer](./deploy/install.abra.autonomic.zone/installer)
- `git commit` the above changes and then tag it with `git tag <your-new-version>` (see the sketch after this list)
- `git push` and `git push --tags`
- Deploy a new installer script `make release-installer`
- Tell the world (CoTech forum, Matrix public channel, Autonomic mastodon, etc.)
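Condensed, the git and deployment steps above look roughly like this (using `9.0.1` as a placeholder version):
```sh
# after updating CHANGELOG.md, ./abra and the installer script
git commit -am "Release 9.0.1"
git tag 9.0.1
git push && git push --tags

# publish the updated installer script
make release-installer
```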
### apps.coopcloud.tech
> [apps.coopcloud.tech](https://apps.coopcloud.tech)
```bash
$ make release-apps
```

abra

@ -3,7 +3,7 @@
GIT_URL="https://git.autonomic.zone/coop-cloud/"
ABRA_APPS_URL="https://apps.coopcloud.tech"
ABRA_DIR="${ABRA_DIR:-$HOME/.abra}"
ABRA_VERSION="9.0.0"
ABRA_VERSION="0.7.3"
ABRA_BACKUP_DIR="${ABRA_BACKUP_DIR:-$ABRA_DIR/backups}"
ABRA_VENDOR_DIR="$ABRA_DIR/vendor"
ABRA_APPS_JSON="${ABRA_DIR}/apps.json"
@ -19,7 +19,7 @@ Usage:
abra [options] app (list|ls) [--status] [--server=<server>] [--type=<type>]
abra [options] app new [--server=<server>] [--domain=<domain>] [--app-name=<app_name>] [--pass] [--secrets] <type>
abra [options] app <app> backup (<service>|--all)
abra [options] app <app> deploy [--update] [--force] [--fast] [--chaos] [<version>]
abra [options] app <app> deploy [--update] [--force] [--skip-version-check] [--no-domain-poll] [<version>]
abra [options] app <app> check
abra [options] app <app> version
abra [options] app <app> config
@ -35,12 +35,10 @@ Usage:
abra [options] app <app> secret insert <secret> <version> <data> [--pass]
abra [options] app <app> secret (rm|delete) (<secret>|--all) [--pass]
abra [options] app <app> undeploy
abra [options] app <app> volume ls
abra [options] app <app> volume (rm|delete) (<volume>|--all)
abra [options] app <app> <command> [<args>...]
abra [options] recipe ls
abra [options] recipe create <recipe>
abra [options] recipe <recipe> release [--force] [--bump]
abra [options] recipe <recipe> release [--force]
abra [options] recipe <recipe> versions
abra [options] server add <host> [<user>] [<port>]
abra [options] server new <provider> -- <args>
@ -167,127 +165,123 @@ eval "var_$1+=($value)"; else eval "var_$1=$value"; fi; return 0; fi; done
return 1; }; stdout() { printf -- "cat <<'EOM'\n%s\nEOM\n" "$1"; }; stderr() {
printf -- "cat <<'EOM' >&2\n%s\nEOM\n" "$1"; }; error() {
[[ -n $1 ]] && stderr "$1"; stderr "$usage"; _return 1; }; _return() {
printf -- "exit %d\n" "$1"; exit "$1"; }; set -e; trimmed_doc=${DOC:1:2451}
usage=${DOC:40:1842}; digest=c7bae
shorts=(-C -n -U -e -b -d -h -s -v '' '' '' '' '' '' '' '' '' '' '' '' '' '' '' '' '')
longs=(--skip-check --no-prompt --skip-update --env --branch --debug --help --stack --verbose --status --server --type --domain --app-name --pass --secrets --all --update --force --fast --chaos --volumes --no-tty --user --bump --dev)
argcounts=(0 0 0 1 1 0 0 1 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0 0); node_0(){
switch __skip_check 0; }; node_1(){ switch __no_prompt 1; }; node_2(){
switch __skip_update 2; }; node_3(){ value __env 3; }; node_4(){
value __branch 4; }; node_5(){ switch __debug 5; }; node_6(){ switch __help 6; }
node_7(){ value __stack 7; }; node_8(){ switch __verbose 8; }; node_9(){
printf -- "exit %d\n" "$1"; exit "$1"; }; set -e; trimmed_doc=${DOC:1:2365}
usage=${DOC:40:1756}; digest=de09c
shorts=(-s -b -d -C -e -n -h -v -U '' '' '' '' '' '' '' '' '' '' '' '' '' '' '' '')
longs=(--stack --branch --debug --skip-check --env --no-prompt --help --verbose --skip-update --status --server --type --domain --app-name --pass --secrets --all --update --force --skip-version-check --no-domain-poll --volumes --no-tty --user --dev)
argcounts=(1 1 0 0 1 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 0 1 0); node_0(){
value __stack 0; }; node_1(){ value __branch 1; }; node_2(){ switch __debug 2; }
node_3(){ switch __skip_check 3; }; node_4(){ value __env 4; }; node_5(){
switch __no_prompt 5; }; node_6(){ switch __help 6; }; node_7(){
switch __verbose 7; }; node_8(){ switch __skip_update 8; }; node_9(){
switch __status 9; }; node_10(){ value __server 10; }; node_11(){
value __type 11; }; node_12(){ value __domain 12; }; node_13(){
value __app_name 13; }; node_14(){ switch __pass 14; }; node_15(){
switch __secrets 15; }; node_16(){ switch __all 16; }; node_17(){
switch __update 17; }; node_18(){ switch __force 18; }; node_19(){
switch __fast 19; }; node_20(){ switch __chaos 20; }; node_21(){
switch __volumes 21; }; node_22(){ switch __no_tty 22; }; node_23(){
value __user 23; }; node_24(){ switch __bump 24; }; node_25(){ switch __dev 25
}; node_26(){ value _type_ a; }; node_27(){ value _app_ a; }; node_28(){
value _service_ a; }; node_29(){ value _version_ a; }; node_30(){ value _src_ a
}; node_31(){ value _dst_ a; }; node_32(){ value _backup_file_ a; }; node_33(){
value _args_ a true; }; node_34(){ value _secret_ a; }; node_35(){ value _cmd_ a
}; node_36(){ value _data_ a; }; node_37(){ value _volume_ a; }; node_38(){
value _command_ a; }; node_39(){ value _recipe_ a; }; node_40(){ value _host_ a
}; node_41(){ value _user_ a; }; node_42(){ value _port_ a; }; node_43(){
value _provider_ a; }; node_44(){ value _subcommands_ a true; }; node_45(){
_command app; }; node_46(){ _command list; }; node_47(){ _command ls; }
node_48(){ _command new; }; node_49(){ _command backup; }; node_50(){
_command deploy; }; node_51(){ _command check; }; node_52(){ _command version; }
node_53(){ _command config; }; node_54(){ _command cp; }; node_55(){
_command logs; }; node_56(){ _command ps; }; node_57(){ _command restore; }
node_58(){ _command rm; }; node_59(){ _command delete; }; node_60(){
_command run; }; node_61(){ _command rollback; }; node_62(){ _command secret; }
node_63(){ _command generate; }; node_64(){ _command insert; }; node_65(){
_command undeploy; }; node_66(){ _command volume; }; node_67(){ _command recipe
}; node_68(){ _command create; }; node_69(){ _command release; }; node_70(){
_command versions; }; node_71(){ _command server; }; node_72(){ _command add; }
node_73(){ _command __ --; }; node_74(){ _command init; }; node_75(){
_command apps; }; node_76(){ _command upgrade; }; node_77(){ _command doctor; }
node_78(){ _command help; }; node_79(){ optional 0 1 2 3 4 5 6 7 8; }
node_80(){ optional 79; }; node_81(){ either 46 47; }; node_82(){ required 81; }
node_83(){ optional 9; }; node_84(){ optional 10; }; node_85(){ optional 11; }
node_86(){ required 80 45 82 83 84 85; }; node_87(){ optional 12; }; node_88(){
optional 13; }; node_89(){ optional 14; }; node_90(){ optional 15; }; node_91(){
required 80 45 48 84 87 88 89 90 26; }; node_92(){ either 28 16; }; node_93(){
required 92; }; node_94(){ required 80 45 27 49 93; }; node_95(){ optional 17; }
node_96(){ optional 18; }; node_97(){ optional 19; }; node_98(){ optional 20; }
node_99(){ optional 29; }; node_100(){ required 80 45 27 50 95 96 97 98 99; }
node_101(){ required 80 45 27 51; }; node_102(){ required 80 45 27 52; }
node_103(){ required 80 45 27 53; }; node_104(){ required 80 45 27 54 30 31; }
node_105(){ optional 28; }; node_106(){ required 80 45 27 55 105; }; node_107(){
required 80 45 27 56; }; node_108(){ required 80 45 27 57 93; }; node_109(){
either 58 59; }; node_110(){ required 109; }; node_111(){ optional 21; }
node_112(){ required 80 45 27 110 111 90; }; node_113(){ optional 32; }
node_114(){ required 80 45 27 57 28 113; }; node_115(){ optional 22; }
node_116(){ optional 23; }; node_117(){ oneormore 33; }; node_118(){
required 80 45 27 60 115 116 28 117; }; node_119(){ required 80 45 27 61 99; }
node_120(){ required 34 29; }; node_121(){ either 120 16; }; node_122(){
required 121; }; node_123(){ optional 35; }; node_124(){
required 80 45 27 62 63 122 123 89; }; node_125(){
required 80 45 27 62 64 34 29 36 89; }; node_126(){ either 34 16; }; node_127(){
required 126; }; node_128(){ required 80 45 27 62 110 127 89; }; node_129(){
required 80 45 27 65; }; node_130(){ required 80 45 27 66 47; }; node_131(){
either 37 16; }; node_132(){ required 131; }; node_133(){
required 80 45 27 66 110 132; }; node_134(){ optional 117; }; node_135(){
required 80 45 27 38 134; }; node_136(){ required 80 67 47; }; node_137(){
required 80 67 68 39; }; node_138(){ optional 24; }; node_139(){
required 80 67 39 69 96 138; }; node_140(){ required 80 67 39 70; }; node_141(){
optional 41; }; node_142(){ optional 42; }; node_143(){
required 80 71 72 40 141 142; }; node_144(){ required 80 71 48 43 73 33; }
node_145(){ required 80 71 82; }; node_146(){ required 80 71 40 58; }
node_147(){ required 80 71 40 74; }; node_148(){ required 80 71 40 75 83; }
node_149(){ optional 25; }; node_150(){ required 80 76 149; }; node_151(){
required 80 52; }; node_152(){ required 80 77; }; node_153(){ oneormore 44; }
node_154(){ optional 153; }; node_155(){ required 80 78 154; }; node_156(){
required 80; }; node_157(){
either 86 91 94 100 101 102 103 104 106 107 108 112 114 118 119 124 125 128 129 130 133 135 136 137 139 140 143 144 145 146 147 148 150 151 152 155 156
}; node_158(){ required 157; }; cat <<<' docopt_exit() {
[[ -n $1 ]] && printf "%s\n" "$1" >&2; printf "%s\n" "${DOC:40:1842}" >&2
exit 1; }'; unset var___skip_check var___no_prompt var___skip_update var___env \
var___branch var___debug var___help var___stack var___verbose var___status \
var___server var___type var___domain var___app_name var___pass var___secrets \
var___all var___update var___force var___fast var___chaos var___volumes \
var___no_tty var___user var___bump var___dev var__type_ var__app_ \
var__service_ var__version_ var__src_ var__dst_ var__backup_file_ var__args_ \
var__secret_ var__cmd_ var__data_ var__volume_ var__command_ var__recipe_ \
var__host_ var__user_ var__port_ var__provider_ var__subcommands_ var_app \
var_list var_ls var_new var_backup var_deploy var_check var_version var_config \
var_cp var_logs var_ps var_restore var_rm var_delete var_run var_rollback \
var_secret var_generate var_insert var_undeploy var_volume var_recipe \
switch __skip_version_check 19; }; node_20(){ switch __no_domain_poll 20; }
node_21(){ switch __volumes 21; }; node_22(){ switch __no_tty 22; }; node_23(){
value __user 23; }; node_24(){ switch __dev 24; }; node_25(){ value _type_ a; }
node_26(){ value _app_ a; }; node_27(){ value _service_ a; }; node_28(){
value _version_ a; }; node_29(){ value _src_ a; }; node_30(){ value _dst_ a; }
node_31(){ value _backup_file_ a; }; node_32(){ value _args_ a true; }
node_33(){ value _secret_ a; }; node_34(){ value _cmd_ a; }; node_35(){
value _data_ a; }; node_36(){ value _command_ a; }; node_37(){ value _recipe_ a
}; node_38(){ value _host_ a; }; node_39(){ value _user_ a; }; node_40(){
value _port_ a; }; node_41(){ value _provider_ a; }; node_42(){
value _subcommands_ a true; }; node_43(){ _command app; }; node_44(){
_command list; }; node_45(){ _command ls; }; node_46(){ _command new; }
node_47(){ _command backup; }; node_48(){ _command deploy; }; node_49(){
_command check; }; node_50(){ _command version; }; node_51(){ _command config; }
node_52(){ _command cp; }; node_53(){ _command logs; }; node_54(){ _command ps
}; node_55(){ _command restore; }; node_56(){ _command rm; }; node_57(){
_command delete; }; node_58(){ _command run; }; node_59(){ _command rollback; }
node_60(){ _command secret; }; node_61(){ _command generate; }; node_62(){
_command insert; }; node_63(){ _command undeploy; }; node_64(){ _command recipe
}; node_65(){ _command create; }; node_66(){ _command release; }; node_67(){
_command versions; }; node_68(){ _command server; }; node_69(){ _command add; }
node_70(){ _command __ --; }; node_71(){ _command init; }; node_72(){
_command apps; }; node_73(){ _command upgrade; }; node_74(){ _command doctor; }
node_75(){ _command help; }; node_76(){ optional 0 1 2 3 4 5 6 7 8; }
node_77(){ optional 76; }; node_78(){ either 44 45; }; node_79(){ required 78; }
node_80(){ optional 9; }; node_81(){ optional 10; }; node_82(){ optional 11; }
node_83(){ required 77 43 79 80 81 82; }; node_84(){ optional 12; }; node_85(){
optional 13; }; node_86(){ optional 14; }; node_87(){ optional 15; }; node_88(){
required 77 43 46 81 84 85 86 87 25; }; node_89(){ either 27 16; }; node_90(){
required 89; }; node_91(){ required 77 43 26 47 90; }; node_92(){ optional 17; }
node_93(){ optional 18; }; node_94(){ optional 19; }; node_95(){ optional 20; }
node_96(){ optional 28; }; node_97(){ required 77 43 26 48 92 93 94 95 96; }
node_98(){ required 77 43 26 49; }; node_99(){ required 77 43 26 50; }
node_100(){ required 77 43 26 51; }; node_101(){ required 77 43 26 52 29 30; }
node_102(){ optional 27; }; node_103(){ required 77 43 26 53 102; }; node_104(){
required 77 43 26 54; }; node_105(){ required 77 43 26 55 90; }; node_106(){
either 56 57; }; node_107(){ required 106; }; node_108(){ optional 21; }
node_109(){ required 77 43 26 107 108 87; }; node_110(){ optional 31; }
node_111(){ required 77 43 26 55 27 110; }; node_112(){ optional 22; }
node_113(){ optional 23; }; node_114(){ oneormore 32; }; node_115(){
required 77 43 26 58 112 113 27 114; }; node_116(){ required 77 43 26 59 96; }
node_117(){ required 33 28; }; node_118(){ either 117 16; }; node_119(){
required 118; }; node_120(){ optional 34; }; node_121(){
required 77 43 26 60 61 119 120 86; }; node_122(){
required 77 43 26 60 62 33 28 35 86; }; node_123(){ either 33 16; }; node_124(){
required 123; }; node_125(){ required 77 43 26 60 107 124 86; }; node_126(){
required 77 43 26 63; }; node_127(){ optional 114; }; node_128(){
required 77 43 26 36 127; }; node_129(){ required 77 64 45; }; node_130(){
required 77 64 65 37; }; node_131(){ required 77 64 37 66 93; }; node_132(){
required 77 64 37 67; }; node_133(){ optional 39; }; node_134(){ optional 40; }
node_135(){ required 77 68 69 38 133 134; }; node_136(){
required 77 68 46 41 70 32; }; node_137(){ required 77 68 79; }; node_138(){
required 77 68 38 56; }; node_139(){ required 77 68 38 71; }; node_140(){
required 77 68 38 72 80; }; node_141(){ optional 24; }; node_142(){
required 77 73 141; }; node_143(){ required 77 50; }; node_144(){ required 77 74
}; node_145(){ oneormore 42; }; node_146(){ optional 145; }; node_147(){
required 77 75 146; }; node_148(){ required 77; }; node_149(){
either 83 88 91 97 98 99 100 101 103 104 105 109 111 115 116 121 122 125 126 128 129 130 131 132 135 136 137 138 139 140 142 143 144 147 148
}; node_150(){ required 149; }; cat <<<' docopt_exit() {
[[ -n $1 ]] && printf "%s\n" "$1" >&2; printf "%s\n" "${DOC:40:1756}" >&2
exit 1; }'; unset var___stack var___branch var___debug var___skip_check \
var___env var___no_prompt var___help var___verbose var___skip_update \
var___status var___server var___type var___domain var___app_name var___pass \
var___secrets var___all var___update var___force var___skip_version_check \
var___no_domain_poll var___volumes var___no_tty var___user var___dev \
var__type_ var__app_ var__service_ var__version_ var__src_ var__dst_ \
var__backup_file_ var__args_ var__secret_ var__cmd_ var__data_ var__command_ \
var__recipe_ var__host_ var__user_ var__port_ var__provider_ var__subcommands_ \
var_app var_list var_ls var_new var_backup var_deploy var_check var_version \
var_config var_cp var_logs var_ps var_restore var_rm var_delete var_run \
var_rollback var_secret var_generate var_insert var_undeploy var_recipe \
var_create var_release var_versions var_server var_add var___ var_init \
var_apps var_upgrade var_doctor var_help; parse 158 "$@"
local prefix=${DOCOPT_PREFIX:-''}; unset "${prefix}__skip_check" \
"${prefix}__no_prompt" "${prefix}__skip_update" "${prefix}__env" \
"${prefix}__branch" "${prefix}__debug" "${prefix}__help" "${prefix}__stack" \
"${prefix}__verbose" "${prefix}__status" "${prefix}__server" "${prefix}__type" \
"${prefix}__domain" "${prefix}__app_name" "${prefix}__pass" \
var_apps var_upgrade var_doctor var_help; parse 150 "$@"
local prefix=${DOCOPT_PREFIX:-''}; unset "${prefix}__stack" \
"${prefix}__branch" "${prefix}__debug" "${prefix}__skip_check" \
"${prefix}__env" "${prefix}__no_prompt" "${prefix}__help" "${prefix}__verbose" \
"${prefix}__skip_update" "${prefix}__status" "${prefix}__server" \
"${prefix}__type" "${prefix}__domain" "${prefix}__app_name" "${prefix}__pass" \
"${prefix}__secrets" "${prefix}__all" "${prefix}__update" "${prefix}__force" \
"${prefix}__fast" "${prefix}__chaos" "${prefix}__volumes" "${prefix}__no_tty" \
"${prefix}__user" "${prefix}__bump" "${prefix}__dev" "${prefix}_type_" \
"${prefix}_app_" "${prefix}_service_" "${prefix}_version_" "${prefix}_src_" \
"${prefix}_dst_" "${prefix}_backup_file_" "${prefix}_args_" \
"${prefix}_secret_" "${prefix}_cmd_" "${prefix}_data_" "${prefix}_volume_" \
"${prefix}_command_" "${prefix}_recipe_" "${prefix}_host_" "${prefix}_user_" \
"${prefix}_port_" "${prefix}_provider_" "${prefix}_subcommands_" \
"${prefix}app" "${prefix}list" "${prefix}ls" "${prefix}new" "${prefix}backup" \
"${prefix}deploy" "${prefix}check" "${prefix}version" "${prefix}config" \
"${prefix}cp" "${prefix}logs" "${prefix}ps" "${prefix}restore" "${prefix}rm" \
"${prefix}__skip_version_check" "${prefix}__no_domain_poll" \
"${prefix}__volumes" "${prefix}__no_tty" "${prefix}__user" "${prefix}__dev" \
"${prefix}_type_" "${prefix}_app_" "${prefix}_service_" "${prefix}_version_" \
"${prefix}_src_" "${prefix}_dst_" "${prefix}_backup_file_" "${prefix}_args_" \
"${prefix}_secret_" "${prefix}_cmd_" "${prefix}_data_" "${prefix}_command_" \
"${prefix}_recipe_" "${prefix}_host_" "${prefix}_user_" "${prefix}_port_" \
"${prefix}_provider_" "${prefix}_subcommands_" "${prefix}app" "${prefix}list" \
"${prefix}ls" "${prefix}new" "${prefix}backup" "${prefix}deploy" \
"${prefix}check" "${prefix}version" "${prefix}config" "${prefix}cp" \
"${prefix}logs" "${prefix}ps" "${prefix}restore" "${prefix}rm" \
"${prefix}delete" "${prefix}run" "${prefix}rollback" "${prefix}secret" \
"${prefix}generate" "${prefix}insert" "${prefix}undeploy" "${prefix}volume" \
"${prefix}recipe" "${prefix}create" "${prefix}release" "${prefix}versions" \
"${prefix}server" "${prefix}add" "${prefix}__" "${prefix}init" "${prefix}apps" \
"${prefix}generate" "${prefix}insert" "${prefix}undeploy" "${prefix}recipe" \
"${prefix}create" "${prefix}release" "${prefix}versions" "${prefix}server" \
"${prefix}add" "${prefix}__" "${prefix}init" "${prefix}apps" \
"${prefix}upgrade" "${prefix}doctor" "${prefix}help"
eval "${prefix}"'__skip_check=${var___skip_check:-false}'
eval "${prefix}"'__no_prompt=${var___no_prompt:-false}'
eval "${prefix}"'__skip_update=${var___skip_update:-false}'
eval "${prefix}"'__env=${var___env:-}'
eval "${prefix}"'__stack=${var___stack:-}'
eval "${prefix}"'__branch=${var___branch:-}'
eval "${prefix}"'__debug=${var___debug:-false}'
eval "${prefix}"'__skip_check=${var___skip_check:-false}'
eval "${prefix}"'__env=${var___env:-}'
eval "${prefix}"'__no_prompt=${var___no_prompt:-false}'
eval "${prefix}"'__help=${var___help:-false}'
eval "${prefix}"'__stack=${var___stack:-}'
eval "${prefix}"'__verbose=${var___verbose:-false}'
eval "${prefix}"'__skip_update=${var___skip_update:-false}'
eval "${prefix}"'__status=${var___status:-false}'
eval "${prefix}"'__server=${var___server:-}'
eval "${prefix}"'__type=${var___type:-}'
@ -298,12 +292,11 @@ eval "${prefix}"'__secrets=${var___secrets:-false}'
eval "${prefix}"'__all=${var___all:-false}'
eval "${prefix}"'__update=${var___update:-false}'
eval "${prefix}"'__force=${var___force:-false}'
eval "${prefix}"'__fast=${var___fast:-false}'
eval "${prefix}"'__chaos=${var___chaos:-false}'
eval "${prefix}"'__skip_version_check=${var___skip_version_check:-false}'
eval "${prefix}"'__no_domain_poll=${var___no_domain_poll:-false}'
eval "${prefix}"'__volumes=${var___volumes:-false}'
eval "${prefix}"'__no_tty=${var___no_tty:-false}'
eval "${prefix}"'__user=${var___user:-}'
eval "${prefix}"'__bump=${var___bump:-false}'
eval "${prefix}"'__dev=${var___dev:-false}'
eval "${prefix}"'_type_=${var__type_:-}'; eval "${prefix}"'_app_=${var__app_:-}'
eval "${prefix}"'_service_=${var__service_:-}'
@ -314,7 +307,6 @@ if declare -p var__args_ >/dev/null 2>&1; then
eval "${prefix}"'_args_=("${var__args_[@]}")'; else eval "${prefix}"'_args_=()'
fi; eval "${prefix}"'_secret_=${var__secret_:-}'
eval "${prefix}"'_cmd_=${var__cmd_:-}'; eval "${prefix}"'_data_=${var__data_:-}'
eval "${prefix}"'_volume_=${var__volume_:-}'
eval "${prefix}"'_command_=${var__command_:-}'
eval "${prefix}"'_recipe_=${var__recipe_:-}'
eval "${prefix}"'_host_=${var__host_:-}'
@ -342,7 +334,6 @@ eval "${prefix}"'secret=${var_secret:-false}'
eval "${prefix}"'generate=${var_generate:-false}'
eval "${prefix}"'insert=${var_insert:-false}'
eval "${prefix}"'undeploy=${var_undeploy:-false}'
eval "${prefix}"'volume=${var_volume:-false}'
eval "${prefix}"'recipe=${var_recipe:-false}'
eval "${prefix}"'create=${var_create:-false}'
eval "${prefix}"'release=${var_release:-false}'
@ -355,27 +346,27 @@ eval "${prefix}"'upgrade=${var_upgrade:-false}'
eval "${prefix}"'doctor=${var_doctor:-false}'
eval "${prefix}"'help=${var_help:-false}'; local docopt_i=1
[[ $BASH_VERSION =~ ^4.3 ]] && docopt_i=2; for ((;docopt_i>0;docopt_i--)); do
declare -p "${prefix}__skip_check" "${prefix}__no_prompt" \
"${prefix}__skip_update" "${prefix}__env" "${prefix}__branch" \
"${prefix}__debug" "${prefix}__help" "${prefix}__stack" "${prefix}__verbose" \
declare -p "${prefix}__stack" "${prefix}__branch" "${prefix}__debug" \
"${prefix}__skip_check" "${prefix}__env" "${prefix}__no_prompt" \
"${prefix}__help" "${prefix}__verbose" "${prefix}__skip_update" \
"${prefix}__status" "${prefix}__server" "${prefix}__type" "${prefix}__domain" \
"${prefix}__app_name" "${prefix}__pass" "${prefix}__secrets" "${prefix}__all" \
"${prefix}__update" "${prefix}__force" "${prefix}__fast" "${prefix}__chaos" \
"${prefix}__volumes" "${prefix}__no_tty" "${prefix}__user" "${prefix}__bump" \
"${prefix}__dev" "${prefix}_type_" "${prefix}_app_" "${prefix}_service_" \
"${prefix}_version_" "${prefix}_src_" "${prefix}_dst_" \
"${prefix}__update" "${prefix}__force" "${prefix}__skip_version_check" \
"${prefix}__no_domain_poll" "${prefix}__volumes" "${prefix}__no_tty" \
"${prefix}__user" "${prefix}__dev" "${prefix}_type_" "${prefix}_app_" \
"${prefix}_service_" "${prefix}_version_" "${prefix}_src_" "${prefix}_dst_" \
"${prefix}_backup_file_" "${prefix}_args_" "${prefix}_secret_" \
"${prefix}_cmd_" "${prefix}_data_" "${prefix}_volume_" "${prefix}_command_" \
"${prefix}_recipe_" "${prefix}_host_" "${prefix}_user_" "${prefix}_port_" \
"${prefix}_provider_" "${prefix}_subcommands_" "${prefix}app" "${prefix}list" \
"${prefix}ls" "${prefix}new" "${prefix}backup" "${prefix}deploy" \
"${prefix}check" "${prefix}version" "${prefix}config" "${prefix}cp" \
"${prefix}logs" "${prefix}ps" "${prefix}restore" "${prefix}rm" \
"${prefix}delete" "${prefix}run" "${prefix}rollback" "${prefix}secret" \
"${prefix}generate" "${prefix}insert" "${prefix}undeploy" "${prefix}volume" \
"${prefix}recipe" "${prefix}create" "${prefix}release" "${prefix}versions" \
"${prefix}server" "${prefix}add" "${prefix}__" "${prefix}init" "${prefix}apps" \
"${prefix}upgrade" "${prefix}doctor" "${prefix}help"; done; }
"${prefix}_cmd_" "${prefix}_data_" "${prefix}_command_" "${prefix}_recipe_" \
"${prefix}_host_" "${prefix}_user_" "${prefix}_port_" "${prefix}_provider_" \
"${prefix}_subcommands_" "${prefix}app" "${prefix}list" "${prefix}ls" \
"${prefix}new" "${prefix}backup" "${prefix}deploy" "${prefix}check" \
"${prefix}version" "${prefix}config" "${prefix}cp" "${prefix}logs" \
"${prefix}ps" "${prefix}restore" "${prefix}rm" "${prefix}delete" \
"${prefix}run" "${prefix}rollback" "${prefix}secret" "${prefix}generate" \
"${prefix}insert" "${prefix}undeploy" "${prefix}recipe" "${prefix}create" \
"${prefix}release" "${prefix}versions" "${prefix}server" "${prefix}add" \
"${prefix}__" "${prefix}init" "${prefix}apps" "${prefix}upgrade" \
"${prefix}doctor" "${prefix}help"; done; }
# docopt parser above, complete command for generating this parser is `docopt.sh abra`
PROGRAM_NAME=$(basename "$0")
@ -487,16 +478,6 @@ require_docker_version() {
done
}
require_valid_json() {
require_jq
$JQ "$1" > /dev/null || error "Invalid JSON '$1'"
}
require_valid_yaml() {
require_yq
$YQ e "$1" > /dev/null || error "Invalid YAML '$1'"
}
###### Download and update data
require_apps_json() {
@ -515,13 +496,13 @@ require_apps_json() {
if [ "$local_ctime" -lt "$remote_ctime" ]; then
info "Downloading new apps.json"
curl -sLo "$ABRA_APPS_JSON" "$ABRA_APPS_URL"
wget -qO "$ABRA_APPS_JSON" "$ABRA_APPS_URL"
else
debug "No apps.json update needed"
fi
else
info "Downloading apps.json"
curl -sLo "$ABRA_APPS_JSON" "$ABRA_APPS_URL"
wget -qO "$ABRA_APPS_JSON" "$ABRA_APPS_URL"
fi
}
@ -564,7 +545,7 @@ require_app (){
fi
# shellcheck disable=SC2086
if ! git clone ${git_extra_args:-} "$GIT_URL$APP.git" "$ABRA_DIR/apps/$APP" > /dev/null 2>&1 ; then
if ! git clone ${git_extra_args:-} "$GIT_URL/$APP.git" "$ABRA_DIR/apps/$APP" > /dev/null 2>&1 ; then
error "Could not retrieve app type '$APP', this app type doesn't exist?"
fi
@ -599,16 +580,13 @@ require_app_version() {
if [ -z "$VERSION" ]; then
warning "No version specified, dangerously using latest git 😨"
else
if [ "$abra___chaos" = "false" ]; then
git checkout -q "$VERSION" || error "Can't find version $VERSION"
else
warning "Chaos deploy specified, dangerously using latest git 😨"
fi
git checkout -q "$VERSION" || error "Can't find version $VERSION"
fi
}
vendor_binary() {
require_vendor_dir
require_binary wget
local REPO="$1"
local VERSION="$2"
@ -635,7 +613,7 @@ vendor_binary() {
;;
esac
curl -sLo "$ABRA_VENDOR_DIR/$BINARY" "$RELEASE_URL"
wget -qO "$ABRA_VENDOR_DIR/$BINARY" "$RELEASE_URL"
chmod +x "$ABRA_VENDOR_DIR/$BINARY"
success "$BINARY is now vendored ☮"
}
@ -682,7 +660,7 @@ get_recipe_versions() {
get_recipe_version_latest() {
if [ "${#RECIPE_VERSIONS[@]}" = 0 ]; then
VERSION=""
info "No versions found"
warning "No version specified, dangerously using latest git 😨"
else
VERSION="${RECIPE_VERSIONS[-1]}"
info "Chose version $VERSION"
@ -712,7 +690,6 @@ output_version_summary() {
CONSENT_TO_UPDATE=$abra___update
FORCE_DEPLOY=$abra___force
CHAOS_DEPLOY=$abra___chaos
local -a IS_AN_UPDATE="false"
local -a UNABLE_TO_DETECT="false"
@ -725,7 +702,6 @@ output_version_summary() {
IFS=':' read -ra COMPOSE_FILES <<< "$COMPOSE_FILE"
for COMPOSE in "${COMPOSE_FILES[@]}"; do
require_valid_yaml "$APP_DIR/$COMPOSE"
SERVICES=$($YQ e '.services | keys | .[]' "${APP_DIR}/${COMPOSE}")
for SERVICE in $SERVICES; do
@ -754,11 +730,7 @@ output_version_summary() {
if [ "$live_version" != "$service_tag" ] || [ "$live_digest" != "$service_digest" ]; then
IS_AN_UPDATE="true"
fi
if [ "$abra___chaos" = "true" ]; then
echo " to be deployed: $(tput setaf 1)$service_tag ($service_digest) (+ latest git)$(tput sgr0)"
else
echo " to be deployed: $(tput setaf 1)$service_tag ($service_digest)$(tput sgr0)"
fi
echo " to be deployed: $(tput setaf 1)$service_tag ($service_digest)$(tput sgr0)"
fi
else
if [[ $UNDEPLOYED_STATE == "true" ]]; then
@ -784,76 +756,69 @@ output_version_summary() {
else
if [[ $UNABLE_TO_DETECT == "false" ]] && \
[[ $UNDEPLOYED_STATE == "false" ]] && \
[[ $FORCE_DEPLOY == "false" ]] && \
[[ $CHAOS_DEPLOY = "false" ]]; then
[[ $FORCE_DEPLOY == "false" ]]; then
success "Nothing to deploy, you're on latest (use --force to re-deploy anyway)"
exit 0
fi
fi
}
# Note(decentral1se): inspired by https://github.com/vitalets/docker-stack-wait-deploy
ensure_stack_deployed() {
local -a HEALTHY # mapping
local -a MISSING # mapping
STACK_NAME=$1
TIMEOUT=60
idx=0
info "Waiting for deployment to succeed"
IFS=' ' read -r -a SERVICES <<< "$(docker stack services "${STACK_NAME}" --format "{{.ID}}" | tr '\n' ' ')"
while true; do
all_services_done=1
has_errors=0
while [ ! $(( ${#HEALTHY[@]} + ${#MISSING[@]} )) -eq ${#SERVICES[@]} ]; do
for service in $(docker ps -f "name=$STACK_NAME" -q); do
healthcheck=$(docker inspect --format "{{ json .State }}" "$service" | jq "try(.Health.Status // \"missing\")")
name=$(docker inspect --format '{{ index .Config.Labels "com.docker.swarm.service.name" }}' "$service")
service_ids=$(docker stack services -q "$STACK_NAME")
if [[ ${MISSING[*]} =~ ${name} ]] || [[ ${HEALTHY[*]} =~ ${name} ]]; then
continue
fi
for service_id in $service_ids; do
# see: https://github.com/moby/moby/issues/28012
service_state=$(docker service inspect --format "{{if .UpdateStatus}}{{.UpdateStatus.State}}{{else}}created{{end}}" "$service_id")
if [[ "$healthcheck" == "\"missing\"" ]] && [[ ! "${MISSING[*]}" =~ $name ]]; then
MISSING+=("$name")
info "$name has no healthcheck configured, cannot guarantee this service comes up successfully..."
continue
fi
debug "$service_id has state: $service_state"
if [[ "$healthcheck" == "\"healthy\"" ]] && [[ ! "${HEALTHY[*]}" =~ $name ]]; then
HEALTHY+=("$name")
info "$name is healthy!"
continue
fi
if [[ "$healthcheck" == \""unhealthy"\" ]]; then
logs=$(docker inspect --format "{{ json .State.Health.Log }}" "$service")
exitcode="$(echo "$logs" | $JQ '.[-1] | .ExitCode')"
warning "Healthcheck for new instance of $name is failing (exit code: $exitcode)"
warning "$(echo "$logs" | $JQ -r '.[-1] | .Output')"
error "healthcheck for $name is failing, this deployment did not succeed :("
fi
case "$service_state" in
created|completed)
;;
paused|rollback_completed)
has_errors=1
;;
*)
all_services_done=0
;;
esac
done
idx=$(("$idx" + 1))
if [[ $idx -eq "$TIMEOUT" ]]; then
error "Waiting for healthy status timed out, this deployment did not succeed :("
if [ "$all_services_done" == "1" ]; then
if [ "$has_errors" == "1" ]; then
debug "Deployment appears to have failed"
break
else
debug "Deployment appears to have suceeded"
break
fi
else
sleep 1
fi
sleep 1
info "Deploying: $(( ${#HEALTHY[@]} + ${#MISSING[@]} ))/${#SERVICES[@]} (timeout: $idx/$TIMEOUT)"
done
success "All services up! Deployment succeeded!"
}
ensure_domain_deployed() {
local domain=$1
DOMAIN=$1
warning "Waiting for $domain to come up..."
warning "Waiting for $DOMAIN to come up..."
idx=1
until curl --output /dev/null --silent --head --fail "$domain"; do
debug "Polled $domain $idx time(s) already"
until curl --output /dev/null --silent --head --fail "$DOMAIN"; do
debug "Polled $DOMAIN $idx time(s) already"
sleep 3
idx=$(("$idx" + 1))
if [[ $idx -gt 10 ]]; then
error "$domain still isn't up, check status by running \"abra app ${STACK_NAME} ps\""
error "$DOMAIN still isn't up, check status by running \"abra app ${STACK_NAME} ps\""
fi
done
}
@ -867,14 +832,14 @@ get_servers() {
get_app_secrets() {
# FIXME 3wc: requires bash 4, use for loop instead
mapfile -t PASSWORDS < <(grep "^SECRET.*VERSION.*" "$ENV_FILE")
mapfile -t PASSWORDS < <(grep "SECRET.*VERSION.*" "$ENV_FILE")
}
load_instance() {
APP="$abra__app_"
# load all files matching "$APP.env" into ENV_FILES array
mapfile -t ENV_FILES < <(find -L "$ABRA_DIR/servers/" -name "$APP.env")
mapfile -t ENV_FILES < <(find -L "$ABRA_DIR" -name "$APP.env")
# FIXME 3wc: requires bash 4, use for loop instead
case "${#ENV_FILES[@]}" in
@ -934,7 +899,7 @@ prompt_confirm() {
return
fi
read -rp "Continue? [y/N]? " choice
read -rp "Continue? (y/[n])? " choice
case "$choice" in
y|Y ) return ;;
@ -1313,15 +1278,15 @@ _abra_backup_mysql() {
###### .. app deploy
help_app_deploy (){
echo "abra [options] app <app> deploy [--update] [--force] [--fast]
echo "abra [options] app <app> deploy [--update] [--force] [--skip-version-check] [--no-domain-poll]
Deploy app <app> to the configured server.
OPTIONS
--update Consent to deploying an updated app version
--force Force a deployment regardless of state
--fast Run deployment without various safety checks
--chaos Deploy straight from latest Git version (potentially chaotic!)
--skip-version-check Don't try and detect deployed version
--no-domain-poll Don't wait for the configured domain to come up
POWERED BY
docker stack deploy -c compose.yml <app>"
@ -1330,15 +1295,8 @@ POWERED BY
sub_app_deploy (){
require_yq
if [ "$abra___fast" = "true" ]; then
SKIP_VERSION_CHECK=true
NO_DOMAIN_POLL=true
NO_STATE_POLL=true
else
SKIP_VERSION_CHECK=false
NO_DOMAIN_POLL=false
NO_STATE_POLL=false
fi
SKIP_VERSION_CHECK=$abra___skip_version_check
NO_DOMAIN_POLL=$abra___no_domain_poll
if [ ! "$abra__version_" = "dev" ]; then
get_recipe_versions "$TYPE"
@ -1346,7 +1304,7 @@ sub_app_deploy (){
if [ -n "$abra__version_" ]; then
VERSION="$abra__version_"
if ! printf '%s\0' "${RECIPE_VERSIONS[@]}" | grep -Fqxz -- "$VERSION"; then
error "'$VERSION' doesn't appear to be a valid version of $TYPE"
error "'$version' doesn't appear to be a valid version of $TYPE"
fi
info "Chose version $VERSION"
else
@ -1388,13 +1346,7 @@ sub_app_deploy (){
(cd "$APP_DIR" || error "\$APP_DIR '$APP_DIR' not found")
# shellcheck disable=SC2086
if (cd "$APP_DIR" && docker stack deploy -c ${COMPOSE_FILE//:/ -c } "$STACK_NAME"); then
if [ "$abra___fast" = "true" ]; then
success "Something happened! Hope it was good 🙏"
exit 0
fi
if [[ $NO_STATE_POLL == "false" ]]; then
ensure_stack_deployed "$STACK_NAME"
fi
ensure_stack_deployed "$STACK_NAME"
if [ -n "$DOMAIN" ]; then
if [[ $NO_DOMAIN_POLL == "false" ]]; then
ensure_domain_deployed "https://${DOMAIN}"
@ -1558,8 +1510,16 @@ sub_app_delete (){
rm "$ENV_FILE"
if [ "$abra___volumes" = "true" ]; then
abra___all="true"
sub_app_volume_delete
volumes="$(docker volume ls --filter "name=${STACK_NAME}" --quiet)"
if [ "$abra___volumes" = "true" ]; then
# shellcheck disable=SC2086
warning "SCARY: About to remove all volumes associated with ${STACK_NAME}: $(echo $volumes | tr -d '\n')"
prompt_confirm
fi
docker volume rm --force "$volumes"
fi
@ -1601,7 +1561,6 @@ sub_app_secret_insert() {
# shellcheck disable=SC2059
printf "$PW" | docker secret create "${STACK_NAME}_${SECRET}_${VERSION}" - > /dev/null
if [[ $? != 0 ]]; then exit 1; fi # exit if secret wasn't created
if [ "$STORE_WITH_PASS" == "true" ] && type pass > /dev/null 2>&1; then
echo "$PW" | pass insert "hosts/$DOCKER_CONTEXT/${STACK_NAME}/${SECRET}" -m > /dev/null
@ -1674,18 +1633,9 @@ POWERED BY
}
sub_app_secret_generate(){
local secret="$abra__secret_"
local version="$abra__version_"
local length="$abra__length_"
local msg_already_outputted=${msg_already_outputted:-"false"}
if [ "$msg_already_outputted" == "false" ]; then
warning "These generated secrets are now stored as encrypted data on your server"
warning "Please take a moment to make sure you have saved a copy of the passwords"
warning "Abra is not able to show the password values in plain text again"
warning "See https://docs.cloud.autonomic.zone/secrets/ for more on secrets"
msg_already_outputted="true"
fi
SECRET="$abra__secret_"
VERSION="$abra__version_"
LENGTH="$abra__length_"
if [ "$abra___all" == "true" ]; then
# Note(decentral1se): we need to reset the flag here to avoid the infinite
@ -1696,18 +1646,22 @@ sub_app_secret_generate(){
return
fi
if [[ -n "$length" ]]; then
require_binary pwgen
abra__cmd_="pwgen -s $length 1"
if [[ -n "$LENGTH" ]]; then
if [ -z "$abra__cmd_" ]; then
require_binary pwgen
abra__cmd_="pwgen -s $LENGTH 1"
fi
else
require_binary pwqgen
abra__cmd_=pwqgen
if [ -z "$abra__cmd_" ]; then
require_binary pwqgen
abra__cmd_=pwqgen
fi
fi
PWGEN=${abra__cmd_}
debug "SECRET: $SECRET, VERSION $VERSION, PW $PWGEN, ALL $abra___all"
if [ -z "$secret" ] || [ -z "$version" ] && [ "$abra___all" == "false" ]; then
if [ -z "$SECRET" ] || [ -z "$VERSION" ] && [ "$abra___all" == "false" ]; then
error "Required arguments missing"
fi
@ -1720,71 +1674,11 @@ sub_app_secret_generate(){
abra__data_="$PW"
sub_app_secret_insert
}
###### .. app volume
###### .. app volume ls
help_app_volume_ls (){
help_app_volume_list
}
sub_app_volume_ls(){
sub_app_volume_list
}
help_app_volume_list (){
echo "abra [options] app <app> volume (ls|list)
Show all volumes associated with <app>.
POWERED BY
docker volume ls"
}
sub_app_volume_list(){
docker volume ls --filter "name=${STACK_NAME}"
}
###### .. app volume rm
help_app_volume_rm (){
help_app_volume_delete
}
sub_app_volume_rm(){
sub_app_volume_delete
}
help_app_volume_delete (){
echo "abra [options] app <app> volume (delete|rm) (<volume>|--all)
Remove <app>'s Docker volume <volume>, or all volumes with --all.
OPTIONS
--pass Remove volume(s) from \`pass\` as well
--all Delete all volumes for <app>
POWERED BY
docker volume rm
docker volume ls # for --all"
}
sub_app_volume_delete(){
# if --all is provided then $abra__volume_ will be blank and this will work
# auto-magically
NAMES=$(docker volume ls --filter "name=${STACK_NAME}_${abra__volume_}" --format "{{.Name}}")
if [ -z "$NAMES" ]; then
error "Could not find any volumes under ${STACK_NAME}_${abra__volume_}"
fi
warning "About to delete volume(s) $(echo "$NAMES" | paste -d "")"
prompt_confirm
for NAME in ${NAMES}; do
docker volume rm "$NAME" > /dev/null
done
warning "These generated secrets are now stored as encrypted data on your server"
warning "Please take a moment to make sure you have saved a copy of the passwords"
warning "Abra is not able to show the password values in plain text again"
warning "See https://docs.docker.com/engine/swarm/secrets/ for more on secrets"
}
###### .. app run
@ -2061,7 +1955,7 @@ sub_recipe_versions() {
###### .. recipe <recipe> release
help_recipe_release() {
echo "abra [options] recipe <recipe> release [--force] [--bump]
echo "abra [options] recipe <recipe> release
(For recipe maintainers)
@ -2074,7 +1968,6 @@ any of the images in <recipe>.
OPTIONS
--force Over-write existing tag; use this if you have a git tag for the
recipe version already, to make sure labels are in sync.
--bump Make an n+1 release (packager specific changes to recipe)
POWERED BY
skopeo inspect docker://image:tag
@ -2089,13 +1982,8 @@ sub_recipe_release() {
recipe="$abra__recipe_"
force="$abra___force"
bump="$abra___bump"
recipe_dir="$ABRA_DIR/apps/$recipe"
if [ "$bump" = "true" ] && [ "$force" = "true" ]; then
error "--bump and --force don't play nicely together"
fi
cd "$recipe_dir" || error "Can't find recipe dir '$recipe_dir'"
get_recipe_versions "$recipe"
@ -2108,29 +1996,22 @@ sub_recipe_release() {
latest_version_message=$(git show -s --format=%s)
fi
info "Latest available version: '$latest_version'"
info "Latest version message: '$latest_version_message'"
info "Latest verion message: '$latest_version_message'"
else
latest_version=""
latest_version_message="Initial tagged release"
info "No previous releases found"
if [ "$bump" = "true" ]; then
error "--bump can't do its work when there are no existing release versions"
fi
fi
current_tag=$(git tag --points-at HEAD)
if [ "$force" = "false" ] && [ -n "$current_tag" ] && [ "$bump" = "false" ]; then
success "$recipe is already on $current_tag, no release needed"
if [ "$force" = "false" ] && [ -n "$current_tag" ]; then
error "$recipe is already on $current_tag, no release needed"
fi
if [ "$(git rev-parse --abbrev-ref --symbolic-full-name HEAD)" = "HEAD" ]; then
warning "It looks like $recipe_dir is in 'detached HEAD' state"
if [ "$abra___no_prompt" = "false" ]; then
read -rp "Check out main/master branch first? [Y/n] "
if [ "${choice,,}" != "n" ]; then
checkout_main_or_master
fi
else
read -rp "Check out main/master branch first? [Y/n] "
if [ "${choice,,}" != "n" ]; then
checkout_main_or_master
fi
fi
@ -2142,12 +2023,7 @@ sub_recipe_release() {
new_version="false"
for compose_file in "${compose_files[@]}"; do
if [ "$bump" = "true" ]; then
continue # skip trying to upgrade labels for --bump logic
fi
mapfile -t services < <($YQ e -N '.services | keys | .[]' "$compose_file" | sort -u)
if [ "$compose_file" = "compose.yml" ] && ! printf '%s\0' "${services[@]}" | grep -Fqxz -- "app"; then
# shellcheck disable=SC2016
warning 'No `app` service found; which one should we use for the version number?'
@ -2188,7 +2064,7 @@ sub_recipe_release() {
fi
info "Fetching $service_image metadata from Docker Hub"
service_data=$(skopeo inspect "docker://$service_image")
service_digest=$(echo "$service_data" | $JQ -r '.Digest' | cut -d':' -f2 | cut -c-8)
service_digest=$(echo "$service_data" | jq -r '.Digest' | cut -d':' -f2 | cut -c-8)
label="coop-cloud.\${STACK_NAME}.$service.version=${service_tag}-${service_digest}"
@ -2223,49 +2099,22 @@ sub_recipe_release() {
success "All compose files updated; new version is $new_version"
if [ "$abra___no_prompt" = "false" ] && [ "$bump" = "false" ]; then
read -rp "Commit your changes to git? [y/N]? " choice
read -rp "Commit your changes to git? (y/[n])? " choice
if [ "${choice,,}" != "y" ]; then
return
fi
if [ "${choice,,}" != "y" ]; then
return
fi
if [ "$abra___no_prompt" = "false" ] && [ "$bump" = "false" ]; then
git commit -avem "Version $new_version; sync labels" || exit
else
git commit -am "Version $new_version; sync labels" || true
git commit -avem "Version $new_version; sync labels" || exit
read -rp "Tag this as \`$new_version\`? (y/[n])? " choice
if [ "${choice,,}" != "y" ]; then
return
fi
if [ "$abra___no_prompt" = "false" ]; then
read -rp "Tag this as \`$new_version\`? [y/N]? " choice
if [ "${choice,,}" != "y" ]; then
return
fi
fi
if [ "$force" = "true" ]; then
git tag -d "$new_version" || true
git push origin --delete "$new_version" || true
debug "Deleted local tag and remote tag if present"
fi
if [ "$abra___no_prompt" = "false" ]; then
git tag -aem "$latest_version_message" "$new_version"
else
git tag -am "$latest_version_message" "$new_version" || true
fi
if [ "$abra___no_prompt" = "false" ]; then
read -rp "Git push this new tag? [y/N]? " choice
if [ "${choice,,}" = "y" ]; then
git push && git push --tags
fi
else
git push && git push --tags
fi
test "$force" = "true" && git tag -d "$new_version"
git tag -aem "$latest_version_message" "$new_version"
}
#######################################
@ -2462,9 +2311,9 @@ OPTIONS
sub_upgrade() {
if [[ "$abra___dev" == "true" ]]; then
curl https://install.abra.coopcloud.tech | bash -s -- --dev
curl https://install.abra.autonomic.zone | bash -s -- --dev
else
curl https://install.abra.coopcloud.tech | bash
curl https://install.abra.autonomic.zone | bash
fi
}
@ -2557,7 +2406,7 @@ abra() {
abra___skip_check abra__backup_file_ abra___verbose abra___debug \
abra___help abra___branch abra___volumes abra__provider_ abra___type \
abra___dev abra___update abra___no_prompt abra___force \
abra__recipe_ abra___fast abra__volume_ abra___bump abra___chaos
abra___skip_version_check abra__recipe_ abra___no_domain_poll
if ! type tput > /dev/null 2>&1; then
tput() {


@ -1,108 +0,0 @@
"""Shared utilities for bin/*.py scripts."""
from logging import DEBUG, basicConfig, getLogger
from os import chdir, mkdir
from os.path import exists, expanduser
from pathlib import Path
from shlex import split
from subprocess import check_output
from sys import exit
from requests import get
HOME_PATH = expanduser("~/")
CLONES_PATH = Path(f"{HOME_PATH}/.abra/apps").absolute()
REPOS_TO_SKIP = (
"abra",
"abra-apps",
"abra-gandi",
"abra-hetzner",
"auto-apps-json",
"auto-mirror",
"backup-bot",
"coopcloud.tech",
"coturn",
"docker-cp-deploy",
"docker-dind-bats-kcov",
"docs.coopcloud.tech",
"example",
"gardening",
"organising",
"pyabra",
"radicle-seed-node",
"stack-ssh-deploy",
"swarm-cronjob",
)
YQ_PATH = Path(f"{HOME_PATH}/.abra/vendor/yq")
JQ_PATH = Path(f"{HOME_PATH}/.abra/vendor/jq")
log = getLogger(__name__)
basicConfig()
log.setLevel(DEBUG)
def _run_cmd(cmd, shell=False, **kwargs):
"""Run a shell command."""
args = [split(cmd)]
if shell:
args = [cmd]
kwargs = {"shell": shell}
try:
return check_output(*args, **kwargs).decode("utf-8").strip()
except Exception as exception:
log.error(f"Failed to run {cmd}, saw {str(exception)}")
exit(1)
def get_repos_json():
""" Retrieve repo list from Gitea """
url = "https://git.autonomic.zone/api/v1/orgs/coop-cloud/repos"
log.info(f"Retrieving {url}")
repos = []
response = True
page = 1
try:
while response:
log.info(f"Trying to fetch page {page}")
response = get(url + f"?page={page}", timeout=10).json()
repos.extend(response)
page += 1
return repos
except Exception as exception:
log.error(f"Failed to retrieve {url}, saw {str(exception)}")
exit(1)
def clone_all_apps(repos_json, ssh=False):
"""Clone all Co-op Cloud apps to ~/.abra/apps."""
if not exists(CLONES_PATH):
mkdir(CLONES_PATH)
if ssh:
repos = [[p["name"], p["ssh_url"]] for p in repos_json]
else:
repos = [[p["name"], p["clone_url"]] for p in repos_json]
for name, url in repos:
if name in REPOS_TO_SKIP:
continue
if not exists(f"{CLONES_PATH}/{name}"):
log.info(f"Retrieving {url}")
_run_cmd(f"git clone {url} {CLONES_PATH}/{name}")
chdir(f"{CLONES_PATH}/{name}")
if not int(_run_cmd("git branch --list | wc -l", shell=True)):
log.info(f"Guessing main branch is HEAD for {name}")
_run_cmd("git checkout main")
else:
log.info(f"Updating {name}")
chdir(f"{CLONES_PATH}/{name}")
_run_cmd("git fetch -a")


@ -6,44 +6,84 @@
# ~/.abra/apps), and format it as JSON so that it can be hosted here:
# https://apps.coopcloud.tech
import argparse
from json import dump
from os import chdir, environ, getcwd, listdir
from os.path import basename
from logging import DEBUG, basicConfig, getLogger
from os import chdir, listdir, mkdir
from os.path import basename, exists, expanduser
from pathlib import Path
from re import findall, search
from subprocess import DEVNULL
from shlex import split
from subprocess import DEVNULL, check_output
from sys import exit
from requests import get
from abralib import (
CLONES_PATH,
JQ_PATH,
REPOS_TO_SKIP,
YQ_PATH,
_run_cmd,
clone_all_apps,
get_repos_json,
log,
HOME_PATH = expanduser("~/")
CLONES_PATH = Path(f"{HOME_PATH}/.abra/apps").absolute()
YQ_PATH = Path(f"{HOME_PATH}/.abra/vendor/yq")
SCRIPT_PATH = Path(__file__).absolute().parent
REPOS_TO_SKIP = (
"abra",
"abra-apps",
"abra-gandi",
"abra-hetzner",
"backup-bot",
"coopcloud.tech",
"coturn",
"docker-cp-deploy",
"docker-dind-bats-kcov",
"docs.coopcloud.tech",
"example",
"gardening",
"organising",
"pyabra",
"radicle-seed-node",
"stack-ssh-deploy",
"swarm-cronjob",
)
parser = argparse.ArgumentParser(description="Generate a new apps.json")
parser.add_argument("--output", type=Path, default=f"{getcwd()}/apps.json")
log = getLogger(__name__)
basicConfig()
log.setLevel(DEBUG)
def skopeo_login():
"""Log into the docker registry to avoid rate limits."""
user = environ.get("SKOPEO_USER")
password = environ.get("SKOPEO_PASSWORD")
registry = environ.get("SKOPEO_REGISTRY", "docker.io")
def _run_cmd(cmd, shell=False, **kwargs):
"""Run a shell command."""
args = [split(cmd)]
if not user or not password:
log.info("Failed to log in via Skopeo due to missing env vars")
return
if shell:
args = [cmd]
kwargs = {"shell": shell}
login_cmd = f"skopeo login {registry} -u {user} -p {password}"
output = _run_cmd(login_cmd, shell=True)
log.info(f"Skopeo login attempt: {output}")
try:
return check_output(*args, **kwargs).decode("utf-8").strip()
except Exception as exception:
log.error(f"Failed to run {cmd}, saw {str(exception)}")
exit(1)
def get_repos_json():
""" Retrieve repo list from Gitea """
url = "https://git.autonomic.zone/api/v1/orgs/coop-cloud/repos"
log.info(f"Retrieving {url}")
repos = []
response = True
page = 1
try:
while response:
log.info(f"Trying to fetch page {page}")
response = get(url + f"?page={page}", timeout=10).json()
repos.extend(response)
page += 1
return repos
except Exception as exception:
log.error(f"Failed to retrieve {url}, saw {str(exception)}")
exit(1)
def get_published_apps_json():
@ -59,6 +99,31 @@ def get_published_apps_json():
return {}
def clone_all_apps(repos_json):
"""Clone all Co-op Cloud apps to ~/.abra/apps."""
if not exists(CLONES_PATH):
mkdir(CLONES_PATH)
repos = [[p["name"], p["ssh_url"]] for p in repos_json]
for name, url in repos:
if name in REPOS_TO_SKIP:
continue
if not exists(f"{CLONES_PATH}/{name}"):
log.info(f"Retrieving {url}")
_run_cmd(f"git clone {url} {CLONES_PATH}/{name}")
chdir(f"{CLONES_PATH}/{name}")
if not int(_run_cmd("git branch --list | wc -l", shell=True)):
log.info(f"Guessing main branch is HEAD for {name}")
_run_cmd("git checkout main")
else:
log.info(f"Updating {name}")
chdir(f"{CLONES_PATH}/{name}")
_run_cmd("git fetch -a")
def generate_apps_json(repos_json):
"""Generate the abra-apps.json application versions file."""
apps_json = {}
@ -75,8 +140,7 @@ def generate_apps_json(repos_json):
chdir(app_path)
metadata = get_app_metadata(app_path)
name = metadata.pop("name", app)
name = metadata.pop("name", "")
log.info(f"Processing {app}")
apps_json[app] = {
@ -109,7 +173,7 @@ def get_app_metadata(app_path):
return {}
try:
for match in findall(r"\*\*.*", contents):
for match in findall(r"\*\*.*\s\*", contents):
title = search(r"(?<=\*\*).*(?=\*\*)", match).group().lower()
if title == "image":
@ -192,7 +256,7 @@ def get_app_versions(app_path, cached_apps_json):
if image in ("null", "---"):
continue
images_cmd = f"skopeo inspect docker://{image} | {JQ_PATH} '.Digest'"
images_cmd = f"skopeo inspect docker://{image} | jq '.Digest'"
output = _run_cmd(images_cmd, shell=True)
service_version_info = {
@ -215,14 +279,11 @@ def get_app_versions(app_path, cached_apps_json):
def main():
"""Run the script."""
args = parser.parse_args()
skopeo_login()
repos_json = get_repos_json()
clone_all_apps(repos_json)
with open(args.output, "w", encoding="utf-8") as handle:
target = f"{SCRIPT_PATH}/../deploy/apps.coopcloud.tech/apps.json"
with open(target, "w", encoding="utf-8") as handle:
dump(
generate_apps_json(repos_json),
handle,
@ -231,7 +292,7 @@ def main():
sort_keys=True,
)
log.info(f"Successfully generated {args.output}")
log.info(f"Successfully generated {target}")
main()
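As a quick sanity check of the generator's output (a sketch only; it assumes the file was written to a local `apps.json`, and the exact per-app schema is not shown in this hunk), the generated file can be loaded and its top-level keys, one per app, listed:

    # Sketch: inspect a generated apps.json; the top-level keys are the
    # app names collected by generate_apps_json() above, and the file is
    # dumped with sort_keys=True.
    from json import load

    with open("apps.json", encoding="utf-8") as handle:
        apps = load(handle)

    for app in sorted(apps):
        print(app)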

View File

@ -1,16 +0,0 @@
#!/usr/bin/env python3
# Usage: ./clone-all-apps.py
#
# Clone all available apps into ~/.abra/apps using ssh:// URLs
from abralib import clone_all_apps, get_repos_json
def main():
"""Run the script."""
repos_json = get_repos_json()
clone_all_apps(repos_json, ssh=True)
main()

View File

@ -1,47 +0,0 @@
#!/usr/bin/env python3
# Usage: ./github-sync.py
#
# Mirror repositories to Github (Fuck M$, get it straight)
from os import chdir, environ, listdir
from abralib import CLONES_PATH, _run_cmd, clone_all_apps, get_repos_json, log
REPOS_TO_SKIP = (
"backup-bot",
"docker-dind-bats-kcov",
"docs.coopcloud.tech",
"pyabra",
"radicle-seed-node",
"swarm-cronjob",
)
def main():
"""Run the script."""
repos_json = get_repos_json()
clone_all_apps(repos_json)
for app in listdir(CLONES_PATH):
if app in REPOS_TO_SKIP:
log.info(f"Skipping {app}")
continue
app_path = f"{CLONES_PATH}/{app}"
chdir(app_path)
log.info(f"Mirroring {app}...")
token = environ.get("GITHUB_ACCESS_TOKEN")
remote = f"https://coopcloudbot:{token}@github.com/Coop-Cloud/{app}.git"
_run_cmd(
f"git remote add github {remote} || true",
shell=True,
)
_run_cmd("git push github --all")
main()

View File

@ -1,20 +0,0 @@
#!/usr/bin/env python3
# Usage: ./renovate-ls-apps.py
#
# Output list of apps for Renovate bot configuration
from abralib import REPOS_TO_SKIP, get_repos_json
def main():
"""Run the script."""
repos = [p["full_name"] for p in get_repos_json()]
repos.sort()
for repo in repos:
if repo.split("/")[-1] in REPOS_TO_SKIP:
continue
print(f'"{repo}",')
main()

View File

@ -38,22 +38,6 @@ _abra_complete_apps()
mapfile -t COMPREPLY < <(compgen -W "$(_abra_apps)" -- "$1")
}
_abra_recipes()
{
shopt -s nullglob dotglob
local RECIPES=(~/.abra/apps/*)
shopt -u nullglob dotglob
for RECIPE in "${RECIPES[@]}"; do
_abra_basename "${RECIPE%.env}"
done
}
_abra_complete_recipes()
{
mapfile -t COMPREPLY < <(compgen -W "$(_abra_recipes)" -- "$1")
}
_abra_complete()
{
compopt +o default +o nospace
@ -62,7 +46,6 @@ _abra_complete()
local -r cmds='
app
server
recipe
'
local -r short_opts='-e -h -s -v'
local -r long_opts='--env --help --stack --version'
@ -118,12 +101,6 @@ _abra_complete()
_abra_complete_apps "$cur"
fi
;;
recipe)
# Offer exactly one app completion.
if (( COMP_CWORD == cmd_index + 1 )); then
_abra_complete_recipes "$cur"
fi
;;
#help)
# # Offer exactly one command name completion.
# if (( COMP_CWORD == cmd_index + 1 )); then

File diff suppressed because it is too large

View File

@ -0,0 +1,41 @@
---
version: "3.8"
services:
app:
image: "nginx:stable"
configs:
- source: abra_conf
target: /etc/nginx/conf.d/abra.conf
- source: abra_apps_json
target: /var/www/abra-apps/apps.json
volumes:
- "public:/var/www/abra-apps"
networks:
- proxy
deploy:
update_config:
failure_action: rollback
order: start-first
labels:
- "traefik.enable=true"
- "traefik.http.services.abra-apps.loadbalancer.server.port=80"
- "traefik.http.routers.abra-apps.rule=Host(`apps.coopcloud.tech`, `abra-apps.cloud.autonomic.zone`)"
- "traefik.http.routers.abra-apps.entrypoints=web-secure"
- "traefik.http.routers.abra-apps.tls.certresolver=production"
- "traefik.http.routers.abra-apps.middlewares=abra-apps-redirect"
- "traefik.http.middlewares.abra-apps-redirect.headers.SSLForceHost=true"
- "traefik.http.middlewares.abra-apps-redirect.headers.SSLHost=apps.coopcloud.tech"
configs:
abra_apps_json:
file: apps.json
abra_conf:
file: nginx.conf
networks:
proxy:
external: true
volumes:
public:

View File

@ -0,0 +1,10 @@
server {
listen 80 default_server;
server_name apps.coopcloud.tech;
location / {
root /var/www/abra-apps;
add_header Content-Type application/json;
index apps.json;
}
}
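Once the stack above is deployed, the catalogue should be reachable over HTTPS; a minimal sketch of fetching it (assuming the apps.coopcloud.tech deployment is live and serving `apps.json` as the index, as configured here):

    # Sketch: fetch the published catalogue; nginx serves apps.json as
    # the index with an application/json content type.
    from requests import get

    response = get("https://apps.coopcloud.tech", timeout=10)
    response.raise_for_status()
    print(sorted(response.json()))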

View File

@ -20,7 +20,7 @@ services:
labels:
- "traefik.enable=true"
- "traefik.http.services.abra-installer.loadbalancer.server.port=80"
- "traefik.http.routers.abra-installer.rule=Host(`install.abra.autonomic.zone`,`install.abra.coopcloud.tech`)"
- "traefik.http.routers.abra-installer.rule=Host(`install.abra.autonomic.zone`)"
- "traefik.http.routers.abra-installer.entrypoints=web-secure"
- "traefik.http.routers.abra-installer.tls.certresolver=production"

View File

@ -1,9 +1,9 @@
#!/bin/bash
ABRA_VERSION="9.0.0"
ABRA_VERSION="0.7.3"
GIT_URL="https://git.autonomic.zone/coop-cloud/abra"
ABRA_SRC="$GIT_URL/raw/tag/$ABRA_VERSION/abra"
ABRA_DIR="${ABRA_DIR:-$HOME/.abra}"
ABRA_DIR="${ABRA_DIR:-$HOME/.abra/}"
function install_abra_release {
mkdir -p "$HOME/.local/bin"

View File

@ -1,6 +1,6 @@
server {
listen 80 default_server;
server_name install.abra.autonomic.zone install.abra.coopcloud.tech;
server_name install.abra.autonomic.zone;
location / {
root /var/www/abra-installer;

View File

@ -1,4 +1,4 @@
.PHONY: test shellcheck docopt release-installer build push
.PHONY: test shellcheck docopt kcov codecov release-installer
test:
@sudo DOCKER_CONTEXT=default docker run \
@ -7,13 +7,13 @@ test:
-d \
--name=abra-test-dind \
-e DOCKER_TLS_CERTDIR="" \
decentral1se/docker-dind-bats-kcov \
@DOCKER_CONTEXT=default sudo docker exec \
decentral1se/docker-dind-bats-kcov
@DOCKER_CONTEXT=default docker exec \
-it \
abra-test-dind \
sh -c "cd /workdir && bats /workdir/tests"
@DOCKER_CONTEXT=default sudo docker stop abra-test-dind
@DOCKER_CONTEXT=default sudo docker rm abra-test-dind
@DOCKER_CONTEXT=default docker stop abra-test-dind
@DOCKER_CONTEXT=default docker rm abra-test-dind
shellcheck:
@docker run \
@ -32,19 +32,27 @@ docopt:
fi
.venv/bin/docopt.sh abra
kcov:
@docker run \
-it \
--rm \
-v $$(pwd):/workdir \
kcov/kcov:latest \
sh -c "kcov /workdir/coverage /workdir/abra || true"
codecov: SHELL:=/bin/bash
codecov:
@bash <(curl -s https://codecov.io/bash) \
-s coverage -t $$(pass show hosts/swarm.autonomic.zone/drone/codecov/token)
release-installer:
@DOCKER_CONTEXT=swarm.autonomic.zone \
docker stack rm abra-installer-script && \
cd deploy/install.abra.coopcloud.tech && \
cd deploy/install.abra.autonomic.zone && \
DOCKER_CONTEXT=swarm.autonomic.zone docker stack deploy -c compose.yml abra-installer-script
build:
@docker build -t thecoopcloud/abra .
push: build
@docker push thecoopcloud/abra
symlink:
@mkdir -p ~/.abra/servers/ && \
ln -srf tests/default ~/.abra/servers && \
ln -srf tests/apps/* ~/.abra/apps
release-apps:
@DOCKER_CONTEXT=swarm.autonomic.zone \
docker stack rm abra-apps-json && \
cd deploy/apps.coopcloud.tech && \
DOCKER_CONTEXT=swarm.autonomic.zone docker stack deploy -c compose.yml abra-apps-json

View File

@ -1,84 +0,0 @@
---
# The goal of this compose file is to have a testing ground for understanding
# what cases we need to handle to get stable deployments. For that, we need to
# work with healthchecks and deploy configurations quite closely. If you run
# the `make symlink` target then this will be loaded into a "fake" app on your
# local machine which you can deploy with `abra`.
version: "3.8"
services:
r1_should_work:
image: redis:alpine
deploy:
update_config:
failure_action: rollback
order: start-first
rollback_config:
order: start-first
restart_policy:
max_attempts: 1
healthcheck:
test: redis-cli ping
interval: 2s
retries: 3
start_period: 1s
timeout: 3s
r2_broken_health_check:
image: redis:alpine
deploy:
update_config:
failure_action: rollback
order: start-first
rollback_config:
order: start-first
restart_policy:
max_attempts: 3
healthcheck:
test: foobar
interval: 2s
retries: 3
start_period: 1s
timeout: 3s
r3_no_health_check:
image: redis:alpine
deploy:
update_config:
failure_action: rollback
order: start-first
rollback_config:
order: start-first
restart_policy:
max_attempts: 3
r4_disabled_health_check:
image: redis:alpine
deploy:
update_config:
failure_action: rollback
order: start-first
rollback_config:
order: start-first
restart_policy:
max_attempts: 3
healthcheck:
disable: true
r5_should_also_work:
image: redis:alpine
deploy:
update_config:
failure_action: rollback
order: start-first
rollback_config:
order: start-first
restart_policy:
max_attempts: 1
healthcheck:
test: redis-cli ping
interval: 2s
retries: 3
start_period: 1s
timeout: 3s

View File

@ -1 +0,0 @@
TYPE=works