23 Commits

Author SHA1 Message Date
3wc
e1f2318273 Merge branch 'tests' into publicapi-tests 2021-07-17 13:37:43 +02:00
3wc
7b9e9debab Disable VM creation check for the moment 2021-07-17 13:37:28 +02:00
3wc
bef379273f Fix botched merge 2021-07-17 13:32:54 +02:00
3wc
82c0c63ff4 Add tests for public API 2021-07-17 13:29:16 +02:00
3wc
d5e0e069d1 Merge branch 'tests' into publicapi-tests 2021-07-17 13:28:50 +02:00
3wc
b8d149e862 Updates for upstream IP handling 2021-07-17 13:23:43 +02:00
3wc
cf42ac5e4d Add basic "create" API..
.. using server-side API tokens
2021-07-17 12:18:16 +02:00
3wc
9823bda4a2 Add SSH key tests 2021-07-17 12:10:28 +02:00
3wc
4c7caa1a38 Initial console tests
NB capsul create isn't working properly, see #83
2021-07-17 11:54:15 +02:00
3wc
665e087bd4 Basic testing using flask-testing
This commit makes it possible to override settings during tests, by
switching capsulflask/__init__.py to a "create_app" pattern, and using
`dotenv_values` instead of `load_dotenv`.

The create_app() method returns a Flask app instance, to give
more control over when to initialise the app. This allows setting
environment variables in test files.

Then, use dotenv_values to override loaded .env variables with ones from
the environment, so that tests can set `POSTGRES_CONNECTION_PARAMETERS`
and `SPOKE_MODEL` (possibly others in future..).

Initial tests for the "landing" pages, and for login / activation, are
included.
2021-07-17 10:38:04 +02:00
908d02803f move hardcoding public3 to the right place 2021-07-12 16:10:28 -05:00
6e6bd2b143 fix syntax error 2021-07-12 16:00:37 -05:00
47fbaab403 hardcode network_name=public3 to sidestep phantom dhcp lease issues 2021-07-12 15:59:20 -05:00
06a2bd3a6f add public_ipv4_first_usable_ip, public_ipv4_last_usable_ip 2021-07-12 14:38:56 -05:00
fbe9c7fca4 fix last IP address hanging off the end of the display on admin page 2021-07-12 12:27:07 -05:00
6bdb133153 make admin display work better on phones 2021-07-12 12:23:27 -05:00
b459e56f3a use the provided network_name when creating a capsul 2021-07-12 12:19:56 -05:00
be54117736 migration 17: network_name -> virtual_bridge_name, add network_name col 2021-07-12 12:16:32 -05:00
aaf33a245b point capsul at new public3/virbr3 network 2021-07-12 11:29:37 -05:00
ad9c3476c7 simplify cidr block logic a little bit 2021-07-12 11:29:33 -05:00
fcbea1e29b fixing capsul creation after I broke it with the pre-allocated IP
address changes
2021-07-11 12:18:58 -05:00
a2f2e744e4 MAIL_USE_TLS=False, MAIL_USE_SSL=True defaults 2021-07-11 10:28:47 -05:00
79ef90c380 hub allocate capsul IP addr when the create operation is being claimed
create.sh will now be passed two extra arguments from the web app:

network_name and public_ipv4_address

network_name will be virbr1 or virbr2 or whatever the network is called, and
public_ipv4_address will be an IPv4 address from that network which is not
currently in use
2021-07-09 17:08:51 -05:00
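To illustrate the hand-off described in the commit above: once the hub has allocated an address, the spoke ends up invoking create.sh with two extra trailing arguments. A minimal sketch follows; the script path and values are placeholders, not taken from the diff, and the real call site is in the spoke_model.py changes further down.

import subprocess

# Sketch only: argument order mirrors create.sh ($1..$7 in the shell script
# shown later), with network_name and public_ipv4 appended as the new 6th
# and 7th arguments.
subprocess.run([
    "/path/to/shell_scripts/create.sh",  # placeholder path
    "capsul-abc123xyz0",                 # vm name (capsul id)
    "debian10-root.img.qcow2",           # template_image_file_name (illustrative)
    "1",                                 # vcpus
    "1024",                              # memory_mb
    "ssh-ed25519 AAAA... user@example",  # newline-separated ssh authorized keys
    "public3",                           # network_name chosen by the hub
    "69.61.38.200",                      # public_ipv4 allocated by the hub
], capture_output=True)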
32 changed files with 1071 additions and 330 deletions

View File

@ -9,6 +9,7 @@ blinker = "==1.4"
click = "==7.1.2"
Flask = "==1.1.2"
Flask-Mail = "==0.9.1"
Flask-Testing = "==0.8.1"
gunicorn = "==20.0.4"
isort = "==4.3.21"
itsdangerous = "==1.1.0"

app.py
View File

@ -1,2 +1,4 @@
from capsulflask import app
from capsulflask import create_app
create_app()

View File

@ -8,7 +8,7 @@ import requests
import sys
import stripe
from dotenv import load_dotenv, find_dotenv
from dotenv import find_dotenv, dotenv_values
from flask import Flask
from flask_mail import Mail, Message
from flask import render_template
@ -22,60 +22,66 @@ from capsulflask import hub_model, spoke_model, cli
from capsulflask.btcpay import client as btcpay
from capsulflask.http_client import MyHTTPClient
class StdoutMockFlaskMail:
def send(self, message: Message):
current_app.logger.info(f"Email would have been sent if configured:\n\nto: {','.join(message.recipients)}\nsubject: {message.subject}\nbody:\n\n{message.body}\n\n")
load_dotenv(find_dotenv())
def create_app():
config = {
**dotenv_values(find_dotenv()),
**os.environ, # override loaded values with environment variables
}
app = Flask(__name__)
app.config.from_mapping(
BASE_URL=os.environ.get("BASE_URL", default="http://localhost:5000"),
SECRET_KEY=os.environ.get("SECRET_KEY", default="dev"),
HUB_MODE_ENABLED=os.environ.get("HUB_MODE_ENABLED", default="True").lower() in ['true', '1', 't', 'y', 'yes'],
SPOKE_MODE_ENABLED=os.environ.get("SPOKE_MODE_ENABLED", default="True").lower() in ['true', '1', 't', 'y', 'yes'],
INTERNAL_HTTP_TIMEOUT_SECONDS=os.environ.get("INTERNAL_HTTP_TIMEOUT_SECONDS", default="300"),
HUB_MODEL=os.environ.get("HUB_MODEL", default="capsul-flask"),
SPOKE_MODEL=os.environ.get("SPOKE_MODEL", default="mock"),
LOG_LEVEL=os.environ.get("LOG_LEVEL", default="INFO"),
SPOKE_HOST_ID=os.environ.get("SPOKE_HOST_ID", default="baikal"),
SPOKE_HOST_TOKEN=os.environ.get("SPOKE_HOST_TOKEN", default="changeme"),
HUB_TOKEN=os.environ.get("HUB_TOKEN", default="changeme"),
TESTING=config.get("TESTING", False),
BASE_URL=config.get("BASE_URL", "http://localhost:5000"),
SECRET_KEY=config.get("SECRET_KEY", "dev"),
HUB_MODE_ENABLED=config.get("HUB_MODE_ENABLED", "True").lower() in ['true', '1', 't', 'y', 'yes'],
SPOKE_MODE_ENABLED=config.get("SPOKE_MODE_ENABLED", "True").lower() in ['true', '1', 't', 'y', 'yes'],
INTERNAL_HTTP_TIMEOUT_SECONDS=config.get("INTERNAL_HTTP_TIMEOUT_SECONDS", "300"),
HUB_MODEL=config.get("HUB_MODEL", "capsul-flask"),
SPOKE_MODEL=config.get("SPOKE_MODEL", "mock"),
LOG_LEVEL=config.get("LOG_LEVEL", "INFO"),
SPOKE_HOST_ID=config.get("SPOKE_HOST_ID", "baikal"),
SPOKE_HOST_TOKEN=config.get("SPOKE_HOST_TOKEN", "changeme"),
HUB_TOKEN=config.get("HUB_TOKEN", "changeme"),
# https://www.postgresql.org/docs/9.1/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS
# https://stackoverflow.com/questions/56332906/where-to-put-ssl-certificates-when-trying-to-connect-to-a-remote-database-using
# TLS example: sslmode=verify-full sslrootcert=letsencrypt-root-ca.crt host=db.example.com port=5432 user=postgres password=dev dbname=postgres
POSTGRES_CONNECTION_PARAMETERS=os.environ.get(
POSTGRES_CONNECTION_PARAMETERS=config.get(
"POSTGRES_CONNECTION_PARAMETERS",
default="host=localhost port=5432 user=postgres password=dev dbname=postgres"
"host=localhost port=5432 user=postgres password=dev dbname=postgres"
),
DATABASE_SCHEMA=os.environ.get("DATABASE_SCHEMA", default="public"),
DATABASE_SCHEMA=config.get("DATABASE_SCHEMA", "public"),
MAIL_SERVER=os.environ.get("MAIL_SERVER", default=""),
MAIL_PORT=os.environ.get("MAIL_PORT", default="465"),
MAIL_USE_TLS=os.environ.get("MAIL_USE_TLS", default="True").lower() in ['true', '1', 't', 'y', 'yes'],
MAIL_USE_SSL=os.environ.get("MAIL_USE_SSL", default="True").lower() in ['true', '1', 't', 'y', 'yes'],
MAIL_USERNAME=os.environ.get("MAIL_USERNAME", default=""),
MAIL_PASSWORD=os.environ.get("MAIL_PASSWORD", default=""),
MAIL_DEFAULT_SENDER=os.environ.get("MAIL_DEFAULT_SENDER", default="no-reply@capsul.org"),
ADMIN_EMAIL_ADDRESSES=os.environ.get("ADMIN_EMAIL_ADDRESSES", default="ops@cyberia.club"),
ADMIN_PANEL_ALLOW_EMAIL_ADDRESSES=os.environ.get("ADMIN_PANEL_ALLOW_EMAIL_ADDRESSES", default="forest.n.johnson@gmail.com,capsul@cyberia.club"),
MAIL_SERVER=config.get("MAIL_SERVER", ""),
MAIL_PORT=config.get("MAIL_PORT", "465"),
MAIL_USE_TLS=config.get("MAIL_USE_TLS", "False").lower() in ['true', '1', 't', 'y', 'yes'],
MAIL_USE_SSL=config.get("MAIL_USE_SSL", "True").lower() in ['true', '1', 't', 'y', 'yes'],
MAIL_USERNAME=config.get("MAIL_USERNAME", ""),
MAIL_PASSWORD=config.get("MAIL_PASSWORD", ""),
MAIL_DEFAULT_SENDER=config.get("MAIL_DEFAULT_SENDER", "no-reply@capsul.org"),
ADMIN_EMAIL_ADDRESSES=config.get("ADMIN_EMAIL_ADDRESSES", "ops@cyberia.club"),
ADMIN_PANEL_ALLOW_EMAIL_ADDRESSES=config.get("ADMIN_PANEL_ALLOW_EMAIL_ADDRESSES", "forest.n.johnson@gmail.com,capsul@cyberia.club"),
PROMETHEUS_URL=os.environ.get("PROMETHEUS_URL", default="https://prometheus.cyberia.club"),
PROMETHEUS_URL=config.get("PROMETHEUS_URL", "https://prometheus.cyberia.club"),
STRIPE_API_VERSION=os.environ.get("STRIPE_API_VERSION", default="2020-03-02"),
STRIPE_SECRET_KEY=os.environ.get("STRIPE_SECRET_KEY", default=""),
STRIPE_PUBLISHABLE_KEY=os.environ.get("STRIPE_PUBLISHABLE_KEY", default=""),
#STRIPE_WEBHOOK_SECRET=os.environ.get("STRIPE_WEBHOOK_SECRET", default="")
STRIPE_API_VERSION=config.get("STRIPE_API_VERSION", "2020-03-02"),
STRIPE_SECRET_KEY=config.get("STRIPE_SECRET_KEY", ""),
STRIPE_PUBLISHABLE_KEY=config.get("STRIPE_PUBLISHABLE_KEY", ""),
#STRIPE_WEBHOOK_SECRET=config.get("STRIPE_WEBHOOK_SECRET", "")
BTCPAY_PRIVATE_KEY=os.environ.get("BTCPAY_PRIVATE_KEY", default="").replace("\\n", "\n"),
BTCPAY_URL=os.environ.get("BTCPAY_URL", default="https://btcpay.cyberia.club")
BTCPAY_PRIVATE_KEY=config.get("BTCPAY_PRIVATE_KEY", "").replace("\\n", "\n"),
BTCPAY_URL=config.get("BTCPAY_URL", "https://btcpay.cyberia.club")
)
app.config['HUB_URL'] = os.environ.get("HUB_URL", default=app.config['BASE_URL'])
app.config['HUB_URL'] = config.get("HUB_URL", app.config['BASE_URL'])
class SetLogLevelToDebugForHeartbeatRelatedMessagesFilter(logging.Filter):
def isHeartbeatRelatedString(self, thing):
@ -148,18 +154,21 @@ except:
# only start the scheduler and attempt to migrate the database if we are running the app.
# otherwise we are running a CLI command.
command_line = ' '.join(sys.argv)
is_running_server = ('flask run' in command_line) or ('gunicorn' in command_line)
is_running_server = (
('flask run' in command_line) or
('gunicorn' in command_line) or
('test' in command_line)
)
app.logger.info(f"is_running_server: {is_running_server}")
if app.config['HUB_MODE_ENABLED']:
if app.config['HUB_MODEL'] == "capsul-flask":
app.config['HUB_MODEL'] = hub_model.CapsulFlaskHub()
# debug mode (flask reloader) runs two copies of the app. When running in debug mode,
# we only want to start the scheduler one time.
if is_running_server and (not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true'):
if is_running_server and (not app.debug or config.get('WERKZEUG_RUN_MAIN') == 'true'):
scheduler = BackgroundScheduler()
heartbeat_task_url = f"{app.config['HUB_URL']}/hub/heartbeat-task"
heartbeat_task_headers = {'Authorization': f"Bearer {app.config['HUB_TOKEN']}"}
@ -175,23 +184,23 @@ if app.config['HUB_MODE_ENABLED']:
from capsulflask import db
db.init_app(app, is_running_server)
from capsulflask import auth, landing, console, payment, metrics, cli, hub_api, admin
from capsulflask import (
auth, landing, console, payment, metrics, cli, hub_api, publicapi, admin
)
app.register_blueprint(landing.bp)
app.register_blueprint(auth.bp)
app.register_blueprint(landing.bp)
app.register_blueprint(console.bp)
app.register_blueprint(payment.bp)
app.register_blueprint(metrics.bp)
app.register_blueprint(cli.bp)
app.register_blueprint(hub_api.bp)
app.register_blueprint(admin.bp)
app.register_blueprint(publicapi.bp)
app.add_url_rule("/", endpoint="index")
if app.config['SPOKE_MODE_ENABLED']:
if app.config['SPOKE_MODEL'] == "shell-scripts":
app.config['SPOKE_MODEL'] = spoke_model.ShellScriptSpoke()
else:
@ -219,7 +228,6 @@ def override_url_for():
return dict(url_for=url_for_with_cache_bust)
def url_for_with_cache_bust(endpoint, **values):
"""
Add a query parameter based on the hash of the file, this acts as a cache bust
@ -245,6 +253,4 @@ def url_for_with_cache_bust(endpoint, **values):
return url_for(endpoint, **values)
return app
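A minimal sketch of the override pattern this create_app()/dotenv_values change enables; values are illustrative, and the same technique is what tests_base.py uses further down. Anything exported in the process environment now wins over the corresponding .env entry.

import os
from capsulflask import create_app

# Sketch only: point the app at a throwaway database and the mock spoke
# before building it; dotenv_values loads .env first, then os.environ
# entries override those values.
os.environ['POSTGRES_CONNECTION_PARAMETERS'] = \
    "host=localhost port=5432 user=postgres password=dev dbname=capsulflask_test"
os.environ['SPOKE_MODEL'] = 'mock'

app = create_app()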

View File

@ -17,9 +17,9 @@ bp = Blueprint("admin", __name__, url_prefix="/admin")
@bp.route("/")
@admin_account_required
def index():
hosts = get_model().list_hosts_with_networks()
vms_by_host_and_network = get_model().all_non_deleted_vms_by_host_and_network()
network_display_width_px = float(500);
hosts = get_model().list_hosts_with_networks(None)
vms_by_host_and_network = get_model().non_deleted_vms_by_host_and_network(None)
network_display_width_px = float(270)
#operations = get_model().list_all_operations()
display_hosts = []
@ -35,26 +35,18 @@ def index():
display_host = dict(name=host_id, networks=value['networks'])
for network in display_host['networks']:
ipv4_network = ipaddress.ip_network(network["public_ipv4_cidr_block"], False)
network_start_int = -1
network_end_int = -1
i = 0
for ipv4_address in ipv4_network:
i += 1
if i > 2:
if network_start_int == -1:
network_start_int = int(ipv4_address)
network_end_int = int(ipv4_address)
network_start_int = int(ipaddress.ip_address(network["public_ipv4_first_usable_ip"]))
network_end_int = int(ipaddress.ip_address(network["public_ipv4_last_usable_ip"]))
network['allocations'] = []
network_addresses_width = float((network_end_int-network_start_int))
network_addresses_width = float((network_end_int-network_start_int)+1)
if host_id in vms_by_host_and_network:
if network['network_name'] in vms_by_host_and_network[host_id]:
for vm in vms_by_host_and_network[host_id][network['network_name']]:
ip_address_int = int(ipaddress.ip_address(vm['public_ipv4']))
if network_start_int < ip_address_int and ip_address_int < network_end_int:
if network_start_int <= ip_address_int and ip_address_int <= network_end_int:
allocation = f"{host_id}_{network['network_name']}_{len(network['allocations'])}"
inline_styles.append(
f"""

View File

@ -1,3 +1,4 @@
from base64 import b64decode
import functools
import re
@ -24,6 +25,15 @@ def account_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
api_token = request.headers.get('authorization', None)
if api_token is not None:
email = get_model().authenticate_token(b64decode(api_token).decode('utf-8'))
if email is not None:
session.clear()
session["account"] = email
session["csrf-token"] = generate()
if session.get("account") is None or session.get("csrf-token") is None :
return redirect(url_for("auth.login"))
@ -31,7 +41,6 @@ def account_required(view):
return wrapped_view
def admin_account_required(view):
"""View decorator that redirects non-admin users to the login page."""

View File

@ -1,7 +1,9 @@
from base64 import b64encode
from datetime import datetime, timedelta
import json
import re
import sys
import json
from datetime import datetime, timedelta
from flask import Blueprint
from flask import flash
from flask import current_app
@ -98,7 +100,6 @@ def index():
@bp.route("/<string:id>", methods=("GET", "POST"))
@account_required
def detail(id):
duration=request.args.get('duration')
if not duration:
duration = "5m"
@ -188,6 +189,67 @@ def detail(id):
duration=duration
)
def _create(vm_sizes, operating_systems, public_keys_for_account, server_data):
errors = list()
size = server_data.get("size")
os = server_data.get("os")
posted_keys_count = int(server_data.get("ssh_authorized_key_count"))
if not size:
errors.append("Size is required")
elif size not in vm_sizes:
errors.append(f"Invalid size {size}")
if not os:
errors.append("OS is required")
elif os not in operating_systems:
errors.append(f"Invalid os {os}")
posted_keys = list()
if posted_keys_count > 1000:
errors.append("something went wrong with ssh keys")
else:
for i in range(0, posted_keys_count):
if f"ssh_key_{i}" in server_data:
posted_name = server_data.get(f"ssh_key_{i}")
key = None
for x in public_keys_for_account:
if x['name'] == posted_name:
key = x
if key:
posted_keys.append(key)
else:
errors.append(f"SSH Key \"{posted_name}\" doesn't exist")
if len(posted_keys) == 0:
errors.append("At least one SSH Public Key is required")
capacity_avaliable = current_app.config["HUB_MODEL"].capacity_avaliable(
vm_sizes[size]['memory_mb']*1024*1024
)
if not capacity_avaliable:
errors.append("""
host(s) at capacity. no capsuls can be created at this time. sorry.
""")
if len(errors) == 0:
id = make_capsul_id()
current_app.config["HUB_MODEL"].create(
email = session["account"],
id=id,
os=os,
size=size,
template_image_file_name=operating_systems[os]['template_image_file_name'],
vcpus=vm_sizes[size]['vcpus'],
memory_mb=vm_sizes[size]['memory_mb'],
ssh_authorized_keys=list(map(lambda x: x["content"], posted_keys))
)
return id, errors
return None, errors
@bp.route("/create", methods=("GET", "POST"))
@account_required
@ -202,65 +264,12 @@ def create():
if request.method == "POST":
if "csrf-token" not in request.form or request.form['csrf-token'] != session['csrf-token']:
return abort(418, f"u want tea")
size = request.form["size"]
os = request.form["os"]
if not size:
errors.append("Size is required")
elif size not in vm_sizes:
errors.append(f"Invalid size {size}")
if not os:
errors.append("OS is required")
elif os not in operating_systems:
errors.append(f"Invalid os {os}")
posted_keys_count = int(request.form["ssh_authorized_key_count"])
posted_keys = list()
if posted_keys_count > 1000:
errors.append("something went wrong with ssh keys")
else:
for i in range(0, posted_keys_count):
if f"ssh_key_{i}" in request.form:
posted_name = request.form[f"ssh_key_{i}"]
key = None
for x in public_keys_for_account:
if x['name'] == posted_name:
key = x
if key:
posted_keys.append(key)
else:
errors.append(f"SSH Key \"{posted_name}\" doesn't exist")
if len(posted_keys) == 0:
errors.append("At least one SSH Public Key is required")
capacity_avaliable = current_app.config["HUB_MODEL"].capacity_avaliable(vm_sizes[size]['memory_mb']*1024*1024)
if not capacity_avaliable:
errors.append("""
host(s) at capacity. no capsuls can be created at this time. sorry.
""")
id, errors = _create(
vm_sizes,
operating_systems,
public_keys_for_account,
request.form)
if len(errors) == 0:
id = make_capsul_id()
get_model().create_vm(
email=session["account"],
id=id,
size=size,
os=os,
ssh_authorized_keys=list(map(lambda x: x["name"], posted_keys))
)
current_app.config["HUB_MODEL"].create(
email = session["account"],
id=id,
template_image_file_name=operating_systems[os]['template_image_file_name'],
vcpus=vm_sizes[size]['vcpus'],
memory_mb=vm_sizes[size]['memory_mb'],
ssh_authorized_keys=list(map(lambda x: x["content"], posted_keys))
)
return redirect(f"{url_for('console.index')}?created={id}")
affordable_vm_sizes = dict()
@ -290,23 +299,25 @@ def create():
vm_sizes=affordable_vm_sizes
)
@bp.route("/ssh", methods=("GET", "POST"))
@bp.route("/keys", methods=("GET", "POST"))
@account_required
def ssh_public_keys():
def ssh_api_keys():
errors = list()
token = None
if request.method == "POST":
if "csrf-token" not in request.form or request.form['csrf-token'] != session['csrf-token']:
return abort(418, f"u want tea")
method = request.form["method"]
action = request.form["action"]
if action == 'upload_ssh_key':
content = None
if method == "POST":
content = request.form["content"].replace("\r", " ").replace("\n", " ").strip()
name = request.form["name"]
if not name or len(name.strip()) < 1:
if method == "POST":
parts = re.split(" +", content)
if len(parts) > 2 and len(parts[2].strip()) > 0:
name = parts[2].strip()
@ -317,7 +328,6 @@ def ssh_public_keys():
if not re.match(r"^[0-9A-Za-z_@:. -]+$", name):
errors.append(f"Key name '{name}' must match \"^[0-9A-Za-z_@:. -]+$\"")
if method == "POST":
if not content or len(content.strip()) < 1:
errors.append("Content is required")
else:
@ -330,24 +340,36 @@ def ssh_public_keys():
if len(errors) == 0:
get_model().create_ssh_public_key(session["account"], name, content)
elif method == "DELETE":
if len(errors) == 0:
elif action == "delete_ssh_key":
get_model().delete_ssh_public_key(session["account"], name)
elif action == "generate_api_token":
name = request.form["name"]
if name == '':
name = datetime.utcnow().strftime('%y-%m-%d %H:%M:%S')
token = b64encode(
get_model().generate_api_token(session["account"], name).encode('utf-8')
).decode('utf-8')
elif action == "delete_api_token":
get_model().delete_api_token(session["account"], request.form["id"])
for error in errors:
flash(error)
keys_list=list(map(
ssh_keys_list=list(map(
lambda x: dict(name=x['name'], content=f"{x['content'][:20]}...{x['content'][len(x['content'])-20:]}"),
get_model().list_ssh_public_keys_for_account(session["account"])
))
api_tokens_list = get_model().list_api_tokens(session["account"])
return render_template(
"ssh-public-keys.html",
"keys.html",
csrf_token = session["csrf-token"],
ssh_public_keys=keys_list,
has_ssh_public_keys=len(keys_list) > 0
api_tokens=api_tokens_list,
ssh_public_keys=ssh_keys_list,
generated_api_token=token,
)
def get_vms():
@ -371,7 +393,6 @@ def get_vm_months_float(vm, as_of):
return days / average_number_of_days_in_a_month
def get_account_balance(vms, payments, as_of):
vm_cost_dollars = 0.0
for vm in vms:
vm_months = get_vm_months_float(vm, as_of)
@ -384,7 +405,6 @@ def get_account_balance(vms, payments, as_of):
@bp.route("/account-balance")
@account_required
def account_balance():
payment_sessions = get_model().list_payment_sessions_for_account(session['account'])
for payment_session in payment_sessions:
if payment_session['type'] == 'btcpay':

View File

@ -33,7 +33,7 @@ def init_app(app, is_running_server):
result = re.search(r"^\d+_(up|down)", filename)
if not result:
app.logger.error(f"schemaVersion {filename} must match ^\\d+_(up|down). exiting.")
exit(1)
continue
key = result.group()
with open(join(schemaMigrationsPath, filename), 'rb') as file:
schemaMigrations[key] = file.read().decode("utf8")
@ -43,7 +43,7 @@ def init_app(app, is_running_server):
hasSchemaVersionTable = False
actionWasTaken = False
schemaVersion = 0
desiredSchemaVersion = 16
desiredSchemaVersion = 19
cursor = connection.cursor()
@ -128,4 +128,3 @@ def close_db(e=None):
if db_model is not None:
db_model.cursor.close()
current_app.config['PSYCOPG2_CONNECTION_POOL'].putconn(db_model.connection)

View File

@ -1,6 +1,8 @@
import re
# I was never able to get this type hinting to work correctly
# from psycopg2.extensions import connection as Psycopg2Connection, cursor as Psycopg2Cursor
import hashlib
from nanoid import generate
from flask import current_app
from typing import List
@ -15,7 +17,6 @@ class DBModel:
self.cursor = cursor
# ------ LOGIN ---------
@ -42,6 +43,16 @@ class DBModel:
return (token, ignoreCaseMatches)
def authenticate_token(self, token):
m = hashlib.md5()
m.update(token.encode('utf-8'))
hash_token = m.hexdigest()
self.cursor.execute("SELECT email FROM api_tokens WHERE token = %s", (hash_token, ))
result = self.cursor.fetchall()
if len(result) == 1:
return result[0]
return None
def consume_token(self, token):
self.cursor.execute("SELECT email FROM login_tokens WHERE token = %s and created > (NOW() - INTERVAL '20 min')", (token, ))
row = self.cursor.fetchone()
@ -56,8 +67,18 @@ class DBModel:
# ------ VM & ACCOUNT MANAGEMENT ---------
def all_non_deleted_vms_by_host_and_network(self):
self.cursor.execute("SELECT id, host, network_name, public_ipv4, public_ipv6 FROM vms WHERE deleted IS NULL")
def non_deleted_vms_by_host_and_network(self, host_id):
query = "SELECT id, host, network_name, public_ipv4, public_ipv6 FROM vms WHERE deleted IS NULL"
if host_id is None:
self.cursor.execute(query)
else:
if not re.match(r"^[a-zA-Z0-9_-]+$", host_id):
raise ValueError(f"host_id \"{host_id}\" must match \"^[a-zA-Z0-9_-]+\"")
# I kept getting "TypeError: not all arguments converted during string formatting"
# when I was trying to mix python string templating with psycopg2 safe parameter passing.
# so i just did all of it in python and check the user-provided data for safety myself (no sql injection).
self.cursor.execute(f"{query} AND host = '{host_id}'")
hosts = dict()
for row in self.cursor.fetchall():
@ -120,6 +141,32 @@ class DBModel:
self.cursor.execute( "DELETE FROM ssh_public_keys where email = %s AND name = %s", (email, name) )
self.connection.commit()
def list_api_tokens(self, email):
self.cursor.execute(
"SELECT id, token, name, created FROM api_tokens WHERE email = %s",
(email, )
)
return list(map(
lambda x: dict(id=x[0], token=x[1], name=x[2], created=x[3]),
self.cursor.fetchall()
))
def generate_api_token(self, email, name):
token = generate()
m = hashlib.md5()
m.update(token.encode('utf-8'))
hash_token = m.hexdigest()
self.cursor.execute(
"INSERT INTO api_tokens (email, name, token) VALUES (%s, %s, %s)",
(email, name, hash_token)
)
self.connection.commit()
return token
def delete_api_token(self, email, id_):
self.cursor.execute( "DELETE FROM api_tokens where email = %s AND id = %s", (email, id_))
self.connection.commit()
def list_vms_for_account(self, email):
self.cursor.execute("""
SELECT vms.id, vms.public_ipv4, vms.public_ipv6, vms.size, vms.os, vms.created, vms.deleted, vm_sizes.dollars_per_month
@ -146,12 +193,12 @@ class DBModel:
)
self.connection.commit()
def create_vm(self, email, id, size, os, ssh_authorized_keys):
def create_vm(self, email, id, size, os, host, network_name, public_ipv4, ssh_authorized_keys):
self.cursor.execute("""
INSERT INTO vms (email, id, size, os)
VALUES (%s, %s, %s, %s)
INSERT INTO vms (email, id, size, os, host, network_name, public_ipv4)
VALUES (%s, %s, %s, %s, %s, %s, %s)
""",
(email, id, size, os)
(email, id, size, os, host, network_name, public_ipv4)
)
for ssh_authorized_key in ssh_authorized_keys:
@ -318,18 +365,35 @@ class DBModel:
# ------ HOSTS ---------
def list_hosts_with_networks(self):
self.cursor.execute("""
SELECT hosts.id, hosts.last_health_check, host_network.network_name, host_network.public_ipv4_cidr_block FROM hosts
def list_hosts_with_networks(self, host_id: str):
query = """
SELECT hosts.id, hosts.last_health_check, host_network.network_name,
host_network.public_ipv4_cidr_block, host_network.public_ipv4_first_usable_ip, host_network.public_ipv4_last_usable_ip
FROM hosts
JOIN host_network ON host_network.host = hosts.id
""")
"""
if host_id is None:
self.cursor.execute(query)
else:
if not re.match(r"^[a-zA-Z0-9_-]+$", host_id):
raise ValueError(f"host_id \"{host_id}\" must match \"^[a-zA-Z0-9_-]+\"")
# I kept getting "TypeError: not all arguments converted during string formatting"
# when I was trying to mix python query string templating with psycopg2 safe parameter passing.
# so i just did all of it in python and check the user-provided data for safety myself (no sql injection).
self.cursor.execute(f"{query} WHERE hosts.id = '{host_id}'")
hosts = dict()
for row in self.cursor.fetchall():
if row[0] not in hosts:
hosts[row[0]] = dict(last_health_check=row[1], networks=[])
hosts[row[0]]["networks"].append(dict(network_name=row[2], public_ipv4_cidr_block=row[3]))
hosts[row[0]]["networks"].append(dict(
network_name=row[2],
public_ipv4_cidr_block=row[3],
public_ipv4_first_usable_ip=row[4],
public_ipv4_last_usable_ip=row[5]
))
return hosts
@ -379,6 +443,13 @@ class DBModel:
self.connection.commit()
return operation_id
def update_operation(self, operation_id: int, payload: str):
self.cursor.execute(
"UPDATE operations SET payload = %s WHERE id = %s",
(payload, operation_id)
)
self.connection.commit()
def update_host_operation(self, host_id: str, operation_id: int, assignment_status: str, result: str):
if assignment_status and not result:
self.cursor.execute(
@ -405,9 +476,20 @@ class DBModel:
else:
return None
def host_operation_exists(self, operation_id: int, host_id: str) -> bool:
self.cursor.execute("SELECT operation FROM host_operation WHERE host = %s AND operation = %s",(host_id, operation_id))
return len(self.cursor.fetchall()) != 0
def get_payload_json_from_host_operation(self, operation_id: int, host_id: str) -> str:
self.cursor.execute(
"""
SELECT operations.payload FROM operations
JOIN host_operation ON host_operation.operation = operations.id
WHERE host_operation.host = %s AND host_operation.operation = %s
""",
(host_id, operation_id)
)
row = self.cursor.fetchone()
if row:
return row[0]
else:
return None
def claim_operation(self, operation_id: int, host_id: str) -> bool:
# have to make a new cursor to set isolation level
@ -432,8 +514,3 @@ class DBModel:
#cursor.close()
return to_return
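As an aside on the "TypeError: not all arguments converted during string formatting" comments above: that error typically comes from mixing %-style formatting of the query text with psycopg2's own placeholder binding. Below is a sketch of the same host filter written with psycopg2 parameter binding, offered only as an alternative to the manual regex check; it is not what this commit does.

def non_deleted_vms_query(cursor, host_id=None):
    # Sketch only: let psycopg2 bind host_id itself; only the static part of
    # the SQL is assembled in Python, so no manual quoting is required.
    query = ("SELECT id, host, network_name, public_ipv4, public_ipv6 "
             "FROM vms WHERE deleted IS NULL")
    if host_id is None:
        cursor.execute(query)
    else:
        cursor.execute(query + " AND host = %s", (host_id,))
    return cursor.fetchall()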

View File

@ -1,7 +1,9 @@
import json
import ipaddress
from flask import Blueprint
from flask import current_app
from flask import request
from flask import request, make_response
from werkzeug.exceptions import abort
from capsulflask.db import get_model
@ -46,15 +48,134 @@ def heartbeat(host_id):
@bp.route("/claim-operation/<int:operation_id>/<string:host_id>", methods=("POST",))
def claim_operation(operation_id: int, host_id: str):
if authorized_for_host(host_id):
exists = get_model().host_operation_exists(operation_id, host_id)
if not exists:
return abort(404, "host operation not found")
payload_json = get_model().get_payload_json_from_host_operation(operation_id, host_id)
if payload_json is None:
error_message = f"{host_id} can't claim operation {operation_id} because host_operation row not found"
current_app.logger.error(error_message)
return abort(404, error_message)
# right now if there is a can_claim_handler there needs to be a corresponding on_claimed_handler.
# this is solely due to laziness in error handling.
can_claim_handlers = {
"create": can_claim_create,
}
on_claimed_handlers = {
"create": on_create_claimed,
}
error_message = ""
payload = None
payload_is_dict = False
payload_has_type = False
payload_has_valid_type = False
try:
payload = json.loads(payload_json)
payload_is_dict = isinstance(payload, dict)
payload_has_type = payload_is_dict and 'type' in payload
payload_has_valid_type = payload_has_type and payload['type'] in can_claim_handlers
if not payload_is_dict:
error_message = "invalid json: expected an object"
elif not payload_has_type:
error_message = "invalid json: 'type' field is required"
elif not payload_has_valid_type:
error_message = f"invalid json: expected type \"{payload['type']}\" to be one of [{', '.join(can_claim_handlers.keys())}]"
except:
error_message = "could not parse payload as json"
if error_message != "":
error_message = f"{host_id} can't claim operation {operation_id} because {error_message}"
current_app.logger.error(error_message)
return abort(400, error_message)
# we will only return this payload as json if claiming succeeds, so might as well do this now...
payload['assignment_status'] = 'assigned'
# invoke the appropriate can_claim_handler for this operation type
result_tuple = can_claim_handlers[payload['type']](payload, host_id)
modified_payload = result_tuple[0]
error_message = result_tuple[1]
if error_message != "":
error_message = f"{host_id} can't claim operation {operation_id} because {error_message}"
current_app.logger.error(error_message)
return abort(400, error_message)
claimed = get_model().claim_operation(operation_id, host_id)
if claimed:
return "ok"
modified_payload_json = json.dumps(modified_payload)
get_model().update_operation(operation_id, modified_payload_json)
on_claimed_handlers[payload['type']](modified_payload, host_id)
response = make_response(modified_payload_json)
response.headers.set("Content-Type", "application/json")
return response
else:
return abort(409, "operation was already assigned to another host")
return abort(409, f"operation was already assigned to another host")
else:
current_app.logger.warning(f"/hub/claim-operation/{operation_id}/{host_id} returned 401: invalid token")
return abort(401, "invalid host id or token")
def can_claim_create(payload, host_id) -> (str, str):
hosts = get_model().list_hosts_with_networks(host_id)
if host_id not in hosts:
return "", f"the host \"{host_id}\" does not appear to have any networks."
networks = hosts[host_id]['networks']
vms_by_host_and_network = get_model().non_deleted_vms_by_host_and_network(host_id)
vms_by_network = dict()
if host_id in vms_by_host_and_network:
vms_by_network = vms_by_host_and_network[host_id]
allocated_ipv4_address = None
allocated_network_name = None
for network in networks:
vms = []
if network["network_name"] in vms_by_network:
vms = vms_by_network[network["network_name"]]
claimed_ipv4s = dict()
for vm in vms:
claimed_ipv4s[vm['public_ipv4']] = True
ipv4_network = ipaddress.ip_network(network["public_ipv4_cidr_block"], False)
ipv4_first_usable_ip = network["public_ipv4_first_usable_ip"]
ipv4_last_usable_ip = network["public_ipv4_last_usable_ip"]
for ipv4_address in ipv4_network:
within_usable_range = ipv4_first_usable_ip <= str(ipv4_address) and str(ipv4_address) <= ipv4_last_usable_ip
if within_usable_range and str(ipv4_address) not in claimed_ipv4s:
allocated_ipv4_address = str(ipv4_address)
break
if allocated_ipv4_address is not None:
allocated_network_name = network["network_name"]
break
if allocated_network_name is None or allocated_ipv4_address is None:
return "", f"host \"{host_id}\" does not have any avaliable IP addresses on any of its networks."
# payload["network_name"] = allocated_network_name
# hard-code the network name for now until we can fix the phantom dhcp lease issues.
payload["network_name"] = 'public3'
payload["public_ipv4"] = allocated_ipv4_address
return payload, ""
def on_create_claimed(payload, host_id):
get_model().create_vm(
email=payload['email'],
id=payload['id'],
size=payload['size'],
os=payload['os'],
host=host_id,
network_name=payload['network_name'],
public_ipv4=payload['public_ipv4'],
ssh_authorized_keys=list(map(lambda x: x["name"], payload['ssh_authorized_keys'])),
)
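For orientation, roughly what a "create" operation payload looks like before and after a spoke claims it; the field values here are made up, but the shape follows CapsulFlaskHub.create() (further down) and can_claim_create() above.

# Illustrative payload as stored in the operations table when the hub
# dispatches a create:
operation_payload = {
    "type": "create",
    "email": "user@example.com",
    "id": "capsul-abc123xyz0",
    "os": "debian10",
    "size": "f1-xs",
    "template_image_file_name": "debian10-root.img.qcow2",
    "vcpus": 1,
    "memory_mb": 1024,
    "ssh_authorized_keys": ["ssh-ed25519 AAAA..."],  # exact shape of this field varies between call sites in this diff
}

# After a successful claim, update_operation() persists (and /hub/claim-operation
# returns) the payload with the allocation added; the spoke then passes
# network_name and public_ipv4 through to create.sh:
claimed_payload = dict(operation_payload,
                       assignment_status="assigned",
                       network_name="public3",
                       public_ipv4="69.61.38.200")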

View File

@ -36,7 +36,7 @@ class MockHub(VirtualizationInterface):
def list_ids(self) -> list:
return get_model().all_non_deleted_vm_ids()
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
def create(self, email: str, id: str, os: str, size: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
validate_capsul_id(id)
current_app.logger.info(f"mock create: {id} for {email}")
sleep(1)
@ -180,7 +180,7 @@ class CapsulFlaskHub(VirtualizationInterface):
return to_return
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
def create(self, email: str, id: str, os: str, size: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
validate_capsul_id(id)
online_hosts = get_model().get_online_hosts()
#current_app.logger.debug(f"hub_model.create(): ${len(online_hosts)} hosts")
@ -188,6 +188,8 @@ class CapsulFlaskHub(VirtualizationInterface):
type="create",
email=email,
id=id,
os=os,
size=size,
template_image_file_name=template_image_file_name,
vcpus=vcpus,
memory_mb=memory_mb,

capsulflask/publicapi.py Normal file
View File

@ -0,0 +1,40 @@
import datetime
from flask import Blueprint
from flask import current_app
from flask import jsonify
from flask import request
from flask import session
from nanoid import generate
from capsulflask.auth import account_required
from capsulflask.db import get_model
bp = Blueprint("publicapi", __name__, url_prefix="/api")
@bp.route("/capsul/create", methods=["POST"])
@account_required
def capsul_create():
email = session["account"]
from .console import _create,get_account_balance, get_payments, get_vms
vm_sizes = get_model().vm_sizes_dict()
operating_systems = get_model().operating_systems_dict()
public_keys_for_account = get_model().list_ssh_public_keys_for_account(session["account"])
account_balance = get_account_balance(get_vms(), get_payments(), datetime.datetime.utcnow())
capacity_avaliable = current_app.config["HUB_MODEL"].capacity_avaliable(512*1024*1024)
request.json['ssh_authorized_key_count'] = 1
id, errors = _create(
vm_sizes,
operating_systems,
public_keys_for_account,
request.json)
if id is not None:
return jsonify(
id=id,
)
return jsonify(errors=errors)
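A client-side sketch of the new endpoint, mirroring PublicAPITests further down; the base URL is a placeholder, and the token is the base64 value shown on the /console/keys page, sent as-is in the Authorization header.

import requests

response = requests.post(
    "https://capsul.example.com/api/capsul/create",           # placeholder base URL
    headers={"Authorization": "<base64 api token from /console/keys>"},
    json={"size": "f1-xs", "os": "debian10", "ssh_key_0": "key"},
)
print(response.json())  # {"id": "capsul-..."} on success, otherwise {"errors": [...]}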

View File

@ -3,12 +3,12 @@ CREATE TABLE host_network (
public_ipv4_cidr_block TEXT NOT NULL,
network_name TEXT NOT NULL,
host TEXT NOT NULL REFERENCES hosts(id) ON DELETE RESTRICT,
PRIMARY KEY (host, network_name)
CONSTRAINT host_network_pkey PRIMARY KEY (host, network_name)
);
INSERT INTO host_network (host, network_name, public_ipv4_cidr_block) VALUES ('baikal', 'virbr1', '69.61.2.162/27'),
('baikal', 'virbr2', '69.61.2.194/26');
('baikal', 'virbr2', '69.61.2.194/26'),
('baikal', 'virbr3', '69.61.38.193/26');
ALTER TABLE vms RENAME COLUMN last_seen_ipv4 TO public_ipv4;
ALTER TABLE vms RENAME COLUMN last_seen_ipv6 TO public_ipv6;
ALTER TABLE vms ADD COLUMN network_name TEXT;
@ -18,7 +18,8 @@ UPDATE vms SET network_name = 'virbr2' WHERE public_ipv4 >= '69.61.2.192';
ALTER TABLE vms ALTER COLUMN network_name SET NOT NULL;
ALTER TABLE vms ADD FOREIGN KEY (host, network_name) REFERENCES host_network(host, network_name) ON DELETE RESTRICT;
ALTER TABLE vms ADD CONSTRAINT vms_host_network_name_fkey FOREIGN KEY (host, network_name)
REFERENCES host_network(host, network_name) ON DELETE RESTRICT;
UPDATE schemaversion SET version = 16;

View File

@ -0,0 +1,18 @@
ALTER TABLE vms DROP CONSTRAINT vms_host_network_name_fkey;
UPDATE host_network SET network_name = 'virbr1' WHERE virtual_bridge_name = 'virbr1';
UPDATE host_network SET network_name = 'virbr2' WHERE virtual_bridge_name = 'virbr2';
UPDATE host_network SET network_name = 'virbr3' WHERE virtual_bridge_name = 'virbr3';
UPDATE vms SET network_name = 'virbr1' WHERE network_name = 'public1';
UPDATE vms SET network_name = 'virbr2' WHERE network_name = 'public2';
UPDATE vms SET network_name = 'virbr3' WHERE network_name = 'public3';
ALTER TABLE host_network DROP COLUMN virtual_bridge_name;
ALTER TABLE vms ADD CONSTRAINT vms_host_network_name_fkey FOREIGN KEY (host, network_name)
REFERENCES host_network(host, network_name) ON DELETE RESTRICT;
UPDATE schemaversion SET version = 16;

View File

@ -0,0 +1,26 @@
ALTER TABLE vms DROP CONSTRAINT vms_host_network_name_fkey;
ALTER TABLE host_network RENAME COLUMN network_name TO virtual_bridge_name;
ALTER TABLE host_network ADD COLUMN network_name TEXT;
UPDATE host_network SET network_name = 'public1' WHERE virtual_bridge_name = 'virbr1';
UPDATE host_network SET network_name = 'public2' WHERE virtual_bridge_name = 'virbr2';
UPDATE host_network SET network_name = 'public3' WHERE virtual_bridge_name = 'virbr3';
UPDATE vms SET network_name = 'public1' WHERE network_name = 'virbr1';
UPDATE vms SET network_name = 'public2' WHERE network_name = 'virbr2';
UPDATE vms SET network_name = 'public3' WHERE network_name = 'virbr3';
ALTER TABLE host_network ALTER COLUMN network_name SET NOT NULL;
ALTER TABLE host_network DROP CONSTRAINT host_network_pkey;
ALTER TABLE host_network ADD CONSTRAINT host_network_pkey PRIMARY KEY (host, network_name);
ALTER TABLE vms ADD CONSTRAINT vms_host_network_name_fkey FOREIGN KEY (host, network_name)
REFERENCES host_network(host, network_name) ON DELETE RESTRICT;
UPDATE schemaversion SET version = 17;

View File

@ -0,0 +1,6 @@
ALTER TABLE host_network DROP COLUMN public_ipv4_first_usable_ip;
ALTER TABLE host_network DROP COLUMN public_ipv4_last_usable_ip;
UPDATE schemaversion SET version = 17;

View File

@ -0,0 +1,25 @@
ALTER TABLE host_network ADD COLUMN public_ipv4_first_usable_ip TEXT;
ALTER TABLE host_network ADD COLUMN public_ipv4_last_usable_ip TEXT;
-- public1, 69.61.2.162/27
UPDATE host_network
SET public_ipv4_first_usable_ip = '69.61.2.163', public_ipv4_last_usable_ip = '69.61.2.190'
WHERE network_name = 'public1';
-- public2, 69.61.2.194/26
UPDATE host_network
SET public_ipv4_first_usable_ip = '69.61.2.195', public_ipv4_last_usable_ip = '69.61.2.254'
WHERE network_name = 'public2';
-- public3, 69.61.38.193/26
UPDATE host_network
SET public_ipv4_first_usable_ip = '69.61.38.194', public_ipv4_last_usable_ip = '69.61.38.254'
WHERE network_name = 'public3';
ALTER TABLE host_network ALTER COLUMN public_ipv4_first_usable_ip SET NOT NULL;
ALTER TABLE host_network ALTER COLUMN public_ipv4_last_usable_ip SET NOT NULL;
UPDATE schemaversion SET version = 18;

View File

@ -0,0 +1,2 @@
DROP TABLE api_tokens;
UPDATE schemaversion SET version = 18;

View File

@ -0,0 +1,9 @@
CREATE TABLE api_tokens (
id SERIAL PRIMARY KEY,
email TEXT REFERENCES accounts(email) ON DELETE RESTRICT,
name TEXT NOT NULL,
created TIMESTAMP NOT NULL DEFAULT NOW(),
token TEXT NOT NULL
);
UPDATE schemaversion SET version = 19;

View File

@ -19,9 +19,9 @@ if [ "$ram_bytes_remainder" -le $((20 * 1024 * 1024 * 1024)) ]; then
exit 1
fi
ipv4_limit=28
used_ips=$(grep ip-add "/var/lib/libvirt/dnsmasq/virbr1.status" | cut -d '"' -f 4)
reserved_ips=$(cat "/var/lib/libvirt/dnsmasq/public1.hostsfile" | cut -d ',' -f 2)
ipv4_limit=61
used_ips=$(grep ip-add "/var/lib/libvirt/dnsmasq/virbr3.status" | cut -d '"' -f 4)
reserved_ips=$(cat "/var/lib/libvirt/dnsmasq/public3.hostsfile" | cut -d ',' -f 2)
total_addresses_used=$(printf "$used_ips\n$reserved_ips" | sort | uniq | wc -l)
ipv4_count=$(printf "$total_addresses_used")

View File

@ -9,6 +9,8 @@ template_file="/tank/img/$2"
vcpus="$3"
memory="$4"
pubkeys="$5"
network_name="$6"
public_ipv4="$7"
root_volume_size="25G"
if echo "$vmname" | grep -vqE '^capsul-[a-z0-9]{10}$'; then
@ -38,6 +40,16 @@ echo "$pubkeys" | while IFS= read -r line; do
fi
done
if echo "$network_name" | grep -vqE "^[a-zA-Z0-9_-]+"; then
echo "network_name \"$network_name\" must match ^[a-zA-Z0-9_-]+"
exit 1
fi
if echo "$public_ipv4" | grep -vqE "^[0-9.]+$"; then
echo "public_ipv4 \"$public_ipv4\" must match ^[0-9.]+$"
exit 1
fi
disk="/tank/vm/$vmname.qcow2"
cdrom="/tank/vm/$vmname.iso"
xml="/tank/vm/$vmname.xml"
@ -66,7 +78,7 @@ virt-install \
--os-variant generic \
--virt-type kvm \
--graphics vnc,listen=127.0.0.1 \
--network network=public1,filterref=clean-traffic,model=virtio \
--network network=$network_name,filterref=clean-traffic,model=virtio \
--import \
--print-xml > "$xml"

View File

@ -104,7 +104,7 @@ def handle_create(operation_id, request_body):
current_app.logger.error(f"/hosts/operation returned 400: operation_id is required for create ")
return abort(400, f"bad request; operation_id is required. try POST /spoke/operation/<id>")
parameters = ["email", "id", "template_image_file_name", "vcpus", "memory_mb", "ssh_authorized_keys"]
parameters = ["email", "id", "os", "size", "template_image_file_name", "vcpus", "memory_mb", "ssh_authorized_keys"]
error_message = ""
for parameter in parameters:
if parameter not in request_body:
@ -115,14 +115,28 @@ def handle_create(operation_id, request_body):
return abort(400, f"bad request; {error_message}")
# only one host should create the vm, so we first race to assign this create operation to ourselves.
# only one host will win this race
# only one host will win this race.
authorization_header = f"Bearer {current_app.config['SPOKE_HOST_TOKEN']}"
url = f"{current_app.config['HUB_URL']}/hub/claim-operation/{operation_id}/{current_app.config['SPOKE_HOST_ID']}"
result = current_app.config['HTTP_CLIENT'].do_http_sync(url, body=None, authorization_header=authorization_header)
assignment_status = ""
if result.status_code == 200:
try:
assignment_info = json.loads(result.body)
if not isinstance(assignment_info, dict):
return abort(503, f"hub at '{url}' returned 200, but did not return assignment_info json object")
if 'network_name' not in assignment_info:
return abort(503, f"hub at '{url}' returned 200, but the returned assignment_info object did not include network_name")
if 'public_ipv4' not in assignment_info:
return abort(503, f"hub at '{url}' returned 200, but the returned assignment_info object did not include public_ipv4")
assignment_status = "assigned"
request_body['network_name'] = assignment_info['network_name']
request_body['public_ipv4'] = assignment_info['public_ipv4']
except:
return abort(503, f"hub at '{url}' returned 200, but did not return valid json")
elif result.status_code == 409:
assignment_status = "assigned_to_other_host"
else:
@ -137,7 +151,9 @@ def handle_create(operation_id, request_body):
template_image_file_name=request_body['template_image_file_name'],
vcpus=request_body['vcpus'],
memory_mb=request_body['memory_mb'],
ssh_authorized_keys=request_body['ssh_authorized_keys'],
ssh_authorized_keys=list(map(lambda x: x['content'], request_body['ssh_authorized_keys'])),
network_name=request_body['network_name'],
public_ipv4=request_body['public_ipv4'],
)
except:
error_message = my_exec_info_message(sys.exc_info())
@ -147,7 +163,11 @@ def handle_create(operation_id, request_body):
params= f"{params} vcpus='{request_body['vcpus'] if 'vcpus' in request_body else 'KeyError'}', "
params= f"{params} memory_mb='{request_body['memory_mb'] if 'memory_mb' in request_body else 'KeyError'}', "
params= f"{params} ssh_authorized_keys='{request_body['ssh_authorized_keys'] if 'ssh_authorized_keys' in request_body else 'KeyError'}', "
params= f"{params} network_name='{request_body['network_name'] if 'network_name' in request_body else 'KeyError'}', "
params= f"{params} public_ipv4='{request_body['public_ipv4'] if 'public_ipv4' in request_body else 'KeyError'}', "
current_app.logger.error(f"spoke_model.create({params}) failed: {error_message}")
return jsonify(dict(assignment_status=assignment_status, error_message=error_message))
return jsonify(dict(assignment_status=assignment_status))

View File

@ -14,28 +14,37 @@ from capsulflask.shared import VirtualizationInterface, VirtualMachine, validate
class MockSpoke(VirtualizationInterface):
def __init__(self):
self.capsuls = dict()
def capacity_avaliable(self, additional_ram_bytes):
return True
def get(self, id, get_ssh_host_keys):
validate_capsul_id(id)
ipv4 = "1.1.1.1"
if id in self.capsuls:
ipv4 = self.capsuls[id]['public_ipv4']
if get_ssh_host_keys:
ssh_host_keys = json.loads("""[
{"key_type":"ED25519", "content":"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN8cna0zeKSKl/r8whdn/KmDWhdzuWRVV0GaKIM+eshh", "sha256":"V4X2apAF6btGAfS45gmpldknoDX0ipJ5c6DLfZR2ttQ"},
{"key_type":"RSA", "content":"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvotgzgEP65JUQ8S8OoNKy1uEEPEAcFetSp7QpONe6hj4wPgyFNgVtdoWdNcU19dX3hpdse0G8OlaMUTnNVuRlbIZXuifXQ2jTtCFUA2mmJ5bF+XjGm3TXKMNGh9PN+wEPUeWd14vZL+QPUMev5LmA8cawPiU5+vVMLid93HRBj118aCJFQxLgrdP48VPfKHFRfCR6TIjg1ii3dH4acdJAvlmJ3GFB6ICT42EmBqskz2MPe0rIFxH8YohCBbAbrbWYcptHt4e48h4UdpZdYOhEdv89GrT8BF2C5cbQ5i9qVpI57bXKrj8hPZU5of48UHLSpXG8mbH0YDiOQOfKX/Mt", "sha256":"ghee6KzRnBJhND2kEUZSaouk7CD6o6z2aAc8GPkV+GQ"},
{"key_type":"ECDSA", "content":"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLLgOoATz9R4aS2kk7vWoxX+lshK63t9+5BIHdzZeFE1o+shlcf0Wji8cN/L1+m3bi0uSETZDOAWMP3rHLJj9Hk=", "sha256":"aCYG1aD8cv/TjzJL0bi9jdabMGksdkfa7R8dCGm1yYs"}
]""")
return VirtualMachine(id, current_app.config["SPOKE_HOST_ID"], ipv4="1.1.1.1", state="running", ssh_host_keys=ssh_host_keys)
return VirtualMachine(id, current_app.config["SPOKE_HOST_ID"], ipv4=ipv4, state="running", ssh_host_keys=ssh_host_keys)
return VirtualMachine(id, current_app.config["SPOKE_HOST_ID"], ipv4="1.1.1.1", state="running")
return VirtualMachine(id, current_app.config["SPOKE_HOST_ID"], ipv4=ipv4, state="running")
def list_ids(self) -> list:
return get_model().all_non_deleted_vm_ids()
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list, network_name: str, public_ipv4: str):
validate_capsul_id(id)
current_app.logger.info(f"mock create: {id} for {email}")
self.capsuls[id] = dict(email=email, id=id, network_name=network_name, public_ipv4=public_ipv4)
sleep(1)
def destroy(self, email: str, id: str):
@ -129,7 +138,7 @@ class ShellScriptSpoke(VirtualizationInterface):
self.validate_completed_process(completedProcess)
return list(map(lambda x: x.decode("utf-8"), completedProcess.stdout.splitlines() ))
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list, network_name: str, public_ipv4: str):
validate_capsul_id(id)
if not re.match(r"^[a-zA-Z0-9/_.-]+$", template_image_file_name):
@ -139,12 +148,18 @@ class ShellScriptSpoke(VirtualizationInterface):
if not re.match(r"^(ssh|ecdsa)-[0-9A-Za-z+/_=@:. -]+$", ssh_authorized_key):
raise ValueError(f"ssh_authorized_key \"{ssh_authorized_key}\" must match \"^(ssh|ecdsa)-[0-9A-Za-z+/_=@:. -]+$\"")
if vcpus < 1 or vcpus > 8:
if isinstance(vcpus, int) and (vcpus < 1 or vcpus > 8):
raise ValueError(f"vcpus \"{vcpus}\" must match 1 <= vcpus <= 8")
if memory_mb < 512 or memory_mb > 16384:
if isinstance(memory_mb, int) and (memory_mb < 512 or memory_mb > 16384):
raise ValueError(f"memory_mb \"{memory_mb}\" must match 512 <= memory_mb <= 16384")
if not re.match(r"^[a-zA-Z0-9_-]+$", network_name):
raise ValueError(f"network_name \"{network_name}\" must match \"^[a-zA-Z0-9_-]+\"")
if not re.match(r"^[0-9.]+$", public_ipv4):
raise ValueError(f"public_ipv4 \"{public_ipv4}\" must match \"^[0-9.]+$\"")
ssh_keys_string = "\n".join(ssh_authorized_keys)
completedProcess = run([
@ -153,7 +168,9 @@ class ShellScriptSpoke(VirtualizationInterface):
template_image_file_name,
str(vcpus),
str(memory_mb),
ssh_keys_string
ssh_keys_string,
network_name,
public_ipv4
], capture_output=True)
self.validate_completed_process(completedProcess, email)
@ -166,6 +183,8 @@ class ShellScriptSpoke(VirtualizationInterface):
vcpus={str(vcpus)}
memory={str(memory_mb)}
ssh_authorized_keys={ssh_keys_string}
network_name={network_name}
public_ipv4={public_ipv4}
"""
if not status == "success":

View File

@ -31,7 +31,7 @@
{% if session["account"] %}
<a href="/console">Capsuls</a>
<a href="/console/ssh">SSH Public Keys</a>
<a href="/console/keys">SSH &amp; API Keys</a>
<a href="/console/account-balance">Account Balance</a>
{% endif %}

View File

@ -101,7 +101,7 @@
</div>
<div class="row justify-start">
<label class="align" for="ssh_authorized_keys">SSH Authorized Keys</label>
<a id="ssh_authorized_keys" href="/console/ssh">{{ vm['ssh_authorized_keys'] }}</a>
<a id="ssh_authorized_keys" href="/console/keys">{{ vm['ssh_authorized_keys'] }}</a>
</div>
</div>

View File

@ -31,7 +31,7 @@
<p>(At least one month of funding is required)</p>
{% elif no_ssh_public_keys %}
<p>You don't have any ssh public keys yet.</p>
<p>You must <a href="/console/ssh">upload one</a> before you can create a Capsul.</p>
<p>You must <a href="/console/keys">upload one</a> before you can create a Capsul.</p>
{% elif not capacity_avaliable %}
<p>Host(s) at capacity. No capsuls can be created at this time. sorry. </p>
{% else %}

View File

@ -1,17 +1,18 @@
{% extends 'base.html' %}
{% block title %}SSH Public Keys{% endblock %}
{% block title %}SSH &amp; API Keys{% endblock %}
{% block content %}
<div class="row third-margin">
<h1>SSH PUBLIC KEYS</h1>
</div>
<div class="row third-margin"><div>
{% if has_ssh_public_keys %} <hr/> {% endif %}
{% if ssh_public_keys|length > 0 %} <hr/> {% endif %}
{% for ssh_public_key in ssh_public_keys %}
<form method="post">
<input type="hidden" name="method" value="DELETE"></input>
<input type="hidden" name="action" value="delete_ssh_key"></input>
<input type="hidden" name="name" value="{{ ssh_public_key['name'] }}"></input>
<input type="hidden" name="csrf-token" value="{{ csrf_token }}"/>
<div class="row">
@ -22,13 +23,14 @@
</form>
{% endfor %}
{% if has_ssh_public_keys %} <hr/> {% endif %}
{% if ssh_public_keys|length > 0 %} <hr/> {% endif %}
<div class="third-margin">
<h1>UPLOAD A NEW SSH PUBLIC KEY</h1>
</div>
<form method="post">
<input type="hidden" name="method" value="POST"></input>
<input type="hidden" name="action" value="upload_ssh_key"></input>
<input type="hidden" name="csrf-token" value="{{ csrf_token }}"/>
<div class="row justify-start">
<label class="align" for="content">File Contents</label>
@ -54,6 +56,51 @@
</div>
</form>
</div></div>
<hr/>
<div class="row third-margin">
<h1>API KEYS</h1>
</div>
<div class="row third-margin"><div>
{% if generated_api_token %}
<hr/>
Generated key:
<span class="code">{{ generated_api_token }}</span>
{% endif %}
{% if api_tokens|length >0 %} <hr/>{% endif %}
{% for api_token in api_tokens %}
<form method="post">
<input type="hidden" name="method" value="DELETE"></input>
<input type="hidden" name="action" value="delete_api_token"></input>
<input type="hidden" name="id" value="{{ api_token['id'] }}"></input>
<input type="hidden" name="csrf-token" value="{{ csrf_token }}"/>
<div class="row">
<span class="code">{{ api_token['name'] }}</span>
created {{ api_token['created'].strftime("%b %d %Y") }}
<input type="submit" value="Delete">
</div>
</form>
{% endfor %}
{% if api_tokens|length >0 %} <hr/>{% endif %}
<div class="third-margin">
<h1>GENERATE A NEW API KEY</h1>
</div>
<form method="post">
<input type="hidden" name="method" value="POST"></input>
<input type="hidden" name="action" value="generate_api_token"></input>
<input type="hidden" name="csrf-token" value="{{ csrf_token }}"/>
<div class="smalltext">
<p>Generate a new API key, to integrate with other systems.</p>
</div>
<div class="row justify-start">
<label class="align" for="name">Key Name</label>
<input type="text" id="name" name="name"></input> (defaults to creation time)
</div>
<div class="row justify-end">
<input type="submit" value="Generate">
</div>
</form>
</div></div>
{% endblock %}
{% block pagesource %}/templates/ssh-public-keys.html{% endblock %}

View File

View File

@ -0,0 +1,23 @@
from flask import url_for, session
from capsulflask.db import get_model
from capsulflask.tests_base import BaseTestCase
class LoginTests(BaseTestCase):
render_templates = False
def test_login_request(self):
with self.client as client:
response = client.get(url_for("auth.login"))
self.assert_200(response)
# FIXME test generated login link
def test_login_magiclink(self):
token, ignoreCaseMatches = get_model().login('test@example.com')
with self.client as client:
response = client.get(url_for("auth.magiclink", token=token))
self.assertRedirects(response, url_for("console.index"))
self.assertEqual(session['account'], 'test@example.com')

View File

@ -0,0 +1,170 @@
from unittest.mock import patch
from flask import url_for
from capsulflask.hub_model import MockHub
from capsulflask.db import get_model
from capsulflask.tests_base import BaseTestCase
class ConsoleTests(BaseTestCase):
capsul_data = {
"size": "f1-xs",
"os": "debian10",
"ssh_authorized_key_count": 1,
"ssh_key_0": "key"
}
ssh_key_data = {
"name": "key2",
"action": "upload_ssh_key",
"content": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDntq1t8Ddsa2q4p+PM7W4CLYYmxakokRRVLlf7AQlsTJFPsgBe9u0zuoOaKDMkBr0dlnuLm4Eub1Mj+BrdqAokto0YDiAnxUKRuYQKuHySKK8bLkisi2k47jGBDikx/jihgiuFTawo1mYsJJepC7PPwZGsoCImJEgq1L+ug0p3Zrj3QkUx4h25MpCSs2yvfgWjDyN8hEC76O42P+4ETezYrzrd1Kj26hdzHRnrxygvIUOtfau+5ydlaz8xQBEPrEY6/+pKDuwtXg1pBL7GmoUxBXVfHQSgq5s9jIJH+G0CR0ZoHMB25Ln4X/bsCQbLOu21+IGYKSDVM5TIMLtkKUkERQMVWvnpOp1LZKir4dC0m7SW74wpA8+2b1IsURIr9ARYGJpCEv1Q1Wz/X3yTf6Mfey7992MjUc9HcgjgU01/+kYomoXHprzolk+22Gjfgo3a4dRIoTY82GO8kkUKiaWHvDkkVURCY5dpteLA05sk3Z9aRMYsNXPLeOOPfzTlDA0="
}
def test_index(self):
self._login('test@example.com')
with self.client as client:
response = client.get(url_for("console.index"))
self.assert_200(response)
def test_create_loads(self):
self._login('test@example.com')
with self.client as client:
response = client.get(url_for("console.create"))
self.assert_200(response)
def test_create_fails_capacity(self):
with self.client as client:
client.get(url_for("console.create"))
csrf_token = self.get_context_variable('csrf_token')
data = self.capsul_data
data['csrf-token'] = csrf_token
# Override MockHub.capacity_avaliable to always return False
with patch.object(MockHub, 'capacity_avaliable', return_value=False) as mock_method:
self.app.config["HUB_MODEL"] = MockHub()
client.post(url_for("console.create"), data=data)
capacity_message = \
'\n host(s) at capacity. no capsuls can be created at this time. sorry. \n '
self.assert_message_flashed(capacity_message, category='message')
self.assertEqual(
len(get_model().list_vms_for_account('test@example.com')),
0
)
mock_method.assert_called_with(512 * 1024 * 1024)
def test_create_fails_invalid(self):
with self.client as client:
client.get(url_for("console.create"))
csrf_token = self.get_context_variable('csrf_token')
data = self.capsul_data
data['csrf-token'] = csrf_token
data['os'] = ''
client.post(url_for("console.create"), data=data)
self.assert_message_flashed(
'OS is required',
category='message'
)
self.assertEqual(
len(get_model().list_vms_for_account('test@example.com')),
0
)
def test_create_succeeds(self):
with self.client as client:
client.get(url_for("console.create"))
csrf_token = self.get_context_variable('csrf_token')
data = self.capsul_data
data['csrf-token'] = csrf_token
response = client.post(url_for("console.create"), data=data)
# FIXME: mock create doesn't create, see #83
# vms = get_model().list_vms_for_account('test@example.com')
# self.assertEqual(
# len(vms),
# 1
# )
#
# vm_id = vms[0].id
#
# self.assertRedirects(
# response,
# url_for("console.index") + f'?{vm_id}'
# )
def test_keys_loads(self):
self._login('test@example.com')
with self.client as client:
response = client.get(url_for("console.ssh_api_keys"))
self.assert_200(response)
keys = self.get_context_variable('ssh_public_keys')
self.assertEqual(keys[0]['name'], 'key')
def test_keys_add_ssh_fails_invalid(self):
self._login('test@example.com')
with self.client as client:
client.get(url_for("console.ssh_api_keys"))
csrf_token = self.get_context_variable('csrf_token')
data = self.ssh_key_data
data['csrf-token'] = csrf_token
data_invalid_content = data
data_invalid_content['content'] = 'foo'
client.post(
url_for("console.ssh_api_keys"),
data=data_invalid_content
)
self.assert_message_flashed(
'Content must match "^(ssh|ecdsa)-[0-9A-Za-z+/_=@:. -]+$"',
category='message'
)
data_missing_content = data
data_missing_content['content'] = ''
client.post(url_for("console.ssh_api_keys"), data=data_missing_content)
self.assert_message_flashed(
'Content is required', category='message'
)
def test_keys_add_ssh_fails_duplicate(self):
self._login('test@example.com')
with self.client as client:
client.get(url_for("console.ssh_api_keys"))
csrf_token = self.get_context_variable('csrf_token')
data = self.ssh_key_data
data['csrf-token'] = csrf_token
data['name'] = 'key'
client.post(url_for("console.ssh_api_keys"), data=data)
self.assert_message_flashed(
'A key with that name already exists',
category='message'
)
data = self.ssh_key_data
data['csrf-token'] = csrf_token
data['name'] = 'key'
client.post(url_for("console.ssh_api_keys"), data=data)
self.assert_message_flashed(
'A key with that name already exists',
category='message'
)
def setUp(self):
self._login('test@example.com')
get_model().create_ssh_public_key('test@example.com', 'key', 'foo')
def tearDown(self):
get_model().delete_ssh_public_key('test@example.com', 'key')

View File

@ -0,0 +1,14 @@
from capsulflask.tests_base import BaseTestCase
class LandingTests(BaseTestCase):
#: Do not render templates, we're only testing logic here.
render_templates = False
def test_landing(self):
pages = ['/', 'pricing', 'faq', 'about-ssh', 'changelog', 'support']
with self.client as client:
for page in pages:
response = client.get(page)
self.assert_200(response)

View File

@ -0,0 +1,45 @@
from base64 import b64encode
import requests
from flask import url_for
from capsulflask.db import get_model
from capsulflask.tests_base import BaseLiveServerTestCase
class PublicAPITests(BaseLiveServerTestCase):
def test_server_is_up_and_running(self):
response = requests.get(self.get_server_url())
self.assertEqual(response.status_code, 200)
def test_capsul_create_succeeds(self):
response = requests.post(
self.get_server_url() +
url_for('publicapi.capsul_create'),
headers={'Authorization': self.token},
json={
'size': 'f1-xs',
'os': 'openbsd68',
'ssh_key_0': 'key'
}
)
self.assertEqual(response.status_code, 200)
# FIXME: mock create doesn't create, see #83
# vms = get_model().list_vms_for_account('test@example.com')
#
# self.assertEqual(
# len(vms),
# 1
# )
def setUp(self):
get_model().create_ssh_public_key('test@example.com', 'key', 'foo')
self.token = b64encode(
get_model().generate_api_token('test@example.com', 'apikey').encode('utf-8')
).decode('utf-8')
def tearDown(self):
get_model().delete_ssh_public_key('test@example.com', 'key')
get_model().delete_api_token('test@example.com', 1)

capsulflask/tests_base.py Normal file
View File

@ -0,0 +1,35 @@
import os
from nanoid import generate
from flask_testing import TestCase, LiveServerTestCase
from capsulflask import create_app
from capsulflask.db import get_model
class BaseSharedTestCase(object):
def create_app(self):
# Use default connection parameters
os.environ['POSTGRES_CONNECTION_PARAMETERS'] = "host=localhost port=5432 user=postgres password=dev dbname=capsulflask_test"
os.environ['TESTING'] = '1'
os.environ['SPOKE_MODEL'] = 'mock'
os.environ['HUB_MODEL'] = 'mock'
return create_app()
def setUp(self):
pass
def tearDown(self):
pass
class BaseTestCase(BaseSharedTestCase, TestCase):
def _login(self, user_email):
get_model().login(user_email)
with self.client.session_transaction() as session:
session['account'] = user_email
session['csrf-token'] = generate()
class BaseLiveServerTestCase(BaseSharedTestCase, LiveServerTestCase):
pass
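Illustrative only: a new test module can subclass BaseTestCase to get an app wired to the capsulflask_test database plus the logged-in-session helper; the class and test names below are hypothetical.

from capsulflask.tests_base import BaseTestCase


class ExampleTests(BaseTestCase):  # hypothetical test class
    def test_pricing_page_loads(self):
        self._login('test@example.com')
        with self.client as client:
            self.assert_200(client.get('/pricing'))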