4 Commits

Author  SHA1        Message                                                  Date
3wc     0d9d51c780  Merge branch 'docker' into docker-api                    2021-07-11 00:57:58 +02:00
                    (all checks were successful: continuous-integration/drone/push build is passing)
3wc     647a19bfa7  Multi-stage build oh my!                                 2021-07-11 00:57:40 +02:00
3wc     541bf7d2d7  Initial attempt at Docker                                2021-07-11 00:57:40 +02:00
3wc     3b978b781f  Add basic "create" API .. using server-side API tokens   2021-07-11 00:57:27 +02:00
26 changed files with 344 additions and 521 deletions

View File

@ -1,17 +1,8 @@
FROM python:3.8-alpine as build
RUN apk add --no-cache \
build-base \
gcc \
gettext \
git \
jpeg-dev \
libffi-dev \
libjpeg \
musl-dev \
postgresql-dev \
python3-dev \
zlib-dev
RUN apk add gettext git gcc python3-dev musl-dev \
libffi-dev zlib-dev jpeg-dev libjpeg postgresql-dev build-base \
--virtual .build-dependencies
RUN mkdir -p /app/{code,venv}
WORKDIR /app/code
@ -26,14 +17,7 @@ RUN pipenv install --deploy --verbose
FROM python:3.8-alpine
RUN apk add --no-cache \
cloud-utils \
libjpeg \
libpq \
libstdc++ \
libvirt-client \
openssh-client \
virt-install
RUN apk add --no-cache libpq libstdc++ libjpeg
COPY . /app/code/
WORKDIR /app/code

View File

@ -26,24 +26,8 @@ class StdoutMockFlaskMail:
def send(self, message: Message):
current_app.logger.info(f"Email would have been sent if configured:\n\nto: {','.join(message.recipients)}\nsubject: {message.subject}\nbody:\n\n{message.body}\n\n")
load_dotenv(find_dotenv())
for var_name in [
"SPOKE_HOST_TOKEN", "HUB_TOKEN", "STRIPE_SECRET_KEY",
"BTCPAY_PRIVATE_KEY", "MAIL_PASSWORD"
]:
var = os.environ.get(f"{var_name}_FILE")
if not var:
continue
if not os.path.isfile(var):
continue
with open(var) as secret_file:
os.environ[var_name] = secret_file.read().rstrip('\n')
del os.environ[f"{var_name}_FILE"]
app = Flask(__name__)
app.config.from_mapping(
@ -72,7 +56,7 @@ app.config.from_mapping(
MAIL_SERVER=os.environ.get("MAIL_SERVER", default=""),
MAIL_PORT=os.environ.get("MAIL_PORT", default="465"),
MAIL_USE_TLS=os.environ.get("MAIL_USE_TLS", default="False").lower() in ['true', '1', 't', 'y', 'yes'],
MAIL_USE_TLS=os.environ.get("MAIL_USE_TLS", default="True").lower() in ['true', '1', 't', 'y', 'yes'],
MAIL_USE_SSL=os.environ.get("MAIL_USE_SSL", default="True").lower() in ['true', '1', 't', 'y', 'yes'],
MAIL_USERNAME=os.environ.get("MAIL_USERNAME", default=""),
MAIL_PASSWORD=os.environ.get("MAIL_PASSWORD", default=""),
@ -169,7 +153,6 @@ is_running_server = ('flask run' in command_line) or ('gunicorn' in command_line
app.logger.info(f"is_running_server: {is_running_server}")
if app.config['HUB_MODE_ENABLED']:
if app.config['HUB_MODEL'] == "capsul-flask":
app.config['HUB_MODEL'] = hub_model.CapsulFlaskHub()
@ -191,7 +174,9 @@ if app.config['HUB_MODE_ENABLED']:
from capsulflask import db
db.init_app(app, is_running_server)
from capsulflask import auth, landing, console, payment, metrics, cli, hub_api, admin
from capsulflask import (
auth, landing, console, payment, metrics, cli, hub_api, publicapi, admin
)
app.register_blueprint(landing.bp)
app.register_blueprint(auth.bp)
@ -201,13 +186,13 @@ if app.config['HUB_MODE_ENABLED']:
app.register_blueprint(cli.bp)
app.register_blueprint(hub_api.bp)
app.register_blueprint(admin.bp)
app.register_blueprint(publicapi.bp)
app.add_url_rule("/", endpoint="index")
if app.config['SPOKE_MODE_ENABLED']:
if app.config['SPOKE_MODEL'] == "shell-scripts":
app.config['SPOKE_MODEL'] = spoke_model.ShellScriptSpoke()
else:
@ -235,7 +220,6 @@ def override_url_for():
return dict(url_for=url_for_with_cache_bust)
def url_for_with_cache_bust(endpoint, **values):
"""
Add a query parameter based on the hash of the file, this acts as a cache bust
@ -260,7 +244,3 @@ def url_for_with_cache_bust(endpoint, **values):
values['q'] = current_app.config['STATIC_FILE_HASH_CACHE'][filename]
return url_for(endpoint, **values)

View File

@ -17,9 +17,9 @@ bp = Blueprint("admin", __name__, url_prefix="/admin")
@bp.route("/")
@admin_account_required
def index():
hosts = get_model().list_hosts_with_networks(None)
vms_by_host_and_network = get_model().non_deleted_vms_by_host_and_network(None)
network_display_width_px = float(270)
hosts = get_model().list_hosts_with_networks()
vms_by_host_and_network = get_model().all_non_deleted_vms_by_host_and_network()
network_display_width_px = float(500);
#operations = get_model().list_all_operations()
display_hosts = []
@ -35,18 +35,26 @@ def index():
display_host = dict(name=host_id, networks=value['networks'])
for network in display_host['networks']:
ipv4_network = ipaddress.ip_network(network["public_ipv4_cidr_block"], False)
network_start_int = -1
network_end_int = -1
i = 0
for ipv4_address in ipv4_network:
i += 1
if i > 2:
if network_start_int == -1:
network_start_int = int(ipv4_address)
network_start_int = int(ipaddress.ip_address(network["public_ipv4_first_usable_ip"]))
network_end_int = int(ipaddress.ip_address(network["public_ipv4_last_usable_ip"]))
network_end_int = int(ipv4_address)
network['allocations'] = []
network_addresses_width = float((network_end_int-network_start_int)+1)
network_addresses_width = float((network_end_int-network_start_int))
if host_id in vms_by_host_and_network:
if network['network_name'] in vms_by_host_and_network[host_id]:
for vm in vms_by_host_and_network[host_id][network['network_name']]:
ip_address_int = int(ipaddress.ip_address(vm['public_ipv4']))
if network_start_int <= ip_address_int and ip_address_int <= network_end_int:
if network_start_int < ip_address_int and ip_address_int < network_end_int:
allocation = f"{host_id}_{network['network_name']}_{len(network['allocations'])}"
inline_styles.append(
f"""

View File

@ -1,3 +1,4 @@
from base64 import b64decode
import functools
import re
@ -24,6 +25,15 @@ def account_required(view):
@functools.wraps(view)
def wrapped_view(**kwargs):
api_token = request.headers.get('authorization', None)
if api_token is not None:
email = get_model().authenticate_token(b64decode(api_token).decode('utf-8'))
if email is not None:
session.clear()
session["account"] = email
session["csrf-token"] = generate()
if session.get("account") is None or session.get("csrf-token") is None :
return redirect(url_for("auth.login"))
@ -56,7 +66,7 @@ def login():
if not email:
errors.append("email is required")
elif len(email.strip()) < 6 or email.count('@') != 1 or email.count('.') == 0:
errors.append("enter a valid email address")
errors.append("enter a valid email address")
if len(errors) == 0:
result = get_model().login(email)

View File

@ -1,7 +1,9 @@
from base64 import b64encode
from datetime import datetime, timedelta
import json
import re
import sys
import json
from datetime import datetime, timedelta
from flask import Blueprint
from flask import flash
from flask import current_app
@ -98,7 +100,6 @@ def index():
@bp.route("/<string:id>", methods=("GET", "POST"))
@account_required
def detail(id):
duration=request.args.get('duration')
if not duration:
duration = "5m"
@ -188,6 +189,72 @@ def detail(id):
duration=duration
)
def _create(vm_sizes, operating_systems, public_keys_for_account, server_data):
errors = list()
size = server_data.get("size")
os = server_data.get("os")
posted_keys_count = int(server_data.get("ssh_authorized_key_count"))
if not size:
errors.append("Size is required")
elif size not in vm_sizes:
errors.append(f"Invalid size {size}")
if not os:
errors.append("OS is required")
elif os not in operating_systems:
errors.append(f"Invalid os {os}")
posted_keys = list()
if posted_keys_count > 1000:
errors.append("something went wrong with ssh keys")
else:
for i in range(0, posted_keys_count):
if f"ssh_key_{i}" in server_data:
posted_name = server_data.get(f"ssh_key_{i}")
key = None
for x in public_keys_for_account:
if x['name'] == posted_name:
key = x
if key:
posted_keys.append(key)
else:
errors.append(f"SSH Key \"{posted_name}\" doesn't exist")
if len(posted_keys) == 0:
errors.append("At least one SSH Public Key is required")
capacity_avaliable = current_app.config["HUB_MODEL"].capacity_avaliable(
vm_sizes[size]['memory_mb']*1024*1024
)
if not capacity_avaliable:
errors.append("""
host(s) at capacity. no capsuls can be created at this time. sorry.
""")
if len(errors) == 0:
id = makeCapsulId()
get_model().create_vm(
email=session["account"],
id=id,
size=size,
os=os,
ssh_authorized_keys=list(map(lambda x: x["name"], posted_keys))
)
current_app.config["HUB_MODEL"].create(
email = session["account"],
id=id,
template_image_file_name=operating_systems[os]['template_image_file_name'],
vcpus=vm_sizes[size]['vcpus'],
memory_mb=vm_sizes[size]['memory_mb'],
ssh_authorized_keys=list(map(lambda x: x["content"], posted_keys))
)
return id, errors
return None, errors
@bp.route("/create", methods=("GET", "POST"))
@account_required
@ -197,67 +264,16 @@ def create():
public_keys_for_account = get_model().list_ssh_public_keys_for_account(session["account"])
account_balance = get_account_balance(get_vms(), get_payments(), datetime.utcnow())
capacity_avaliable = current_app.config["HUB_MODEL"].capacity_avaliable(512*1024*1024)
errors = list()
if request.method == "POST":
if "csrf-token" not in request.form or request.form['csrf-token'] != session['csrf-token']:
return abort(418, f"u want tea")
size = request.form["size"]
os = request.form["os"]
if not size:
errors.append("Size is required")
elif size not in vm_sizes:
errors.append(f"Invalid size {size}")
if not os:
errors.append("OS is required")
elif os not in operating_systems:
errors.append(f"Invalid os {os}")
posted_keys_count = int(request.form["ssh_authorized_key_count"])
posted_keys = list()
if posted_keys_count > 1000:
errors.append("something went wrong with ssh keys")
else:
for i in range(0, posted_keys_count):
if f"ssh_key_{i}" in request.form:
posted_name = request.form[f"ssh_key_{i}"]
key = None
for x in public_keys_for_account:
if x['name'] == posted_name:
key = x
if key:
posted_keys.append(key)
else:
errors.append(f"SSH Key \"{posted_name}\" doesn't exist")
if len(posted_keys) == 0:
errors.append("At least one SSH Public Key is required")
capacity_avaliable = current_app.config["HUB_MODEL"].capacity_avaliable(vm_sizes[size]['memory_mb']*1024*1024)
if not capacity_avaliable:
errors.append("""
host(s) at capacity. no capsuls can be created at this time. sorry.
""")
id, errors = _create(
vm_sizes,
operating_systems,
public_keys_for_account,
request.form)
if len(errors) == 0:
id = make_capsul_id()
# we can't create the vm record in the DB yet because its IP address needs to be allocated first.
# so it will be created when the allocation happens inside the hub_api.
current_app.config["HUB_MODEL"].create(
email = session["account"],
id=id,
os=os,
size=size,
template_image_file_name=operating_systems[os]['template_image_file_name'],
vcpus=vm_sizes[size]['vcpus'],
memory_mb=vm_sizes[size]['memory_mb'],
ssh_authorized_keys=list(map(lambda x: dict(name=x['name'], content=x['content']), posted_keys))
)
return redirect(f"{url_for('console.index')}?created={id}")
affordable_vm_sizes = dict()
@ -287,23 +303,25 @@ def create():
vm_sizes=affordable_vm_sizes
)
@bp.route("/ssh", methods=("GET", "POST"))
@bp.route("/keys", methods=("GET", "POST"))
@account_required
def ssh_public_keys():
def ssh_api_keys():
errors = list()
token = None
if request.method == "POST":
if "csrf-token" not in request.form or request.form['csrf-token'] != session['csrf-token']:
return abort(418, f"u want tea")
method = request.form["method"]
content = None
if method == "POST":
action = request.form["action"]
if action == 'upload_ssh_key':
content = None
content = request.form["content"].replace("\r", " ").replace("\n", " ").strip()
name = request.form["name"]
if not name or len(name.strip()) < 1:
if method == "POST":
name = request.form["name"]
if not name or len(name.strip()) < 1:
parts = re.split(" +", content)
if len(parts) > 2 and len(parts[2].strip()) > 0:
name = parts[2].strip()
@ -311,10 +329,9 @@ def ssh_public_keys():
name = parts[0].strip()
else:
errors.append("Name is required")
if not re.match(r"^[0-9A-Za-z_@:. -]+$", name):
errors.append(f"Key name '{name}' must match \"^[0-9A-Za-z_@:. -]+$\"")
if not re.match(r"^[0-9A-Za-z_@:. -]+$", name):
errors.append(f"Key name '{name}' must match \"^[0-9A-Za-z_@:. -]+$\"")
if method == "POST":
if not content or len(content.strip()) < 1:
errors.append("Content is required")
else:
@ -327,24 +344,36 @@ def ssh_public_keys():
if len(errors) == 0:
get_model().create_ssh_public_key(session["account"], name, content)
elif method == "DELETE":
elif action == "delete_ssh_key":
get_model().delete_ssh_public_key(session["account"], name)
if len(errors) == 0:
get_model().delete_ssh_public_key(session["account"], name)
elif action == "generate_api_token":
name = request.form["name"]
if name == '':
name = datetime.utcnow().strftime('%y-%m-%d %H:%M:%S')
token = b64encode(
get_model().generate_api_token(session["account"], name).encode('utf-8')
).decode('utf-8')
elif action == "delete_api_token":
get_model().delete_api_token(session["account"], request.form["id"])
for error in errors:
flash(error)
keys_list=list(map(
ssh_keys_list=list(map(
lambda x: dict(name=x['name'], content=f"{x['content'][:20]}...{x['content'][len(x['content'])-20:]}"),
get_model().list_ssh_public_keys_for_account(session["account"])
))
api_tokens_list = get_model().list_api_tokens(session["account"])
return render_template(
"ssh-public-keys.html",
"keys.html",
csrf_token = session["csrf-token"],
ssh_public_keys=keys_list,
has_ssh_public_keys=len(keys_list) > 0
api_tokens=api_tokens_list,
ssh_public_keys=ssh_keys_list,
generated_api_token=token,
)
def get_vms():
@ -368,7 +397,6 @@ def get_vm_months_float(vm, as_of):
return days / average_number_of_days_in_a_month
def get_account_balance(vms, payments, as_of):
vm_cost_dollars = 0.0
for vm in vms:
vm_months = get_vm_months_float(vm, as_of)
@ -381,7 +409,6 @@ def get_account_balance(vms, payments, as_of):
@bp.route("/account-balance")
@account_required
def account_balance():
payment_sessions = get_model().list_payment_sessions_for_account(session['account'])
for payment_session in payment_sessions:
if payment_session['type'] == 'btcpay':

View File

@ -33,7 +33,7 @@ def init_app(app, is_running_server):
result = re.search(r"^\d+_(up|down)", filename)
if not result:
app.logger.error(f"schemaVersion {filename} must match ^\\d+_(up|down). exiting.")
exit(1)
continue
key = result.group()
with open(join(schemaMigrationsPath, filename), 'rb') as file:
schemaMigrations[key] = file.read().decode("utf8")
@ -43,7 +43,7 @@ def init_app(app, is_running_server):
hasSchemaVersionTable = False
actionWasTaken = False
schemaVersion = 0
desiredSchemaVersion = 18
desiredSchemaVersion = 16
cursor = connection.cursor()
@ -128,4 +128,3 @@ def close_db(e=None):
if db_model is not None:
db_model.cursor.close()
current_app.config['PSYCOPG2_CONNECTION_POOL'].putconn(db_model.connection)

View File

@ -1,8 +1,6 @@
import re
# I was never able to get this type hinting to work correctly
# from psycopg2.extensions import connection as Psycopg2Connection, cursor as Psycopg2Cursor
import hashlib
from nanoid import generate
from flask import current_app
from typing import List
@ -17,7 +15,6 @@ class DBModel:
self.cursor = cursor
# ------ LOGIN ---------
@ -44,6 +41,16 @@ class DBModel:
return (token, ignoreCaseMatches)
def authenticate_token(self, token):
m = hashlib.md5()
m.update(token.encode('utf-8'))
hash_token = m.hexdigest()
self.cursor.execute("SELECT email FROM api_tokens WHERE token = %s", (hash_token, ))
result = self.cursor.fetchall()
if len(result) == 1:
return result[0]
return None
def consume_token(self, token):
self.cursor.execute("SELECT email FROM login_tokens WHERE token = %s and created > (NOW() - INTERVAL '20 min')", (token, ))
row = self.cursor.fetchone()
@ -58,18 +65,8 @@ class DBModel:
# ------ VM & ACCOUNT MANAGEMENT ---------
def non_deleted_vms_by_host_and_network(self, host_id):
query = "SELECT id, host, network_name, public_ipv4, public_ipv6 FROM vms WHERE deleted IS NULL"
if host_id is None:
self.cursor.execute(query)
else:
if not re.match(r"^[a-zA-Z0-9_-]+$", host_id):
raise ValueError(f"host_id \"{host_id}\" must match \"^[a-zA-Z0-9_-]+\"")
# I kept getting "TypeError: not all arguments converted during string formatting"
# when I was trying to mix python string templating with psycopg2 safe parameter passing.
# so i just did all of it in python and check the user-provided data for safety myself (no sql injection).
self.cursor.execute(f"{query} AND host = '{host_id}'")
def all_non_deleted_vms_by_host_and_network(self):
self.cursor.execute("SELECT id, host, network_name, public_ipv4, public_ipv6 FROM vms WHERE deleted IS NULL")
hosts = dict()
for row in self.cursor.fetchall():
@ -132,6 +129,32 @@ class DBModel:
self.cursor.execute( "DELETE FROM ssh_public_keys where email = %s AND name = %s", (email, name) )
self.connection.commit()
def list_api_tokens(self, email):
self.cursor.execute(
"SELECT id, token, name, created FROM api_tokens WHERE email = %s",
(email, )
)
return list(map(
lambda x: dict(id=x[0], token=x[1], name=x[2], created=x[3]),
self.cursor.fetchall()
))
def generate_api_token(self, email, name):
token = generate()
m = hashlib.md5()
m.update(token.encode('utf-8'))
hash_token = m.hexdigest()
self.cursor.execute(
"INSERT INTO api_tokens (email, name, token) VALUES (%s, %s, %s)",
(email, name, hash_token)
)
self.connection.commit()
return token
def delete_api_token(self, email, id_):
self.cursor.execute( "DELETE FROM api_tokens where email = %s AND id = %s", (email, id_))
self.connection.commit()
def list_vms_for_account(self, email):
self.cursor.execute("""
SELECT vms.id, vms.public_ipv4, vms.public_ipv6, vms.size, vms.os, vms.created, vms.deleted, vm_sizes.dollars_per_month
@ -158,12 +181,12 @@ class DBModel:
)
self.connection.commit()
def create_vm(self, email, id, size, os, host, network_name, public_ipv4, ssh_authorized_keys):
def create_vm(self, email, id, size, os, ssh_authorized_keys):
self.cursor.execute("""
INSERT INTO vms (email, id, size, os, host, network_name, public_ipv4)
VALUES (%s, %s, %s, %s, %s, %s, %s)
INSERT INTO vms (email, id, size, os)
VALUES (%s, %s, %s, %s)
""",
(email, id, size, os, host, network_name, public_ipv4)
(email, id, size, os)
)
for ssh_authorized_key in ssh_authorized_keys:
@ -330,35 +353,18 @@ class DBModel:
# ------ HOSTS ---------
def list_hosts_with_networks(self, host_id: str):
query = """
SELECT hosts.id, hosts.last_health_check, host_network.network_name,
host_network.public_ipv4_cidr_block, host_network.public_ipv4_first_usable_ip, host_network.public_ipv4_last_usable_ip
FROM hosts
def list_hosts_with_networks(self):
self.cursor.execute("""
SELECT hosts.id, hosts.last_health_check, host_network.network_name, host_network.public_ipv4_cidr_block FROM hosts
JOIN host_network ON host_network.host = hosts.id
"""
if host_id is None:
self.cursor.execute(query)
else:
if not re.match(r"^[a-zA-Z0-9_-]+$", host_id):
raise ValueError(f"host_id \"{host_id}\" must match \"^[a-zA-Z0-9_-]+\"")
# I kept getting "TypeError: not all arguments converted during string formatting"
# when I was trying to mix python query string templating with psycopg2 safe parameter passing.
# so i just did all of it in python and check the user-provided data for safety myself (no sql injection).
self.cursor.execute(f"{query} WHERE hosts.id = '{host_id}'")
""")
hosts = dict()
for row in self.cursor.fetchall():
if row[0] not in hosts:
hosts[row[0]] = dict(last_health_check=row[1], networks=[])
hosts[row[0]]["networks"].append(dict(
network_name=row[2],
public_ipv4_cidr_block=row[3],
public_ipv4_first_usable_ip=row[4],
public_ipv4_last_usable_ip=row[5]
))
hosts[row[0]]["networks"].append(dict(network_name=row[2], public_ipv4_cidr_block=row[3]))
return hosts
@ -408,13 +414,6 @@ class DBModel:
self.connection.commit()
return operation_id
def update_operation(self, operation_id: int, payload: str):
self.cursor.execute(
"UPDATE operations SET payload = %s WHERE id = %s",
(payload, operation_id)
)
self.connection.commit()
def update_host_operation(self, host_id: str, operation_id: int, assignment_status: str, result: str):
if assignment_status and not result:
self.cursor.execute(
@ -441,20 +440,9 @@ class DBModel:
else:
return None
def get_payload_json_from_host_operation(self, operation_id: int, host_id: str) -> str:
self.cursor.execute(
"""
SELECT operations.payload FROM operations
JOIN host_operation ON host_operation.operation = operations.id
WHERE host_operation.host = %s AND host_operation.operation = %s
""",
(host_id, operation_id)
)
row = self.cursor.fetchone()
if row:
return row[0]
else:
return None
def host_operation_exists(self, operation_id: int, host_id: str) -> bool:
self.cursor.execute("SELECT operation FROM host_operation WHERE host = %s AND operation = %s",(host_id, operation_id))
return len(self.cursor.fetchall()) != 0
def claim_operation(self, operation_id: int, host_id: str) -> bool:
# have to make a new cursor to set isolation level
@ -479,8 +467,3 @@ class DBModel:
#cursor.close()
return to_return
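
For reference, a minimal sketch (not part of the diff) of the token round-trip that the new db.py and auth.py code implies, assuming the nanoid package is installed; the variable names here are illustrative only:

import hashlib
from base64 import b64decode, b64encode
from nanoid import generate

# generate_api_token(): a random nanoid is created, only its md5 hex digest is
# stored in the api_tokens table, and the raw token is returned to the caller.
raw_token = generate()
stored_hash = hashlib.md5(raw_token.encode("utf-8")).hexdigest()

# console.ssh_api_keys(): the raw token is base64-encoded before being shown to
# the user, so the displayed string is what clients send as the Authorization header.
displayed_token = b64encode(raw_token.encode("utf-8")).decode("utf-8")

# auth.account_required(): the header value is base64-decoded and
# authenticate_token() looks up the md5 digest of the decoded value.
presented = b64decode(displayed_token).decode("utf-8")
assert hashlib.md5(presented.encode("utf-8")).hexdigest() == stored_hash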

View File

@ -1,9 +1,7 @@
import json
import ipaddress
from flask import Blueprint
from flask import current_app
from flask import request, make_response
from flask import request
from werkzeug.exceptions import abort
from capsulflask.db import get_model
@ -48,134 +46,15 @@ def heartbeat(host_id):
@bp.route("/claim-operation/<int:operation_id>/<string:host_id>", methods=("POST",))
def claim_operation(operation_id: int, host_id: str):
if authorized_for_host(host_id):
payload_json = get_model().get_payload_json_from_host_operation(operation_id, host_id)
if payload_json is None:
error_message = f"{host_id} can't claim operation {operation_id} because host_operation row not found"
current_app.logger.error(error_message)
return abort(404, error_message)
# right now if there is a can_claim_handler there needs to be a corresponding on_claimed_handler.
# this is sole-ly due to laziness in error handling.
can_claim_handlers = {
"create": can_claim_create,
}
on_claimed_handlers = {
"create": on_create_claimed,
}
error_message = ""
payload = None
payload_is_dict = False
payload_has_type = False
payload_has_valid_type = False
try:
payload = json.loads(payload_json)
payload_is_dict = isinstance(payload, dict)
payload_has_type = payload_is_dict and 'type' in payload
payload_has_valid_type = payload_has_type and payload['type'] in can_claim_handlers
if not payload_is_dict:
error_message = "invalid json: expected an object"
elif not payload_has_type:
error_message = "invalid json: 'type' field is required"
elif not payload_has_valid_type:
error_message = f"invalid json: expected type \"{payload['type']}\" to be one of [{', '.join(can_claim_handlers.keys())}]"
except:
error_message = "could not parse payload as json"
if error_message != "":
error_message = f"{host_id} can't claim operation {operation_id} because {error_message}"
current_app.logger.error(error_message)
return abort(400, error_message)
# we will only return this payload as json if claiming succeeds, so might as well do this now...
payload['assignment_status'] = 'assigned'
# invoke the appropriate can_claim_handler for this operation type
result_tuple = can_claim_handlers[payload['type']](payload, host_id)
modified_payload = result_tuple[0]
error_message = result_tuple[1]
if error_message != "":
error_message = f"{host_id} can't claim operation {operation_id} because {error_message}"
current_app.logger.error(error_message)
return abort(400, error_message)
exists = get_model().host_operation_exists(operation_id, host_id)
if not exists:
return abort(404, "host operation not found")
claimed = get_model().claim_operation(operation_id, host_id)
if claimed:
modified_payload_json = json.dumps(modified_payload)
get_model().update_operation(operation_id, modified_payload_json)
on_claimed_handlers[payload['type']](modified_payload, host_id)
response = make_response(modified_payload_json)
response.headers.set("Content-Type", "application/json")
return response
return "ok"
else:
return abort(409, f"operation was already assigned to another host")
return abort(409, "operation was already assigned to another host")
else:
current_app.logger.warning(f"/hub/claim-operation/{operation_id}/{host_id} returned 401: invalid token")
return abort(401, "invalid host id or token")
def can_claim_create(payload, host_id) -> (str, str):
hosts = get_model().list_hosts_with_networks(host_id)
if host_id not in hosts:
return "", f"the host \"{host_id}\" does not appear to have any networks."
networks = hosts[host_id]['networks']
vms_by_host_and_network = get_model().non_deleted_vms_by_host_and_network(host_id)
vms_by_network = dict()
if host_id in vms_by_host_and_network:
vms_by_network = vms_by_host_and_network[host_id]
allocated_ipv4_address = None
allocated_network_name = None
for network in networks:
vms = []
if network["network_name"] in vms_by_network:
vms = vms_by_network[network["network_name"]]
claimed_ipv4s = dict()
for vm in vms:
claimed_ipv4s[vm['public_ipv4']] = True
ipv4_network = ipaddress.ip_network(network["public_ipv4_cidr_block"], False)
ipv4_first_usable_ip = network["public_ipv4_first_usable_ip"]
ipv4_last_usable_ip = network["public_ipv4_last_usable_ip"]
for ipv4_address in ipv4_network:
within_usable_range = ipv4_first_usable_ip <= str(ipv4_address) and str(ipv4_address) <= ipv4_last_usable_ip
if within_usable_range and str(ipv4_address) not in claimed_ipv4s:
allocated_ipv4_address = str(ipv4_address)
break
if allocated_ipv4_address is not None:
allocated_network_name = network["network_name"]
break
if allocated_network_name is None or allocated_ipv4_address is None:
return "", f"host \"{host_id}\" does not have any avaliable IP addresses on any of its networks."
# payload["network_name"] = allocated_network_name
# hard-code the network name for now until we can fix the phantom dhcp lease issues.
payload["network_name"] = 'public3'
payload["public_ipv4"] = allocated_ipv4_address
return payload, ""
def on_create_claimed(payload, host_id):
get_model().create_vm(
email=payload['email'],
id=payload['id'],
size=payload['size'],
os=payload['os'],
host=host_id,
network_name=payload['network_name'],
public_ipv4=payload['public_ipv4'],
ssh_authorized_keys=list(map(lambda x: x["name"], payload['ssh_authorized_keys'])),
)

View File

@ -36,7 +36,7 @@ class MockHub(VirtualizationInterface):
def list_ids(self) -> list:
return get_model().all_non_deleted_vm_ids()
def create(self, email: str, id: str, os: str, size: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
validate_capsul_id(id)
current_app.logger.info(f"mock create: {id} for {email}")
sleep(1)
@ -180,7 +180,7 @@ class CapsulFlaskHub(VirtualizationInterface):
return to_return
def create(self, email: str, id: str, os: str, size: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
validate_capsul_id(id)
online_hosts = get_model().get_online_hosts()
#current_app.logger.debug(f"hub_model.create(): ${len(online_hosts)} hosts")
@ -188,8 +188,6 @@ class CapsulFlaskHub(VirtualizationInterface):
type="create",
email=email,
id=id,
os=os,
size=size,
template_image_file_name=template_image_file_name,
vcpus=vcpus,
memory_mb=memory_mb,
@ -215,12 +213,11 @@ class CapsulFlaskHub(VirtualizationInterface):
# no need to do anything here since if it cant be parsed then generic_operation will handle it.
pass
if error_message != "":
raise ValueError(f"create capsul operation {operation_id} on {assigned_hosts} failed with {error_message}")
if number_of_assigned != 1:
assigned_hosts_string = ", ".join(assigned_hosts)
raise ValueError(f"expected create capsul operation {operation_id} to be assigned to one host, it was assigned to {number_of_assigned} ({assigned_hosts_string})")
if error_message != "":
raise ValueError(f"create capsul operation {operation_id} on {assigned_hosts_string} failed with {error_message}")
def destroy(self, email: str, id: str):

capsulflask/publicapi.py (new file, 38 lines added)
View File

@ -0,0 +1,38 @@
import datetime
from flask import Blueprint
from flask import current_app
from flask import jsonify
from flask import request
from flask import session
from nanoid import generate
from capsulflask.auth import account_required
from capsulflask.db import get_model
bp = Blueprint("webapi", __name__, url_prefix="/api")
@bp.route("/capsul/create", methods=["POST"])
@account_required
def capsul_create():
email = session["account"]
from .console import _create,get_account_balance, get_payments, get_vms
vm_sizes = get_model().vm_sizes_dict()
operating_systems = get_model().operating_systems_dict()
public_keys_for_account = get_model().list_ssh_public_keys_for_account(session["account"])
account_balance = get_account_balance(get_vms(), get_payments(), datetime.datetime.utcnow())
capacity_avaliable = current_app.config["HUB_MODEL"].capacity_avaliable(512*1024*1024)
id, errors = _create(
vm_sizes,
operating_systems,
public_keys_for_account,
request.json)
if id is not None:
return jsonify(
id=id,
)
return jsonify(errors=errors)
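
A minimal sketch of calling the new endpoint from outside, assuming capsul-flask is reachable at http://localhost:5000, that the third-party requests library is installed, and that the size, OS, and key name below are placeholders for values configured in your instance:

import requests

# Token generated on the /console/keys page; the UI already base64-encodes it,
# which is the form auth.account_required() expects in the Authorization header.
API_TOKEN = "PASTE_GENERATED_API_TOKEN_HERE"

response = requests.post(
    "http://localhost:5000/api/capsul/create",
    headers={"Authorization": API_TOKEN},
    json={
        "size": "f1-s",                 # placeholder; must exist in vm_sizes
        "os": "debian10",               # placeholder; must exist in operating_systems
        "ssh_authorized_key_count": 1,
        "ssh_key_0": "my-key",          # name of an SSH key already uploaded to the account
    },
)
# capsul_create() returns {"id": "capsul-..."} on success, otherwise {"errors": [...]}.
print(response.json())

Because _create() is shared between the HTML form and this endpoint, the JSON body mirrors the form field names used on /console/create.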

View File

@ -0,0 +1,2 @@
DROP TABLE api_keys;
UPDATE schemaversion SET version = 15;

View File

@ -0,0 +1,9 @@
CREATE TABLE api_tokens (
id SERIAL PRIMARY KEY,
email TEXT REFERENCES accounts(email) ON DELETE RESTRICT,
name TEXT NOT NULL,
created TIMESTAMP NOT NULL DEFAULT NOW(),
token TEXT NOT NULL
);
UPDATE schemaversion SET version = 16;

View File

@ -3,12 +3,12 @@ CREATE TABLE host_network (
public_ipv4_cidr_block TEXT NOT NULL,
network_name TEXT NOT NULL,
host TEXT NOT NULL REFERENCES hosts(id) ON DELETE RESTRICT,
CONSTRAINT host_network_pkey PRIMARY KEY (host, network_name)
PRIMARY KEY (host, network_name)
);
INSERT INTO host_network (host, network_name, public_ipv4_cidr_block) VALUES ('baikal', 'virbr1', '69.61.2.162/27'),
('baikal', 'virbr2', '69.61.2.194/26'),
('baikal', 'virbr3', '69.61.38.193/26');
('baikal', 'virbr2', '69.61.2.194/26');
ALTER TABLE vms RENAME COLUMN last_seen_ipv4 TO public_ipv4;
ALTER TABLE vms RENAME COLUMN last_seen_ipv6 TO public_ipv6;
ALTER TABLE vms ADD COLUMN network_name TEXT;
@ -18,8 +18,7 @@ UPDATE vms SET network_name = 'virbr2' WHERE public_ipv4 >= '69.61.2.192';
ALTER TABLE vms ALTER COLUMN network_name SET NOT NULL;
ALTER TABLE vms ADD CONSTRAINT vms_host_network_name_fkey FOREIGN KEY (host, network_name)
REFERENCES host_network(host, network_name) ON DELETE RESTRICT;
ALTER TABLE vms ADD FOREIGN KEY (host, network_name) REFERENCES host_network(host, network_name) ON DELETE RESTRICT;
UPDATE schemaversion SET version = 16;

View File

@ -1,18 +0,0 @@
ALTER TABLE vms DROP CONSTRAINT vms_host_network_name_fkey;
UPDATE host_network SET network_name = 'virbr1' WHERE virtual_bridge_name = "virbr1";
UPDATE host_network SET network_name = 'virbr2' WHERE virtual_bridge_name = "virbr1";
UPDATE host_network SET network_name = 'virbr3' WHERE virtual_bridge_name = "virbr2";
UPDATE vms SET network_name = 'virbr1' WHERE network_name = "public1";
UPDATE vms SET network_name = 'virbr2' WHERE network_name = "public2";
UPDATE vms SET network_name = 'virbr3' WHERE network_name = "public3";
ALTER TABLE host_network DROP COLUMN virtual_bridge_name;
ALTER TABLE vms ADD CONSTRAINT vms_host_network_name_fkey FOREIGN KEY (host, network_name)
REFERENCES host_network(host, network_name) ON DELETE RESTRICT;
UPDATE schemaversion SET version = 16;

View File

@ -1,26 +0,0 @@
ALTER TABLE vms DROP CONSTRAINT vms_host_network_name_fkey;
ALTER TABLE host_network RENAME COLUMN network_name TO virtual_bridge_name;
ALTER TABLE host_network ADD COLUMN network_name TEXT;
UPDATE host_network SET network_name = 'public1' WHERE virtual_bridge_name = 'virbr1';
UPDATE host_network SET network_name = 'public2' WHERE virtual_bridge_name = 'virbr2';
UPDATE host_network SET network_name = 'public3' WHERE virtual_bridge_name = 'virbr3';
UPDATE vms SET network_name = 'public1' WHERE network_name = 'virbr1';
UPDATE vms SET network_name = 'public2' WHERE network_name = 'virbr2';
UPDATE vms SET network_name = 'public3' WHERE network_name = 'virbr3';
ALTER TABLE host_network ALTER COLUMN network_name SET NOT NULL;
ALTER TABLE host_network DROP CONSTRAINT host_network_pkey;
ALTER TABLE host_network ADD CONSTRAINT host_network_pkey PRIMARY KEY (host, network_name);
ALTER TABLE vms ADD CONSTRAINT vms_host_network_name_fkey FOREIGN KEY (host, network_name)
REFERENCES host_network(host, network_name) ON DELETE RESTRICT;
UPDATE schemaversion SET version = 17;

View File

@ -1,6 +0,0 @@
ALTER TABLE host_network DROP COLUMN public_ipv4_first_usable_ip;
ALTER TABLE host_network DROP COLUMN public_ipv4_last_usable_ip;
UPDATE schemaversion SET version = 17;

View File

@ -1,25 +0,0 @@
ALTER TABLE host_network ADD COLUMN public_ipv4_first_usable_ip TEXT;
ALTER TABLE host_network ADD COLUMN public_ipv4_last_usable_ip TEXT;
-- public1, 69.61.2.162/27
UPDATE host_network
SET public_ipv4_first_usable_ip = '69.61.2.163', public_ipv4_last_usable_ip = '69.61.2.190'
WHERE network_name = 'public1';
-- public2, 69.61.2.194/26
UPDATE host_network
SET public_ipv4_first_usable_ip = '69.61.2.195', public_ipv4_last_usable_ip = '69.61.2.254'
WHERE network_name = 'public2';
-- public3, 69.61.38.193/26
UPDATE host_network
SET public_ipv4_first_usable_ip = '69.61.38.194', public_ipv4_last_usable_ip = '69.61.38.254'
WHERE network_name = 'public3';
ALTER TABLE host_network ALTER COLUMN public_ipv4_first_usable_ip SET NOT NULL;
ALTER TABLE host_network ALTER COLUMN public_ipv4_last_usable_ip SET NOT NULL;
UPDATE schemaversion SET version = 18;

View File

@ -3,7 +3,7 @@
# check available RAM and IPv4s
ram_bytes_to_allocate="$1"
ram_bytes_available="$(($(grep Available /proc/meminfo | grep -o '[0-9]*') * 1024))"
ram_bytes_available=$(grep -E "^(size|memory_available_bytes)" /proc/spl/kstat/zfs/arcstats | awk '{sum+=$3} END {printf "%.0f", sum}')
ram_bytes_remainder="$((ram_bytes_available - ram_bytes_to_allocate))"
if echo "$ram_bytes_to_allocate" | grep -vqE "^[0-9]+$"; then
@ -11,17 +11,17 @@ if echo "$ram_bytes_to_allocate" | grep -vqE "^[0-9]+$"; then
exit 1
fi
# 0.25GB
if [ "$ram_bytes_remainder" -le $((1 * 1024 * 1024 * 1024 / 4)) ]; then
# 20GB
if [ "$ram_bytes_remainder" -le $((20 * 1024 * 1024 * 1024)) ]; then
echo "VM is requesting more RAM than $(hostname -f) has available."
echo "Bytes requested: $ram_bytes_to_allocate"
echo "Bytes available: $ram_bytes_available"
exit 1
fi
ipv4_limit=61
used_ips=$(grep ip-add "/var/lib/libvirt/dnsmasq/virbr3.status" | cut -d '"' -f 4)
reserved_ips=$(cat "/var/lib/libvirt/dnsmasq/public3.hostsfile" | cut -d ',' -f 2)
ipv4_limit=28
used_ips=$(grep ip-add "/var/lib/libvirt/dnsmasq/virbr1.status" | cut -d '"' -f 4)
reserved_ips=$(cat "/var/lib/libvirt/dnsmasq/public1.hostsfile" | cut -d ',' -f 2)
total_addresses_used=$(printf "$used_ips\n$reserved_ips" | sort | uniq | wc -l)
ipv4_count=$(printf "$total_addresses_used")

View File

@ -6,12 +6,9 @@
vmname="$1"
template_file="/tank/img/$2"
qemu_tank_dir="/tank"
vcpus="$3"
memory="$4"
pubkeys="$5"
network_name="$6"
public_ipv4="$7"
root_volume_size="25G"
if echo "$vmname" | grep -vqE '^capsul-[a-z0-9]{10}$'; then
@ -41,50 +38,40 @@ echo "$pubkeys" | while IFS= read -r line; do
fi
done
if echo "$network_name" | grep -vqE "^[a-zA-Z0-9_-]+"; then
echo "network_name \"$network_name\" must match ^[a-zA-Z0-9_-]+"
exit 1
fi
if echo "$public_ipv4" | grep -vqE "^[0-9.]+$"; then
echo "public_ipv4 \"$public_ipv4\" must match ^[0-9.]+$"
exit 1
fi
disk="$vmname.qcow2"
cdrom="$vmname.iso"
xml="$vmname.xml"
disk="/tank/vm/$vmname.qcow2"
cdrom="/tank/vm/$vmname.iso"
xml="/tank/vm/$vmname.xml"
if [ -f /tank/vm/$vmname.qcow2 ]; then
echo "Randomly generated name matched an existing VM! Odds are like one in a billion. Buy a lotto ticket."
exit 1
fi
cp "$template_file" "/tank/vm/$disk"
cp "$template_file" "$disk"
cp /tank/config/cyberia-cloudinit.yml /tmp/cloudinit.yml
echo "$pubkeys" | while IFS= read -r line; do
echo " - $line" >> /tmp/cloudinit.yml
done
cloud-localds "/tank/vm/$cdrom" /tmp/cloudinit.yml
cloud-localds "$cdrom" /tmp/cloudinit.yml
qemu-img resize "/tank/vm/$disk" "$root_volume_size"
qemu-img resize "$disk" "$root_volume_size"
virt-install \
--memory "$memory" \
--vcpus "$vcpus" \
--name "$vmname" \
--disk "$qemu_tank_dir/vm/$disk",bus=virtio \
--disk "$qemu_tank_dir/vm/$cdrom",device=cdrom \
--disk "$disk",bus=virtio \
--disk "$cdrom",device=cdrom \
--os-type Linux \
--os-variant generic \
--virt-type kvm \
--graphics vnc,listen=127.0.0.1 \
--network network=$network_name,model=virtio \
--network network=public1,filterref=clean-traffic,model=virtio \
--import \
--print-xml > "/tank/vm/$xml"
--print-xml > "$xml"
chmod 0600 "/tank/vm/$xml" "/tank/vm/$disk" "/tank/vm/$cdrom"
virsh define "/tank/vm/$xml"
chmod 0600 "$xml" "$disk" "$cdrom"
virsh define "$xml"
virsh start "$vmname"
echo "success"

View File

@ -104,7 +104,7 @@ def handle_create(operation_id, request_body):
current_app.logger.error(f"/hosts/operation returned 400: operation_id is required for create ")
return abort(400, f"bad request; operation_id is required. try POST /spoke/operation/<id>")
parameters = ["email", "id", "os", "size", "template_image_file_name", "vcpus", "memory_mb", "ssh_authorized_keys"]
parameters = ["email", "id", "template_image_file_name", "vcpus", "memory_mb", "ssh_authorized_keys"]
error_message = ""
for parameter in parameters:
if parameter not in request_body:
@ -115,28 +115,14 @@ def handle_create(operation_id, request_body):
return abort(400, f"bad request; {error_message}")
# only one host should create the vm, so we first race to assign this create operation to ourselves.
# only one host will win this race.
# only one host will win this race
authorization_header = f"Bearer {current_app.config['SPOKE_HOST_TOKEN']}"
url = f"{current_app.config['HUB_URL']}/hub/claim-operation/{operation_id}/{current_app.config['SPOKE_HOST_ID']}"
result = current_app.config['HTTP_CLIENT'].do_http_sync(url, body=None, authorization_header=authorization_header)
assignment_status = ""
if result.status_code == 200:
try:
assignment_info = json.loads(result.body)
if not isinstance(assignment_info, dict):
return abort(503, f"hub at '{url}' returned 200, but did not return assignment_info json object")
if 'network_name' not in assignment_info:
return abort(503, f"hub at '{url}' returned 200, but the returned assignment_info object did not include network_name")
if 'public_ipv4' not in assignment_info:
return abort(503, f"hub at '{url}' returned 200, but the returned assignment_info object did not include public_ipv4")
assignment_status = "assigned"
request_body['network_name'] = assignment_info['network_name']
request_body['public_ipv4'] = assignment_info['public_ipv4']
except:
return abort(503, f"hub at '{url}' returned 200, but did not return valid json")
assignment_status = "assigned"
elif result.status_code == 409:
assignment_status = "assigned_to_other_host"
else:
@ -151,9 +137,7 @@ def handle_create(operation_id, request_body):
template_image_file_name=request_body['template_image_file_name'],
vcpus=request_body['vcpus'],
memory_mb=request_body['memory_mb'],
ssh_authorized_keys=list(map(lambda x: x['content'], request_body['ssh_authorized_keys'])),
network_name=request_body['network_name'],
public_ipv4=request_body['public_ipv4'],
ssh_authorized_keys=request_body['ssh_authorized_keys'],
)
except:
error_message = my_exec_info_message(sys.exc_info())
@ -163,11 +147,7 @@ def handle_create(operation_id, request_body):
params= f"{params} vcpus='{request_body['vcpus'] if 'vcpus' in request_body else 'KeyError'}', "
params= f"{params} memory_mb='{request_body['memory_mb'] if 'memory_mb' in request_body else 'KeyError'}', "
params= f"{params} ssh_authorized_keys='{request_body['ssh_authorized_keys'] if 'ssh_authorized_keys' in request_body else 'KeyError'}', "
params= f"{params} network_name='{request_body['network_name'] if 'network_name' in request_body else 'KeyError'}', "
params= f"{params} public_ipv4='{request_body['public_ipv4'] if 'public_ipv4' in request_body else 'KeyError'}', "
current_app.logger.error(f"spoke_model.create({params}) failed: {error_message}")
return jsonify(dict(assignment_status=assignment_status, error_message=error_message))
return jsonify(dict(assignment_status=assignment_status))

View File

@ -14,37 +14,28 @@ from capsulflask.shared import VirtualizationInterface, VirtualMachine, validate
class MockSpoke(VirtualizationInterface):
def __init__(self):
self.capsuls = dict()
def capacity_avaliable(self, additional_ram_bytes):
return True
def get(self, id, get_ssh_host_keys):
validate_capsul_id(id)
ipv4 = "1.1.1.1"
if id in self.capsuls:
ipv4 = self.capsuls[id]['public_ipv4']
if get_ssh_host_keys:
ssh_host_keys = json.loads("""[
{"key_type":"ED25519", "content":"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN8cna0zeKSKl/r8whdn/KmDWhdzuWRVV0GaKIM+eshh", "sha256":"V4X2apAF6btGAfS45gmpldknoDX0ipJ5c6DLfZR2ttQ"},
{"key_type":"RSA", "content":"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCvotgzgEP65JUQ8S8OoNKy1uEEPEAcFetSp7QpONe6hj4wPgyFNgVtdoWdNcU19dX3hpdse0G8OlaMUTnNVuRlbIZXuifXQ2jTtCFUA2mmJ5bF+XjGm3TXKMNGh9PN+wEPUeWd14vZL+QPUMev5LmA8cawPiU5+vVMLid93HRBj118aCJFQxLgrdP48VPfKHFRfCR6TIjg1ii3dH4acdJAvlmJ3GFB6ICT42EmBqskz2MPe0rIFxH8YohCBbAbrbWYcptHt4e48h4UdpZdYOhEdv89GrT8BF2C5cbQ5i9qVpI57bXKrj8hPZU5of48UHLSpXG8mbH0YDiOQOfKX/Mt", "sha256":"ghee6KzRnBJhND2kEUZSaouk7CD6o6z2aAc8GPkV+GQ"},
{"key_type":"ECDSA", "content":"ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBLLgOoATz9R4aS2kk7vWoxX+lshK63t9+5BIHdzZeFE1o+shlcf0Wji8cN/L1+m3bi0uSETZDOAWMP3rHLJj9Hk=", "sha256":"aCYG1aD8cv/TjzJL0bi9jdabMGksdkfa7R8dCGm1yYs"}
]""")
return VirtualMachine(id, current_app.config["SPOKE_HOST_ID"], ipv4=ipv4, state="running", ssh_host_keys=ssh_host_keys)
return VirtualMachine(id, current_app.config["SPOKE_HOST_ID"], ipv4="1.1.1.1", state="running", ssh_host_keys=ssh_host_keys)
return VirtualMachine(id, current_app.config["SPOKE_HOST_ID"], ipv4=ipv4, state="running")
return VirtualMachine(id, current_app.config["SPOKE_HOST_ID"], ipv4="1.1.1.1", state="running")
def list_ids(self) -> list:
return get_model().all_non_deleted_vm_ids()
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list, network_name: str, public_ipv4: str):
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
validate_capsul_id(id)
current_app.logger.info(f"mock create: {id} for {email}")
self.capsuls[id] = dict(email=email, id=id, network_name=network_name, public_ipv4=public_ipv4)
sleep(1)
def destroy(self, email: str, id: str):
@ -138,7 +129,7 @@ class ShellScriptSpoke(VirtualizationInterface):
self.validate_completed_process(completedProcess)
return list(map(lambda x: x.decode("utf-8"), completedProcess.stdout.splitlines() ))
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list, network_name: str, public_ipv4: str):
def create(self, email: str, id: str, template_image_file_name: str, vcpus: int, memory_mb: int, ssh_authorized_keys: list):
validate_capsul_id(id)
if not re.match(r"^[a-zA-Z0-9/_.-]+$", template_image_file_name):
@ -148,18 +139,12 @@ class ShellScriptSpoke(VirtualizationInterface):
if not re.match(r"^(ssh|ecdsa)-[0-9A-Za-z+/_=@:. -]+$", ssh_authorized_key):
raise ValueError(f"ssh_authorized_key \"{ssh_authorized_key}\" must match \"^(ssh|ecdsa)-[0-9A-Za-z+/_=@:. -]+$\"")
if isinstance(vcpus, int) and (vcpus < 1 or vcpus > 8):
if vcpus < 1 or vcpus > 8:
raise ValueError(f"vcpus \"{vcpus}\" must match 1 <= vcpus <= 8")
if isinstance(memory_mb, int) and (memory_mb < 512 or memory_mb > 16384):
if memory_mb < 512 or memory_mb > 16384:
raise ValueError(f"memory_mb \"{memory_mb}\" must match 512 <= memory_mb <= 16384")
if not re.match(r"^[a-zA-Z0-9_-]+$", network_name):
raise ValueError(f"network_name \"{network_name}\" must match \"^[a-zA-Z0-9_-]+\"")
if not re.match(r"^[0-9.]+$", public_ipv4):
raise ValueError(f"public_ipv4 \"{public_ipv4}\" must match \"^[0-9.]+$\"")
ssh_keys_string = "\n".join(ssh_authorized_keys)
completedProcess = run([
@ -168,9 +153,7 @@ class ShellScriptSpoke(VirtualizationInterface):
template_image_file_name,
str(vcpus),
str(memory_mb),
ssh_keys_string,
network_name,
public_ipv4
ssh_keys_string
], capture_output=True)
self.validate_completed_process(completedProcess, email)
@ -183,8 +166,6 @@ class ShellScriptSpoke(VirtualizationInterface):
vcpus={str(vcpus)}
memory={str(memory_mb)}
ssh_authorized_keys={ssh_keys_string}
network_name={network_name}
public_ipv4={public_ipv4}
"""
if not status == "success":

View File

@ -31,7 +31,7 @@
{% if session["account"] %}
<a href="/console">Capsuls</a>
<a href="/console/ssh">SSH Public Keys</a>
<a href="/console/keys">SSH &amp; API Keys</a>
<a href="/console/account-balance">Account Balance</a>
{% endif %}

View File

@ -101,7 +101,7 @@
</div>
<div class="row justify-start">
<label class="align" for="ssh_authorized_keys">SSH Authorized Keys</label>
<a id="ssh_authorized_keys" href="/console/ssh">{{ vm['ssh_authorized_keys'] }}</a>
<a id="ssh_authorized_keys" href="/console/keys">{{ vm['ssh_authorized_keys'] }}</a>
</div>
</div>

View File

@ -31,7 +31,7 @@
<p>(At least one month of funding is required)</p>
{% elif no_ssh_public_keys %}
<p>You don't have any ssh public keys yet.</p>
<p>You must <a href="/console/ssh">upload one</a> before you can create a Capsul.</p>
<p>You must <a href="/console/keys">upload one</a> before you can create a Capsul.</p>
{% elif not capacity_avaliable %}
<p>Host(s) at capacity. No capsuls can be created at this time. sorry. </p>
{% else %}

View File

@ -1,17 +1,18 @@
{% extends 'base.html' %}
{% block title %}SSH Public Keys{% endblock %}
{% block title %}SSH &amp; API Keys{% endblock %}
{% block content %}
<div class="row third-margin">
<h1>SSH PUBLIC KEYS</h1>
</div>
<div class="row third-margin"><div>
{% if has_ssh_public_keys %} <hr/> {% endif %}
{% if ssh_public_keys|length > 0 %} <hr/> {% endif %}
{% for ssh_public_key in ssh_public_keys %}
<form method="post">
<input type="hidden" name="method" value="DELETE"></input>
<input type="hidden" name="action" value="delete_ssh_key"></input>
<input type="hidden" name="name" value="{{ ssh_public_key['name'] }}"></input>
<input type="hidden" name="csrf-token" value="{{ csrf_token }}"/>
<div class="row">
@ -22,13 +23,14 @@
</form>
{% endfor %}
{% if has_ssh_public_keys %} <hr/> {% endif %}
{% if ssh_public_keys|length > 0 %} <hr/> {% endif %}
<div class="third-margin">
<h1>UPLOAD A NEW SSH PUBLIC KEY</h1>
</div>
<form method="post">
<input type="hidden" name="method" value="POST"></input>
<input type="hidden" name="action" value="upload_ssh_key"></input>
<input type="hidden" name="csrf-token" value="{{ csrf_token }}"/>
<div class="row justify-start">
<label class="align" for="content">File Contents</label>
@ -54,6 +56,51 @@
</div>
</form>
</div></div>
<hr/>
<div class="row third-margin">
<h1>API KEYS</h1>
</div>
<div class="row third-margin"><div>
{% if generated_api_token %}
<hr/>
Generated key:
<span class="code">{{ generated_api_token }}</span>
{% endif %}
{% if api_tokens|length >0 %} <hr/>{% endif %}
{% for api_token in api_tokens %}
<form method="post">
<input type="hidden" name="method" value="DELETE"></input>
<input type="hidden" name="action" value="delete_api_token"></input>
<input type="hidden" name="id" value="{{ api_token['id'] }}"></input>
<input type="hidden" name="csrf-token" value="{{ csrf_token }}"/>
<div class="row">
<span class="code">{{ api_token['name'] }}</span>
created {{ api_token['created'].strftime("%b %d %Y") }}
<input type="submit" value="Delete">
</div>
</form>
{% endfor %}
{% if api_tokens|length >0 %} <hr/>{% endif %}
<div class="third-margin">
<h1>GENERATE A NEW API KEY</h1>
</div>
<form method="post">
<input type="hidden" name="method" value="POST"></input>
<input type="hidden" name="action" value="generate_api_token"></input>
<input type="hidden" name="csrf-token" value="{{ csrf_token }}"/>
<div class="smalltext">
<p>Generate a new API key, to integrate with other systems.</p>
</div>
<div class="row justify-start">
<label class="align" for="name">Key Name</label>
<input type="text" id="name" name="name"></input> (defaults to creation time)
</div>
<div class="row justify-end">
<input type="submit" value="Generate">
</div>
</form>
</div></div>
{% endblock %}
{% block pagesource %}/templates/ssh-public-keys.html{% endblock %}

View File

@ -7,26 +7,14 @@ services:
build: .
volumes:
- "./:/app/code"
- "../tank:/tank"
- "/var/run/libvirt/libvirt-sock:/var/run/libvirt/libvirt-sock"
depends_on:
- db
ports:
- "5000:5000"
environment:
- "POSTGRES_CONNECTION_PARAMETERS=host=db port=5432 user=capsul password=capsul dbname=capsul"
- SPOKE_MODEL=shell-scripts
#- FLASK_DEBUG=1
- BASE_URL=http://localhost:5000
- ADMIN_PANEL_ALLOW_EMAIL_ADDRESSES=3wc.capsul@doesthisthing.work
- VIRSH_DEFAULT_CONNECT_URI=qemu:///system
# The image uses gunicorn by default, let's override it with Flask's
# built-in development server
command: ["flask", "run", "-h", "0.0.0.0", "-p", "5000"]
devices:
- "/dev/kvm:/dev/kvm"
db:
image: "postgres:9.6.5-alpine"
image: "postgres:9.6.5"
volumes:
- "postgres:/var/lib/postgresql/data"
environment: