+{% endblock %}
diff --git a/server/src/celery.py b/server/src/celery.py
new file mode 100644
index 00000000..ddcff993
--- /dev/null
+++ b/server/src/celery.py
@@ -0,0 +1,139 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+import shutil
+import os
+from datetime import datetime, timedelta
+from celery import Celery
+from flask_mail import Message
+from smtplib import SMTPException, SMTPServerDisconnected
+from celery.schedules import crontab
+from sqlalchemy import and_, false
+
+from .organisation import Organisation
+from .auth import User
+from .models.db_models import RemovedProject, Account, Project
+from .config import Configuration
+from . import mail, db
+from .storages.disk import move_to_tmp
+
+
+# create on flask app independent object
+# we need this for defining tasks, and celery is then configured in run_celery.py
+celery = Celery(__name__, broker=Configuration.CELERY_BROKER_URL, backend=Configuration.CELERY_RESULT_BACKEND)
+
+
@celery.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register periodic maintenance tasks once celery is configured.

    Connected to celery's ``on_after_configure`` signal; ``sender`` is the
    configured celery application. All jobs run nightly (UTC per celery default).
    """
    sender.add_periodic_task(crontab(hour=2, minute=0), remove_temp_files, name='clean temp files')
    sender.add_periodic_task(crontab(hour=2, minute=0), remove_projects_backups, name='remove old project backups')
    sender.add_periodic_task(crontab(hour=1, minute=0), remove_accounts_data, name='remove personal data of inactive users')
+
+
@celery.task(
    autoretry_for=(SMTPException, SMTPServerDisconnected, ),
    retry_kwargs={'max_retries': 3, 'default_retry_delay': 300},
    ignore_result=True)
def send_email_async(**kwargs):
    """
    Send flask mail asynchronously (celery wrapper around send_email).

    Retried automatically up to 3 times (5 min apart) on SMTP failures.

    :param kwargs: content for flask mail Message (subject, recipients, body, ...)
    :type kwargs: dict
    """
    return send_email(**kwargs)
+
+
def send_email(**kwargs):
    """
    Send flask mail (application context needed).

    :param kwargs: content for flask mail Message (subject, recipients, body, ...)
    :type kwargs: dict
    """
    msg = Message(**kwargs)
    # let's add default sender to BCC on production/staging server to make sure emails are in inbox
    if not Configuration.MERGIN_TESTING:
        msg.bcc.append(Configuration.MAIL_DEFAULT_SENDER)
    mail.send(msg)
+
+
@celery.task
def remove_temp_files():
    """ Remove old temp folders in mergin temp directory.
    This is clean up for storages.disk.move_to_tmp() function.

    Anything (except the 'notifications' folder) whose mtime is older than
    TEMP_EXPIRATION days is deleted; failures are logged and skipped.
    """
    # hoist the loop-invariant expiration threshold out of the loop
    threshold = datetime.utcnow() - timedelta(days=Configuration.TEMP_EXPIRATION)
    # 'entry' instead of 'dir' to avoid shadowing the builtin
    for entry in os.listdir(Configuration.TEMP_DIR):
        # ignore folder with apple notifications receipts which we want (temporarily) to maintain
        if entry == 'notifications':
            continue
        path = os.path.join(Configuration.TEMP_DIR, entry)
        if datetime.fromtimestamp(os.path.getmtime(path)) < threshold:
            try:
                shutil.rmtree(path)
            except OSError as e:
                print(f"Unable to remove {path}: {str(e)}")
+
+
@celery.task
def remove_projects_backups():
    """ Permanently remove deleted projects. All data is lost, and project could not be restored anymore """
    # backups older than DELETED_PROJECT_EXPIRATION days are purged
    projects = RemovedProject.query.filter(
        RemovedProject.timestamp < datetime.utcnow() - timedelta(days=Configuration.DELETED_PROJECT_EXPIRATION)
    ).all()

    for p in projects:
        p_dir = os.path.abspath(os.path.join(Configuration.LOCAL_PROJECTS, p.properties["storage_params"]["location"]))
        if os.path.exists(p_dir):
            # files go to the temp dir; remove_temp_files deletes them later
            move_to_tmp(p_dir)
        db.session.delete(p)
    db.session.commit()
+
+
@celery.task
def remove_accounts_data():
    """ Anonymize accounts closed more than CLOSED_ACCOUNT_EXPIRATION days ago
    and permanently remove their projects.

    Personal data (username/email/org name, ...) is replaced with a unique
    'deleted_<timestamp>' placeholder so the db record survives but can no
    longer be linked to a person.
    """
    before_expiration = datetime.today() - timedelta(days=Configuration.CLOSED_ACCOUNT_EXPIRATION)

    # regex condition to account name to avoid process deleted accounts multiple times
    # (raw strings so '\d' is passed to postgres verbatim)
    subquery = db.session.query(User.id).filter(User.active == false(), User.inactive_since <= before_expiration, User.username.op("~")(r'^(?!deleted_\d{13})')).subquery()
    subquery2 = db.session.query(Organisation.id).filter(Organisation.active == false(), Organisation.inactive_since <= before_expiration, Organisation.name.op("~")(r'^(?!deleted_\d{13})')).subquery()
    accounts = Account.query.filter(and_(Account.owner_id.in_(subquery), Account.type == "user") | and_(Account.owner_id.in_(subquery2), Account.type == "organisation"))

    for account in accounts:
        # millisecond timestamp gives the 13-digit suffix matched by the regex above
        timestamp = round(datetime.now().timestamp() * 1000)
        if account.type == 'user':
            user = User.query.get(account.owner_id)

            user.username = f"deleted_{timestamp}"
            user.email = f"deleted_{timestamp}"
            user.verified_email = False
            user.assign_password(f"deleted_{timestamp}")
            # fixed typo: was 'firs_name', which silently created a new
            # attribute and left the real first name in the database
            user.profile.first_name = ""
            user.profile.last_name = ""

        else:
            organisation = Organisation.query.get(account.owner_id)
            organisation.name = f"deleted_{timestamp}"
            organisation.description = ""

        # delete account's projects
        projects = Project.query.filter_by(namespace=account.namespace.name).all()
        for p in projects:
            p_dir = p.storage.project_dir
            if os.path.exists(p_dir):
                move_to_tmp(p_dir)
            db.session.delete(p)

        # delete account's removed projects
        projects = RemovedProject.query.filter_by(namespace=account.namespace.name).all()
        for p in projects:
            p_dir = os.path.abspath(os.path.join(Configuration.LOCAL_PROJECTS, p.properties["storage_params"]["location"]))
            if os.path.exists(p_dir):
                move_to_tmp(p_dir)
            db.session.delete(p)

        # commit project deletions before renaming the namespace itself
        db.session.commit()
        account.namespace.name = f"deleted_{timestamp}"

    db.session.commit()
diff --git a/server/src/config.py b/server/src/config.py
new file mode 100644
index 00000000..7b739d9f
--- /dev/null
+++ b/server/src/config.py
@@ -0,0 +1,88 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+import os
+from tempfile import tempdir
+from .version import get_version
+from decouple import config, Csv
+
+config_dir = os.path.abspath(os.path.dirname(__file__))
+
+
class Configuration(object):
    """Application settings, each overridable via environment (python-decouple)."""
    DEBUG = config('FLASK_DEBUG', default=False, cast=bool)
    TESTING = config('TESTING', default=False, cast=bool)
    SECRET_KEY = config('SECRET_KEY')
    PROXY_FIX = config('PROXY_FIX', default=True, cast=bool)
    SWAGGER_UI = config('SWAGGER_UI', default=False, cast=bool)  # to enable swagger UI console (for test only)
    VERSION = config('VERSION', default=get_version())
    PUBLIC_DIR = config('PUBLIC_DIR', default=os.path.join(config_dir, os.pardir, 'build', 'static'))
    # for local storage type
    LOCAL_PROJECTS = config('LOCAL_PROJECTS', default=os.path.join(config_dir, os.pardir, os.pardir, 'projects'))

    # Mergin DB related
    SQLALCHEMY_TRACK_MODIFICATIONS = config('SQLALCHEMY_TRACK_MODIFICATIONS', default=False, cast=bool)
    SQLALCHEMY_ENGINE_OPTIONS = {
        'pool_size': 2,
        'pool_timeout': 300
    }
    DB_USER = config('DB_USER', default='postgres')
    DB_PASSWORD = config('DB_PASSWORD', default='postgres')
    DB_HOST = config('DB_HOST', default='localhost')
    DB_PORT = config('DB_PORT', default=5002, cast=int)
    DB_DATABASE = config('DB_DATABASE', default='postgres')
    DB_APPLICATION_NAME = config('DB_APPLICATION_NAME', default='mergin')
    # NOTE(review): read via os.getenv (not decouple), so a .env file entry is ignored here — confirm intended
    SQLALCHEMY_DATABASE_URI = os.getenv('SQLALCHEMY_DATABASE_URI', f'postgresql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_DATABASE}?application_name={DB_APPLICATION_NAME}')

    # auth related
    SECURITY_PASSWORD_SALT = config('SECURITY_PASSWORD_SALT')
    WTF_CSRF_TIME_LIMIT = config('WTF_CSRF_TIME_LIMIT', default=3600 * 24, cast=int)  # in seconds
    BEARER_TOKEN_EXPIRATION = config('BEARER_TOKEN_EXPIRATION', default=3600 * 12, cast=int)  # in seconds
    WTF_CSRF_ENABLED = config('WTF_CSRF_ENABLED', default=True, cast=bool)

    # for flask mail
    MAIL_SERVER = config('MAIL_SERVER', default='localhost')
    MAIL_PORT = config('MAIL_PORT', default=587, cast=int)
    MAIL_USE_TLS = config('MAIL_USE_TLS', default=True, cast=bool)
    MAIL_DEFAULT_SENDER = config('MAIL_DEFAULT_SENDER')
    MAIL_USERNAME = config('MAIL_USERNAME')
    MAIL_PASSWORD = config('MAIL_PASSWORD')
    # fixed copy-paste bug: previously read the MAIL_SUPPRESS_SEND env key,
    # making MAIL_DEBUG impossible to configure independently
    MAIL_DEBUG = config('MAIL_DEBUG', default=False, cast=bool)
    MAIL_SUPPRESS_SEND = config('MAIL_SUPPRESS_SEND', default=True, cast=bool)

    USER_SELF_REGISTRATION = config('USER_SELF_REGISTRATION', default=True, cast=bool)

    # locking file when backups are created
    MAINTENANCE_FILE = config('MAINTENANCE_FILE', default=os.path.join(LOCAL_PROJECTS, 'MAINTENANCE'))

    # data sync
    LOCKFILE_EXPIRATION = config('LOCKFILE_EXPIRATION', default=300, cast=int)  # in seconds
    MAX_CHUNK_SIZE = config('MAX_CHUNK_SIZE', default=10 * 1024 * 1024, cast=int)  # in bytes
    USE_X_ACCEL = config('USE_X_ACCEL', default=False, cast=bool)  # use nginx (in front of gunicorn) to serve files (https://www.nginx.com/resources/wiki/start/topics/examples/x-accel/)
    FILE_EXPIRATION = config('FILE_EXPIRATION', default=48 * 3600, cast=int)  # for clean up of old files where diffs were applied, in seconds
    BLACKLIST = config('BLACKLIST', default='.mergin/, .DS_Store, .directory', cast=Csv())

    # celery
    CELERY_IMPORTS = config('CELERY_IMPORTS', default="src.celery")
    CELERY_BROKER_URL = config('CELERY_BROKER_URL', default='redis://172.17.0.1:6379/0')
    CELERY_RESULT_BACKEND = config('CELERY_RESULT_BACKEND', default='redis://172.17.0.1:6379/0')

    # various life times
    TRANSFER_EXPIRATION = config('TRANSFER_EXPIRATION', default=7 * 24 * 3600, cast=int)  # in seconds
    ORGANISATION_INVITATION_EXPIRATION = config('ORGANISATION_INVITATION_EXPIRATION', default=7 * 24 * 3600, cast=int)  # in seconds
    PROJECT_ACCESS_REQUEST = config('PROJECT_ACCESS_REQUEST', default=7 * 24 * 3600, cast=int)

    TEMP_EXPIRATION = config('TEMP_EXPIRATION', default=7, cast=int)  # time in days after files are permanently deleted
    CLOSED_ACCOUNT_EXPIRATION = config('CLOSED_ACCOUNT_EXPIRATION', default=5, cast=int)  # time in days after user closed his account to all projects and files are permanently deleted
    DELETED_PROJECT_EXPIRATION = config('DELETED_PROJECT_EXPIRATION', default=7, cast=int)  # lifetime of deleted project, expired project are removed permanently without restore possibility, in days

    # trash dir for temp files being cleaned regularly
    TEMP_DIR = config('TEMP_DIR', default=tempdir)

    # for links generated in emails
    MERGIN_BASE_URL = config('MERGIN_BASE_URL', default="http://localhost:5000")
    # for link to logo in emails
    MERGIN_LOGO_URL = config('MERGIN_LOGO_URL', default="")

    MERGIN_SUBSCRIPTIONS = config('MERGIN_SUBSCRIPTIONS', default=False, cast=bool)
    MERGIN_TESTING = config('MERGIN_TESTING', default=False, cast=bool)
diff --git a/server/src/controllers/__init__.py b/server/src/controllers/__init__.py
new file mode 100644
index 00000000..5bb08739
--- /dev/null
+++ b/server/src/controllers/__init__.py
@@ -0,0 +1,2 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
\ No newline at end of file
diff --git a/server/src/controllers/account_controller.py b/server/src/controllers/account_controller.py
new file mode 100644
index 00000000..da58090b
--- /dev/null
+++ b/server/src/controllers/account_controller.py
@@ -0,0 +1,257 @@
+# Copyright (C) 2021 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from datetime import datetime
+from operator import and_
+
+from flask import Blueprint, abort, jsonify, request
+from flask_login import current_user
+from sqlalchemy import or_, desc, asc, true
+
+from src.auth import AccountSearchSchema
+from .. import db
+from ..auth import auth_required
+from ..models.db_models import Account, Namespace
+from ..models.schemas import AccountSchema, AccountExtendedSchema
+from ..auth.models import User
+from ..models.db_models import Project, ProjectAccess, ProjectTransfer
+from ..organisation.models import OrganisationInvitation, Organisation
+
+account = Blueprint("account", __name__)
+
+
@account.route('/accounts/<int:account_id>', methods=['GET'])
@auth_required(permissions=['admin'])
def get_account_by_id(account_id):  # pylint: disable=W0613,W0612
    """ get account by id

    Route fixed: the path previously lacked the <int:account_id> variable, so
    Flask could never supply the parameter (and the path collided with the
    account listing endpoint).

    :rtype: Account
    """
    acc = Account.query.get(account_id)  # may be None; serialized as null then
    data = AccountSchema().dump(acc)
    return jsonify(data), 200
+
+
@account.route('/accounts/<type>', methods=['GET'])
@auth_required(permissions=['admin'])
def list_accounts(type):  # pylint: disable=W0613,W0612
    """ List of either user or organisation paginated accounts with optional filters and sort.

    Route fixed: the path previously lacked the <type> variable, so Flask could
    never supply the parameter. ('type' shadows the builtin but is part of the
    URL interface, so it is kept.)

    :param type: account type, either 'user' or 'organisation'
    :type type: str
    :returns: Total number of accounts and paginated results for accounts
    :rtype: dict(total: int, accounts: List[Account])
    """
    try:
        page = int(request.args.get('page', 1))
        per_page = int(request.args.get('per_page', 10))
    except ValueError:
        abort(400, "Invalid query format")
    order_by = request.args.get('order_by')
    descending = str(request.args.get('descending', 'false')) == 'true'
    name = str(request.args.get('name', ''))
    if type == "user":
        model = User
        name_col = User.username
        active_col = User.active
    elif type == "organisation":
        model = Organisation
        name_col = Organisation.name
        active_col = Organisation.active
    else:
        abort(400, 'Invalid account type')

    query = db.session.query(
        Account.id,
        Account.type,
        name_col.label("name"),
        active_col.label("active"),
        Namespace.storage
    )\
        .join(model, Account.owner_id == model.id) \
        .join(Namespace, Namespace.account_id == Account.id) \
        .filter(Account.type == type)

    if name:
        # case-insensitive substring filter on account name
        query = query.filter(name_col.ilike(f'%{name}%'))

    # sort by some column (only 'name' is supported so far)
    col = None
    if order_by:
        if order_by == 'name':
            col = name_col

    if col:
        query = query.order_by(desc(col)) if descending else query.order_by(asc(col))

    paginate = query.paginate(page, per_page, max_per_page=100)
    result = paginate.items
    total = paginate.total
    accounts = AccountExtendedSchema(many=True).dump(result)
    return jsonify(accounts=accounts, total=total), 200
+
+
@account.route('/change_account_status/<int:account_id>', methods=['PATCH'])
@auth_required(permissions=['admin'])
def change_account_status(account_id):
    """
    Change account active status to true or false.

    Route fixed: the path previously lacked the <int:account_id> variable.

    :param account_id: id of account to (de)activate
    :return changed status:
    """
    # read the payload once instead of four separate request.json.get calls
    status = request.json.get("status")
    if status is None:
        abort(400, "Status is empty")
    account = Account.query.get_or_404(account_id, "Account not found")
    owner = account.owner()
    owner.active = status
    owner.inactive_since = datetime.utcnow() if not owner.active else None
    if status is False:
        # run account deactivation hooks
        account.inactivated("deactivate")
    db.session.commit()

    return jsonify({'status': status}), 200
+
+
@account.route('/account/<int:id>', methods=['DELETE'])
@auth_required
def close_account(id):  # pylint: disable=W0613,W0612
    """ Close account.
    All related objects remain (temporarily) in database and files on disk, following actions are done:

    - account holder is turned to inactive
    - users's reference from 3rd parties integration is removed (e.g. Stripe)
    - all references in projects' permissions are removed
    - all pending project transfers related to account namespace are removed
    - all membership in organisations and pending invitations are removed

    For permanent delete, account holder object needs to be deleted.

    Route fixed: the path previously lacked the <int:id> variable, so Flask
    could never supply the parameter. ('id' shadows the builtin but is part of
    the URL interface, so it is kept.)
    """
    account = Account.query.get_or_404(id, f'Account {id} not found')
    if not account.can_edit(current_user.id) and not current_user.is_admin:
        abort(403)

    if account.type == 'user':
        user = User.query.get(account.owner_id)
        # remove membership in organisations
        organisations = Organisation.query.filter(or_(
            Organisation.owners.contains([user.id]),
            Organisation.admins.contains([user.id]),
            Organisation.writers.contains([user.id]),
            Organisation.readers.contains([user.id])
        )).all()

        # a user can not leave an organisation they solely own
        user_organisation = next((o for o in organisations if o.owners == [user.id]), None)
        if user_organisation:
            abort(400, f"Can not close account because user is the only owner of organisation {user_organisation.name}")

        for org in organisations:
            for key in ('owners', 'admins', 'writers', 'readers'):
                value = set(getattr(org, key))
                if user.id in value:
                    value.remove(user.id)
                setattr(org, key, list(value))
            db.session.add(org)

        # remove user reference from shared projects
        shared_projects = Project.query \
            .filter(Project.namespace != account.namespace.name) \
            .filter(or_(
                Project.access.has(ProjectAccess.owners.contains([user.id])),
                Project.access.has(ProjectAccess.writers.contains([user.id])),
                Project.access.has(ProjectAccess.readers.contains([user.id]))
            )).all()

        for p in shared_projects:
            for key in ('owners', 'writers', 'readers'):
                value = set(getattr(p.access, key))
                if user.id in value:
                    value.remove(user.id)
                setattr(p.access, key, list(value))
            db.session.add(p)

        # remove pending invitations
        invitations = OrganisationInvitation.query.filter_by(username=user.username).all()
        for i in invitations:
            db.session.delete(i)

    else:
        # organisation account: only pending invitations need removing
        # (dead 'organisation = Organisation.query.get(...)' lookup dropped — it was never used)
        invitations = OrganisationInvitation.query.filter_by(org_name=account.name()).all()
        for i in invitations:
            db.session.delete(i)

    # reset permissions for namespace's projects
    projects = Project.query.filter_by(namespace=account.namespace.name).all()
    for p in projects:
        p.access.owners = []
        p.access.writers = []
        p.access.readers = []
        db.session.add(p)

    # remove pending project transfers (both directions)
    transfers = ProjectTransfer.query.filter(or_(
        ProjectTransfer.from_ns_name == account.namespace.name,
        ProjectTransfer.to_ns_name == account.namespace.name
    )).all()
    for t in transfers:
        db.session.delete(t)

    # run account removal hooks (3rd party cleanup per docstring)
    account.inactivated("delete")

    # inactivate account
    owner = account.owner()
    owner.active = False
    owner.inactive_since = datetime.utcnow()

    db.session.add(account)
    db.session.commit()
    return '', 200
+
@account.route('/account/change_storage/<int:account_id>', methods=['POST'])
@auth_required(permissions=['admin'])
def change_storage(account_id):  # pylint: disable=W0613,W0612
    """ Change storage.
    Change account storage with new value

    Route fixed: the path previously lacked the <int:account_id> variable.

    - account_id account id
    - storage: new storage value in bytes
    """
    namespace = Namespace.query.filter(Namespace.account_id == account_id).first_or_404(f'Namespace for accountId: {account_id} not found')
    if not request.json.get("storage"):
        abort(400, "Storage is empty")
    try:
        storage = int(request.json.get("storage"))
    except (TypeError, ValueError):
        # narrowed from bare 'except Exception as e' (var was unused)
        abort(400, "Storage is not a number")
    namespace.storage = storage
    db.session.commit()
    return '', 200
+
+
@account.route('/accounts/search', methods=['GET'])
@auth_required(permissions=['admin'])
def search_accounts_by_name():  # pylint: disable=W0613,W0612
    """
    search by like param returns results in order: 1.) match is on start of words - ordered by id
    2.) match is anywhere - ordered by id

    Fixed: 'accounts' was unbound when the 'like' query parameter was missing,
    crashing with NameError; an empty list is returned instead now.
    """
    from src.models.db_models import Account
    from src.organisation import Organisation

    # NOTE(review): 'and_' comes from 'from operator import and_' at module top;
    # with exactly two SQLAlchemy clauses it behaves like sqlalchemy.and_ — confirm intended
    query = db.session.query(
        Account.id,
        Account.type,
        Organisation.name,
        User.username
    ) \
        .outerjoin(Organisation, and_(Account.owner_id == Organisation.id, Account.type == "organisation")) \
        .outerjoin(User, and_(Account.owner_id == User.id, Account.type == "user"))

    like = request.args.get('like')
    schema = AccountSearchSchema(many=True)
    accounts = []
    if like:
        ilike = "{}%".format(like)
        accounts = query.filter(and_(User.active == true(), (User.username.ilike(ilike) | User.username.op("~")(f'[\\.|\\-|_| ]{like}.*'))) |
                                and_(Organisation.active == true(), Organisation.name.ilike(ilike))).limit(10).all()
    return jsonify(schema.dump(accounts))
diff --git a/server/src/controllers/forms.py b/server/src/controllers/forms.py
new file mode 100644
index 00000000..63f3147c
--- /dev/null
+++ b/server/src/controllers/forms.py
@@ -0,0 +1,10 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from ..auth.forms import LoginForm
+
+
class ApiLoginForm(LoginForm):
    """Login form for API clients, with CSRF protection turned off."""
    class Meta:
        # API clients authenticate without a CSRF token
        csrf = False
+
diff --git a/server/src/controllers/namespace_controller.py b/server/src/controllers/namespace_controller.py
new file mode 100644
index 00000000..e5eb945f
--- /dev/null
+++ b/server/src/controllers/namespace_controller.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from flask import jsonify, abort
+
+from ..auth import auth_required
+from ..auth.models import User
+from ..models.db_models import Namespace, Account
+from ..models.schemas import NamespaceSchema
+from ..organisation.models import Organisation
+
+
def check_access_to_namespace(namespace, user):
    """Abort with 404 if the namespace does not exist, or with 403 if *user*
    may not write into it (neither their own namespace nor an organisation
    where they are a writer)."""
    Namespace.query.filter_by(name=namespace).first_or_404()
    org = Organisation.query.filter_by(name=namespace).first()
    owns_namespace = user.username == namespace
    writes_to_org = org is not None and user.id in org.writers
    if not (owns_namespace or writes_to_org):
        abort(403, "Permission denied.")
+
+
@auth_required
def search_namespace(namespace_type, q=None):  # pylint: disable=W0613,W0612
    """ Search namespace by query

    :param namespace_type: 'user' or 'organisation'; any other value yields an empty result
    :type namespace_type: str
    :param q: name prefix to match (case insensitive); empty query yields an empty result
    :type q: str
    :returns: JSON array of serialized namespaces, at most 5
    """
    namespaces = []
    # only namespaces of active owners are searched, matched by name prefix
    if namespace_type == "user":
        namespaces = Namespace.query.join(Namespace.account).join(User, User.username == Namespace.name).filter(User.active, Account.type == "user", Namespace.name.ilike(f"{q}%")).limit(5).all() if q else []
    elif namespace_type == "organisation":
        namespaces = Namespace.query.join(Namespace.account).join(Organisation, Organisation.name == Namespace.name).filter(Organisation.active, Account.type == "organisation", Namespace.name.ilike(f"{q}%")).limit(5).all() if q else []
    data = NamespaceSchema(many=True).dump(namespaces)
    return jsonify(data)
diff --git a/server/src/controllers/project_controller.py b/server/src/controllers/project_controller.py
new file mode 100644
index 00000000..583adb77
--- /dev/null
+++ b/server/src/controllers/project_controller.py
@@ -0,0 +1,929 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+import json
+import mimetypes
+import os
+import logging
+import copy
+from urllib.parse import quote
+import uuid
+from time import time
+from datetime import datetime, timedelta
+import psycopg2
+from connexion import NoContent, request
+from flask import abort, render_template, current_app, send_from_directory, jsonify, make_response
+from pygeodiff import GeoDiffLibError
+from sqlalchemy.orm import joinedload
+from flask_login import current_user
+from sqlalchemy.types import String
+from sqlalchemy.dialects.postgresql import ARRAY
+from sqlalchemy import cast, and_, or_, desc, asc
+from sqlalchemy.exc import IntegrityError
+from sqlalchemy.orm.attributes import flag_modified
+from binaryornot.check import is_binary
+from gevent import sleep
+
+from .. import db, wm, SIG_NEW_PROJECT
+from ..auth import auth_required
+from ..auth.models import User, UserProfile
+from ..models.db_models import Project, ProjectAccess, ProjectVersion, Namespace, Upload, RemovedProject
+from ..models.schemas import ProjectSchema, ProjectListSchema, ProjectVersionSchema, FileInfoSchema, ProjectSchemaForVersion
+from ..organisation.models import Organisation
+from ..storages.storage import FileNotFound, DataSyncError, InitializationError
+from ..storages.disk import save_to_file, move_to_tmp
+from ..permissions import require_project, projects_query, ProjectPermissions, get_upload
+from ..mergin_utils import generate_checksum, Toucher, int_version, is_file_name_blacklisted, get_ip, get_user_agent, \
+ generate_location, is_valid_uuid
+from ..util import is_name_allowed, mergin_secure_filename, get_path_from_files
+from ..celery import send_email_async
+from .namespace_controller import check_access_to_namespace
+
+
+def _project_version_files(project, version=None):
+ if version:
+ pv = next((v for v in project.versions if v.name == version), None)
+ if not pv:
+ abort(404, "Project version does not exist")
+ return pv.files
+ return project.files
+
+
@auth_required
def add_project(namespace, project):  # noqa: E501
    """Add a new mergin project.

    # noqa: E501

    :param namespace: namespace (user or organisation) to create the project in
    :type namespace: str
    :param project: Project object that needs to be added to the database.
    :type project: dict | bytes

    :rtype: None
    """
    if not is_name_allowed(project['name']):
        abort(400, "Please use only alphanumeric or these -._~()'!*:@,; characters in project name.")

    project['name'] = project['name'].strip()
    if request.is_json:
        ua = get_user_agent(request)
        # 404 if the namespace is missing, 403 if current user may not write there
        check_access_to_namespace(namespace, current_user)

        proj = Project.query.filter_by(name=project['name'], namespace=namespace).first()
        if proj:
            abort(409, "Project {} already exists!".format(project['name']))

        # project files live on disk under a generated location, not under the project name
        project['storage_params'] = {"type": "local", "location": generate_location()}

        p = Project(**project, creator=current_user, namespace=namespace)
        p.updated = datetime.utcnow()
        pa = ProjectAccess(p, public=project.get('public', False))

        template = project.get('template', None)
        ip = get_ip(request)
        if template:
            # template projects belong to the dedicated 'TEMPLATES' user
            template = Project.query.\
                filter(Project.creator.has(username='TEMPLATES')).\
                filter(Project.name == template)\
                .first_or_404()
            # create mutable object
            temp_copy = copy.deepcopy(template)
            temp_files = ProjectSchema(only=('files',)).dump(temp_copy)
            changes = {"added": temp_files['files'], "renamed": [], "updated": [], "removed": []}
            # project initialized from a template starts at v1 with the template's files
            version = ProjectVersion(p, 'v1', current_user.username, changes, template.files, ip, ua)
            p.latest_version = 'v1'
        else:
            # an empty project starts at v0 with no files
            changes = {"added": [], "renamed": [], "updated": [], "removed": []}
            version = ProjectVersion(p, 'v0', current_user.username, changes, [], ip, ua)
            p.latest_version = 'v0'
        try:
            p.storage.initialize(template_project=template)
        except Exception as exc:
            abort(400, f"Failed to initialize project: {str(exc)}")

        p.versions.append(version)
        db.session.add(p)
        db.session.add(pa)
        db.session.commit()

        wm.emit_signal(SIG_NEW_PROJECT, request.path, msg=f'New project *{namespace}/{project["name"]}* has been created')
        return NoContent, 200
+
+
@auth_required
def delete_project(namespace, project_name):  # noqa: E501
    """Delete a project.

    A backup record (RemovedProject) is kept so the project can still be
    restored until it is purged by the periodic clean-up job.

    # noqa: E501

    :param project_name: name of project to delete.
    :type project_name: str

    :rtype: None
    """
    project = require_project(namespace, project_name, ProjectPermissions.Delete)
    backup = RemovedProject(project, current_user.username)
    db.session.add(backup)
    db.session.delete(project)
    db.session.commit()
    return NoContent, 200
+
+
def download_project(namespace, project_name, format=None, version=None):  # noqa: E501 # pylint: disable=W0622
    """Download dir for single project.

    # noqa: E501

    :param project_name: name of project to return.
    :type project_name: str
    :param format: output format
    :type format: str [zip]
    :param version: version tag name
    :type version: str

    :rtype: None
    """
    project = require_project(namespace, project_name, ProjectPermissions.Read)
    requested_files = _project_version_files(project, version)
    try:
        return project.storage.download_files(requested_files, format, version=version)
    except FileNotFound as err:
        abort(404, str(err))
+
+
def download_project_file(project_name, namespace, file, version=None, diff=None):  # noqa: E501
    """Download project file

    Download individual file or it's diff file from project. # noqa: E501

    :param project_name: Project name.
    :type project_name: str
    :param namespace: Namespace for project to look into.
    :type namespace: str
    :param file: Path to file.
    :type file: str
    :param version: Version tag.
    :type version: str
    :param diff: Ask for diff file instead of full one.
    :type diff: bool

    :rtype: None
    """
    project = require_project(namespace, project_name, ProjectPermissions.Read)
    files = _project_version_files(project, version)

    if diff and version:
        # get specific version of geodiff file modified in requested version
        file_obj = next((f for f in files if f['location'] == os.path.join(version, file)), None)
        if not file_obj:
            abort(404, file)
        if 'diff' not in file_obj:
            abort(404, f"No diff in particular file {file} version")
        file_path = file_obj['diff']['location']
    elif diff:
        # diff only makes sense together with a version
        abort(400, f"Changeset must be requested for particular file version")
    else:
        # get latest version of file
        file_path = next((f['location'] for f in files if f['path'] == file), None)

    if not file_path:
        abort(404, file)

    if version and not diff:
        # make sure the historic file content exists on disk before serving it
        project.storage.restore_versioned_file(file, version)

    if current_app.config['USE_X_ACCEL']:
        # encoding for nginx to be able to download file with non-ascii chars
        encoded_file_path = quote(file_path.encode("utf-8"))
        resp = make_response()
        resp.headers['X-Accel-Redirect'] = f"/download/{project.storage_params['location']}/{encoded_file_path}"
        resp.headers['X-Accel-Buffering'] = True
        resp.headers['X-Accel-Expires'] = 'off'
    else:
        resp = send_from_directory(project.storage.project_dir, file_path)
    abs_path = os.path.join(project.storage.project_dir, file_path)
    if not is_binary(abs_path):
        mime_type = "text/plain"
    else:
        mime_type = mimetypes.guess_type(abs_path)[0]
    resp.headers['Content-Type'] = mime_type
    resp.headers['Content-Disposition'] = 'attachment; filename={}'.format(quote(os.path.basename(file).encode("utf-8")))
    return resp
+
+
def get_project(project_name, namespace, since='', version=None):  # noqa: E501
    """Find project by name.

    Returns a single project with details about files including history for versioned files (diffs) if needed. # noqa: E501

    :param project_name: Name of project to return.
    :type project_name: str
    :param namespace: Namespace for project to look into.
    :type namespace: str
    :param since: Version to look up diff files history from.
    :type since: str
    :param version: Version to return files details for.
    :type version: str

    :rtype: ProjectDetail
    """
    project = require_project(namespace, project_name, ProjectPermissions.Read)

    if since and version:
        abort(400, "Parameters 'since' and 'version' are mutually exclusive")
    elif since:
        # append history for versioned files
        last_version = ProjectVersion.query.filter_by(project_id=project.id).order_by(
            ProjectVersion.created.desc()).first()
        for f in project.files:
            f['history'] = project.file_history(f['path'], since, last_version.name)
        data = ProjectSchema(exclude=['storage_params']).dump(project)
    elif version:
        # return project info at requested version
        version_obj = next((v for v in project.versions if v.name == version), None)
        if not version_obj:
            abort(404, "Project at requested version does not exist")
        data = ProjectSchemaForVersion().dump(version_obj)
    else:
        # return current project info
        data = ProjectSchema(exclude=['storage_params']).dump(project)
    return data, 200
+
+
def get_project_versions(namespace, project_name, version_id=None):  # noqa: E501
    """Get versions (history) of project.

    Returns a list of project versions with changes information. # noqa: E501

    :param project_name: Name of project to return versions for.
    :type project_name: str
    :param version_id:
    :type version_id: str

    :rtype: List[ProjectVersion]
    """
    project = require_project(namespace, project_name, ProjectPermissions.Read)
    # v0 is the empty initial version and is excluded from the listing
    query = ProjectVersion.query.filter(and_(ProjectVersion.project_id == project.id, ProjectVersion.name != "v0"))
    if version_id:
        # a single explicitly requested version replaces the listing query
        query = ProjectVersion.query.filter_by(project_id=project.id).filter_by(name=version_id)
    versions = query.order_by(ProjectVersion.created.desc()).all()
    data = ProjectVersionSchema(exclude=['files'], many=True).dump(versions)
    return data, 200
+
+
def get_projects_by_names(data):  # noqa: E501
    """List mergin projects by list of projects namespace and name.

    Returns limited list of projects; entries the caller may not read are
    reported with an "error" status code instead of project data.

    :rtype: Dict[namespace/projectName: Project]
    """
    requested = data.get('projects', [])
    if len(requested) > 50:
        abort(400, "Too many projects")
    results = {}
    for item in requested:
        readable = projects_query(ProjectPermissions.Read, as_admin=False)
        parts = item.split("/")
        if len(parts) != 2:
            # malformed key, must be "namespace/name"
            results[item] = {"error": 404}
            continue
        ns, name = parts
        match = readable.filter(Project.namespace == ns, Project.name == name).first()
        if match:
            member_ids = match.access.owners + match.access.writers + match.access.readers
            users_map = {u.id: u.username for u in User.query.filter(User.id.in_(set(member_ids))).all()}
            results[item] = ProjectListSchema(context={'users_map': users_map}).dump(match)
        elif not current_user or not current_user.is_authenticated:
            results[item] = {"error": 401}
        else:
            results[item] = {"error": 403}
    return results, 200
+
+
def get_projects_by_uuids(uuids):  # noqa: E501
    """Get mergin projects by list of projects ids.

    Returns a list of projects filtered by ids; invalid uuids are silently dropped.

    :param uuids: comma separated list of project uuids
    :type uuids: str
    :rtype: dict{project.id: project}
    """
    proj_ids = [candidate for candidate in uuids.split(',') if is_valid_uuid(candidate)]
    if len(proj_ids) > 10:
        abort(400, "Too many projects")

    projects = projects_query(ProjectPermissions.Read, as_admin=False).filter(Project.id.in_(proj_ids)).all()
    # collect ids of all users referenced by project access lists
    member_ids = set()
    for proj in projects:
        member_ids.update(proj.access.owners)
        member_ids.update(proj.access.writers)
        member_ids.update(proj.access.readers)
    users_map = {u.id: u.username for u in User.query.filter(User.id.in_(member_ids)).all()}
    return ProjectListSchema(many=True, context={'users_map': users_map}).dump(projects), 200
+
+
def get_projects(tags=None, q=None, user=None, flag=None, limit=None):  # noqa: E501
    """List mergin projects.

    Returns limited list of projects, optionally filtered by tags, search query, username.

    :param tags: list of tags the project must contain (all of them)
    :param q: substring to search for in project name (case insensitive)
    :param user: username used with 'flag'; defaults to the logged-in user
    :param flag: 'created' (projects created by user in his own namespace) or
        'shared' (projects shared with user or with his organisations)
    :param limit: maximum number of results, capped at 100
    :rtype: List[Project]
    """
    projects = projects_query(ProjectPermissions.Read)

    if flag:
        user = User.query.filter_by(username=user).first_or_404() if user else current_user
        if not user.is_anonymous:
            # namespaces of organisations where user has any role; this is a lazy
            # query object, only executed if used by the 'shared' filter below
            orgs = Organisation.query.with_entities(Organisation.name).filter(
                or_(Organisation.admins.contains([user.id]), Organisation.readers.contains([user.id]),
                    Organisation.writers.contains([user.id]), Organisation.owners.contains([user.id])))
            if flag == 'created':
                projects = projects.filter(Project.creator_id == user.id).filter_by(namespace=user.username)
            if flag == 'shared':
                # shared = explicit reader access on someone else's project,
                # or project living in one of user's organisations
                projects = projects.filter(or_(and_(Project.creator_id != user.id,
                    Project.access.has(ProjectAccess.readers.contains([user.id]))), Project.namespace.in_(orgs)))
        else:
            # flag filters are meaningless for anonymous users
            abort(401)
    if tags:
        # project must contain all requested tags
        projects = projects.filter(Project.tags.contains(cast(tags, ARRAY(String))))

    if q:
        projects = projects.filter(Project.name.ilike('%{}%'.format(q)))

    proj_limit = limit if limit and limit < 100 else 100
    projects = projects.options(joinedload(Project.access)).order_by(Project.namespace, Project.name).limit(proj_limit).all()
    # create user map id:username passed to project schema to minimize queries to db
    user_ids = []
    for p in projects:
        user_ids.extend(p.access.owners+p.access.writers+p.access.readers)
    users_map = {u.id: u.username for u in User.query.filter(User.id.in_(set(user_ids))).all()}
    sleep(0)  # temporary yield to gevent hub until serialization is fully resolved (#317)
    data = ProjectListSchema(many=True, context={'users_map': users_map}).dump(projects)
    return data, 200
+
+
def get_paginated_projects(page, per_page, order_params=None, order_by=None, descending=False, name=None, namespace=None, user=None, flag=None, last_updated_in=None, only_namespace=None, as_admin=False, public=True, only_public=False):  # noqa: E501
    """List mergin projects.

    Returns dictionary with paginated list of projects, optionally filtered by tags,
    project name, username, namespace, updated date, and number of total filtered projects.

    :param page: page number
    :param per_page: results per page
    :param order_by: order by field name - deprecated
    :param descending: order of sort - deprecated
    :param order_params: fields to sort by, e.g. "name_asc,updated_desc"
    :param name: filter by project name or namespace (partial, case-insensitive match)
    :param namespace: filter by project namespace (partial, case-insensitive match)
    :param user: username for 'flag' filter; the logged-in user if not provided
    :param flag: 'created' or 'shared'
    :param last_updated_in: only projects updated within given number of days
    :param only_namespace: filter by exact namespace equality, in contrast with
        'namespace' which is a partial match
    :param as_admin: user access as admin
    :param public: include any public project; if False only projects where user
        has explicit read permission
    :param only_public: return only public projects
    :rtype: dict with keys 'projects' (List[Project]) and 'count' (int)
    """
    if only_public:
        projects = Project.query.filter(Project.access.has(public=only_public))
    else:
        projects = projects_query(ProjectPermissions.Read, as_admin=as_admin, public=public)

    if flag:
        user = User.query.filter_by(username=user).first_or_404() if user else current_user
        if not user.is_anonymous:
            if flag == 'created':
                projects = projects.filter(Project.creator_id == user.id).filter_by(namespace=user.username)
            if flag == 'shared':
                # namespaces of organisations where user has any role
                orgs = Organisation.query.with_entities(Organisation.name).filter(
                    or_(Organisation.admins.contains([user.id]), Organisation.readers.contains([user.id]),
                        Organisation.writers.contains([user.id]), Organisation.owners.contains([user.id])))
                projects = projects.filter(or_(and_(Project.creator_id != user.id,
                    Project.access.has(ProjectAccess.readers.contains([user.id]))), Project.namespace.in_(orgs)))
        else:
            abort(401)

    if name:
        projects = projects.filter(Project.name.ilike('%{}%'.format(name)) | Project.namespace.ilike('%{}%'.format(name)))

    if namespace:
        projects = projects.filter(Project.namespace.ilike('%{}%'.format(namespace)))

    if only_namespace:
        projects = projects.filter(Project.namespace == only_namespace)

    if last_updated_in:
        projects = projects.filter(Project.updated >= datetime.utcnow() - timedelta(days=last_updated_in))

    projects = projects.options(joinedload(Project.access))

    if order_params:
        order_by_params = []
        for p in order_params.split(","):
            string_param = p.strip()
            if "_asc" in string_param:
                string_param = string_param.replace("_asc", "")
                # reject unknown columns with 400 instead of raising KeyError (500)
                if string_param not in Project.__table__.c:
                    abort(400, "Invalid order field: {}".format(string_param))
                order_by_params.append(Project.__table__.c[string_param].asc())
            elif "_desc" in string_param:
                string_param = string_param.replace("_desc", "")
                if string_param not in Project.__table__.c:
                    abort(400, "Invalid order field: {}".format(string_param))
                order_by_params.append(Project.__table__.c[string_param].desc())
        projects = projects.order_by(*order_by_params)
    elif descending and order_by:
        projects = projects.order_by(desc(Project.__table__.c[order_by]))
    elif not descending and order_by:
        projects = projects.order_by(asc(Project.__table__.c[order_by]))

    # paginate exactly once - the original called .paginate() twice, issuing the
    # count and items queries twice against the database
    paginated = projects.paginate(page, per_page)
    result = paginated.items
    total = paginated.total

    # create user map id:username passed to project schema to minimize queries to db
    user_ids = []
    for p in result:
        user_ids.extend(p.access.owners + p.access.writers + p.access.readers)

    users_map = {u.id: u.username for u in User.query.filter(User.id.in_(set(user_ids))).all()}
    sleep(0)  # temporary yield to gevent hub until serialization is fully resolved (#317)
    data = ProjectListSchema(many=True, context={'users_map': users_map}).dump(result)
    data = {'projects': data,
            'count': total}
    return data, 200
+
@auth_required
def update_project(namespace, project_name, data):  # noqa: E501  # pylint: disable=W0613
    """Update an existing project.

    Updates project access lists and notifies affected users by email.

    :param namespace: Namespace of the project.
    :type namespace: str
    :param project_name: Name of project that need to be updated.
    :type project_name: str
    :param data: Data to be updated.
    :type data: dict | bytes

    :rtype: Project
    """
    project = require_project(namespace, project_name, ProjectPermissions.Update)
    access = data.get('access', {})
    id_diffs = []

    # transform usernames from client to ids
    if "ownersnames" in access:
        owners = User.query.with_entities(User.id).filter(User.username.in_(access['ownersnames'])).all()
        access["owners"] = [w.id for w in owners]
    if "readersnames" in access:
        readers = User.query.with_entities(User.id).filter(User.username.in_(access['readersnames'])).all()
        access["readers"] = [w.id for w in readers]
    if "writersnames" in access:
        writers = User.query.with_entities(User.id).filter(User.username.in_(access['writersnames'])).all()
        access["writers"] = [w.id for w in writers]

    # prevent to remove ownership of project creator
    if 'owners' in access:
        if project.creator_id not in access['owners']:
            abort(400, str('Ownership of project creator cannot be removed.'))

    # apply access changes, remembering which user ids were added or removed
    # (symmetric difference of old vs new list)
    for key, value in access.items():
        if not hasattr(project.access, key):
            continue
        if isinstance(value, list):
            id_diffs.append(set(value) ^ set(getattr(project.access, key)))
        setattr(project.access, key, value)

    db.session.add(project)
    db.session.commit()

    # notify every user whose access changed
    users_ids = set().union(*id_diffs)
    user_profiles = UserProfile.query.filter(UserProfile.user_id.in_(users_ids)).all()
    project_path = '/'.join([namespace, project.name])
    web_link = f"{request.url_root.strip('/')}/projects/{project_path}"
    for user_profile in user_profiles:
        # skip users who opted out of notifications or have unverified email;
        # checked first so we do not render email templates that would never be sent
        # (originally the check ran after render_template)
        if not (user_profile.receive_notifications and user_profile.user.verified_email):
            continue
        privileges = []
        if user_profile.user.id in project.access.owners:
            privileges += ['edit', 'remove']
        if user_profile.user.id in project.access.writers:
            privileges.append('upload')
        if user_profile.user.id in project.access.readers:
            privileges.append('download')
        subject = "Project access modified"
        if len(privileges):
            html = render_template('email/modified_project_access.html', subject=subject, project=project,
                                   user=user_profile.user, privileges=privileges, link=web_link)
        else:
            # no privileges left - access was removed entirely
            html = render_template('email/removed_project_access.html', subject=subject, project=project,
                                   user=user_profile.user)
        email_data = {
            'subject': f'Access to mergin project {project_path} has been modified',
            'html': html,
            'recipients': [user_profile.user.email],
            'sender': current_app.config['MAIL_DEFAULT_SENDER']
        }
        send_email_async.delay(**email_data)

    return ProjectSchema().dump(project), 200
+
+
@auth_required
def project_push(namespace, project_name, data):
    """
    Synchronize project data.

    Apply changes in project if no uploads required. Creates upload transaction for added/modified files. # noqa: E501

    :param namespace: Namespace for project to look into.
    :type namespace: str
    :param project_name: Project name.
    :type project_name: str
    :param data: Description of project changes.
    :type data: dict | bytes

    :rtype: None
    """
    version = data.get('version')
    changes = data['changes']
    project = require_project(namespace, project_name, ProjectPermissions.Upload)
    # optimistic locking: the client must push on top of the current latest version
    pv = project.versions[0] if project.versions else None
    if pv and pv.name != version:
        abort(400, 'Version mismatch')
    if not pv and version != 'v0':
        abort(400, 'First push should be with v0')

    if all(len(changes[key]) == 0 for key in changes.keys()):
        abort(400, 'No changes')

    # check if same file is not already uploaded
    for item in changes["added"]:
        if not all(ele['path'] != item['path'] for ele in project.files):
            abort(400, 'File {} has been already uploaded'.format(item["path"]))

    # changes' files must be unique
    changes_files = []
    sanitized_files = []
    blacklisted_files = []
    for change in changes.values():
        for f in change:
            if is_file_name_blacklisted(f['path'], current_app.config['BLACKLIST']):
                blacklisted_files.append(f)
            # all file need to be unique after sanitized
            f['sanitized_path'] = mergin_secure_filename(f['path'])
            if f['sanitized_path'] in sanitized_files:
                # collision after sanitization - disambiguate stored name with a uuid
                filename, file_extension = os.path.splitext(f['sanitized_path'])
                f['sanitized_path'] = filename + f'.{str(uuid.uuid4())}' + file_extension
            sanitized_files.append(f['sanitized_path'])
            if 'diff' in f:
                # geodiff diff file gets its own sanitized name too
                f['diff']['sanitized_path'] = mergin_secure_filename(f['diff']['path'])
                if f['diff']['sanitized_path'] in sanitized_files:
                    filename, file_extension = os.path.splitext(f['diff']['sanitized_path'])
                    f['diff']['sanitized_path'] = filename + f'.{str(uuid.uuid4())}' + file_extension
            changes_files.append(f['path'])
    if len(set(changes_files)) != len(changes_files):
        abort(400, 'Not unique changes')

    # remove blacklisted files from changes
    for key, change in changes.items():
        files_to_upload = [f for f in change if f not in blacklisted_files]
        changes[key] = files_to_upload

    # Convert datetimes to UTC
    for key in changes.keys():
        for f in changes[key]:
            f['mtime'] = datetime.utcnow()

    num_version = int_version(version)

    # Check user data limit
    updates = [f['path'] for f in changes['updated']]
    updated_files = list(filter(lambda i: i['path'] in updates, project.files))
    # net disk usage change: added + updated sizes, minus the files they replace
    # and minus removed files
    additional_disk_usage = sum(file['size'] for file in changes['added'] + changes['updated']) - \
        sum(file['size'] for file in updated_files) - sum(file['size'] for file in changes["removed"])
    ns = Namespace.query.filter_by(name=project.namespace).first()
    if ns.disk_usage() + additional_disk_usage > ns.storage:
        abort(400, 'You have reached a data limit')

    upload = Upload(project, num_version, changes, current_user.id)
    db.session.add(upload)
    try:
        # Creating upload transaction with different project's version is possible.
        db.session.commit()
    except IntegrityError:
        db.session.rollback()
        # check and clean dangling uploads or abort
        for current_upload in project.uploads.all():
            upload_dir = os.path.join(project.storage.project_dir, 'tmp', current_upload.id)
            upload_lockfile = os.path.join(upload_dir, 'lockfile')
            if os.path.exists(upload_lockfile):
                # a fresh lockfile means another push is still in progress
                if time() - os.path.getmtime(upload_lockfile) < current_app.config['LOCKFILE_EXPIRATION']:
                    abort(400, 'Another process is running. Please try later.')
            db.session.delete(current_upload)
            db.session.commit()

        # Try again after cleanup
        db.session.add(upload)
        try:
            db.session.commit()
            # NOTE(review): upload_dir is the directory of the last dangling upload
            # removed in the loop above - presumably intended cleanup; verify
            move_to_tmp(upload_dir)
        except IntegrityError:
            abort(422, 'Failed to create upload session. Please try later.')

    # Create transaction folder and lockfile
    folder = os.path.join(project.storage.project_dir, "tmp", upload.id)
    os.makedirs(folder)
    open(os.path.join(folder, 'lockfile'), 'w').close()

    # Update immediately without uploading of new/modified files, and remove transaction/lockfile
    if not(changes['added'] or changes['updated']):
        next_version = "v{}".format(num_version + 1)
        project.storage.apply_changes(changes, next_version, upload.id)
        flag_modified(project, 'files')
        project.disk_usage = sum(file['size'] for file in project.files)
        user_agent = get_user_agent(request)
        pv = ProjectVersion(project, next_version, current_user.username, changes, project.files, get_ip(request), user_agent)
        project.latest_version = next_version
        db.session.add(pv)
        db.session.add(project)
        db.session.delete(upload)
        db.session.commit()
        move_to_tmp(folder)
        return jsonify(ProjectSchema().dump(project)), 200

    # files need to be uploaded - client continues with chunk_upload/push_finish
    return {'transaction': upload.id}
+
+
@auth_required
def chunk_upload(transaction_id, chunk_id):
    """Upload file chunk as defined in upload transaction.

    Streams the request body to the transaction's chunk directory while keeping
    the transaction lockfile fresh, then returns the chunk checksum and size.

    :param transaction_id: Transaction id.
    :type transaction_id: str
    :param chunk_id: Chunk id.
    :type chunk_id: str

    :rtype: None
    """
    upload, upload_dir = get_upload(transaction_id)
    # the chunk id must be declared by some file of the pending upload
    for f in upload.changes["added"] + upload.changes["updated"]:
        if "chunks" in f and chunk_id in f["chunks"]:
            dest = os.path.join(upload_dir, "chunks", chunk_id)
            lockfile = os.path.join(upload_dir, "lockfile")
            # Toucher keeps the lockfile mtime updated so the transaction is not
            # considered stale while the chunk is streaming in
            with Toucher(lockfile, 30):
                try:
                    # we could have used request.data here but it could eventually cause OOM issue
                    save_to_file(request.stream, dest, current_app.config['MAX_CHUNK_SIZE'])
                except IOError:
                    # chunk exceeded MAX_CHUNK_SIZE - discard the partial file
                    move_to_tmp(dest, transaction_id)
                    abort(400, "Too big chunk")
                if os.path.exists(dest):
                    checksum = generate_checksum(dest)
                    size = os.path.getsize(dest)
                    return jsonify({
                        "checksum": checksum,
                        "size": size
                    }), 200
                else:
                    abort(400, 'Upload was probably canceled')
    # chunk id not part of this transaction
    abort(404)
+
+
@auth_required
def push_finish(transaction_id):
    """Finalize project data upload.

    Steps involved in finalization:
    - merge chunks together (if there are some)
    - do integrity check comparing uploaded file sizes with what was expected
    - move uploaded files to new version dir and applying sync changes (e.g. geodiff apply_changeset)
    - bump up version in database
    - remove artifacts (chunks, lockfile) by moving them to tmp directory

    :param transaction_id: Transaction id.
    :type transaction_id: str

    :rtype: None
    """
    upload, upload_dir = get_upload(transaction_id)
    changes = upload.changes
    upload_files = changes["added"] + changes["updated"]
    project = upload.project
    project_path = os.path.join(project.namespace, project.name)
    corrupted_files = []

    for f in upload_files:
        # diff files are stored under their own path and carry their own expected size
        if "diff" in f:
            dest_file = os.path.join(
                upload_dir, "files", get_path_from_files(
                    upload_files, f["diff"]["path"], is_diff=True))
            expected_size = f["diff"]["size"]
        else:
            dest_file = os.path.join(
                upload_dir, "files", get_path_from_files(upload_files, f["path"]))
            expected_size = f["size"]
        if "chunks" in f:
            # Concatenate chunks into single file
            # TODO we need to move this elsewhere since it can fail for large files (and slow FS)
            os.makedirs(os.path.dirname(dest_file), exist_ok=True)
            with open(dest_file, "wb") as dest:
                try:
                    for chunk_id in f["chunks"]:
                        sleep(0)  # to unblock greenlet
                        chunk_file = os.path.join(upload_dir, "chunks", chunk_id)
                        with open(chunk_file, "rb") as src:
                            # copy in 8 KiB pieces to keep memory bounded
                            data = src.read(8192)
                            while data:
                                dest.write(data)
                                data = src.read(8192)
                except IOError:
                    logging.exception("Failed to process chunk: %s in project %s" % (chunk_id, project_path))
                    corrupted_files.append(f["path"])
                    # skip the size check below; the file is already marked corrupted
                    continue

        if expected_size != os.path.getsize(dest_file):
            logging.error("Data integrity check has failed on file %s in project %s" % (f["path"], project_path), exc_info=True)
            corrupted_files.append(f["path"])

    if corrupted_files:
        move_to_tmp(upload_dir)
        abort(422, {"corrupted_files": corrupted_files})

    next_version = "v{}".format(upload.version + 1)
    files_dir = os.path.join(upload_dir, "files")
    target_dir = os.path.join(project.storage.project_dir, next_version)
    if os.path.exists(target_dir):
        # leftover of a previously failed push: safe to overwrite unless the
        # version was actually committed to the database
        pv = project.versions[0] if project.versions else None
        if pv and pv.name == next_version:
            abort(409, {"There is already version with this name %s" % next_version})
        logging.info("Upload transaction: Target directory already exists. Overwriting %s" % target_dir)
        move_to_tmp(target_dir)

    try:
        # let's move uploaded files where they are expected to be
        os.renames(files_dir, target_dir)
        project.storage.apply_changes(changes, next_version, transaction_id)
        flag_modified(project, "files")
        project.disk_usage = sum(file['size'] for file in project.files)

        user_agent = get_user_agent(request)
        pv = ProjectVersion(project, next_version, current_user.username, changes, project.files, get_ip(request), user_agent)
        project.latest_version = next_version
        db.session.add(pv)
        db.session.add(project)
        db.session.delete(upload)
        db.session.commit()
        # remove artifacts
        move_to_tmp(upload_dir, transaction_id)
    except (psycopg2.Error, FileNotFoundError, DataSyncError) as err:
        move_to_tmp(upload_dir)
        abort(422, "Failed to create new version: {}".format(str(err)))

    project.storage.optimize_storage()
    return jsonify(ProjectSchema().dump(project)), 200
+
+
@auth_required
def push_cancel(transaction_id):
    """Cancel upload transaction.

    Deletes the pending upload record and moves its working directory to tmp.

    :param transaction_id: Transaction id.
    :type transaction_id: str

    :rtype: None
    """
    pending_upload, pending_dir = get_upload(transaction_id)
    db.session.delete(pending_upload)
    db.session.commit()
    # discard partially uploaded data
    move_to_tmp(pending_dir)
    return jsonify({"success": True}), 200
+
+
@auth_required
def clone_project(namespace, project_name, destination=None):  # noqa: E501
    """Clone project.

    Clone project to another namespace. Only recent version is copied over and history is lost.
    Destination namespace and project name are optionally set in query parameters
    otherwise request user is used with the same project name as cloned project.

    :param namespace: Namespace for project to look into.
    :type namespace: str
    :param project_name: Project name.
    :type project_name: str
    :param destination: Destination (namespace and project name) where project should be cloned.
    :type destination: dict | bytes

    :rtype: None
    """
    cloned_project = require_project(namespace, project_name, ProjectPermissions.Read)
    # 'destination' defaults to None; without this guard a request with no body
    # crashed on .get() instead of falling back to the defaults below
    if destination is None:
        destination = {}
    dest_ns = destination.get('namespace', current_user.username).strip()
    dest_project = destination.get('project', cloned_project.name).strip()

    check_access_to_namespace(dest_ns, current_user)

    _project = Project.query.filter_by(name=dest_project, namespace=dest_ns).first()
    if _project:
        abort(409, "Project {}/{} already exists!".format(dest_ns, dest_project))

    p = Project(
        name=dest_project,
        storage_params={"type": "local", "location": generate_location()},
        creator=current_user,
        namespace=dest_ns
    )
    p.updated = datetime.utcnow()
    # cloned projects always start private
    pa = ProjectAccess(p, public=False)

    try:
        p.storage.initialize(template_project=cloned_project)
    except InitializationError as e:
        abort(400, f"Failed to clone project: {str(e)}")

    # cloning an empty project yields v0; otherwise all files form the initial v1
    version = "v1" if p.files else "v0"
    changes = {"added": p.files, "renamed": [], "updated": [], "removed": []}
    user_agent = get_user_agent(request)
    p.latest_version = version
    version = ProjectVersion(p, version, current_user.username, changes, p.files, get_ip(request), user_agent)
    p.versions.append(version)
    db.session.add(p)
    db.session.add(pa)
    db.session.commit()
    wm.emit_signal(SIG_NEW_PROJECT, request.path, msg=f'New project *{dest_ns}/{dest_project}* has been cloned')
    return NoContent, 200
+
+
def get_resource_history(project_name, namespace, path):  # noqa: E501
    """History of project resource (file)

    Look through project versions to collect the change history of a single file.

    :param project_name: Project name.
    :type project_name: str
    :param namespace: Namespace project belong to.
    :type namespace: str
    :param path: Path to file in project.
    :type path: str

    :rtype: FileInfo
    """
    project = require_project(namespace, project_name, ProjectPermissions.Read)
    matched = [f for f in project.files if f['path'] == path]
    if not matched:
        abort(404, path)
    file = matched[0]

    # newest version of the project; 404 if the project has no versions at all
    latest = ProjectVersion.query.filter_by(project_id=project.id).order_by(
        ProjectVersion.created.desc()).first_or_404()
    file['history'] = project.file_history(file['path'], 'v1', latest.name)
    return FileInfoSchema(context={'project_dir': project.storage.project_dir}).dump(file), 200
+
+
def get_resource_changeset(project_name, namespace, version_id, path):  # noqa: E501
    """ Changeset of the resource (file)

    Calculate geodiff changeset for particular file and particular project version # noqa: E501

    :param project_name: Project name.
    :type project_name: str
    :param namespace: Namespace project belong to.
    :type namespace: str
    :param version_id: Version id of the file.
    :type version_id: str
    :param path: Path to file in project.
    :type path: str

    :rtype: [GeodiffChangeset]
    """
    project = require_project(namespace, project_name, ProjectPermissions.Read)
    # defensive check - require_project itself aborts when the project is missing
    if not project:
        abort(404, f"Project {namespace}/{project_name} not found")

    version = ProjectVersion.query.filter_by(project_id=project.id, name=version_id).first()
    if not version:
        abort(404, f"Version {version_id} in project {namespace}/{project_name} not found")

    # file locations are prefixed with the version name they were stored under
    file = next((f for f in version.files if f['location'] == os.path.join(version_id, path)), None)
    if not file:
        abort(404, f"File {path} not found")

    if 'diff' not in file:
        abort(404, "Diff not found")

    changeset = os.path.join(version.project.storage.project_dir, file['diff']['location'])
    # listing changes is expensive - the JSON result is cached on disk next to the file
    json_file = os.path.join(version.project.storage.project_dir, file['location'] + '-diff-changeset')
    if not os.path.exists(json_file):
        try:
            version.project.storage.geodiff.list_changes(changeset, json_file)
        except GeoDiffLibError as e:
            abort(422, f"Change set could not be calculated: {str(e)}")

    with open(json_file, 'r') as jf:
        content = json.load(jf)
        if 'geodiff' not in content:
            abort(422, "Expected format does not match response from Geodiff")

        return content['geodiff'], 200
diff --git a/server/src/controllers/project_transfer_controller.py b/server/src/controllers/project_transfer_controller.py
new file mode 100644
index 00000000..fde405f0
--- /dev/null
+++ b/server/src/controllers/project_transfer_controller.py
@@ -0,0 +1,208 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from datetime import datetime, timedelta
+from connexion import NoContent, request
+from flask import abort, render_template, current_app
+from flask_login import current_user
+from sqlalchemy.orm.attributes import flag_modified
+from sqlalchemy.orm.session import make_transient
+from sqlalchemy import or_
+
+from .. import db
+from ..models.db_models import Project, ProjectTransfer, Namespace, Account, AccessRequest
+from ..models.schemas import ProjectTransferSchema
+
+from ..auth import auth_required
+from ..auth.models import User
+from ..permissions import require_project, ProjectPermissions, check_namespace_permissions
+from .. import wm, SIG_PROJECT_TRANSFERED
+from ..organisation.models import Organisation
+
+
@auth_required
def get_project_transfers(namespace):  # noqa: E501
    """List project transfers.

    Returns all transfers in which the namespace is either source or target.
    Caller must be an admin of the namespace or a server admin.

    :rtype: List[ProjectTransfer]
    """
    is_ns_admin = check_namespace_permissions(namespace, current_user, 'admin')
    if not (is_ns_admin or current_user.is_admin):
        abort(403)
    involves_namespace = or_(
        ProjectTransfer.to_ns_name == namespace,
        ProjectTransfer.from_ns_name == namespace)
    transfers = ProjectTransfer.query.filter(involves_namespace).all()
    return ProjectTransferSchema(many=True).dump(transfers), 200
+
+
@auth_required
def request_transfer(namespace, project_name, data=None):
    """ Request transfer project.

    Request transfer project to another namespace. The target namespace's owner
    (or all organisation admins) are notified by email.

    :param namespace: Namespace for project to look into.
    :type namespace: str
    :param project_name: Project name.
    :type project_name: str
    :param data: Request payload - destination namespace.
    :type data: dict | bytes
    :rtype: None
    """
    # imported lazily to avoid a circular import between celery and controllers
    from ..celery import send_email_async

    project = require_project(namespace, project_name, ProjectPermissions.All)
    dest_ns = data.get('namespace', None)
    if not dest_ns:
        abort(400, "Missing destination namespace")
    to_ns = Namespace.query.filter_by(name=dest_ns).first_or_404(f"{dest_ns} namespace not found")
    # only one pending transfer per project is allowed
    pt = ProjectTransfer.query.filter_by(project_id=project.id, from_ns_name=project.namespace).first()
    if pt:
        abort(409, f"The project {project.namespace}/{project.name} is already in a transfer process")
    try:
        transfer = ProjectTransfer(project, to_ns, current_user.id)
        db.session.add(transfer)
        db.session.commit()

        # recipients: the target user himself, or all admins of the target organisation
        if to_ns.account.type == "user":
            user = User.query.get(to_ns.account.owner_id)
            users = [user]
            link = f"{request.url_root.rstrip('/')}/users/{user.username}/projects"
        else:
            org = Organisation.query.get(to_ns.account.owner_id)
            users = User.query.filter(User.id.in_(org.admins)).all()
            link = f"{request.url_root.rstrip('/')}/organisations/{org.name}/projects"
        for user in users:
            body = render_template(
                'email/project_transfer_request.html',
                subject="Project transfer requested",
                username=user.username,
                project_name=project_name,
                # NOTE(review): 'namescape_to' looks like a typo for 'namespace_to' -
                # presumably mirrored in the template; verify before renaming
                namescape_to=dest_ns,
                namespace_from=namespace,
                link=link,
                expire=datetime.utcnow() + timedelta(seconds=current_app.config['TRANSFER_EXPIRATION'])
            )
            email_data = {
                'subject': 'Mergin project transfer request',
                'html': body,
                'recipients': [user.email],
                'sender': current_app.config['MAIL_DEFAULT_SENDER']
            }
            send_email_async.delay(**email_data)

        return NoContent, 201
    except ProjectTransfer.TransferError as e:
        abort(400, str(e))
+
+
@auth_required
def delete_transfer_project(id):
    """ Delete transfer project of Project Transfer data.

    Cancels a pending project transfer. An admin of either the source or the
    target namespace may cancel it.

    :param id: project transfer id.
    :type id: str

    :rtype: None
    """
    transfer = ProjectTransfer.query.filter_by(id=id).first_or_404("Project transfer is not found")
    may_cancel = (check_namespace_permissions(transfer.from_ns_name, current_user, 'admin')
                  or check_namespace_permissions(transfer.to_ns_name, current_user, 'admin'))
    if not may_cancel:
        abort(403, "You don't have access for transferring this project")
    db.session.delete(transfer)
    db.session.commit()
    return NoContent, 200
+
+
@auth_required
def execute_transfer_project(id, data=None):
    """ Execute transfer project of Project Transfer data.

    Only project namespace/name is modified. Files are saved on disk independently on project owner, hence not touched.

    :param id: project transfer id.
    :type id: str
    :param data: payload of post request
    :type data: dict
    :rtype: None
    """
    project_transfer = ProjectTransfer.query.filter_by(id=id).first_or_404("Project transfer not found")
    # only an admin of the *target* namespace may accept the transfer
    if not check_namespace_permissions(project_transfer.to_ns_name, current_user, 'admin'):
        abort(403, "You don't have access for transferring this project")

    # we check if user use new project name
    old_project_name = project_transfer.project.name
    old_project_id = project_transfer.project.id
    old_namespace = project_transfer.from_ns_name
    new_project_name = data.get('name', project_transfer.project.name)
    new_namespace = project_transfer.to_ns_name
    transfer_permission = data.get('transfer_permissions', True)

    # we validate if the project already exist in new namespace
    if Project.query.filter_by(name=new_project_name, namespace=project_transfer.to_ns.name).first():
        abort(409, f"Project {project_transfer.to_ns.name}/{new_project_name} already exists")

    # check if there is ongoing upload
    if project_transfer.project.uploads.first():
        abort(400, f"There is ongoing upload for {project_transfer.from_ns_name}/{project_transfer.project.name}. "
                   f"Please try later")

    # check if expired
    if project_transfer.is_expired():
        abort(400, "The request is already expired")

    # check if new owner has enough disk space to host new project
    new_ns = Namespace.query.filter_by(name=project_transfer.to_ns_name).first()
    if new_ns.disk_usage() + project_transfer.project.disk_usage > new_ns.storage:
        abort(400, "Disk quota reached")

    # resolve the new creator: the target user himself, or the first owner of the
    # target organisation
    new_owner = new_ns.account.owner()
    if isinstance(new_owner, User):
        new_owner_id = new_owner.id
    elif isinstance(new_owner, Organisation):
        owner_user = User.query.filter_by(id=new_owner.owners[0]).first()
        if not owner_user:
            abort(400, "Target organisation does not have an owner to accept transfer")
        new_owner_id = owner_user.id
    else:
        # account owner must be either a User or an Organisation
        assert False

    # all checks passed - let's transfer it
    # delete ongoing project access requests
    AccessRequest.query.filter(AccessRequest.namespace == old_namespace, AccessRequest.project_id == old_project_id).delete()
    db.session.commit()

    # change namespace/name
    project = project_transfer.project
    project.name = new_project_name
    project.namespace = project_transfer.to_ns.name

    # we change creator id to the new owner, either new user or first owner of organisation
    project.creator_id = new_owner_id

    # clean permissions if new owner decided for it or just append new owner
    if not transfer_permission:
        project.access.owners = [new_owner_id]
        project.access.readers = [new_owner_id]
        project.access.writers = [new_owner_id]
    else:
        if new_owner_id not in project.access.owners:
            project.access.owners.append(new_owner_id)
        if new_owner_id not in project.access.readers:
            project.access.readers.append(new_owner_id)
        if new_owner_id not in project.access.writers:
            project.access.writers.append(new_owner_id)

    db.session.add(project)
    # in-place list mutation is invisible to SQLAlchemy - mark the columns dirty explicitly
    flag_modified(project.access, "owners")
    flag_modified(project.access, "writers")
    flag_modified(project.access, "readers")
    db.session.commit()

    wm.emit_signal(
        SIG_PROJECT_TRANSFERED,
        request.path,
        msg=f'Project *{old_namespace}/{old_project_name}* has been transferred to *{new_namespace}/{new_project_name}*')
    return NoContent, 200
diff --git a/server/src/controllers/user_controller.py b/server/src/controllers/user_controller.py
new file mode 100644
index 00000000..fec7e729
--- /dev/null
+++ b/server/src/controllers/user_controller.py
@@ -0,0 +1,103 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+import pytz
+from datetime import datetime, timedelta
+from flask_login import current_user
+from flask import current_app, abort, request
+
+from .forms import ApiLoginForm
+from ..models.db_models import Namespace
+from ..auth import auth_required, authenticate
+from ..auth.bearer import encode_token
+from ..auth.models import User, LoginHistory
+from ..auth.schemas import UserProfileSchema
+
+
def user_profile(user, return_all=True):
    """ Serialize user profile into a dict.

    With return_all=False only public data (profile, username, id) is
    included; otherwise mergin service details (email, disk usage,
    storage limit, ...) are appended as well.

    :param user: user whose profile is serialized
    :type user: User
    :param return_all: include private/service fields, defaults to True
    :type return_all: bool
    :return: user profile with optional mergin service info
    :rtype: dict
    """
    data = UserProfileSchema().dump(user.profile)
    data['username'] = user.username
    data['id'] = user.id
    if not return_all:
        return data

    namespace = Namespace.query.filter_by(name=user.username).first()
    data.update({
        "email": user.email,
        "disk_usage": namespace.disk_usage(),
        "storage_limit": namespace.storage,
        "receive_notifications": user.profile.receive_notifications,
        "verified_email": user.verified_email,
        "tier": "free",
        "registration_date": user.profile.registration_date
    })
    return data
+
+
@auth_required
def get_user(username=None): # noqa: E501
    """ Return full profile of the currently logged-in user.

    :param username: unused, kept for API route signature compatibility
    :return: serialized user profile incl. mergin service info
    :rtype: dict
    """
    return user_profile(current_user, return_all=True)
+
+
def _extract_first_error(errors):
    """
    Pick a single user-friendly validation message.

    The client (InputApp) displays a plain string in its notification
    window, so instead of returning the whole errors mapping we extract
    only the first error and rephrase it where needed.

    :param errors: wtforms errors mapping field name -> list of messages
    :return: single error message
    :rtype: str
    """
    for field, messages in errors.items():
        message = str(messages[0])
        if field.lower() in message.lower():
            # message already names the field, e.g.
            # "Passwords must contain special character." - use as is
            return message
        if message.startswith("Field"):
            # generic validator text, e.g. "Field must be longer than
            # 4 characters" - substitute the actual field name
            return message.replace("Field", field.capitalize())
        # otherwise append the field name for context
        return message + "(" + field + ")"

    return "Unknown error in input fields"
+
+
def login(): # noqa: E501
    """ Authenticate user and return his profile with a bearer session token.

    Aborts with 400 on invalid form input and 401 on bad credentials or
    an inactive account.
    """
    form = ApiLoginForm()
    if not form.validate():
        abort(400, _extract_first_error(form.errors))

    user = authenticate(form.login.data, form.password.data)
    if not user:
        abort(401, 'Invalid username or password')
    if not user.active:
        abort(401, 'Account is not activated')

    expire = datetime.now(pytz.utc) + timedelta(seconds=current_app.config['BEARER_TOKEN_EXPIRATION'])
    token = encode_token(
        current_app.config['SECRET_KEY'],
        {
            "user_id": user.id,
            "username": user.username,
            "email": user.email,
            "expire": str(expire)
        }
    )

    data = user_profile(user)
    data["session"] = {
        "token": token,
        "expire": expire
    }
    LoginHistory.add_record(user.username, request)
    return data
diff --git a/server/src/db_events.py b/server/src/db_events.py
new file mode 100644
index 00000000..6a30ba96
--- /dev/null
+++ b/server/src/db_events.py
@@ -0,0 +1,191 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+import os
+from flask import render_template, current_app, abort
+from sqlalchemy import event
+
+from . import db
+from .auth.models import User, UserProfile
+from .models.db_models import Namespace, Project, ProjectAccess, Account, RemovedProject
+from .organisation import Organisation, OrganisationInvitation
+from .celery import send_email_async
+from .storages.disk import move_to_tmp
+
+
def before_namespace_delete(mapper, connection, namespace): # pylint: disable=W0612
    """ Remove namespace projects including project files on disk. Also remove project backups for restore.

    :param mapper: sqlalchemy mapper (unused)
    :param connection: low-level connection of the flushing transaction
    :param namespace: Namespace being deleted
    """
    projects = Project.query.filter_by(namespace=namespace.name).all()
    for project in projects:
        if os.path.exists(project.storage.project_dir):
            project.storage.delete()

    removed_projects = RemovedProject.query.filter_by(namespace=namespace.name).all()
    rp_table = RemovedProject.__table__
    for rp in removed_projects:
        rp_dir = os.path.abspath(os.path.join(current_app.config['LOCAL_PROJECTS'], rp.properties["storage_params"]["location"]))
        if os.path.exists(rp_dir):
            move_to_tmp(rp_dir)
        # bug fix: DELETE must be issued against the table; the original called
        # .delete() on the python list of query results (AttributeError)
        connection.execute(rp_table.delete().where(rp_table.c.id == rp.id))
+
+
def add_user_namespace(mapper, connection, user): # pylint: disable=W0612
    """ Create account and namespace records for a freshly inserted user. """
    existing = Namespace.query.filter_by(name=user.username).first()
    if existing:
        abort(400, "Namespace already exists")
    accounts = Account.__table__
    namespaces = Namespace.__table__
    connection.execute(accounts.insert().values(owner_id=user.id, type="user"))
    account = Account.query.filter_by(type='user', owner_id=user.id).first()
    connection.execute(namespaces.insert().values(name=user.username, account_id=account.id))
    # emit signal that account has been created
    account.created(connection)
+
+
def remove_user_references(mapper, connection, user): # pylint: disable=W0612
    """ Clean up references to a user that is about to be deleted.

    Removes the user from project access lists, drops his pending
    organisation invitations and removes him from organisations
    (deleting the whole organisation when he was its only owner).

    :param mapper: sqlalchemy mapper (unused)
    :param connection: low-level connection of the flushing transaction
    :param user: User being deleted
    """
    # bug fix: the original condition tested 'readers' twice and thus missed
    # projects where the user was only a writer
    q = Project.access.has(ProjectAccess.owners.contains([user.id])) \
        | Project.access.has(ProjectAccess.readers.contains([user.id])) \
        | Project.access.has(ProjectAccess.writers.contains([user.id]))
    projects = Project.query.filter(q).all()

    def filter_user(ids):
        # materialize to a list so the value can be bound to an ARRAY column
        # (the original returned a lazy, single-use filter object)
        return [i for i in ids if i != user.id]

    if projects:
        pa_table = ProjectAccess.__table__
        for p in projects:
            pa = p.access
            connection.execute(
                pa_table.update().where(pa_table.c.project_id == p.id),
                owners=filter_user(pa.owners),
                writers=filter_user(pa.writers),
                readers=filter_user(pa.readers)
            )

    # remove pending invitations for user
    inv_table = OrganisationInvitation.__table__
    connection.execute(inv_table.delete().where(inv_table.c.username == user.username))

    # remove from organisations
    q = Organisation.owners.contains([user.id]) \
        | Organisation.readers.contains([user.id]) \
        | Organisation.admins.contains([user.id]) \
        | Organisation.writers.contains([user.id])
    organisations = Organisation.query.filter(q).all()

    if organisations:
        o_table = Organisation.__table__
        for o in organisations:
            # in case of user is the only owner, remove also whole organisation
            if o.owners == [user.id]:
                connection.execute(inv_table.delete().where(inv_table.c.org_name == o.name))
                connection.execute(o_table.delete().where(o_table.c.name == o.name))
                # bug fix: nothing left to update for a deleted organisation
                continue

            connection.execute(
                o_table.update().where(o_table.c.name == o.name),
                owners=filter_user(o.owners),
                writers=filter_user(o.writers),
                readers=filter_user(o.readers),
                admins=filter_user(o.admins)
            )
+
+
def project_post_delete_actions(mapper, connection, project): # pylint: disable=W0612
    """
    After project is deleted inform users by sending email.

    :param project: Project object
    """
    if not project.access:
        return

    recipients = set(project.access.owners + project.access.writers + project.access.readers)
    profiles = UserProfile.query.filter(UserProfile.user_id.in_(list(recipients))).all()
    project_path = "/".join([project.namespace, project.name])
    for profile in profiles:
        # respect notification settings and skip unverified addresses
        if not (profile.receive_notifications and profile.user.verified_email):
            continue

        html = render_template(
            'email/removed_project.html',
            subject="Project deleted",
            project=project,
            username=profile.user.username
        )
        send_email_async.delay(
            subject=f'Mergin project {project_path} has been deleted',
            html=html,
            recipients=[profile.user.email],
            sender=current_app.config['MAIL_DEFAULT_SENDER']
        )
+
+
def check(session):
    """ Abort any DB commit while the maintenance file is present. """
    maintenance_file = current_app.config['MAINTENANCE_FILE']
    if os.path.isfile(maintenance_file):
        abort(503, "Service unavailable due to maintenance, please try later")
+
+
def before_user_profile_updated(mapper, connection, target):
    """
    Notify user by email whenever attributes of his profile change.

    Emails are only sent when the user opted in for notifications and
    has a verified email address.
    """
    if not (target.receive_notifications and target.user.verified_email):
        return

    def _fmt(value):
        # booleans are rendered as Yes/No in the email
        if isinstance(value, bool):
            return 'Yes' if value else 'No'
        return value

    changes = {}
    state = db.inspect(target)
    for attr in state.attrs:
        hist = attr.load_history()
        if not hist.has_changes():
            continue
        # e.g. 'receive_notifications' -> 'Receive Notifications'
        label = attr.key.title().replace('_', ' ')
        changes[label] = {
            'before': _fmt(hist.deleted[0]),
            'after': _fmt(hist.added[0])
        }

    if not changes:
        return

    # inform user
    send_email_async.delay(
        subject='Profile has been changed',
        html=render_template('email/profile_changed.html', subject="Profile update", user=target.user, changes=changes),
        recipients=[target.user.email],
        sender=current_app.config['MAIL_DEFAULT_SENDER']
    )
+
+
def add_org_namespace(mapper, connection, organisation): # pylint: disable=W0612
    """ Create account and namespace records for a freshly inserted organisation. """
    if Namespace.query.filter_by(name=organisation.name).first():
        abort(400, "Namespace already exists")
    connection.execute(Account.__table__.insert().values(owner_id=organisation.id, type="organisation"))
    account = Account.query.filter_by(type='organisation', owner_id=organisation.id).first()
    connection.execute(Namespace.__table__.insert().values(name=organisation.name, account_id=account.id))
    # emit signal that account has been created
    account.created(connection)
+
+
def register_events():
    """ Attach sqlalchemy event listeners keeping DB state in sync with app logic.

    Counterpart of remove_events(); listener sets must stay identical.
    """
    event.listen(User, "after_insert", add_user_namespace)
    event.listen(User, "before_delete", remove_user_references)
    event.listen(Project, "after_delete", project_post_delete_actions)
    event.listen(db.session, 'before_commit', check)
    event.listen(UserProfile, 'after_update', before_user_profile_updated)
    event.listen(Namespace, "before_delete", before_namespace_delete)
    event.listen(Organisation, "after_insert", add_org_namespace)
+
+
def remove_events():
    """ Detach all sqlalchemy event listeners registered by register_events(). """
    event.remove(User, "after_insert", add_user_namespace)
    event.remove(User, "before_delete", remove_user_references)
    event.remove(Project, "after_delete", project_post_delete_actions)
    event.remove(db.session, 'before_commit', check)
    event.remove(UserProfile, 'after_update', before_user_profile_updated)
    event.remove(Namespace, "before_delete", before_namespace_delete)
    event.remove(Organisation, "after_insert", add_org_namespace)
diff --git a/server/src/encoder.py b/server/src/encoder.py
new file mode 100644
index 00000000..67d3c698
--- /dev/null
+++ b/server/src/encoder.py
@@ -0,0 +1,20 @@
+from connexion.apps.flask_app import FlaskJSONEncoder
+import six
+
+from .models.base_model_ import Model
+
+
class JSONEncoder(FlaskJSONEncoder):
    """ JSON encoder aware of swagger-generated Model objects. """
    # when False, attributes whose value is None are omitted from output
    include_nulls = False

    def default(self, o):
        if not isinstance(o, Model):
            return FlaskJSONEncoder.default(self, o)
        serialized = {}
        for attr in o.swagger_types:
            value = getattr(o, attr)
            if value is None and not self.include_nulls:
                continue
            # map python attribute name to its json key
            serialized[o.attribute_map[attr]] = value
        return serialized
diff --git a/server/src/forms.py b/server/src/forms.py
new file mode 100644
index 00000000..e479a8ee
--- /dev/null
+++ b/server/src/forms.py
@@ -0,0 +1,31 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from wtforms import StringField, Field, SelectField
+from wtforms.validators import DataRequired, Length, ValidationError
+from flask_wtf import FlaskForm
+from .util import is_name_allowed
+
+
def namespace_validation(form, field):
    """ wtforms validator: restrict namespace-like fields to allowed characters. """
    value = field.data
    if not value:
        return
    if not is_name_allowed(value) or '@' in value:
        raise ValidationError("Please use only alphanumeric or these -._~()'!*:,; characters in {}.".format(field.name))
+
+
class IntegerListField(Field):
    """ Raw list field - keeps the submitted value list as-is (list of ints). """
    def _value(self):
        # render stored data back to the client unchanged
        return self.data

    def process_formdata(self, valuelist):
        # store the raw submitted list without per-item coercion
        self.data = valuelist
+
+
class SendEmailForm(FlaskForm):
    """ Admin form for sending a custom email (subject + message) to selected users. """
    users = IntegerListField() # FieldList(IntegerField()) was not working
    subject = StringField(validators=[DataRequired(), Length(max=50)])
    message = StringField(validators=[DataRequired()])
+
+
class AccessPermissionForm(FlaskForm):
    """ Form to pick the permission level granted to a project access request. """
    permissions = SelectField("permissions", [DataRequired()], choices=[
        ('read', 'read'), ('write', 'write'), ('owner', 'owner')])
diff --git a/server/src/mergin_utils.py b/server/src/mergin_utils.py
new file mode 100644
index 00000000..5d6b1d08
--- /dev/null
+++ b/server/src/mergin_utils.py
@@ -0,0 +1,166 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+import os
+import hashlib
+import re
+import secrets
+from threading import Timer
+from uuid import UUID
+
+from gevent import sleep
+
+
def generate_checksum(file, chunk_size=4096):
    """
    Generate checksum for file from chunks.

    :param file: file to calculate checksum
    :param chunk_size: size of chunk
    :return: sha1 checksum
    """
    digest = hashlib.sha1()
    with open(file, 'rb') as stream:
        chunk = stream.read(chunk_size)
        while chunk:
            digest.update(chunk)
            sleep(0)  # yield to other greenlets during long reads
            chunk = stream.read(chunk_size)
    return digest.hexdigest()
+
+
class Toucher:
    """
    Helper class to periodically update modification time of file during
    execution of longer lasting task.

    Example of usage:
    -----------------
    with Toucher(file, interval):
        do_something_slow

    """
    def __init__(self, lockfile, interval):
        self.lockfile = lockfile
        self.interval = interval
        self.running = False
        self.timer = None

    def __enter__(self):
        self.acquire()

    def __exit__(self, type, value, tb): # pylint: disable=W0612,W0622
        self.release()

    def acquire(self):
        """ Start periodic touching of the lockfile. """
        self.running = True
        self.touch_lockfile()

    def release(self):
        """ Stop the periodic timer and mark the toucher inactive. """
        self.running = False
        timer = self.timer
        if timer is not None:
            timer.cancel()
            self.timer = None

    def touch_lockfile(self):
        """ Update mtime of the lockfile and re-schedule the next touch. """
        # do an NFS ACCESS procedure request to clear the attribute cache (for various pods to actually see the file)
        # https://docs.aws.amazon.com/efs/latest/ug/troubleshooting-efs-general.html#custom-nfs-settings-write-delays
        os.access(self.lockfile, os.W_OK)
        with open(self.lockfile, 'a'):
            os.utime(self.lockfile, None)
        if self.running:
            timer = Timer(self.interval, self.touch_lockfile)
            self.timer = timer
            timer.start()
+
+
def resolve_tags(files):
    """ Derive project tags from its list of files.

    A project with exactly one QGIS project file (.qgs/.qgz) is tagged
    as a valid QGIS project usable by the Input app.

    :param files: files metadata (dicts with 'path' key)
    :return: list of tags
    :rtype: list
    """
    qgis_extensions = ('.qgs', '.qgz')
    qgis_files = [f for f in files if os.path.splitext(f['path'])[1] in qgis_extensions]
    # TODO: add some rules for Input app validity and mapping validity
    if len(qgis_files) == 1:
        return ['valid_qgis', 'input_use']
    return []
+
+
def int_version(version):
    """ Convert v format of version to integer representation.

    Returns None for strings not matching the 'v<number>' format.

    >>> int_version('v3')
    3
    """
    if not re.match(r'v\d', version):
        return None
    return int(version.lstrip('v'))
+
+
def is_versioned_file(file):
    """ Check if file is compatible with geodiff lib and hence suitable for versioning. """
    return os.path.splitext(file)[1] in ('.gpkg', '.sqlite')
+
+
def is_file_name_blacklisted(path, blacklist):
    """ Check whether a path matches any blacklist pattern.

    Directory patterns (ending with '/') are matched against the path's
    directory part, remaining patterns against its base name.
    """
    dir_patterns = get_blacklisted_dirs(blacklist)
    file_patterns = get_blacklisted_files(blacklist)
    if dir_patterns:
        regexp = re.compile(r'({})'.format('|'.join(".*" + re.escape(d) + ".*" for d in dir_patterns)))
        if regexp.search(os.path.dirname(path)):
            return True
    if file_patterns:
        regexp = re.compile(r'({})'.format('|'.join(".*" + re.escape(f) + ".*" for f in file_patterns)))
        if regexp.search(os.path.basename(path)):
            return True
    return False
+
+
def get_blacklisted_dirs(blacklist):
    """ Return directory patterns (those with trailing '/', slashes removed). """
    dirs = []
    for pattern in blacklist:
        if pattern.endswith("/"):
            dirs.append(pattern.replace("/", ""))
    return dirs
+
+
def get_blacklisted_files(blacklist):
    """ Return file patterns (those without a trailing '/'). """
    files = []
    for pattern in blacklist:
        if not pattern.endswith("/"):
            files.append(pattern)
    return files
+
+
def get_user_agent(request):
    """ Return user agent from request headers

    In case of browser client a parsed version from werkzeug utils is returned else raw value of header.
    """
    agent = request.user_agent
    if not (agent.browser and agent.platform):
        return agent.string
    return f"{agent.browser.capitalize()}/{agent.version} ({agent.platform.capitalize()})"
+
+
def get_ip(request):
    """ Returns request's IP address based on X_FORWARDED_FOR header
    from proxy webserver (which should always be the case)
    """
    env = request.environ
    # AWS infra sends a comma-separated list of addresses; the first one is
    # the client's external IP, the rest are internal hops
    chain = env.get('HTTP_X_FORWARDED_FOR', env.get('REMOTE_ADDR', 'untrackable'))
    return chain.split(",")[0]
+
+
def generate_location():
    """ Return random location where project is saved on disk

    Example:
    >>> generate_location()
    '1c/624c6af4d6d2710bbfe1c128e8ca267b'
    """
    prefix = secrets.token_hex(1)  # 2 hex chars - spreads projects over 256 dirs
    return os.path.join(prefix, secrets.token_hex(16))
+
+
+def is_valid_uuid(uuid):
+ """ Check object can be parse as valid UUID """
+ try:
+ UUID(uuid)
+ return True
+ except (ValueError, AttributeError):
+ return False
diff --git a/server/src/models/__init__.py b/server/src/models/__init__.py
new file mode 100644
index 00000000..9bba2f8f
--- /dev/null
+++ b/server/src/models/__init__.py
@@ -0,0 +1,4 @@
+# coding: utf-8
+
+# flake8: noqa
+from __future__ import absolute_import
diff --git a/server/src/models/base_model_.py b/server/src/models/base_model_.py
new file mode 100644
index 00000000..a61cf285
--- /dev/null
+++ b/server/src/models/base_model_.py
@@ -0,0 +1,68 @@
+import pprint
+import typing
+import six
+
+from .. import util
+
+T = typing.TypeVar('T')
+
+
class Model(object):
    """ Base class for swagger-generated API models. """
    # swaggerTypes: The key is attribute name and the
    # value is attribute type.
    swagger_types = {}

    # attributeMap: The key is attribute name and the
    # value is json key in definition.
    attribute_map = {}

    @classmethod
    def from_dict(cls: typing.Type[T], dikt) -> T:
        """Returns the dict as a model"""
        return util.deserialize_model(dikt, cls)

    def to_dict(self):
        """Returns the model properties as a dict

        :rtype: dict
        """
        result = {}
        # file is python3-only (f-strings elsewhere) so plain dict iteration
        # replaces six.iteritems
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [x.to_dict() if hasattr(x, "to_dict") else x for x in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model

        :rtype: str
        """
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # bug fix: comparing with an object without __dict__ (e.g. int)
        # used to raise AttributeError; unrelated types now compare unequal
        if not isinstance(other, Model):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
diff --git a/server/src/models/db_models.py b/server/src/models/db_models.py
new file mode 100644
index 00000000..6d890548
--- /dev/null
+++ b/server/src/models/db_models.py
@@ -0,0 +1,465 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+import json
+import os
+import uuid
+from datetime import datetime, timedelta
+from blinker import signal
+from sqlalchemy.dialects.postgresql import ARRAY, BIGINT, ENUM, UUID
+from sqlalchemy.types import String
+from collections import OrderedDict
+from pygeodiff.geodifflib import GeoDiffLibError
+
+from .. import current_app, db
+from ..storages import DiskStorage
+from ..auth.models import User # pylint: disable=W0611
+from ..mergin_utils import int_version, is_versioned_file
+
+Storages = {
+ "local": DiskStorage
+}
+
+account_created = signal('account_created')
+account_inactivated = signal('account_inactivated')
+
+
class Project(db.Model):
    """ Mergin project - a named collection of files with version history.

    File metadata is cached in JSON columns; actual content is handled by
    a storage backend resolved from storage_params (see Storages map).
    """
    id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    name = db.Column(db.String, index=True)
    storage_params = db.Column(db.JSON)
    created = db.Column(db.DateTime, default=datetime.utcnow, index=True)
    creator_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=True, index=True)
    updated = db.Column(db.DateTime, onupdate=datetime.utcnow)
    # metadata for project files (see also FileInfoSchema)
    files = db.Column(db.JSON, default=[])
    tags = db.Column(ARRAY(String), server_default="{}")
    # total size in bytes incl. history - presumably kept in sync by storage code; verify
    disk_usage = db.Column(BIGINT, nullable=False, default=0)
    latest_version = db.Column(db.String, index=True)

    creator = db.relationship("User", uselist=False, backref=db.backref("projects", cascade="all,delete"))
    namespace = db.Column(db.String, db.ForeignKey("namespace.name", ondelete="CASCADE"), index=True)
    __table_args__ = (db.UniqueConstraint('name', 'namespace'),)

    def __init__(self, name, storage_params, creator, namespace, **kwargs): # pylint: disable=W0613
        self.name = name
        self.storage_params = storage_params
        self.creator = creator
        self.namespace = namespace
        # every project starts with an initial (empty) version v0
        self.latest_version = "v0"

    @property
    def storage(self):
        """ Lazily constructed storage backend instance, cached per object. """
        if not hasattr(self, '_storage'): # best approach, seriously
            StorageBackend = Storages[self.storage_params['type']]
            self._storage = StorageBackend(self) # pylint: disable=W0201
        return self._storage

    def file_history(self, file, since, to):
        """
        Look up in project versions for history of versioned file.
        Returns ordered (from latest) dict with versions where some change happened and corresponding metadata.

        :Example:

        >>> self.file_history('mergin/base.gpkg', 'v1', 'v2')
        {'v2': {'checksum': '08b0e8caddafe74bf5c11a45f65cedf974210fed', 'location': 'v2/base.gpkg', 'path': 'base.gpkg',
        'size': 2793, 'change': 'updated'}, 'v1': {checksum': '89469a6482267de394c7c7270cb7ffafe694ea76',
        'location': 'v1/base.gpkg', 'mtime': '2019-07-18T07:52:38.770113Z', 'path': 'base.gpkg', 'size': 98304,
        'change': 'added'}}

        :param file: file path
        :type file: str
        :param since: start version for history (e.g. v1)
        :type since: str
        :param to: end version for history (e.g. v2)
        :type to: str
        :returns: changes metadata for versions where some file change happened
        :rtype: dict
        """
        since = int_version(since)
        to = int_version(to)
        # only geodiff-compatible files with valid numeric version bounds have history
        if not (is_versioned_file(file) and since is not None and to is not None):
            return {}

        history = OrderedDict()
        versions = sorted(self.versions, key=lambda v: int_version(v.name))
        # version v0 was added as initial version later and some older projects may not have it
        if versions[0].name == "v0":
            to = to + 1
            since = since + 1

        # walk versions from newest to oldest within requested range
        for version in reversed(versions[since-1:to]):
            f_change = version.find_file_change(file)
            if not f_change:
                continue
            # make sure we find with correct filename next time
            if f_change['change'] == 'renamed':
                file = f_change['path']
            history[version.name] = f_change
            # end of file history
            if f_change['change'] in ['added', 'removed']:
                break

        return history
+
+
class ProjectAccess(db.Model):
    """ Access control lists (user id arrays) attached to a project.

    The project creator starts as owner, writer and reader.
    """
    project_id = db.Column(UUID(as_uuid=True), db.ForeignKey("project.id", ondelete="CASCADE"), primary_key=True, index=True)
    public = db.Column(db.Boolean, default=False, index=True)
    owners = db.Column(ARRAY(db.Integer), server_default="{}")
    readers = db.Column(ARRAY(db.Integer), server_default="{}")
    writers = db.Column(ARRAY(db.Integer), server_default="{}")

    project = db.relationship("Project",
                              uselist=False,
                              backref=db.backref("access", single_parent=True, uselist=False, cascade="all,delete", lazy='joined'))

    # GIN indices to make 'contains' lookups on the id arrays efficient
    __table_args__ = (db.Index('ix_project_access_owners', owners, postgresql_using="gin"),
                      db.Index('ix_project_access_readers', readers, postgresql_using="gin"),
                      db.Index('ix_project_access_writers', writers, postgresql_using="gin"),)

    def __init__(self, project, public=False):
        self.project = project
        self.owners = [project.creator.id]
        self.writers = [project.creator.id]
        self.readers = [project.creator.id]
        self.project_id = project.id
        self.public = public
+
+
class ProjectVersion(db.Model):
    """ Single immutable version of a project with its file changes and audit info. """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String, index=True)
    project_id = db.Column(UUID(as_uuid=True), db.ForeignKey("project.id", ondelete="CASCADE"), index=True)
    created = db.Column(db.DateTime, default=datetime.utcnow, index=True)
    author = db.Column(db.String, index=True)
    # metadata with files changes
    # {"added": [{"checksum": "c9a4fd2afd513a97aba19d450396a4c9df8b2ba4", "path": "test.qgs", "size": 31980}],
    # "removed": [], "renamed": [], "updated": []}
    changes = db.Column(db.JSON)
    # metadata (see also FileInfoSchema) for files in actual version
    files = db.Column(db.JSON)
    user_agent = db.Column(db.String, index=True)
    ip_address = db.Column(db.String, index=True)
    ip_geolocation_country = db.Column(db.String, index=True) # geolocation country derived from IP (with celery job)
    project_size = db.Column(BIGINT, nullable=False, default=0, index=True) # size of project at current version (incl. files from older versions)

    project = db.relationship(
        "Project",
        uselist=False,
        backref=db.backref("versions", single_parent=True, lazy='subquery', cascade="all,delete", order_by="desc(ProjectVersion.created)")
    )

    def __init__(self, project, name, author, changes, files, ip, user_agent=None):
        self.project_id = project.id
        self.name = name
        self.author = author
        self.changes = changes
        self.files = files
        self.user_agent = user_agent
        self.ip_address = ip
        # total of all file sizes listed in this version's metadata
        self.project_size = sum(f["size"] for f in self.files) if self.files else 0

    def find_file_change(self, file):
        """
        Browse version changes and return requested file change metadata (if any). Append type of change.

        :Example:

        >>> self.find_file_change('data/test.gpkg')
        {'checksum': '89469a6482267de394c7c7270cb7ffafe694ea76', 'location': 'v1/data/test.gpkg',
        'mtime': '2019-07-18T07:52:38.770113Z', 'path': 'base.gpkg', 'size': 98304, 'change': 'added'}

        :param file: file path
        :type file: str
        :returns: change metadata
        :rtype: dict
        """
        # changes maps change type -> list of file metadata entries
        for k, v in self.changes.items():
            # renamed entries record the new name under 'new_path'
            match_key = 'new_path' if k == 'renamed' else 'path'
            changed_item = next((item for item in v if item.get(match_key) == file), None)
            if changed_item:
                changed_item['change'] = k
                changed_item['location'] = next((f['location'] for f in self.files if f['path'] == changed_item[match_key]), None)
                # append location of diff file
                if 'diff' in changed_item:
                    changed_item['diff']['location'] = next(
                        (f['diff']['location'] for f in self.files if f['path'] == changed_item[match_key]), None)
                return changed_item

    def diff_summary(self):
        """ Calculate diff summary for versioned files updated with geodiff

        :Example:

        >>> self.diff_summary()
        {
          'base.gpkg': {
            'summary': [
              {'table': 'gpkg_contents', 'insert': 0, 'update': 1, 'delete': 0},
              {'table': 'simple', 'insert': 2, 'update': 0, 'delete': 0}
            ],
            'size': 278
          },
          'fail.gpkg': {
            'error': 'some geodiff error',
            'size': 278
          }
        }

        :return: diffs' summaries for all updated files
        :rtype: dict
        """
        output = {}
        for f in self.changes["updated"]:
            if 'diff' not in f:
                continue
            # summary is cached on disk next to the changeset file
            json_file = os.path.join(self.project.storage.project_dir, f['location'] + '-diff-summary')
            changeset = os.path.join(self.project.storage.project_dir, f['diff']['location'])
            if not os.path.exists(json_file):
                try:
                    self.project.storage.geodiff.list_changes_summary(changeset, json_file)
                except GeoDiffLibError as e:
                    # report the geodiff failure for this file but keep processing others
                    output[f['path']] = {
                        "error": str(e),
                        "size": f['diff']['size']
                    }
                    continue

            with open(json_file, 'r') as jf:
                content = json.load(jf)
                if 'geodiff_summary' not in content:
                    continue

                output[f['path']] = {
                    "summary": content["geodiff_summary"],
                    "size": f['diff']['size']
                }

        return output
+
+
class Namespace(db.Model):
    """ Project container belonging to an account (user or organisation). """
    name = db.Column(db.String, primary_key=True)
    account_id = db.Column(db.Integer, db.ForeignKey("account.id", ondelete="CASCADE"))
    # storage quota in bytes; default configurable via env variable
    storage = db.Column(BIGINT, nullable=False, default=os.environ.get('DEFAULT_STORAGE_SIZE', 100 * 1024 * 1024))

    account = db.relationship("Account", uselist=False, backref=db.backref("namespace", single_parent=True, uselist=False, cascade="all,delete"))

    def __init__(self, name, account_id):
        self.name = name
        self.account_id = account_id

    def projects(self):
        """ Return all projects in the namespace. """
        return Project.query.filter_by(namespace=self.name).all()

    def owner(self):
        """ Return owner (User or Organisation) of the associated account. """
        # bug fix: the result was previously not returned (method returned None)
        return self.account.owner()

    def disk_usage(self):
        """ Return total disk usage (bytes) of all projects in the namespace. """
        return sum(p.disk_usage for p in self.projects())
+
+
class Upload(db.Model):
    """ Transaction-like record of an ongoing push of a new project version.

    At most one upload per (project, version) can exist at a time.
    """
    id = db.Column(db.String, primary_key=True)
    project_id = db.Column(UUID(as_uuid=True), db.ForeignKey("project.id", ondelete="CASCADE"), index=True)
    version = db.Column(db.Integer, index=True)
    # requested files changes (same structure as ProjectVersion.changes)
    changes = db.Column(db.JSON)
    user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"), nullable=True)
    created = db.Column(db.DateTime, default=datetime.utcnow)

    user = db.relationship("User")
    project = db.relationship(
        "Project",
        uselist=False,
        backref=db.backref("uploads", single_parent=True, lazy='dynamic', cascade="all,delete")
    )
    # only a single pending upload per project version
    __table_args__ = (
        db.UniqueConstraint('project_id', 'version'),
    )

    def __init__(self, project, version, changes, user_id):
        # random transaction id handed back to the client
        self.id = str(uuid.uuid4())
        self.project_id = project.id
        self.version = version
        self.changes = changes
        self.user_id = user_id
+
+
class ProjectTransfer(db.Model):
    """ Pending request to move a project into another namespace.

    Only one outstanding transfer per project is allowed; requests expire
    after a configured period.
    """
    id = db.Column(db.String, primary_key=True)
    project_id = db.Column(UUID(as_uuid=True), db.ForeignKey("project.id", ondelete="CASCADE"), index=True)
    from_ns_name = db.Column(db.String, nullable=False, index=True) # cached value for easier lookups
    to_ns_name = db.Column(db.String, db.ForeignKey("namespace.name", ondelete="CASCADE"), nullable=False, index=True)
    requested_by = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"), nullable=True)
    expire = db.Column(db.DateTime)

    project = db.relationship(
        "Project",
        uselist=False,
        backref=db.backref("transfers", single_parent=True, lazy='dynamic', cascade="all,delete")
    )
    to_ns = db.relationship(
        "Namespace",
        backref=db.backref("transfers", single_parent=True, lazy='dynamic', cascade="all,delete")
    )
    user = db.relationship("User")

    # at most one pending transfer per project
    __table_args__ = (db.UniqueConstraint('project_id'),)

    class TransferError(Exception):
        """ Raised when a transfer request cannot be created. """
        def __init__(self, reason=None):
            error = 'Project transfer failed'
            if reason:
                error = '{} : {}'.format(error, reason)
            self.errors = error

    def __init__(self, project, to_namespace, requested_by):
        """ Initiate project transfer to different namespace

        :param project: project to be transferred
        :type project: Project

        :param to_namespace: the namespace for project to be transferred
        :type to_namespace: Namespace

        :param requested_by: requested by
        :type requested_by: User.id
        """
        self.id = str(uuid.uuid4())
        self.project_id = project.id
        self.from_ns_name = project.namespace
        self.to_ns_name = to_namespace.name
        self.requested_by = requested_by
        self.expire = datetime.utcnow() + timedelta(seconds=current_app.config['TRANSFER_EXPIRATION'])

        # NOTE(review): validation happens after attribute assignment; the object
        # is discarded on raise so this is harmless, but checking first would be cleaner
        if to_namespace.name == project.namespace:
            raise self.TransferError('origin and destination namespaces are the same')

    def is_expired(self):
        """ Check if transfer request is expired
        :rtype: bool
        """
        return datetime.utcnow() > self.expire
+
+
class Account(db.Model):
    """ Reference class to claim service ownership either by user or organisation """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    type = db.Column(ENUM("user", "organisation", name="account_type"), nullable=False, index=True)
    owner_id = db.Column(db.Integer, nullable=False, index=True)

    def __init__(self, type, owner_id):
        self.type = type
        self.owner_id = owner_id

    def owner(self):
        """ Return owning User/Organisation object (None for unknown type). """
        from ..organisation.models import Organisation

        if self.type == 'user':
            return User.query.get(self.owner_id)
        if self.type == 'organisation':
            return Organisation.query.get(self.owner_id)
        return None

    def can_edit(self, user_id):
        """ Check whether given user may modify the account. """
        from ..organisation.models import Organisation
        owner = self.owner()
        if isinstance(owner, User):
            return owner.id == user_id
        if isinstance(owner, Organisation):
            return user_id in owner.owners
        return False

    def email(self):
        """ Return contact email of the account owner ('' when not resolvable). """
        from ..organisation.models import Organisation
        owner = self.owner()

        if isinstance(owner, User):
            return owner.email
        if isinstance(owner, Organisation):
            # fall back to the email of the organisation's first owner
            first_owner = User.query.get(owner.owners[0])
            return first_owner.email
        return ''

    def name(self):
        """ Return human-readable account name ('' when not resolvable). """
        from ..organisation.models import Organisation

        owner = self.owner()
        if isinstance(owner, User):
            return owner.username
        if isinstance(owner, Organisation):
            return owner.name
        return ''

    def created(self, connection=None):
        """ Emit blinker.signal event that account has been created """
        account_created.send(self, connection=connection)

    def inactivated(self, action):
        """ Emit blinker.signal event that account has been deactivated. """
        account_inactivated.send(self, action=action)
+
+
class AccessRequest(db.Model):
    """ Pending request of a user for access to a project. """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    user_id = db.Column(db.Integer, db.ForeignKey("user.id", ondelete="CASCADE"), index=True)
    project_id = db.Column(UUID(as_uuid=True), db.ForeignKey("project.id", ondelete="CASCADE"), index=True)
    namespace = db.Column(db.String, nullable=False, index=True) # cached value for easier lookups
    expire = db.Column(db.DateTime)

    user = db.relationship("User", uselist=False)

    project = db.relationship(
        "Project",
        uselist=False,
        backref=db.backref("access_requests", single_parent=True, cascade="all,delete")
    )

    def __init__(self, project, user_id):
        self.project_id = project.id
        self.namespace = project.namespace
        self.user_id = user_id
        self.expire = datetime.utcnow() + timedelta(seconds=current_app.config['PROJECT_ACCESS_REQUEST'])

    def accept(self, permissions):
        """ Grant the requested access level to the project and remove the request.

        :param permissions: granted level - 'read', 'write' or 'owner'
        """
        project_access = self.project.access
        readers = project_access.readers.copy()
        writers = project_access.writers.copy()
        owners = project_access.owners.copy()
        # bug fix: guard against duplicated ids when user already has (some) access
        if self.user_id not in readers:
            readers.append(self.user_id)
        project_access.readers = readers
        if permissions in ("write", "owner"):
            if self.user_id not in writers:
                writers.append(self.user_id)
            project_access.writers = writers
        if permissions == "owner":
            if self.user_id not in owners:
                owners.append(self.user_id)
            project_access.owners = owners

        db.session.delete(self)
        db.session.commit()
+
+
class RemovedProject(db.Model):
    """ Backup record of a deleted project, kept to allow later restore. """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String, nullable=False, index=True)
    namespace = db.Column(db.String, nullable=False, index=True)
    # full serialized snapshot of the project (incl. storage_params for cleanup)
    properties = db.Column(db.JSON, nullable=False)
    timestamp = db.Column(db.DateTime, default=datetime.utcnow, index=True)
    removed_by = db.Column(db.String, nullable=False)

    def __init__(self, project, removed_by):
        from .schemas import ProjectSchemaForDelete

        self.name = project.name
        self.namespace = project.namespace
        self.properties = ProjectSchemaForDelete().dump(project)
        self.removed_by = removed_by
diff --git a/server/src/models/schemas.py b/server/src/models/schemas.py
new file mode 100644
index 00000000..655d7dac
--- /dev/null
+++ b/server/src/models/schemas.py
@@ -0,0 +1,267 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+import copy
+import os
+import re
+from datetime import datetime
+from marshmallow import fields, pre_dump, post_dump
+from flask_login import current_user
+from flask import current_app
+
+from ..auth.schemas import UserSchema
+from .. import ma
+from ..mergin_utils import resolve_tags
+from ..permissions import ProjectPermissions
+from .db_models import Project, ProjectVersion, ProjectTransfer, Namespace, Account, AccessRequest, RemovedProject
+from ..auth.models import User
+
+
class DateTimeWithZ(fields.DateTime):
    """DateTime field serialized in ISO-like format with an explicit 'Z' suffix."""

    def __init__(self, **kwargs):
        super().__init__('%Y-%m-%dT%H:%M:%S%zZ', **kwargs)
+
+
class ProjectAccessSchema(ma.ModelSchema):
    """Serializer of project access lists (user ids per permission level)."""
    owners = fields.List(fields.Integer())
    writers = fields.List(fields.Integer())
    readers = fields.List(fields.Integer())
    public = fields.Boolean()

    @post_dump
    def insert_usernames(self, data, **kwargs):
        """ Convert list of user ids in access levels to corresponding usernames
        Adds fields 'ownersnames', 'writersnames' and 'readersnames' to serialized data
        """
        if 'users_map' in self.context:
            # user map can be pass as context to save db query
            users_map = self.context['users_map']
        else:
            ids = set(data['owners'] + data['writers'] + data['readers'])
            users_map = {u.id: u.username for u in User.query.filter(User.id.in_(ids)).all()}

        for role in ('owners', 'writers', 'readers'):
            # silently skip ids of users that no longer exist
            data[role + 'names'] = [users_map[uid] for uid in data[role] if uid in users_map]
        return data
+
+
def project_user_permissions(project):
    """Resolve current user's permission flags on the project for serialization."""
    checks = (
        ("upload", ProjectPermissions.Upload),
        ("update", ProjectPermissions.Update),
        ("delete", ProjectPermissions.Delete),
    )
    return {action: perm.check(project, current_user) for action, perm in checks}
+
class FileInfoSchema(ma.ModelSchema):
    """Serializer of a single project file metadata entry."""
    path = fields.String()
    size = fields.Integer()
    checksum = fields.String()
    location = fields.String(load_only=True)  # internal storage path, never dumped
    mtime = fields.String()
    diff = fields.Nested('self', required=False, missing={})
    history = fields.Dict(required=False, dump_only=True, missing={})

    @pre_dump
    def patch_history_field(self, data, **kwargs):
        """
        Append expiration to materialized versioned files and remove internal server metadata from final response.
        This is because history is general dict with yet unknown structure.
        #TODO resolve once marshmallow 3.0 is released.
        history = fields.Dict(keys=fields.String(), values=fields.Nested('self', exclude=['location', 'chunks']))
        """
        # diff field (self-nested does not contain history)
        if 'history' not in data:
            return data

        _data = copy.deepcopy(data)  # create deep copy to avoid messing around with original object
        for item in _data['history'].values():
            if 'diff' in item:
                # strip server-internal fields also from nested diff metadata
                item['diff'].pop('location', None)
                item['diff'].pop('sanitized_path', None)
            if self.context and 'project_dir' in self.context:
                abs_path = os.path.join(self.context['project_dir'], item['location'])
                if os.path.exists(abs_path):
                    # file is already materialized on disk -> tell client when it expires
                    expiration = os.path.getmtime(abs_path) + current_app.config['FILE_EXPIRATION']
                    item.update(expiration=datetime.utcfromtimestamp(expiration))
            item.pop('location', None)
            item.pop('chunks', None)
            item.pop('sanitized_path', None)
        return _data
+
+
class ProjectSchemaForVersion(ma.ModelSchema):
    """ Equivalent of ProjectSchema when version object is serialized """
    id = fields.UUID()
    created = DateTimeWithZ(attribute="project.created")
    creator = fields.Int(attribute="project.creator_id")
    uploads = fields.Function(lambda obj: obj.project.uploads.all())
    name = fields.Function(lambda obj: obj.project.name)
    namespace = fields.Function(lambda obj: obj.project.namespace)
    access = fields.Method("_access")
    permissions = fields.Method("_permissions")
    disk_usage = fields.Method("_disk_usage")
    files = fields.Nested(FileInfoSchema(), many=True)
    tags = fields.Method("_tags")
    updated = DateTimeWithZ(attribute="created")  # 'updated' == creation time of this version
    version = fields.Function(lambda obj: obj.name)

    def _access(self, obj):
        # access lists of the parent project
        return ProjectAccessSchema().dump(obj.project.access)

    def _permissions(self, obj):
        # current user's permissions on the parent project
        return project_user_permissions(obj.project)

    def _disk_usage(self, obj):
        # disk usage of files captured in this particular version
        return sum(f["size"] for f in obj.files)

    def _tags(self, obj):
        # tags resolved from files of this particular version
        return resolve_tags(obj.files)
+
+
class ProjectAccessRequestSchema(ma.ModelSchema):
    """Serializer of pending project access requests."""
    # expose only public user info (no email/profile/admin flags);
    # fix: 'is_admin' was listed twice in the exclude list
    user = fields.Nested(UserSchema(), exclude=['profile', 'is_admin', 'email', 'id', 'verified_email'])
    project_name = fields.Function(lambda obj: obj.project.name)
    namespace = fields.Str()
    expire = DateTimeWithZ()

    class Meta:
        model = AccessRequest
+
+
class ProjectSchema(ma.ModelSchema):
    """Full project serializer used by project detail endpoints."""
    id = fields.UUID()
    files = fields.Nested(FileInfoSchema(), many=True)
    access = fields.Nested(ProjectAccessSchema())
    access_requests = fields.Nested(ProjectAccessRequestSchema(), many=True, exclude=['project'])
    permissions = fields.Function(project_user_permissions)
    version = fields.String(attribute='latest_version')
    namespace = fields.Str()
    created = DateTimeWithZ()

    class Meta:
        model = Project
        # internal/heavy fields not exposed via API
        exclude = ['versions', 'transfers', 'latest_version', 'storage_params']
+
+
class ProjectListSchema(ma.ModelSchema):
    """Lightweight project serializer for listing endpoints."""
    id = fields.UUID()
    name = fields.Str()
    namespace = fields.Str()
    access = fields.Nested(ProjectAccessSchema())
    permissions = fields.Function(project_user_permissions)
    version = fields.String(attribute='latest_version')
    updated = fields.Method("get_updated")
    created = DateTimeWithZ()
    creator = fields.Integer(attribute='creator_id')
    disk_usage = fields.Integer()
    tags = fields.List(fields.Str())
    has_conflict = fields.Method("get_has_conflict")

    def get_updated(self, obj):
        # fall back to creation time when project was never updated
        return obj.updated if obj.updated else obj.created

    def get_has_conflict(self, obj):
        """Return True if any conflict copy in the project still has a matching base file."""
        paths = [f.get('path') for f in obj.files]
        for path in paths:
            if '_conflict' not in path:
                continue
            # derive the base .gpkg name by stripping the conflict suffix
            base = re.sub(r"(\.gpkg)(.*conflict.*)", r"\1", path)
            if base in paths:
                return True
        return False
+
+
class ProjectVersionSchema(ma.ModelSchema):
    """Serializer of a single project version."""
    project_name = fields.Function(lambda obj: obj.project.name)
    namespace = fields.Function(lambda obj: obj.project.namespace)
    author = fields.String()
    project = fields.Nested(ProjectSchema())
    changesets = fields.Method("get_diff_summary")
    files = fields.String()
    created = DateTimeWithZ()

    def get_diff_summary(self, obj):
        # summary of changesets recorded for this version
        return obj.diff_summary()

    class Meta:
        model = ProjectVersion
        # do not leak internal id and request-origin metadata
        exclude = ['id', 'ip_address', 'ip_geolocation_country']
+
+
class NamespaceSchema(ma.ModelSchema):
    """Serializer of a namespace with the type of its owning account."""
    type = fields.Method("namespace_type")

    class Meta:
        model = Namespace
        fields = ('name', 'type')

    def namespace_type(self, obj):
        # type of the account owning this namespace (e.g. user/organisation)
        return obj.account.type
+
+
class ProjectTransferSchema(ma.ModelSchema):
    """Serializer of a pending project transfer between namespaces."""
    requested_by = fields.Method("requested_by_username")
    project = fields.Nested(ProjectSchema())
    project_name = fields.Function(lambda obj: obj.project.name)

    class Meta:
        model = ProjectTransfer
        fields = ('id', 'project_name', 'from_ns_name', 'to_ns_name', 'requested_by', 'requested_at', 'project', 'expire')

    def requested_by_username(self, obj):
        # username of the user who initiated the transfer
        return obj.user.username
+
+
class AccountSchema(ma.ModelSchema):
    """Serializer of an account with owner name and contact email resolved."""
    name = fields.Method('get_owner_name')
    email = fields.Method('get_owner_email')

    def get_owner_name(self, obj):
        # delegated to Account.name() which resolves the owner entity
        return obj.name()

    def get_owner_email(self, obj):
        # delegated to Account.email() which resolves the owner entity
        return obj.email()

    class Meta:
        model = Account
        fields = ('id', 'type', 'owner_id', 'name', 'email', )
+
+
class AccountExtendedSchema(ma.ModelSchema):
    """Flat account serializer with namespace-derived fields (admin listings)."""
    id = fields.Integer()
    name = fields.String()
    type = fields.String()
    active = fields.Boolean()
    storage = fields.Integer()
+
+
class FullVersionSchema(ma.ModelSchema):
    """Complete version serializer used for the project-delete dump."""
    project_name = fields.Function(lambda obj: obj.project.name)
    namespace = fields.Function(lambda obj: obj.project.namespace)

    class Meta:
        model = ProjectVersion
        exclude = ['id']
+
+
class ProjectSchemaForDelete(ma.ModelSchema):
    """Project dump stored in RemovedProject.properties for a possible restore."""
    versions = fields.Nested(FullVersionSchema(), many=True)
    creator_id = fields.Method("_creator_id")

    def _creator_id(self, obj):
        return obj.creator_id

    class Meta:
        model = Project
        exclude = ['transfers', 'uploads', 'access_requests', 'access']  # these fields will be lost
+
+
class RemovedProjectSchema(ma.ModelSchema):
    """Plain serializer of RemovedProject records."""
    class Meta:
        model = RemovedProject
diff --git a/server/src/organisation/__init__.py b/server/src/organisation/__init__.py
new file mode 100644
index 00000000..ea0b05b0
--- /dev/null
+++ b/server/src/organisation/__init__.py
@@ -0,0 +1,325 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from flask import Blueprint, jsonify, request, abort, render_template, current_app
+from flask_login import current_user
+from sqlalchemy import or_, func
+
+from ..auth import auth_required
+from ..auth.models import User
+from .forms import UpdateOrganisationForm, AccessForm, CreateOrganisationForm, OwnerAccessForm, OrganisationInvitationForm
+from .models import Organisation, OrganisationInvitation
+from ..models.db_models import Namespace, Account
+from .schemas import OrganisationSchema, OrganisationInvitationSchema
+from .. import db, wm, SIG_NEW_ORGANISATION, SIG_DELETED_ORGANISATION
+
+
def find_organisations_by_username(username):
    """Return all active organisations where the given user holds any role.

    Aborts with 404 when the user does not exist.
    """
    user = User.query.filter_by(username=username).first_or_404()
    membership = [
        Organisation.owners.contains([user.id]),
        Organisation.admins.contains([user.id]),
        Organisation.writers.contains([user.id]),
        Organisation.readers.contains([user.id]),
    ]
    return Organisation.query.filter(or_(*membership)).filter_by(active=True).all()
+
+
def init_app(app):
    """Create the 'organisation' blueprint with all routes and register it on app.

    Fixes applied:
    - restored URL rule parameters (`/<name>`, `/<name>/access`,
      `/invitations/<type>/<name>`, `/invitation/<int:id>`, ...) which were
      missing; views took arguments that were not present in the rule and two
      GET routes collided on '/'
    - namespace clash check now filters on Namespace.name (was Organisation.name)
    - delete_invitation captures invitation fields before the row is deleted
    """
    organisation = Blueprint("organisation", __name__, template_folder='templates')

    def get_org_by_name(name, only_active=True):
        """Fetch organisation by name or abort with 404."""
        query = Organisation.query.filter_by(name=name)
        if only_active:
            query = query.filter_by(active=True)
        return query.first_or_404(f"Organisation {name} not found")

    @organisation.route('/', methods=['GET'])
    @auth_required
    def get_organisations():  # noqa: E501
        """List mergin organisations a current user has (at least read) access to.

        :rtype: List[Organisation]
        """
        organisations = find_organisations_by_username(current_user.username)
        data = OrganisationSchema(many=True, context={'user': current_user}).dump(organisations)
        return jsonify(data), 200

    @organisation.route('/', methods=['POST'])
    @auth_required
    def create_organisation():  # noqa: E501
        """ Create a new organisation.
        :rtype: None
        """
        # limit number of 'free' (zero storage) organisations a user may own
        free_orgs = Organisation.query.join(Account, Account.owner_id == Organisation.id).join(Namespace, Namespace.account_id == Account.id)\
            .filter(Organisation.owners.contains([current_user.id]))\
            .filter((Namespace.storage == 0))\
            .filter(Organisation.active)\
            .count()
        if free_orgs > 2:
            abort(400, "Too many free organisations")

        form = CreateOrganisationForm.from_json(request.json)
        if not form.validate_on_submit():
            return jsonify(form.errors), 400

        name = form.name.data
        # organisation name becomes a namespace -> must be globally unique
        # (case-insensitive); fix: compare against Namespace.name
        ns = Namespace.query.filter(func.lower(Namespace.name) == func.lower(name)).first()
        if ns:
            abort(409, f"Namespace {name} already exist, please choose another name.")

        org = Organisation(creator_id=current_user.id, **form.data)
        db.session.add(org)
        db.session.commit()
        wm.emit_signal(SIG_NEW_ORGANISATION, request.path, msg=f'New organisation *{name}* has been created')
        return jsonify({"success": True}), 201

    @organisation.route('/<name>', methods=['GET'])
    @auth_required
    def get_organisation_by_name(name):  # noqa: E501
        """ Return organisation by name.

        :param name: name of organisation
        :type name: str
        :rtype: Organisation
        """
        # admins may inspect inactive organisations as well
        org = get_org_by_name(name, only_active=not current_user.is_admin)
        if current_user.id not in org.readers and not current_user.is_admin:
            abort(403, "You do not have permissions to get organisation")
        data = OrganisationSchema(context={'user': current_user}).dump(org)
        return data, 200

    @organisation.route('/<name>', methods=['PATCH'])
    @auth_required
    def update_organisation(name):  # noqa: E501
        """ Update organisation.

        Information fields (name, description) and owners to be updated only by organisation owners.

        :param name: name of organisation
        :type name: str
        :rtype: Organisation
        """
        org = get_org_by_name(name)
        if current_user.id not in org.owners and not current_user.is_admin:
            abort(403, "You do not have permissions to update organisation")

        form = UpdateOrganisationForm.from_json(request.json)
        if not form.validate_on_submit():
            return jsonify(form.errors), 400

        form.populate_obj(org)
        db.session.add(org)
        db.session.commit()
        data = OrganisationSchema(context={'user': current_user}).dump(org)
        return data, 200

    @organisation.route('/<name>/access', methods=['PATCH'])
    @auth_required
    def update_access(name):  # noqa: E501
        """ Update access fields of organisation.

        Access fields: admins, writers, readers to be amended by organisation admins.

        :param name: name of organisation
        :type name: str
        :rtype: Organisation
        """
        if not request.is_json:
            abort(400, "Payload format should be json")

        org = get_org_by_name(name)
        # translate usernames from payload to user ids; unknown names are dropped
        usernames = list(
            set(request.json['owners']) |
            set(request.json['admins']) |
            set(request.json['writers']) |
            set(request.json['readers'])
        )
        users = User.query.with_entities(User.username, User.id).filter(User.username.in_(usernames)).all()
        users_map = {u.username: u.id for u in users}
        access = {}
        for key in ('owners', 'admins', 'writers', 'readers'):
            access[key] = []
            for username in request.json[key]:
                if username not in users_map:
                    continue
                access[key].append(users_map[username])

        # owners (and mergin admins) may change everything, org admins all but owners
        if current_user.id in org.owners or current_user.is_admin:
            form = OwnerAccessForm().from_json(access)
        elif current_user.id in org.admins:
            form = AccessForm().from_json(access)
        else:
            abort(403, "You do not have permissions to update organisation members")

        if not form.validate_on_submit():
            return jsonify(form.errors), 400

        form.populate_obj(org)
        db.session.add(org)
        db.session.commit()
        data = OrganisationSchema(context={"user": current_user}).dump(org)
        return data, 200

    @organisation.route('/<name>', methods=['DELETE'])
    @auth_required
    def delete_organisation(name):  # noqa: E501
        """ Delete organisation.

        :param name: name of organisation
        :type name: str
        :rtype: None
        """
        org = Organisation.query.filter_by(name=name).first_or_404()
        if not current_user.is_admin and current_user.id not in org.owners:
            abort(403, "You do not have permissions to delete organisation")

        account = Account.query.filter_by(type='organisation', owner_id=org.id).first()
        db.session.delete(account)
        db.session.commit()
        db.session.delete(org)  # make sure to delete namespace and all projects
        db.session.commit()
        wm.emit_signal(SIG_DELETED_ORGANISATION, request.path, msg=f'Organisation *{name}* has been deleted')
        return jsonify({"success": True}), 200

    @organisation.route('/invitation/create', methods=['POST'])
    @auth_required
    def create_invitation():  # noqa: E501
        """ Create invitation to organisation.
        """
        from ..celery import send_email_async

        if not request.is_json:
            abort(400, "Payload format should be json")

        form = OrganisationInvitationForm.from_json(request.json)
        if not form.validate_on_submit():
            return jsonify(form.errors), 400

        username = form.data.get('username')
        org_name = form.data.get('org_name')
        invitation = OrganisationInvitation.query.filter_by(username=username, org_name=org_name).first()
        if invitation:
            abort(409, "Invitation already exist.")

        user = User.query.filter_by(username=username).first_or_404(f"User {username} not found")
        organisation = get_org_by_name(org_name)
        if current_user.id not in organisation.admins and current_user.id not in organisation.owners:
            abort(403, "You do not have permissions to create an invitation.")

        invitation = OrganisationInvitation(org_name=org_name, username=username, role=form.data.get('role'))
        db.session.add(invitation)
        db.session.commit()
        # notify invited user by email (sent asynchronously via celery)
        body = render_template(
            'email/organisation_invitation.html',
            subject='Organisation invitation',
            username=username,
            invitation=invitation,
            link=f"{request.url_root.rstrip('/')}/users/{username}/organisations"
        )
        email_data = {
            'subject': 'Organisation invitation',
            'html': body,
            'recipients': [user.email],
            'sender': current_app.config['MAIL_DEFAULT_SENDER']
        }
        send_email_async.delay(**email_data)
        return jsonify(OrganisationInvitationSchema().dump(invitation)), 201

    @organisation.route('/invitations/<type>/<name>', methods=['GET'])
    @auth_required
    def get_invitations(type, name):  # noqa: E501
        """ Get invitations of user.
        :param name: username or organisation name
        :type name: str
        :param type: type of subject user or org
        :type type: enumerate
        """
        data = None
        if type == "user":
            if current_user.username != name and not current_user.is_admin:
                abort(403, "You do not have permissions to list invitations")
            data = OrganisationInvitationSchema(many=True).dump(OrganisationInvitation.query.filter_by(username=name).all())
            if not data:
                # distinguish 'no invitations' from 'no such user'
                User.query.filter_by(username=name).first_or_404(f"User {name} not found")
        elif type == "org":
            organisation = get_org_by_name(name)
            if (current_user.id not in organisation.admins and current_user.id not in organisation.owners) and not current_user.is_admin:
                abort(403, "You do not have permissions to list invitations.")
            data = OrganisationInvitationSchema(many=True).dump(OrganisationInvitation.query.filter_by(org_name=name))
        else:
            abort(400, "Invalid account type")

        return jsonify(data), 200

    @organisation.route('/invitation/<int:id>', methods=['GET'])
    @auth_required
    def get_invitation(id):  # noqa: E501
        """ Get invitation detail.
        :param id: invitation id
        :type id: int
        """
        invitation = OrganisationInvitation.query.filter_by(id=id).first_or_404(f"Invitation {id} not found")
        if invitation.username != current_user.username and \
                current_user.id not in invitation.organisation.owners and \
                current_user.id not in invitation.organisation.admins:
            abort(403, "You do not have permissions to access invitation")

        data = OrganisationInvitationSchema().dump(invitation)
        return jsonify(data), 200

    @organisation.route('/invitation/confirm/<int:id>', methods=['POST'])
    @auth_required
    def accept_invitation(id):  # noqa: E501
        """ Accept invitation.
        :param id: invitation id
        :type id: int
        """
        invitation = OrganisationInvitation.query.get_or_404(id, "Invitation does not exist")
        if invitation.username != current_user.username:
            abort(403, "You do not have permissions to accept invitation")
        if invitation.is_expired():
            abort(400, "This invitation is already expired.")

        invitation.accept()
        org = OrganisationSchema(context={"user": current_user}).dump(invitation.organisation)
        return jsonify(org), 200

    @organisation.route('/invitation/<int:id>', methods=['DELETE'])
    @auth_required
    def delete_invitation(id):  # noqa: E501
        """ Delete/reject organisation invitation.
        :param id: invitation id
        :type id: int
        """
        from ..celery import send_email_async

        invitation = OrganisationInvitation.query.get_or_404(id, "Invitation does not exist")
        if invitation.username != current_user.username and \
                current_user.id not in invitation.organisation.owners + invitation.organisation.admins:
            abort(403, "You do not have permissions to delete invitation")

        # resolve recipient and remember fields before the row is deleted,
        # attribute access on a deleted instance after commit is unreliable
        username = invitation.username
        org_name = invitation.org_name
        user = User.query.filter(User.username == username).first()
        db.session.delete(invitation)
        db.session.commit()

        body = render_template(
            'email/organisation_invitation_revoke.html',
            subject='Organisation invitation revoked',
            username=username,
            org_name=org_name
        )
        email_data = {
            'subject': 'Your organisation invitation has been revoked',
            'html': body,
            'recipients': [user.email],
            'sender': current_app.config['MAIL_DEFAULT_SENDER']
        }
        send_email_async.delay(**email_data)
        return '', 200

    app.register_blueprint(organisation, url_prefix='/orgs')
diff --git a/server/src/organisation/forms.py b/server/src/organisation/forms.py
new file mode 100644
index 00000000..4eff9930
--- /dev/null
+++ b/server/src/organisation/forms.py
@@ -0,0 +1,39 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from flask_wtf import FlaskForm
+from wtforms import StringField, validators, IntegerField, SelectField, BooleanField
+from wtforms.validators import Optional, DataRequired
+from ..forms import namespace_validation
+from ..forms import IntegerListField
+
+
class CreateOrganisationForm(FlaskForm):
    """ This form is for create/update organisation """
    # organisation name must also be a valid mergin namespace
    name = StringField('Name', [validators.Length(min=4, max=25), namespace_validation])
    description = StringField('Description', [validators.Length(max=256), Optional()])
+
+
class AccessForm(FlaskForm):
    """ Form to update access to organisation up to admin level. """
    # lists of user ids per role
    admins = IntegerListField("Admins", [DataRequired()])
    writers = IntegerListField("Writers", [DataRequired()])
    readers = IntegerListField("Readers", [DataRequired()])
+
+
class OwnerAccessForm(AccessForm):
    """ Form to update full organisation access, including owners. """
    owners = IntegerListField("Owners", [DataRequired()])
+
+
class UpdateOrganisationForm(FlaskForm):
    """ Form to update organisation metadata by its owner. """
    description = StringField('Description', [validators.Length(max=256), Optional()])
+
+
class OrganisationInvitationForm(FlaskForm):
    """ Form to create/update organisation invitation. """
    org_name = StringField('Organisation name', validators=[DataRequired()])
    username = StringField('Username', validators=[DataRequired()])
    # role granted to the invited user on acceptance
    role = SelectField('role', choices=[
        ('reader', 'reader'), ('writer', 'writer'), ('admin', 'admin'), ('owner', 'owner')])
+
diff --git a/server/src/organisation/models.py b/server/src/organisation/models.py
new file mode 100644
index 00000000..2ef2b27e
--- /dev/null
+++ b/server/src/organisation/models.py
@@ -0,0 +1,112 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from datetime import datetime, timedelta
+
+from sqlalchemy import or_
+from sqlalchemy.dialects.postgresql import ARRAY, BIGINT, ENUM
+from .. import db, current_app
+
+
class Organisation(db.Model):
    """ Organization db class.

    Organisation is one-to-one with Mergin namespace (which is unique).

    Organization supports tiers, with default 'free' which means organisation is not ready to use.

    Organization access is managed by access list control:
    Owners: users who are allowed remove organisation or change billing
    Admins: users who can administer users for organisation (except owners)
    Writers: writers have read-write access to organisation namespace
    Readers: reader have read-only access to organisation namespace

    """
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # modified only by owners
    name = db.Column(db.String, nullable=False, index=True)
    description = db.Column(db.String, nullable=True)
    owners = db.Column(ARRAY(db.Integer), server_default="{}")
    # access modified also by admins
    admins = db.Column(ARRAY(db.Integer), server_default="{}")
    writers = db.Column(ARRAY(db.Integer), server_default="{}")
    readers = db.Column(ARRAY(db.Integer), server_default="{}")
    registration_date = db.Column(db.DateTime(), nullable=True, default=datetime.utcnow)
    # inactive organisations are hidden from regular queries
    active = db.Column(db.Boolean, default=True)
    inactive_since = db.Column(db.DateTime(), nullable=True, index=True)

    # GIN indices speed up membership lookups in the role arrays
    __table_args__ = (db.UniqueConstraint('name'),
                      db.Index('ix_org_owners', owners, postgresql_using="gin"),
                      db.Index('ix_org_readers', readers, postgresql_using="gin"),
                      db.Index('ix_org_writers', writers, postgresql_using="gin"),
                      db.Index('ix_org_admins', admins, postgresql_using="gin"),)

    def __init__(self, creator_id, name, **kwargs):
        """Create organisation; the creator is granted all four roles."""
        self.name = name
        self.owners = [creator_id]
        self.admins = [creator_id]
        self.writers = [creator_id]
        self.readers = [creator_id]
        self.description = kwargs.get('description', None)
        self.active = True

    @staticmethod
    def find_by_member_id(user_id):
        """Return all active organisations where user holds any role."""
        return Organisation.query.filter(
            or_(
                Organisation.owners.contains([user_id]),
                Organisation.admins.contains([user_id]),
                Organisation.writers.contains([user_id]),
                Organisation.readers.contains([user_id])
            )
        ).filter_by(active=True).all()

    def get_member_role(self, user_id):
        """Return highest role of user ('owner' > 'admin' > 'writer' > 'reader'),
        or implicitly None when the user is not a member.
        """
        for role in ('owners', 'admins', 'writers', 'readers'):
            if user_id not in getattr(self, role):
                continue
            return role.rstrip('s')  # column name -> singular role name
+
+
class OrganisationInvitation(db.Model):
    """ Organization Invitations db class.

    Adding new users to Organization is invitation based with required confirmation.
    """
    id = db.Column(db.Integer, primary_key=True)
    org_name = db.Column(db.String, db.ForeignKey("organisation.name", ondelete="CASCADE"))
    username = db.Column(db.String, db.ForeignKey("user.username", ondelete="CASCADE"))
    # role granted when invitation is accepted
    role = db.Column(ENUM('reader', 'writer', 'admin', 'owner', name='role'), nullable=False)
    # invitation validity deadline
    expire = db.Column(db.DateTime)

    # NOTE(review): uselist=False on the backref makes Organisation.invitations a
    # scalar although an organisation may have many invitations; compare with the
    # AccessRequest backref which omits it - confirm this is intended
    organisation = db.relationship(
        "Organisation",
        uselist=False,
        backref=db.backref("invitations", single_parent=True, uselist=False, cascade="all,delete")
    )

    user = db.relationship("User", uselist=False)

    def __init__(self, org_name, username, role):
        self.org_name = org_name
        self.username = username
        self.role = role
        # expiration interval (seconds) comes from app config
        self.expire = datetime.utcnow() + timedelta(seconds=current_app.config['ORGANISATION_INVITATION_EXPIRATION'])

    def accept(self):
        """ The invitation accepted

        Adds the invited user to the corresponding role list of the organisation
        and removes the invitation itself.
        """
        attribute = self.role + 's'  # e.g. role 'reader' -> column 'readers'
        roles = getattr(self.organisation, attribute)
        roles.append(self.user.id)
        # refresh then reassign a fresh list so the ARRAY change is detected
        # NOTE(review): confirm the refresh/setattr ordering is deliberate
        db.session.refresh(self.organisation)
        setattr(self.organisation, attribute, roles)
        db.session.add(self.organisation)
        db.session.delete(self)
        db.session.commit()

    def is_expired(self):
        """ Check if invitation is expired
        :rtype: bool
        """
        return datetime.utcnow() > self.expire
diff --git a/server/src/organisation/permission.py b/server/src/organisation/permission.py
new file mode 100644
index 00000000..fa4821f2
--- /dev/null
+++ b/server/src/organisation/permission.py
@@ -0,0 +1,42 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from flask_login import current_user
+from ..organisation import Organisation
+
+
class OrganisationPermissions:
    """ Get or check organisation by permission """

    @staticmethod
    def _query(user, field):
        """ return query of organisations where user is listed in given role array """
        if user.is_authenticated and user.is_admin:
            # mergin admin sees all organisations
            return Organisation.query
        if not user.is_authenticated:
            # anonymous users get an always-empty query
            return Organisation.query.filter(False)
        return Organisation.query.filter(field.any(user.id))

    class Owner:
        @staticmethod
        def query(user):
            return OrganisationPermissions._query(user, Organisation.owners)

    class Admin:
        @staticmethod
        def query(user):
            return OrganisationPermissions._query(user, Organisation.admins)

    class Writer:
        @staticmethod
        def query(user):
            return OrganisationPermissions._query(user, Organisation.writers)

    class Reader:
        @staticmethod
        def query(user):
            return OrganisationPermissions._query(user, Organisation.readers)
+
+
def organisations_query(permission):
    """Return query of organisations accessible to current user at given permission level."""
    return permission.query(current_user)
diff --git a/server/src/organisation/schemas.py b/server/src/organisation/schemas.py
new file mode 100644
index 00000000..2d115eb7
--- /dev/null
+++ b/server/src/organisation/schemas.py
@@ -0,0 +1,80 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from marshmallow import fields
+from .. import ma
+from ..models.db_models import Project, Namespace
+from ..auth.models import User
+from .models import Organisation, OrganisationInvitation
+
+
class OrganisationSchema(ma.ModelSchema):
    """Serializer of organisation with resolved usernames and usage statistics."""
    name = fields.Str()
    disk_usage = fields.Method("get_disk_usage")
    project_count = fields.Method("get_project_count")
    owners = fields.Method("get_owners")
    admins = fields.Method("get_admins")
    writers = fields.Method("get_writers")
    readers = fields.Method("get_readers")
    storage = fields.Method("get_storage")
    role = fields.Method("get_role", dump_only=True)
    account = fields.Method("get_account", dump_only=True)

    def get_owners(self, obj):
        return self.get_access_usernames(obj, 'owners')

    def get_admins(self, obj):
        return self.get_access_usernames(obj, 'admins')

    def get_writers(self, obj):
        return self.get_access_usernames(obj, 'writers')

    def get_readers(self, obj):
        return self.get_access_usernames(obj, 'readers')

    def get_access_usernames(self, obj, role):
        """Map user ids stored in the given role array to usernames."""
        users = User.query.filter(User.id.in_(getattr(obj, role))).all()
        return [u.username for u in users]

    def get_disk_usage(self, obj):
        # sum over projects in the organisation namespace (generator avoids
        # materializing an intermediate list)
        return sum(p.disk_usage for p in Project.query.filter_by(namespace=obj.name))

    def get_project_count(self, obj):
        return Project.query.filter_by(namespace=obj.name).count()

    def get_storage(self, obj):
        # storage limit comes from the associated namespace
        ns = Namespace.query.filter_by(name=obj.name).first()
        return ns.storage

    def get_role(self, obj):
        # role of the user passed in context; 'unknown' when no user provided
        if self.context and 'user' in self.context:
            return obj.get_member_role(self.context['user'].id)
        else:
            return "unknown"

    def _is_owner(self, obj):
        return self.context and 'user' in self.context and obj.get_member_role(self.context['user'].id) == "owner"

    def _is_mergin_admin(self, obj):
        return self.context and 'user' in self.context and self.context['user'].is_admin

    def get_account(self, obj):
        # imported locally to avoid circular imports
        from ..models.db_models import Account
        from ..models.schemas import AccountSchema
        account = Account.query.filter_by(type='organisation', owner_id=obj.id).first()
        if self._is_owner(obj) or self._is_mergin_admin(obj):
            return AccountSchema().dump(account)
        else:
            return AccountSchema(only=('email',)).dump(account)  # do not send private information

    class Meta:
        model = Organisation
        exclude = ('invitations', )
+
+
class OrganisationInvitationSchema(ma.ModelSchema):
    """Serializer of organisation invitations."""
    org_name = fields.Str()
    username = fields.Str()

    class Meta:
        model = OrganisationInvitation
diff --git a/server/src/permissions.py b/server/src/permissions.py
new file mode 100644
index 00000000..be2cf065
--- /dev/null
+++ b/server/src/permissions.py
@@ -0,0 +1,146 @@
+# Copyright (C) 2019 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+import os
+from flask import abort
+from flask_login import current_user
+from sqlalchemy import or_
+
+from .auth.models import User
+from .organisation import Organisation
+from .organisation.permission import organisations_query, OrganisationPermissions
+from .models.db_models import ProjectAccess, Project, Upload, Namespace
+
+
class ProjectPermissions:
    """Project permission predicates grouped by required access level.

    Each nested class exposes a static ``check(project, user) -> bool``;
    ``Read`` additionally exposes ``query(user, ...)`` returning a filtered
    SQLAlchemy Project query.
    """

    class Read:
        @staticmethod
        def check(project, user):
            # readable when public, or the user is a reader/server admin,
            # or the namespace (own username / organisation) grants read
            pa = project.access
            return pa.public or (user.is_authenticated and (user.is_admin or user.id in pa.readers)) or (check_namespace_permissions(project.namespace, user, "read"))

        @staticmethod
        def query(user, as_admin=True, public=True):
            """Build a query of projects the user is allowed to read."""
            if user.is_authenticated and user.is_admin and as_admin:
                return Project.query  # server admins see everything
            query = Project.access.has(public=public)
            if user.is_authenticated:
                # namespaces of organisations where the user holds any role
                orgs = Organisation.query.with_entities(Organisation.name).filter(
                    or_(Organisation.admins.contains([user.id]), Organisation.readers.contains([user.id]),
                        Organisation.writers.contains([user.id]), Organisation.owners.contains([user.id])))
                # NOTE(review): Project.namespace is referenced inside
                # Project.access.has(...), i.e. inside an EXISTS over
                # ProjectAccess — verify this correlates as intended.
                if public:
                    query = query | Project.access.has(ProjectAccess.readers.contains([user.id]) | Project.namespace.in_(orgs))
                else:
                    query = Project.access.has(ProjectAccess.readers.contains([user.id]) | Project.namespace.in_(orgs))
            return Project.query.filter(query)

    class Upload:
        @staticmethod
        def check(project, user):
            # direct writer access or namespace-level write permission
            return user.is_authenticated and (user.id in project.access.writers or check_namespace_permissions(project.namespace, user, "write"))

    class Update:
        @staticmethod
        def check(project, user):
            # NOTE(review): access.owners holds user *ids* everywhere else;
            # the `user.username in project.access.owners` test looks like a
            # legacy leftover — confirm before removing.
            return user.is_authenticated and (user.is_admin or user.id in project.access.owners or user.username in project.access.owners or check_namespace_permissions(project.namespace, user, "write"))

    class Delete:
        @staticmethod
        def check(project, user):
            # owner, server admin, or namespace write permission
            return user.is_authenticated and (user.is_admin or user.id in project.access.owners or check_namespace_permissions(project.namespace, user, "write"))

    class All:
        @staticmethod
        def check(project, user):
            # strongest level: owner, server admin, or organisation admin
            return user.is_authenticated and (user.is_admin or user.id in project.access.owners or check_namespace_permissions(project.namespace, user, "admin"))
+
+
def require_project(ns, project_name, permission):
    """Load a project by namespace/name (404 when missing) and enforce `permission` (403 otherwise)."""
    project = Project.query.filter_by(name=project_name, namespace=ns).first_or_404()
    if permission.check(project, current_user):
        return project
    abort(403, "You do not have permissions for this project")
+
+
def get_upload(transaction_id):
    """Return (upload, upload_dir) of an ongoing upload owned by the current user.

    Aborts with 404 for an unknown transaction and 403 for someone else's upload.
    """
    upload = Upload.query.get_or_404(transaction_id)
    if upload.user_id != current_user.id:
        abort(403, "You do not have permissions for ongoing upload")

    project_dir = upload.project.storage.project_dir
    return upload, os.path.join(project_dir, "tmp", transaction_id)
+
+
def projects_query(permission, as_admin=True, public=True):
    # thin convenience wrapper binding flask-login's current_user
    return permission.query(current_user, as_admin, public)
+
+
def check_namespace_permissions(ns, user, permissions):
    """Check whether `user` may access namespace `ns` at the given level.

    A user always has full access to their own namespace (username match);
    otherwise the namespace must be an organisation granting the role.

    :param ns: namespace name
    :type ns: str
    :param user: user to check
    :type user: User
    :param permissions: required access level ("read", "write" or "admin")
    :type permissions: str
    :return: True when access is granted
    :rtype: bool
    """
    if user.is_anonymous:
        return False
    if user.username == ns:
        return True

    organisation = Organisation.query.filter_by(name=ns).first()
    if not organisation:
        return False

    # map requested level to the organisation member list holding it
    members_by_level = {
        "read": organisation.readers,
        "write": organisation.writers,
        "admin": organisation.admins,
    }
    if permissions not in members_by_level:
        return False
    return user.id in members_by_level[permissions]
+
+
def namespaces_query(permission):
    # thin convenience wrapper binding flask-login's current_user
    return permission.query(current_user)
+
+
class NamespacePermissions:
    """ Get or check namespace by permission """

    @staticmethod
    def _query(user, permission):
        """Return a Namespace query for namespaces accessible to `user` at the given organisation permission level.

        Covers organisation namespaces granting the role plus the user's own namespace.
        """
        # fix: use the `user` argument consistently (was mixing in current_user)
        if user.is_authenticated and user.is_admin:
            return Namespace.query
        if not user.is_authenticated:
            return Namespace.query.filter(False)
        namespaces = [org.name for org in organisations_query(permission)]
        namespaces.append(user.username)
        # fix: return a Query like the other branches (was `.all()`, i.e. a list)
        return Namespace.query.filter(Namespace.name.in_(namespaces))

    class Owner:
        @staticmethod
        def query(user):
            return NamespacePermissions._query(user, OrganisationPermissions.Owner)

    class Admin:
        @staticmethod
        def query(user):
            return NamespacePermissions._query(user, OrganisationPermissions.Admin)

    class Writer:
        @staticmethod
        def query(user):
            return NamespacePermissions._query(user, OrganisationPermissions.Writer)

    class Reader:
        @staticmethod
        def query(user):
            return NamespacePermissions._query(user, OrganisationPermissions.Reader)
\ No newline at end of file
diff --git a/server/src/run_celery.py b/server/src/run_celery.py
new file mode 100644
index 00000000..35ab2ffe
--- /dev/null
+++ b/server/src/run_celery.py
@@ -0,0 +1,20 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from . import create_app
+from .celery import celery
+
# create the flask app and mirror its configuration into celery
app = create_app()
celery.conf.update(app.config)
+
+
# configure celery with flask context https://flask.palletsprojects.com/en/1.1.x/patterns/celery/
# e.g. for using flask-mail
class ContextTask(celery.Task):
    """ Attach flask app context to celery task """
    def __call__(self, *args, **kwargs):
        # run every task inside an application context so flask extensions
        # (mail, db, config) are usable from task code
        with app.app_context():
            return self.run(*args, **kwargs)


# make ContextTask the base class of all tasks registered on this celery app
celery.Task = ContextTask
diff --git a/server/src/storages/__init__.py b/server/src/storages/__init__.py
new file mode 100644
index 00000000..c18440bb
--- /dev/null
+++ b/server/src/storages/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+from .disk import DiskStorage
+from .storage import InvalidProject, FileNotFound
diff --git a/server/src/storages/disk.py b/server/src/storages/disk.py
new file mode 100644
index 00000000..ad45d2d4
--- /dev/null
+++ b/server/src/storages/disk.py
@@ -0,0 +1,357 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+import os
+import io
+import time
+import uuid
+import logging
+from datetime import datetime
+from flask import current_app
+from pygeodiff import GeoDiff, GeoDiffLibError
+from pygeodiff.geodifflib import GeoDiffLibConflictError
+from gevent import sleep
+from .storage import ProjectStorage, FileNotFound, DataSyncError, InitializationError
+from ..mergin_utils import resolve_tags, generate_checksum, int_version, is_versioned_file
+from ..util import mergin_secure_filename
+
+
def save_to_file(stream, path, max_size=None):
    """ Save readable object in file while yielding to gevent hub.

    Parent directories of `path` are created as needed.

    :param stream: object implementing readable interface
    :param path: destination file path
    :param max_size: optional limit for file size in bytes
    :raises IOError: when the stream exceeds max_size
    """
    directory = os.path.abspath(os.path.dirname(path))
    os.makedirs(directory, exist_ok=True)
    with open(path, 'wb') as output:
        writer = io.BufferedWriter(output, buffer_size=32768)
        size = 0
        while True:
            part = stream.read(4096)
            sleep(0)  # to unblock greenlet
            if part:
                size += len(part)
                if max_size and size > max_size:
                    # fix: raise with a meaningful message instead of a bare IOError()
                    raise IOError("File size exceeds limit of {} bytes".format(max_size))
                writer.write(part)
            else:
                writer.flush()
                break
+
+
def copy_file(src, dest):
    """ Custom implementation of copying file by chunk with yielding to gevent hub.

    see save_to_file

    :params src: abs path to file
    :type src: str, path-like object
    :params dest: abs path to destination file
    :type dest: str, path-like object
    :raises FileNotFoundError: when src is not an existing regular file
    """
    if not os.path.isfile(src):
        raise FileNotFoundError(src)
    directory = os.path.abspath(os.path.dirname(dest))
    os.makedirs(directory, exist_ok=True)
    # fix: renamed local handle (was `input`, shadowing the builtin)
    with open(src, 'rb') as source:
        save_to_file(source, dest)
+
+
def copy_dir(src, dest):
    """ Recursively copy a directory chunk-by-chunk, yielding to the gevent hub.

    Only files are copied (directories are created on demand by copy_file),
    so empty sub-directories are not recreated.

    :params src: abs path to dir
    :type src: str, path-like object
    :params dest: destination folder
    :type dest: str, path-like object
    """
    if not os.path.isdir(src):
        raise NotADirectoryError(src)
    for root, _dirs, filenames in os.walk(src):
        for name in filenames:
            source_path = os.path.abspath(os.path.join(root, name))
            relative = os.path.relpath(source_path, start=src)
            copy_file(source_path, os.path.join(dest, relative))
+
+
def move_to_tmp(src, dest=None):
    """ Custom handling of file/directory removal by moving it to regularly cleaned tmp folder.
    This is mainly to avoid using standard tools which could cause blocking gevent hub for large files.

    :params src: abs path to file/directory
    :type src: str, path-like object
    :params dest: subdir in temp folder (e.g. transaction_id), defaults to None
    :type dest: str, path-like object
    :returns: path where file is moved to, or None when src does not exist
    :rtype: str, path-like object
    """
    if not os.path.exists(src):
        return
    dest = dest if dest else str(uuid.uuid4())
    # keep the path relative to the projects root so the original location stays encoded in the tmp path
    rel_path = os.path.relpath(src, start=current_app.config['LOCAL_PROJECTS'])  # take relative path from parent of all project files
    temp_path = os.path.join(current_app.config['TEMP_DIR'], dest, rel_path)
    os.renames(src, temp_path)
    return temp_path
+
+
class DiskStorage(ProjectStorage):
    """Project storage backed by the local filesystem.

    Files live under `project_dir`, grouped into per-version subdirectories
    ('v1', 'v2', ...). Versioned gpkg files are kept as a basefile plus
    geodiff changesets.
    """

    def __init__(self, project):
        super(DiskStorage, self).__init__(project)
        self.projects_dir = current_app.config['LOCAL_PROJECTS']
        self.project_dir = self._project_dir()
        self.geodiff = GeoDiff()

    def _project_dir(self):
        # absolute path of the project's storage location
        project_dir = os.path.abspath(
            os.path.join(self.projects_dir, self.project.storage_params["location"])
        )
        return project_dir

    def initialize(self, template_project=None):
        """Create the project directory; when forking, copy template files as version v1.

        :param template_project: project to fork from, optional
        :raises InitializationError: when the directory already exists, the disk
            quota would be exceeded, or copying a template file fails
        """
        if os.path.exists(self.project_dir):
            raise InitializationError("Project directory already exists: {}".format(self.project_dir))

        os.makedirs(self.project_dir)

        if template_project:
            from ..models.db_models import Namespace
            ns = Namespace.query.filter_by(name=self.project.namespace).first()
            if ns.disk_usage() + template_project.disk_usage > ns.storage:
                self.delete()
                raise InitializationError("Disk quota reached")
            forked_files = []

            for file in template_project.files:
                forked_file = dict(file)
                forked_file['location'] = os.path.join('v1/', file['path'])
                forked_file['mtime'] = datetime.utcnow()
                forked_files.append(forked_file)

                src = os.path.join(template_project.storage.project_dir, file['location'])
                dest = os.path.join(self.project_dir, forked_file['location'])
                try:
                    copy_file(src, dest)
                except (FileNotFoundError, IOError):
                    self.delete()
                    # fix: message was never interpolated — the placeholders were
                    # passed as extra constructor arguments instead of .format()
                    raise InitializationError("IOError: failed to copy '{}' to '{}'".format(src, dest))
                except Exception as e:
                    self.delete()
                    raise InitializationError(str(e))

            self.project.files = forked_files
            self.project.tags = template_project.tags
            self.project.disk_usage = sum([f['size'] for f in self.project.files])

    def file_size(self, file):
        """Size in bytes of `file` (path relative to project_dir); raises FileNotFound if missing."""
        file_path = os.path.join(self.project_dir, file)
        if not os.path.exists(file_path):
            raise FileNotFound("File {} not found.".format(file))
        return os.path.getsize(file_path)

    def file_path(self, file):
        """Absolute path of `file` within project_dir; raises FileNotFound if missing."""
        path = os.path.join(self.project_dir, file)
        if not os.path.exists(path):
            raise FileNotFound("File {} not found.".format(file))
        return path

    def read_file(self, path, block_size=4096):
        """Return a generator yielding chunks of the file, cooperating with the gevent hub."""
        file_path = os.path.join(self.project_dir, path)

        # do input validation outside generator to execute immediately
        if not os.path.exists(file_path):
            raise FileNotFound("File {} not found.".format(path))

        def _generator():
            with open(file_path, 'rb') as f:
                while True:
                    data = f.read(block_size)
                    sleep(0)
                    if data:
                        yield data
                    else:
                        break

        return _generator()

    def apply_changes(self, changes, version, transaction_id):
        """Apply client changes (removed/renamed/updated/added) to project files metadata and storage.

        :param changes: dict with 'removed', 'renamed', 'updated', 'added' file lists
        :param version: target version directory name (e.g. 'v3')
        :param transaction_id: upload transaction id, used to stash files on failure
        :raises DataSyncError: when any per-file change fails; partial results are moved to tmp
        """
        sync_errors = {}
        modified_files = []

        to_remove = [i['path'] for i in changes['removed']]
        files = list(filter(lambda i: i['path'] not in to_remove, self.project.files))
        for item in changes['renamed']:
            renamed = next((i for i in files if i['path'] == item['path']), None)
            if renamed:
                renamed['path'] = item['new_path']
            else:
                sync_errors[item['new_path']] = "renaming error"
                continue

        for f in changes['updated']:
            sleep(0)  # yield to gevent hub since geodiff action can take some time to prevent worker timeout
            old_item = next((i for i in files if i["path"] == f["path"]), None)
            if not old_item:
                # fix: grammar of error message (was "file does not found on server ")
                sync_errors[f['path']] = "file not found on server"
                continue
            if 'diff' in f:
                basefile = os.path.join(self.project_dir, old_item["location"])
                changeset = os.path.join(self.project_dir, version, f['diff']['path'])
                patchedfile = os.path.join(self.project_dir, version, f['path'])
                modified_files.append(changeset)
                modified_files.append(patchedfile)
                # create copy of basefile which will be updated in next version
                # TODO this can potentially fail for large files
                logging.info(f"Apply changes: copying {basefile} to {patchedfile}")
                start = time.time()
                copy_file(basefile, patchedfile)
                logging.info(f"Copying finished in {time.time()-start} s")
                try:
                    logging.info(f"Geodiff: apply changeset {changeset} of size {os.path.getsize(changeset)} to {patchedfile}")
                    start = time.time()
                    self.geodiff.apply_changeset(patchedfile, changeset)
                    logging.info(f"Changeset applied in {time.time() - start} s")
                except (GeoDiffLibError, GeoDiffLibConflictError) as err:
                    sync_errors[f["path"]] = f"project: {self.project.namespace}/{self.project.name}, geodiff error {str(err)}"
                    continue

                f["diff"]["location"] = os.path.join(
                    version, f['diff']['sanitized_path'] if 'sanitized_path' in f['diff'] else mergin_secure_filename(f['diff']['path']))

                # we can now replace old basefile metadata with the new one (patchedfile)
                # TODO this can potentially fail for large files
                logging.info(f"Apply changes: calculating checksum of {patchedfile}")
                start = time.time()
                f['checksum'] = generate_checksum(patchedfile)
                logging.info(f"Checksum calculated in {time.time() - start} s")
                f['size'] = os.path.getsize(patchedfile)
            else:
                old_item.pop("diff", None)

            if 'chunks' in f:
                f.pop("chunks")
            f['location'] = os.path.join(
                version,
                f['sanitized_path'] if 'sanitized_path' in f else mergin_secure_filename(f['path']))
            if not sync_errors:
                old_item.update(f)

        if sync_errors:
            # stash partially written files into the transaction tmp dir and report
            for file in modified_files:
                move_to_tmp(file, transaction_id)
            msg = ""
            for key, value in sync_errors.items():
                msg += key + " error=" + value + "\n"
            raise DataSyncError(msg)

        for item in changes['added']:
            files.append({
                'path': item['path'],
                'size': item['size'],
                'checksum': item['checksum'],
                'mtime': item['mtime'],
                'location': os.path.join(
                    version,
                    item['sanitized_path'] if 'sanitized_path' in item else mergin_secure_filename(item['path']))
            })

        self.project.files = files
        self.project.tags = resolve_tags(files)

    def delete(self):
        # "removal" = move to the regularly cleaned tmp folder (see move_to_tmp)
        move_to_tmp(self.project_dir)

    def optimize_storage(self):
        """ Optimize disk storage for project.

        Clean up for recently updated versioned files. Removes expired file versions.
        It applies only on files that can be recreated when needed.
        """
        files = [f for f in self.project.files if 'diff' in f.keys()]
        last_version = sorted(self.project.versions, key=lambda ver: int_version(ver.name))[-1]
        for f in files:
            f_history = self.project.file_history(f['path'], 'v1', last_version.name)
            if not f_history:
                continue
            for item in f_history.values():
                if 'diff' in item:
                    if item['location'] == f['location']:
                        continue  # skip latest file version
                    abs_path = os.path.join(self.project_dir, item['location'])
                    if not os.path.exists(abs_path):
                        continue  # already removed
                    age = time.time() - os.path.getmtime(abs_path)
                    if age > current_app.config['FILE_EXPIRATION']:
                        move_to_tmp(abs_path)

    def restore_versioned_file(self, file, version):
        """
        For removed versioned files tries to restore full file in particular project version
        using file diffs history (latest basefile and sequence of diffs).

        :param file: path of file in project to recover
        :type file: str
        :param version: project version (e.g. v2)
        :type version: str
        """
        if not is_versioned_file(file):
            return

        # if project version is not found, return it
        project_version = next((v for v in self.project.versions if v.name == version), None)
        if not project_version:
            return

        # check actual file from the version files
        file_found = next((i for i in project_version.files if i['path'] == file), None)

        # check the location that we found on the file
        if not file_found or os.path.exists(os.path.join(self.project_dir, file_found['location'])):
            return

        basefile_meta = {}
        diffs = []
        f_history = self.project.file_history(file, 'v1', version)
        if not f_history:
            return
        # history starts from the latest change, we stop when reaching basefile
        for item in f_history.values():
            if item['change'] in ['added', 'updated']:
                if 'diff' in item:
                    diffs.append(item['diff'])
                else:
                    basefile_meta = item
                    break
            else:
                continue

        if not (basefile_meta and diffs):
            return

        basefile = os.path.join(self.project_dir, basefile_meta['location'])
        tmp_dir = os.path.join(current_app.config['TEMP_DIR'], str(uuid.uuid4()))
        os.mkdir(tmp_dir)
        restored_file = os.path.join(tmp_dir, os.path.basename(basefile))  # this is final restored file
        logging.info(f"Restore file: copying {basefile} to {restored_file}")
        start = time.time()
        copy_file(basefile, restored_file)
        logging.info(f"File copied in {time.time() - start} s")
        logging.info(f"Restoring gpkg file with {len(diffs)} diffs")
        for diff in reversed(diffs):
            sleep(0)  # yield to gevent hub since geodiff action can take some time, and in case of a lot of diffs it could time out
            changeset = os.path.join(self.project_dir, diff['location'])
            try:
                logging.info(f"Geodiff: apply changeset {changeset} of size {os.path.getsize(changeset)}")
                start = time.time()
                self.geodiff.apply_changeset(restored_file, changeset)
                logging.info(f"Changeset applied in {time.time() - start} s")
            except (GeoDiffLibError, GeoDiffLibConflictError) as e:
                logging.exception(f"Failed to restore file: {str(e)} from project {self.project.namespace}/{self.project.name}")
                return
        # move final restored file to place where it is expected (only after it is successfully created)
        logging.info(f"Copying restored file to expected location {file_found['location']}")
        start = time.time()
        copy_file(restored_file, os.path.join(self.project_dir, file_found['location']))
        logging.info(f"File copied in {time.time() - start} s")
diff --git a/server/src/storages/storage.py b/server/src/storages/storage.py
new file mode 100644
index 00000000..3dd9b5c2
--- /dev/null
+++ b/server/src/storages/storage.py
@@ -0,0 +1,108 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+from urllib.parse import quote
+
+from flask import Response
+from requests_toolbelt import MultipartEncoder
+from gevent import sleep
+import zipfly
+
+
class InvalidProject(Exception):
    """Signals an invalid or unusable project."""
    pass
+
+
class FileNotFound(Exception):
    """Raised when a requested project file does not exist in storage."""
    pass
+
+
class DataSyncError(Exception):
    """Raised when applying client changes to project storage fails; message lists per-file errors."""
    pass
+
+
class InitializationError(Exception):
    """Raised when project storage initialization fails (existing dir, quota, copy error)."""
    pass
+
+
class StorageFile(object):
    """Lazy, file-like reader over a single stored file.

    Wraps ProjectStorage.read_file() and keeps track of the read position.
    Exposes a ``len`` property (bytes remaining), which is what
    MultipartEncoder consumes when streaming in download_files().
    """
    def __init__(self, storage, file):
        self.storage = storage
        self.file = file
        self.fp = 0  # number of bytes already handed out
        self._stream = None

    @property
    def len(self):
        # total size is fetched once and cached; the value shrinks as data is read
        if not hasattr(self, '_total_len'):
            self._total_len = self.storage.file_size(self.file)
        return self._total_len - self.fp

    def read(self, chunk_size):
        """Return up to chunk_size bytes; a short (possibly empty) result means end of stream."""
        if not self._stream:
            self._preload = b''
            self._stream = self.storage.read_file(self.file, chunk_size)

        buffered = self._preload
        while len(buffered) < chunk_size:
            piece = next(self._stream, None)
            if not piece:
                # underlying stream exhausted - hand back whatever is left
                self._preload = b''
                self.fp += len(buffered)
                return buffered
            buffered += piece

        self._preload = buffered[chunk_size:]
        chunk = buffered[:chunk_size]
        self.fp += len(chunk)
        return chunk
+
+
class ProjectStorage:
    """Abstract base class of project storage backends (see DiskStorage)."""

    def __init__(self, project):
        self.project = project

    def read_file(self, path, block_size=4096):
        # generator of file chunks - implemented by subclasses
        raise NotImplementedError

    def file_size(self, file):
        raise NotImplementedError

    def file_path(self, file):
        raise NotImplementedError

    def restore_versioned_file(self, file, version):
        raise NotImplementedError

    def download_files(self, files, files_format=None, version=None):
        """ Download files
        :type files: list of dict
        """
        if version:
            # make sure versioned files exist in storage before serving them
            for entry in files:
                self.restore_versioned_file(entry['path'], version)

        if files_format == 'zip':
            paths = [{'fs': self.file_path(entry['location']), 'n': entry['path']} for entry in files]
            archive = zipfly.ZipFly(mode='w', paths=paths)
            response = Response(archive.generator(), mimetype='application/zip')
            response.headers['Content-Disposition'] = 'attachment; filename={}{}.zip'.format(
                quote(self.project.name.encode("utf-8")), '-' + version if version else '')
            return response

        # multipart response streaming each file lazily via StorageFile
        parts = {}
        for entry in files:
            name = entry['path']
            parts[name] = (name, StorageFile(self, entry['location']))
        encoder = MultipartEncoder(parts)

        def stream_parts():
            while True:
                piece = encoder.read(4096)
                sleep(0)
                if not piece:
                    break
                yield piece

        return Response(stream_parts(), mimetype=encoder.content_type)
diff --git a/server/src/templates/email/account_has_been_closed_warning.html b/server/src/templates/email/account_has_been_closed_warning.html
new file mode 100644
index 00000000..f28a90bd
--- /dev/null
+++ b/server/src/templates/email/account_has_been_closed_warning.html
@@ -0,0 +1,8 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
Dear {{ account_name }},
+
{% if account_type == "user" %} We're sorry you're leaving us. {% endif %} Your account has now been deactivated and will be permanently deleted in {{ days }} days.
+ Should you change your mind in the meantime, please contact us at {{ contact_email }} and we can reactivate your account for you.
+ After {{ days }} days your account will be permanently deleted and cannot be recovered.
+
+{% endblock %}
\ No newline at end of file
diff --git a/server/src/templates/email/components/base.html b/server/src/templates/email/components/base.html
new file mode 100644
index 00000000..fd97a251
--- /dev/null
+++ b/server/src/templates/email/components/base.html
@@ -0,0 +1,258 @@
+{% set base_url = config['MERGIN_BASE_URL'] or "https://public.cloudmergin.com" %}
+{% set logo_url = config['MERGIN_LOGO_URL'] %}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+{# {% include 'email/components/logo.html' %}#}
+
+
+
+
+
+
+
+ {% if subject is defined %}
+
+
+
+
+
+
+ {{ subject }}
+
+
+
+
+
+ {% endif %}
+
+
+ {% block content %}
+
+
+
+
+
Dear user,
+
+
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
+
+
\ No newline at end of file
diff --git a/server/src/templates/email/components/content.html b/server/src/templates/email/components/content.html
new file mode 100644
index 00000000..50faf29c
--- /dev/null
+++ b/server/src/templates/email/components/content.html
@@ -0,0 +1,8 @@
+{% extends "email/components/base.html" %}
+{% import "email/components/content_row.html" as content_row with context %}
+
+{% block content %}
+ {% call content_row.content() %}
+ {% block html %} {% endblock %}
+ {% endcall %}
+{% endblock %}
\ No newline at end of file
diff --git a/server/src/templates/email/components/content_row.html b/server/src/templates/email/components/content_row.html
new file mode 100644
index 00000000..0a283eee
--- /dev/null
+++ b/server/src/templates/email/components/content_row.html
@@ -0,0 +1,9 @@
+{% macro content() -%}
+
+
+
+ {{ caller() }}
+
+
+
+{%- endmacro %}
\ No newline at end of file
diff --git a/server/src/templates/email/components/logo.html b/server/src/templates/email/components/logo.html
new file mode 100644
index 00000000..d65966a1
--- /dev/null
+++ b/server/src/templates/email/components/logo.html
@@ -0,0 +1,470 @@
+
\ No newline at end of file
diff --git a/server/src/templates/email/email_confirmation.html b/server/src/templates/email/email_confirmation.html
new file mode 100644
index 00000000..74cbc154
--- /dev/null
+++ b/server/src/templates/email/email_confirmation.html
@@ -0,0 +1,6 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
Dear {{ user.username }},
+
To verify your email address please follow this link:
You have been invited to join the organisation {{ invitation.org_name }} as
+ {% if invitation.role in ['owner', 'admin'] %}
+ an {{ invitation.role }}.
+ {% else %}
+ a {{ invitation.role }}.
+ {% endif %}
+
+ The invitation will expire on {{ invitation.expire.strftime('%Y-%m-%d %H:%M') }}.
+ You can manage your invitations here.
+
+{% endblock %}
diff --git a/server/src/templates/email/organisation_invitation_revoke.html b/server/src/templates/email/organisation_invitation_revoke.html
new file mode 100644
index 00000000..1e6ae237
--- /dev/null
+++ b/server/src/templates/email/organisation_invitation_revoke.html
@@ -0,0 +1,6 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
Dear {{ username }},
+
+
Your invitation to organisation {{ org_name }} has been revoked by its sender.
+{% endblock %}
diff --git a/server/src/templates/email/password_reset.html b/server/src/templates/email/password_reset.html
new file mode 100644
index 00000000..4dba00e2
--- /dev/null
+++ b/server/src/templates/email/password_reset.html
@@ -0,0 +1,6 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
+{% endblock %}
diff --git a/server/src/templates/email/project_access_request.html b/server/src/templates/email/project_access_request.html
new file mode 100644
index 00000000..68ee8fbd
--- /dev/null
+++ b/server/src/templates/email/project_access_request.html
@@ -0,0 +1,7 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
Dear {{ username }},
+
A user {{ user }} has requested access to the project {{ project_name }}.
+ This request will expire on {{ expire.strftime('%Y-%m-%d %H:%M') }}. You can manage your access requests here.
+
+{% endblock %}
diff --git a/server/src/templates/email/project_transfer_request.html b/server/src/templates/email/project_transfer_request.html
new file mode 100644
index 00000000..6bfb1445
--- /dev/null
+++ b/server/src/templates/email/project_transfer_request.html
@@ -0,0 +1,7 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
Dear {{ username }},
+
A request has been made to transfer the project {{ project_name }} from namespace {{ namespace_from }} to namespace {{ namescape_to }}.
+ This project transfer request will expire on {{ expire.strftime('%Y-%m-%d %H:%M') }}. You can manage your projects transfers here.
+
+{% endblock %}
diff --git a/server/src/templates/email/removed_project.html b/server/src/templates/email/removed_project.html
new file mode 100644
index 00000000..d230a692
--- /dev/null
+++ b/server/src/templates/email/removed_project.html
@@ -0,0 +1,5 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
Dear {{ username }},
+
Project {{ project.namespace }}/{{ project.name }} which you had access to has now been removed.
+{% endblock %}
diff --git a/server/src/templates/email/removed_project_access.html b/server/src/templates/email/removed_project_access.html
new file mode 100644
index 00000000..deb8731b
--- /dev/null
+++ b/server/src/templates/email/removed_project_access.html
@@ -0,0 +1,5 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
Dear {{ user.username }},
+
Your access to the Mergin project {{ project.namespace }}/{{ project.name }} has been removed.
+{% endblock %}
diff --git a/server/src/templates/email/simple_template.html b/server/src/templates/email/simple_template.html
new file mode 100644
index 00000000..08c3a1ad
--- /dev/null
+++ b/server/src/templates/email/simple_template.html
@@ -0,0 +1,5 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
Dear {{ username }},
+
{{ message }}
+{% endblock %}
diff --git a/server/src/templates/email/user_registration.html b/server/src/templates/email/user_registration.html
new file mode 100644
index 00000000..67aad4b1
--- /dev/null
+++ b/server/src/templates/email/user_registration.html
@@ -0,0 +1,6 @@
+{% extends "email/components/content.html" %}
+{% block html %}
+
Dear {{ user.username }},
+
Thank you! You have successfully registered with Mergin. To verify your email address please follow this link:
+
+
+
+
+
+
+
+ Some Title Here
+
+
+ One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections. The bedding was hardly able to cover it and seemed ready to slide off any moment. His many legs, pitifully thin compared with the size of the rest of him, waved about helplessly as he looked. "What's happened to me?" he thought.
+
+
+
+
+ Another Title Here
+
+
+ One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections. The bedding was hardly able to cover it and seemed ready to slide off any moment. His many legs, pitifully thin compared with the size of the rest of him, waved about helplessly as he looked. "What's happened to me?" he thought.
+
+
+
+
+ Some Title Here
+
+
+ One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections. The bedding was hardly able to cover it and seemed ready to slide off any moment.
+
+
+
+
+ Another Title Here
+
+
+ One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections. The bedding was hardly able to cover it and seemed ready to slide off any moment.
+
+
+
+
+ Another Title Here
+
+
+ One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections. The bedding was hardly able to cover it and seemed ready to slide off any moment.
+
+
+
+
+ Some Title Here
+
+
+ One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections. The bedding was hardly able to cover it and seemed ready to slide off any moment.
+
+
+
+
+ Another Title Here
+
+
+ One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections. The bedding was hardly able to cover it and seemed ready to slide off any moment. One morning, when Gregor Samsa woke from troubled dreams, he found himself transformed in his bed into a horrible vermin. He lay on his armour-like back, and if he lifted his head a little he could see his brown belly, slightly domed and divided by arches into stiff sections. The bedding was hardly able to cover it and seemed ready to slide off any moment.
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/admin/views/dashboard/component/Icons.vue b/web-app/src/admin/views/dashboard/component/Icons.vue
new file mode 100644
index 00000000..e4cc1cbb
--- /dev/null
+++ b/web-app/src/admin/views/dashboard/component/Icons.vue
@@ -0,0 +1,301 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{ icon }}
+
+
+ {{ icon }}
+
+
+
+
+
+
+
+
+
+ mdi-material-design
+
+ See all icons
+
+
+
+
+
+
+
diff --git a/web-app/src/admin/views/dashboard/component/Notifications.vue b/web-app/src/admin/views/dashboard/component/Notifications.vue
new file mode 100644
index 00000000..4cd3a9da
--- /dev/null
+++ b/web-app/src/admin/views/dashboard/component/Notifications.vue
@@ -0,0 +1,406 @@
+
+
+
+
+
+
+
+
+
+
+
+ This is a plain notification.
+
+
+
+ This is a notification with close button.
+
+
+
+ This is a notification with close button and icon and have many lines. You can see that the icon and the close button are always vertically aligned. This is a beautiful notification. So you don't have to worry about the style.
+
+
+
+ You can see that the icon and the close button are always vertically aligned. This is a beautiful notification. So you don't have to worry about the style.
+
+
+
+
+
+
+
+
+
+
+
+ — This is a regular alert made with the color of "{{ color }}"
+
+
+
+ PRIMARY — This is a regular alert made with the color "secondary"
+
+
+
+ PINK DARKEN-1 — This is a regular alert made with the color "pink darken-1"
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{ dir }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Classic Dialog
+
+
+
+
+ Notice Modal
+
+
+
+
+ Small Alert Modal
+
+
+
+
+
+
+
+
+
+
+
+ Welcome to MATERIAL DASHBOARD — a beautiful admin panel for every web developer.
+
+
+
+
+
+ Dialog Title
+
+
+
+
+ mdi-close
+
+
+
+
+ Far far away, behind the word mountains, far from the countries Vokalia and Consonantia, there live the blind texts. Separated they live in Bookmarksgrove right at the coast of the Semantics, a large language ocean. A small river named Duden flows by their place and supplies it with the necessary regelialia. It is a paradisematic country, in which roasted parts of sentences fly into your mouth. Even the all-powerful Pointing has no control about the blind texts it is an almost unorthographic life One day however a small line of blind text by the name of Lorem Ipsum decided to leave for the far World of Grammar.
+
+
+
+
+
+
+ Close
+
+
+
+
+
+
+
+
+ How do you become an affiliate?
+
+
+
+
+ mdi-close
+
+
+
+
+
+
+
+
+ 1. Register
+
+
+
+ The first step is to create an account at Creative Tim. You can choose a social network or go for the classic version, whatever works best for you.
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 2. Apply
+
+
+
+ The first step is to create an account at Creative Tim. You can choose a social network or go for the classic version, whatever works best for you.
+
+
+
+
+
+
+
+
+
+
+
+ If you have more questions, don't hesitate to contact us or send us a tweet @creativetim. We're here to help!
+
+
+
+
+ Sounds good
+
+
+
+
+
+
+
+
+ Are you sure?
+
+
+
+
+ mdi-close
+
+
+
+
+
+ Nevermind
+
+
+
+ Yes
+
+
+
+
+
+
+
+
diff --git a/web-app/src/admin/views/dashboard/component/Tabs.vue b/web-app/src/admin/views/dashboard/component/Tabs.vue
new file mode 100644
index 00000000..fa990a60
--- /dev/null
+++ b/web-app/src/admin/views/dashboard/component/Tabs.vue
@@ -0,0 +1,418 @@
+
+
+
+
+
+
+
+
+
+
+
+
+ {{ tab }}
+
+
+
+
+
+
+ Collaboratively administrate empowered markets via plug-and-play networks. Dynamically procrastinate B2C users after installed base benefits.
+
+
+
+ Dramatically visualize customer directed convergence without revolutionary ROI. Collaboratively administrate empowered markets via plug-and-play networks. Dynamically procrastinate B2C users after installed base benefits.
+
+
+
This is very nice.
+
+
+
+
+
+
+
+
+ Efficiently unleash cross-media information without cross-media value. Quickly maximize timely deliverables for real-time schemas.
+
+
+
+ Dramatically maintain clicks-and-mortar solutions without functional solutions.
+
+
+
+
+
+
+
+
+
+ Completely synergize resource taxing relationships via premier niche markets. Professionally cultivate one-to-one customer service with robust ideas.
+
+
+
+ Dynamically innovate resource-leveling customer service for state of the art customer service.
+
+ Collaboratively administrate empowered markets via plug-and-play networks. Dynamically procrastinate B2C users after installed base benefits.
+
+
+
+ Dramatically visualize customer directed convergence without revolutionary ROI. Collaboratively administrate empowered markets via plug-and-play networks. Dynamically procrastinate B2C users after installed base benefits.
+
+
+
This is very nice.
+
+
+
+
+
+
+
+
+ Efficiently unleash cross-media information without cross-media value. Quickly maximize timely deliverables for real-time schemas.
+
+
+
+ Dramatically maintain clicks-and-mortar solutions without functional solutions.
+
+
+
+
+
+
+
+
+
+ Completely synergize resource taxing relationships via premier niche markets. Professionally cultivate one-to-one customer service with robust ideas.
+
+
+
+ Dynamically innovate resource-leveling customer service for state of the art customer service.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Collapsible Group Item #{{ n }}
+
+
+
+ Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid. 3 wolf moon officia aute, non cupidatat skateboard dolor brunch. Food truck quinoa nesciunt laborum eiusmod. Brunch 3 wolf moon tempor, sunt aliqua put a bird on it squid single-origin coffee nulla assumenda shoreditch et. Nihil anim keffiyeh helvetica, craft beer labore wes anderson cred nesciunt sapiente ea proident. Ad vegan excepteur butcher vice lomo. Leggings occaecat craft beer farm-to-table, raw denim aesthetic synth nesciunt you probably haven't heard of them accusamus labore sustainable VHS.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{ tab.text }}
+
+
+
+
+
+
+
+ Collaboratively administrate empowered markets via plug-and-play networks. Dynamically procrastinate B2C users after installed base benefits.
+
+
+
+ Dramatically visualize customer directed convergence without revolutionary ROI. Collaboratively administrate empowered markets via plug-and-play networks. Dynamically procrastinate B2C users after installed base benefits.
+
+
+
+ Dramatically visualize customer directed convergence without revolutionary ROI. Collaboratively administrate empowered markets via plug-and-play networks. Dynamically procrastinate B2C users after installed base benefits.
+
+
+
+
+
+
+
+
+
+ Efficiently unleash cross-media information without cross-media value. Quickly maximize timely deliverables for real-time schemas.
+
+
+
+ Dramatically maintain clicks-and-mortar solutions without functional solutions.
+
+
+
+
+
+
+
+
+
+
+
+ Page Subcategories
+
+
+
+
+ {{ tab.text }}
+
+
+
+
+
+
+
+
+
+ More Information here
+
+
+
+
+ Collaboratively administrate empowered markets via plug-and-play networks. Dynamically procrastinate B2C users after installed base benefits.
+
+
+
+ Dramatically visualize customer directed convergence without revolutionary ROI. Collaboratively administrate empowered markets via plug-and-play networks. Dynamically procrastinate B2C users after installed base benefits.
+
+
+
+
+
+
+
+
+
+
+ More Information here
+
+
+
+
+ Efficiently unleash cross-media information without cross-media value. Quickly maximize timely deliverables for real-time schemas.
+
+
+
+ Dramatically maintain clicks-and-mortar solutions without functional solutions
+
+
+
+
+
+
+
+
+
+
+ More Information here
+
+
+
+
+ Completely synergize resource taxing relationships via premier niche markets. Professionally cultivate one-to-one customer service with robust ideas.
+
+
+
+ Dynamically innovate resource-leveling customer service for state of the art customer service.
+
+
+
+
+
+
+
+
+
+
+ More Information here
+
+
+
+
+ From the seamless transition of glass and metal to the streamlined profile, every detail was carefully considered to enhance your experience. So while its display is larger, the phone feels just right.
+
+
+
+ Another Text. The first thing you notice when you hold the phone is how great it feels in your hand. The cover glass curves down around the sides to meet the anodized aluminum enclosure in a remarkable, simplified design.
+
+ Kanye West, Musician
+
+
+
+ Use 'small' tag for the headers
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/admin/views/dashboard/components/AccountDetail.vue b/web-app/src/admin/views/dashboard/components/AccountDetail.vue
new file mode 100644
index 00000000..c10f8e8f
--- /dev/null
+++ b/web-app/src/admin/views/dashboard/components/AccountDetail.vue
@@ -0,0 +1,64 @@
+# Copyright (C) 2021 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+ edit
+ change storage
+
+
+
+
+
+
+
diff --git a/web-app/src/admin/views/dashboard/components/AdminProjectPermissions.vue b/web-app/src/admin/views/dashboard/components/AdminProjectPermissions.vue
new file mode 100644
index 00000000..15a4545b
--- /dev/null
+++ b/web-app/src/admin/views/dashboard/components/AdminProjectPermissions.vue
@@ -0,0 +1,184 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+ This is public project
+
+
+ This is private project
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/admin/views/dashboard/components/ProjectsTable.vue b/web-app/src/admin/views/dashboard/components/ProjectsTable.vue
new file mode 100644
index 00000000..e2e1f59f
--- /dev/null
+++ b/web-app/src/admin/views/dashboard/components/ProjectsTable.vue
@@ -0,0 +1,501 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
diff --git a/web-app/src/admin/views/dashboard/components/RemovedProjectsTable.vue b/web-app/src/admin/views/dashboard/components/RemovedProjectsTable.vue
new file mode 100644
index 00000000..a0a4a55b
--- /dev/null
+++ b/web-app/src/admin/views/dashboard/components/RemovedProjectsTable.vue
@@ -0,0 +1,236 @@
+# Copyright (C) 2021 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/admin/views/dashboard/pages/OrganisationProfile.vue b/web-app/src/admin/views/dashboard/pages/OrganisationProfile.vue
new file mode 100644
index 00000000..0ba97a8d
--- /dev/null
+++ b/web-app/src/admin/views/dashboard/pages/OrganisationProfile.vue
@@ -0,0 +1,190 @@
+# Copyright (C) 2020 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+ Don't be scared of the truth because we need to restart the human foundation in truth And I love you like Kanye loves Kanye I love Rick Owens’ bed design but the back is...
+
+
+
+
+
+
diff --git a/web-app/src/components/FileIcon.vue b/web-app/src/components/FileIcon.vue
new file mode 100644
index 00000000..9724e3cf
--- /dev/null
+++ b/web-app/src/components/FileIcon.vue
@@ -0,0 +1,96 @@
+# Copyright (C) 2019 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
diff --git a/web-app/src/components/LoginDialog.vue b/web-app/src/components/LoginDialog.vue
new file mode 100644
index 00000000..dbd0d85f
--- /dev/null
+++ b/web-app/src/components/LoginDialog.vue
@@ -0,0 +1,285 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+ add_circle
+ Add
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/components/ProjectTransferForm.vue b/web-app/src/components/ProjectTransferForm.vue
new file mode 100644
index 00000000..5d05418f
--- /dev/null
+++ b/web-app/src/components/ProjectTransferForm.vue
@@ -0,0 +1,163 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+ Transfer project
+
+ It will move all files into the new namespace once the new owner accepts the request. You may lose access to it.
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {{ item.text }}
+
+
+
+
+
+
+
+
+ Close
+
+
+ Request transfer
+
+
+
+
+
+
+
diff --git a/web-app/src/components/ProjectsTable.vue b/web-app/src/components/ProjectsTable.vue
new file mode 100644
index 00000000..61de8e0f
--- /dev/null
+++ b/web-app/src/components/ProjectsTable.vue
@@ -0,0 +1,544 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
diff --git a/web-app/src/components/SideBar.vue b/web-app/src/components/SideBar.vue
new file mode 100644
index 00000000..8e8dd312
--- /dev/null
+++ b/web-app/src/components/SideBar.vue
@@ -0,0 +1,327 @@
+# Copyright (C) 2021 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/components/SideBarFooter.vue b/web-app/src/components/SideBarFooter.vue
new file mode 100644
index 00000000..0981ea4a
--- /dev/null
+++ b/web-app/src/components/SideBarFooter.vue
@@ -0,0 +1,88 @@
+# Copyright (C) 2021 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/organisation/components/OrganisationSideBar.vue b/web-app/src/organisation/components/OrganisationSideBar.vue
new file mode 100644
index 00000000..5936e2cb
--- /dev/null
+++ b/web-app/src/organisation/components/OrganisationSideBar.vue
@@ -0,0 +1,285 @@
+# Copyright (C) 2021 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/util.js b/web-app/src/util.js
new file mode 100644
index 00000000..58ee42c5
--- /dev/null
+++ b/web-app/src/util.js
@@ -0,0 +1,185 @@
+// Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+// Do not distribute without the express permission of the author.
+
+import Path from 'path'
+import diffInYears from 'date-fns/difference_in_years'
+import diffInMonths from 'date-fns/difference_in_months'
+import diffInWeeks from 'date-fns/difference_in_weeks'
+import diffInDays from 'date-fns/difference_in_days'
+import diffInHours from 'date-fns/difference_in_hours'
+import diffInMinutes from 'date-fns/difference_in_minutes'
+
+
+export function dirname (path) {
+ const dir = Path.dirname(path).replace(/\/$/, '')
+ return dir === '.' ? '' : dir
+}
+
+export function removeAccents (text) {
+ return text.normalize('NFD').replace(/[\u0300-\u036f]/g, '')
+}
+
+const SizeUnits = {
+ GB: value => (value / 1073741824).toFixed(2),
+ MB: value => (value / 1048576).toFixed(2),
+ kB: value => (value / 1024).toFixed(0),
+ B: value => Math.round(value)
+}
+
+export function formatFileSize (value, unit, digits = 2, minUnit = 'B') {
+ if (!value) {
+ return `${value} MB`
+ }
+ if (!unit) {
+ if (value >= 1073741824 || minUnit === 'GB') {
+ unit = 'GB'
+ } else if (value >= 1048576 || minUnit === 'MB') {
+ unit = 'MB'
+ } else if (value >= 1024 || minUnit === 'kB') {
+ unit = 'kB'
+ } else {
+ unit = 'B'
+ }
+ }
+ let unitsValue = parseFloat(SizeUnits[unit](value))
+ unitsValue = unitsValue === 0 ? Math.pow(10, -digits) : unitsValue
+ return `${unitsValue.toFixed(digits)} ${unit}`
+}
+
+export function formatDateTime (isoString) {
+ return isoString ? new Date(isoString).toUTCString() : ''
+}
+
+export function formatDate (isoString) {
+ return isoString ? new Date(isoString).toDateString() : ''
+}
+
+const DurationKeys = {
+ years: ['year', 'years'],
+ months: ['month', 'months'],
+ weeks: ['week', 'weeks'],
+ days: ['day', 'days'],
+ hours: ['hour', 'hours'],
+ minutes: ['minute', 'minutes']
+}
+
+function formatDuration (num, unit) {
+ return `${num} ${DurationKeys[unit][num === 1 ? 0 : 1]} ago`
+}
+
+function remainingFormatDuration (num, unit) {
+ return `${num} ${DurationKeys[unit][num === 1 ? 0 : 1]}`
+}
+
+export function formatTimeDiff (t1, t2 = new Date()) {
+ if (!t1) {
+ return '-'
+ }
+ const days = diffInDays(t2, t1)
+ if (days > 365) {
+ return formatDuration(diffInYears(t2, t1), 'years')
+ }
+ if (days > 31) {
+ return formatDuration(diffInMonths(t2, t1), 'months')
+ }
+ if (days > 6) {
+ return formatDuration(diffInWeeks(t2, t1), 'weeks')
+ }
+ if (days < 1) {
+ const hours = diffInHours(t2, t1)
+ if (hours < 1) {
+ const minutes = diffInMinutes(t2, t1)
+ if (minutes < 0) {
+ return 'N/A'
+ }
+ return formatDuration(minutes, 'minutes')
+ }
+ return formatDuration(hours, 'hours')
+ }
+ return formatDuration(days, 'days')
+}
+
+export function formatRemainingTime (t2, t1 = new Date()) {
+ if (!t1) {
+ return '-'
+ }
+ const days = diffInDays(t2, t1)
+ const hours = diffInHours(t2, t1)
+ switch (true) {
+ case (days > 365):
+ return remainingFormatDuration(diffInYears(t2, t1), 'years')
+ case (days > 31):
+ return remainingFormatDuration(diffInMonths(t2, t1), 'months')
+ case (days > 6):
+ return remainingFormatDuration(diffInWeeks(t2, t1), 'weeks')
+ case (days < 1):
+ if (days < 0) { return 'expired' }
+ if (hours < 1) {
+ const minutes = diffInMinutes(t2, t1)
+ return remainingFormatDuration(minutes, 'minutes')
+ }
+ return remainingFormatDuration(hours, 'hours')
+ default:
+ return remainingFormatDuration(days, 'days')
+ }
+}
+
+export function waitCursor (on) {
+ document.body.style.cursor = (on) ? 'wait' : 'default'
+}
+
+export function downloadJsonList (jsonList, headers, filename, output = 'csv') {
+ /** download json as csv, but we need to put it in one level
+ :param jsonList: dict **/
+ if (!jsonList || !headers) {
+ return
+ }
+ headers = headers.filter(h => h.value)
+ const headerTexts = Object.keys(headers).map(function (key) {
+ return headers[key].text
+ })
+ const headerValues = Object.keys(headers).map(function (key) {
+ return headers[key].value
+ })
+ if (output === 'csv') {
+ let content = headerTexts.join(';') + '\n'
+ jsonList.forEach(function (row) {
+ let currentContent = ''
+ headerValues.forEach(function (header) {
+ const headerSplit = header.split('.')
+ if (headerSplit.length > 1) {
+ currentContent += (row[headerSplit[0]][headerSplit[1]] !== null ? row[headerSplit[0]][headerSplit[1]] : '') + ';'
+ } else {
+ currentContent += (row[header] !== null ? row[header] : '') + ';'
+ }
+ })
+ content += currentContent + '\n'
+ })
+ const hiddenElement = document.createElement('a')
+ hiddenElement.href = 'data:text/csv;charset=utf-8,' + encodeURI(content)
+ hiddenElement.target = '_blank'
+ hiddenElement.download = filename + '.csv'
+ hiddenElement.click()
+ document.removeChild(hiddenElement)
+ }
+}
+
+export function formatToTitle (string) {
+ string = string.replace(/_/g, ' ')
+ return string.charAt(0).toUpperCase() + string.slice(1)
+}
+
+export function formatToCurrency (value, currency) {
+ return value.toLocaleString('en-UK', {
+ style: 'currency',
+ currency: currency,
+ currencySign: 'accounting'
+ })
+}
+
+// Error subclass that carries the originating response object alongside the
+// message, so callers handling the failure can inspect it directly.
+export class CustomError extends Error {
+ constructor (message, response) {
+ super(message)
+ // Keep the raw response for the caller; only stored, never read here.
+ this.response = response
+ }
+}
diff --git a/web-app/src/views/Dashboard.vue b/web-app/src/views/Dashboard.vue
new file mode 100644
index 00000000..d8586ca8
--- /dev/null
+++ b/web-app/src/views/Dashboard.vue
@@ -0,0 +1,233 @@
+# Copyright (C) 2021 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+ Your email hasn't been confirmed yet
+ Send confirmation email
+
+
+
+
+
+ Your storage is almost full ({{usage}}%). Soon you will not be able to sync your projects.
+
+
+
+
+
Welcome {{app.user.username}}, are you ready to start?
+
+ First create a new project, add people to your organisation or explore public projects for more
+ inspiration
+ New project
+
+
+
+
+
+
Download Input app
+
Capture geo-info easily through your mobile/tablet with the Input app.
+ Designed to be compatible with all mobile devices - even those with small screens.
+
+
+
+ Well done! The next step is adding some data.
+
+ There are two options:
+ 1. Use the Mergin plugin for QGIS to upload data. This is the easiest and recommended way. See the documentation on how to install and use the plugin.
+ 2. Drag and drop files from your computer to the lower part of this page. This option is convenient if your project files are already fully prepared and only need uploading.
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/views/FileDetail.vue b/web-app/src/views/FileDetail.vue
new file mode 100644
index 00000000..1f25dda3
--- /dev/null
+++ b/web-app/src/views/FileDetail.vue
@@ -0,0 +1,208 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
diff --git a/web-app/src/views/FileVersionDetail.vue b/web-app/src/views/FileVersionDetail.vue
new file mode 100644
index 00000000..9f7970fc
--- /dev/null
+++ b/web-app/src/views/FileVersionDetail.vue
@@ -0,0 +1,83 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
{{ item[header['value']]}}
+
+
+
+
+
+
+
diff --git a/web-app/src/views/PageView.vue b/web-app/src/views/PageView.vue
new file mode 100644
index 00000000..3933617a
--- /dev/null
+++ b/web-app/src/views/PageView.vue
@@ -0,0 +1,48 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
+
+
+
+ Version: {{ app.version }}
+
+
+
+
+
+
+
diff --git a/web-app/src/views/Project.vue b/web-app/src/views/Project.vue
new file mode 100644
index 00000000..56623ae1
--- /dev/null
+++ b/web-app/src/views/Project.vue
@@ -0,0 +1,354 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
+
+
+
+
+ archive
+ Download
+
+
+ fa-clone
+ Clone
+
+
+
+
+
+
+
+ Files
+ History
+ Settings
+
+
+
+
+
+
+
+
+ Request access
+
+ This is a private project
+ You don't have permissions to access this project.
+
+
+
+
+
+ Project not found
+ Please check if address is written correctly
+
+
+
+
+ You don't have permission to access this project
+ You already requested access
+
+
+
+
+
+ publish Drag & drop here or click and select file(s) to upload
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/views/ProjectSettings.vue b/web-app/src/views/ProjectSettings.vue
new file mode 100644
index 00000000..469985d1
--- /dev/null
+++ b/web-app/src/views/ProjectSettings.vue
@@ -0,0 +1,263 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+ {{ error }}
+
+
+
+
+
+
+
+ This is public project
+ Hide this project from everyone.
+
+
+ This is private project
+ Make this project visible to anyone.
+
+
+
+
+ Make private
+ Make public
+
+
+
+
+
+
+ Transfer project
+ Transfer this project to another user or organisation
+
+
+
+ Transfer
+
+
+
+
+
+
+ Delete project
+ All data will be lost
+
+
+
+ Delete
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/views/ProjectVersions.vue b/web-app/src/views/ProjectVersions.vue
new file mode 100644
index 00000000..5dcdf206
--- /dev/null
+++ b/web-app/src/views/ProjectVersions.vue
@@ -0,0 +1,163 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
diff --git a/web-app/src/views/ProjectsList.vue b/web-app/src/views/ProjectsList.vue
new file mode 100644
index 00000000..9e42dee3
--- /dev/null
+++ b/web-app/src/views/ProjectsList.vue
@@ -0,0 +1,118 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+ {{ header }}
+ chevron_right {{ namespace }}
+
+
+
+
+
+
+
+ add_circle
+ Create
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/views/Registration.vue b/web-app/src/views/Registration.vue
new file mode 100644
index 00000000..11738d6f
--- /dev/null
+++ b/web-app/src/views/Registration.vue
@@ -0,0 +1,130 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Create your account
+
+ Sign in
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/web-app/src/views/VersionDetail.vue b/web-app/src/views/VersionDetail.vue
new file mode 100644
index 00000000..31e16185
--- /dev/null
+++ b/web-app/src/views/VersionDetail.vue
@@ -0,0 +1,227 @@
+# Copyright (C) 2018 Lutra Consulting Limited. All rights reserved.
+# Do not distribute without the express permission of the author.
+
+
+