diff --git a/CHANGELOG.md b/CHANGELOG.md index 0458ab5d..344fe29a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ v1.6.0 - unreleased - Feature: Dynamic attachment size limit ([#731](https://github.com/Mailu/Mailu/issues/731)) - Feature: Certificate watcher for external certs to reload nginx ([#732](https://github.com/Mailu/Mailu/issues/732)) - Feature: Kubernetes +- Feature: Supports postgresql and mysql database backends ([#420](https://github.com/Mailu/Mailu/issues/420)) - Enhancement: Use pre-defined dhparam ([#322](https://github.com/Mailu/Mailu/issues/322)) - Enhancement: Disable ssl_session_tickets ([#329](https://github.com/Mailu/Mailu/issues/329)) - Enhancement: max attachment size in roundcube ([#338](https://github.com/Mailu/Mailu/issues/338)) diff --git a/core/admin/Dockerfile b/core/admin/Dockerfile index 95a0705a..33c0bde7 100644 --- a/core/admin/Dockerfile +++ b/core/admin/Dockerfile @@ -8,8 +8,9 @@ RUN mkdir -p /app WORKDIR /app COPY requirements-prod.txt requirements.txt -RUN apk add --no-cache openssl curl \ - && apk add --no-cache --virtual build-dep openssl-dev libffi-dev python3-dev build-base \ +RUN apk add --no-cache libressl curl postgresql-libs mariadb-connector-c \ + && apk add --no-cache --virtual build-dep \ + libressl-dev libffi-dev python3-dev build-base postgresql-dev mariadb-connector-c-dev \ && pip3 install -r requirements.txt \ && apk del --no-cache build-dep diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py index 10bf22ae..95004017 100644 --- a/core/admin/mailu/configuration.py +++ b/core/admin/mailu/configuration.py @@ -3,8 +3,6 @@ import os DEFAULT_CONFIG = { # Specific to the admin UI - 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db', - 'SQLALCHEMY_TRACK_MODIFICATIONS': False, 'DOCKER_SOCKET': 'unix:///var/run/docker.sock', 'BABEL_DEFAULT_LOCALE': 'en', 'BABEL_DEFAULT_TIMEZONE': 'UTC', @@ -14,6 +12,14 @@ DEFAULT_CONFIG = { 'DEBUG': False, 'DOMAIN_REGISTRATION': False, 
'TEMPLATES_AUTO_RELOAD': True, + # Database settings + 'DB_FLAVOR': None, + 'DB_USER': 'mailu', + 'DB_PW': None, + 'DB_HOST': 'database', + 'DB_NAME': 'mailu', + 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db', + 'SQLALCHEMY_TRACK_MODIFICATIONS': False, # Statistics management 'INSTANCE_ID_PATH': '/data/instance', 'STATS_ENDPOINT': '0.{}.stats.mailu.io', @@ -59,15 +65,27 @@ class ConfigManager(dict): """ Naive configuration manager that uses environment only """ + DB_TEMPLATES = { + 'sqlite': 'sqlite:////{DB_HOST}', + 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}', + 'mysql': 'mysql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}' + } + def __init__(self): self.config = dict() def init_app(self, app): self.config.update(app.config) + # get environment variables self.config.update({ key: os.environ.get(key, value) for key, value in DEFAULT_CONFIG.items() }) + # automatically set the sqlalchemy string + if self.config['DB_FLAVOR']: + template = self.DB_TEMPLATES[self.config['DB_FLAVOR']] + self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config) + # update the app config itself app.config = self def setdefault(self, key, value): diff --git a/core/admin/mailu/models.py b/core/admin/mailu/models.py index eee351bf..42aecbf0 100644 --- a/core/admin/mailu/models.py +++ b/core/admin/mailu/models.py @@ -27,7 +27,7 @@ class IdnaDomain(db.TypeDecorator): impl = db.String(80) def process_bind_param(self, value, dialect): - return idna.encode(value).decode("ascii") + return idna.encode(value).decode("ascii").lower() def process_result_value(self, value, dialect): return idna.decode(value) @@ -37,7 +37,7 @@ class IdnaEmail(db.TypeDecorator): """ Stores a Unicode string in it's IDNA representation (ASCII only) """ - impl = db.String(255, collation="NOCASE") + impl = db.String(255) def process_bind_param(self, value, dialect): try: @@ -45,7 +45,7 @@ class IdnaEmail(db.TypeDecorator): return "{0}@{1}".format( localpart, 
idna.encode(domain_name).decode('ascii'), - ) + ).lower() except ValueError: pass @@ -88,32 +88,39 @@ class JSONEncoded(db.TypeDecorator): return json.loads(value) if value else None -class Config(db.Model): - """ In-database configuration values - """ - - name = db.Column(db.String(255), primary_key=True, nullable=False) - value = db.Column(JSONEncoded) - - -# Many-to-many association table for domain managers -managers = db.Table('manager', - db.Column('domain_name', IdnaDomain, db.ForeignKey('domain.name')), - db.Column('user_email', IdnaEmail, db.ForeignKey('user.email')) -) - - class Base(db.Model): """ Base class for all models """ __abstract__ = True + metadata = sqlalchemy.schema.MetaData( + naming_convention={ + "fk": "%(table_name)s_%(column_0_name)s_fkey", + "pk": "%(table_name)s_pkey" + } + ) + created_at = db.Column(db.Date, nullable=False, default=datetime.now) updated_at = db.Column(db.Date, nullable=True, onupdate=datetime.now) comment = db.Column(db.String(255), nullable=True) +# Many-to-many association table for domain managers +managers = db.Table('manager', Base.metadata, + db.Column('domain_name', IdnaDomain, db.ForeignKey('domain.name')), + db.Column('user_email', IdnaEmail, db.ForeignKey('user.email')) +) + + +class Config(Base): + """ In-database configuration values + """ + + name = db.Column(db.String(255), primary_key=True, nullable=False) + value = db.Column(JSONEncoded) + + class Domain(Base): """ A DNS domain that has mail addresses associated to it. 
""" @@ -201,7 +208,7 @@ class Relay(Base): __tablename__ = "relay" - name = db.Column(db.String(80), primary_key=True, nullable=False) + name = db.Column(IdnaDomain, primary_key=True, nullable=False) smtp = db.Column(db.String(80), nullable=True) def __str__(self): @@ -318,7 +325,7 @@ class User(Base, Email): # Settings displayed_name = db.Column(db.String(160), nullable=False, default="") spam_enabled = db.Column(db.Boolean(), nullable=False, default=True) - spam_threshold = db.Column(db.Integer(), nullable=False, default=80.0) + spam_threshold = db.Column(db.Integer(), nullable=False, default=80) # Flask-login attributes is_authenticated = True diff --git a/core/admin/migrations/env.py b/core/admin/migrations/env.py index 45938160..cdfd2248 100755 --- a/core/admin/migrations/env.py +++ b/core/admin/migrations/env.py @@ -3,6 +3,8 @@ from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig import logging +import tenacity +from tenacity import retry # this is the Alembic Config object, which provides # access to the values within the .ini file in use. 
@@ -20,7 +22,9 @@ logger = logging.getLogger('alembic.env') from flask import current_app config.set_main_option('sqlalchemy.url', current_app.config.get('SQLALCHEMY_DATABASE_URI')) -target_metadata = current_app.extensions['migrate'].db.metadata +#target_metadata = current_app.extensions['migrate'].db.metadata +from mailu import models +target_metadata = models.Base.metadata # other values from the config, defined by the needs of env.py, # can be acquired: @@ -69,7 +73,14 @@ def run_migrations_online(): prefix='sqlalchemy.', poolclass=pool.NullPool) - connection = engine.connect() + connection = tenacity.Retrying( + stop=tenacity.stop_after_attempt(100), + wait=tenacity.wait_random(min=2, max=5), + before=tenacity.before_log(logging.getLogger("tenacity.retry"), logging.DEBUG), + before_sleep=tenacity.before_sleep_log(logging.getLogger("tenacity.retry"), logging.INFO), + after=tenacity.after_log(logging.getLogger("tenacity.retry"), logging.DEBUG) + ).call(engine.connect) + context.configure(connection=connection, target_metadata=target_metadata, process_revision_directives=process_revision_directives, diff --git a/core/admin/migrations/versions/049fed905da7_.py b/core/admin/migrations/versions/049fed905da7_.py index ce410844..d8af41d3 100644 --- a/core/admin/migrations/versions/049fed905da7_.py +++ b/core/admin/migrations/versions/049fed905da7_.py @@ -16,10 +16,10 @@ import sqlalchemy as sa def upgrade(): with op.batch_alter_table('user') as batch: - batch.alter_column('email', type_=sa.String(length=255, collation="NOCASE")) + batch.alter_column('email', type_=sa.String(length=255), nullable=False) def downgrade(): with op.batch_alter_table('user') as batch: - batch.alter_column('email', type_=sa.String(length=255)) + batch.alter_column('email', type_=sa.String(length=255), nullable=False) diff --git a/core/admin/migrations/versions/27ae2f102682_.py b/core/admin/migrations/versions/27ae2f102682_.py index f821aff2..8902a17e 100644 --- 
a/core/admin/migrations/versions/27ae2f102682_.py +++ b/core/admin/migrations/versions/27ae2f102682_.py @@ -35,7 +35,7 @@ def upgrade(): ) # set default to 80% with op.batch_alter_table('user') as batch: - batch.alter_column('spam_threshold', default=80.) + batch.alter_column('spam_threshold', server_default="80.") def downgrade(): connection = op.get_bind() @@ -50,4 +50,4 @@ ) # set default to 10/15 with op.batch_alter_table('user') as batch: - batch.alter_column('spam_threshold', default=10.) + batch.alter_column('spam_threshold', server_default="10.") diff --git a/core/admin/migrations/versions/546b04c886f0_.py b/core/admin/migrations/versions/546b04c886f0_.py new file mode 100644 index 00000000..35534aa0 --- /dev/null +++ b/core/admin/migrations/versions/546b04c886f0_.py @@ -0,0 +1,79 @@ +""" Fix constraint naming by adding a name to all constraints + +Revision ID: 546b04c886f0 +Revises: cd79ed46d9c2 +Create Date: 2018-12-08 16:33:37.757634 + +""" + +# revision identifiers, used by Alembic.
+revision = '546b04c886f0' +down_revision = 'cd79ed46d9c2' + +from alembic import op, context +import sqlalchemy as sa + + +def upgrade(): + # Only run this for somehow supported data types at the date we started naming constraints + # Among others, these will probably fail on MySQL + if context.get_bind().engine.name not in ('sqlite', 'postgresql'): + return + + metadata = context.get_context().opts['target_metadata'] + + # Drop every constraint on every table + with op.batch_alter_table('alias', naming_convention=metadata.naming_convention) as batch_op: + batch_op.drop_constraint('alias_pkey', type_="primary") + batch_op.drop_constraint('alias_domain_name_fkey', type_="foreignkey") + with op.batch_alter_table('alternative', naming_convention=metadata.naming_convention) as batch_op: + batch_op.drop_constraint('alternative_pkey', type_="primary") + batch_op.drop_constraint('alternative_domain_name_fkey', type_="foreignkey") + with op.batch_alter_table('manager', naming_convention=metadata.naming_convention) as batch_op: + batch_op.drop_constraint('manager_domain_name_fkey', type_="foreignkey") + batch_op.drop_constraint('manager_user_email_fkey', type_="foreignkey") + with op.batch_alter_table('token', naming_convention=metadata.naming_convention) as batch_op: + batch_op.drop_constraint('token_pkey', type_="primary") + batch_op.drop_constraint('token_user_email_fkey', type_="foreignkey") + with op.batch_alter_table('fetch', naming_convention=metadata.naming_convention) as batch_op: + batch_op.drop_constraint('fetch_pkey', type_="primary") + batch_op.drop_constraint('fetch_user_email_fkey', type_="foreignkey") + with op.batch_alter_table('relay', naming_convention=metadata.naming_convention) as batch_op: + batch_op.drop_constraint('relay_pkey', type_="primary") + with op.batch_alter_table('config', naming_convention=metadata.naming_convention) as batch_op: + batch_op.drop_constraint('config_pkey', type_="primary") + with op.batch_alter_table('user', 
naming_convention=metadata.naming_convention) as batch_op: + batch_op.drop_constraint('user_pkey', type_="primary") + batch_op.drop_constraint('user_domain_name_fkey', type_="foreignkey") + with op.batch_alter_table('domain', naming_convention=metadata.naming_convention) as batch_op: + batch_op.drop_constraint('domain_pkey', type_="primary") + + # Recreate constraints with proper names + with op.batch_alter_table('domain', naming_convention=metadata.naming_convention) as batch_op: + batch_op.create_primary_key('domain_pkey', ['name']) + with op.batch_alter_table('alias', naming_convention=metadata.naming_convention) as batch_op: + batch_op.create_primary_key('alias_pkey', ['email']) + batch_op.create_foreign_key('alias_domain_name_fkey', 'domain', ['domain_name'], ['name']) + with op.batch_alter_table('user', naming_convention=metadata.naming_convention) as batch_op: + batch_op.create_primary_key('user_pkey', ['email']) + batch_op.create_foreign_key('user_domain_name_fkey', 'domain', ['domain_name'], ['name']) + with op.batch_alter_table('alternative', naming_convention=metadata.naming_convention) as batch_op: + batch_op.create_primary_key('alternative_pkey', ['name']) + batch_op.create_foreign_key('alternative_domain_name_fkey', 'domain', ['domain_name'], ['name']) + with op.batch_alter_table('manager', naming_convention=metadata.naming_convention) as batch_op: + batch_op.create_foreign_key('manager_domain_name_fkey', 'domain', ['domain_name'], ['name']) + batch_op.create_foreign_key('manager_user_email_fkey', 'user', ['user_email'], ['email']) + with op.batch_alter_table('token', naming_convention=metadata.naming_convention) as batch_op: + batch_op.create_primary_key('token_pkey', ['id']) + batch_op.create_foreign_key('token_user_email_fkey', 'user', ['user_email'], ['email']) + with op.batch_alter_table('fetch', naming_convention=metadata.naming_convention) as batch_op: + batch_op.create_primary_key('fetch_pkey', ['id']) + 
batch_op.create_foreign_key('fetch_user_email_fkey', 'user', ['user_email'], ['email']) + with op.batch_alter_table('relay', naming_convention=metadata.naming_convention) as batch_op: + batch_op.create_primary_key('relay_pkey', ['name']) + with op.batch_alter_table('config', naming_convention=metadata.naming_convention) as batch_op: + batch_op.create_primary_key('config_pkey', ['name']) + + +def downgrade(): + pass diff --git a/core/admin/migrations/versions/5aeb5811408e_.py b/core/admin/migrations/versions/5aeb5811408e_.py new file mode 100644 index 00000000..2ea06ea9 --- /dev/null +++ b/core/admin/migrations/versions/5aeb5811408e_.py @@ -0,0 +1,142 @@ +""" Convert all domains and emails to lowercase + +Revision ID: 5aeb5811408e +Revises: f1393877871d +Create Date: 2018-12-06 16:07:23.380579 + +""" + +# revision identifiers, used by Alembic. +revision = '5aeb5811408e' +down_revision = 'f1393877871d' + +from alembic import op, config +import sqlalchemy as sa + + +name_column = lambda: sa.Column('name', sa.String(80), primary_key=True) +domain_name_column = lambda: sa.Column('domain_name', sa.String(80)) +user_email_column = lambda: sa.Column('user_email', sa.String(255)) +email_columns = lambda: [ + sa.Column('email', sa.String(255), primary_key=True), + sa.Column('localpart', sa.String(80)), + domain_name_column() +] +id_columns = lambda: [ + sa.Column('id', sa.Integer(), primary_key=True), + user_email_column() +] + + +domain_table = sa.Table('domain', sa.MetaData(), name_column()) +relay_table = sa.Table('relay', sa.MetaData(), name_column()) +alternative_table = sa.Table('alternative', sa.MetaData(), name_column(), domain_name_column()) +user_table = sa.Table('user', sa.MetaData(), *email_columns()) +alias_table = sa.Table('alias', sa.MetaData(), *email_columns()) +fetch_table = sa.Table('fetch', sa.MetaData(), *id_columns()) +token_table = sa.Table('token', sa.MetaData(), *id_columns()) +manager_table = sa.Table('manager', sa.MetaData(), domain_name_column(),
user_email_column()) + + +def upgrade(): + connection = op.get_bind() + + # drop foreign key constraints + with op.batch_alter_table('alias') as batch_op: + batch_op.drop_constraint('alias_domain_name_fkey', type_='foreignkey') + with op.batch_alter_table('alternative') as batch_op: + batch_op.drop_constraint('alternative_domain_name_fkey', type_='foreignkey') + with op.batch_alter_table('manager') as batch_op: + batch_op.drop_constraint('manager_domain_name_fkey', type_='foreignkey') + batch_op.drop_constraint('manager_user_email_fkey', type_='foreignkey') + with op.batch_alter_table('token') as batch_op: + batch_op.drop_constraint('token_user_email_fkey', type_='foreignkey') + with op.batch_alter_table('fetch') as batch_op: + batch_op.drop_constraint('fetch_user_email_fkey', type_='foreignkey') + with op.batch_alter_table('user') as batch_op: + batch_op.drop_constraint('user_domain_name_fkey', type_='foreignkey') + + # lower domain names + for domain in connection.execute(domain_table.select()): + connection.execute(domain_table.update().where( + domain_table.c.name == domain.name + ).values( + name=domain.name.lower() + )) + # lower alternatives + for alternative in connection.execute(alternative_table.select()): + connection.execute(alternative_table.update().where( + alternative_table.c.name == alternative.name + ).values( + name=alternative.name.lower(), + domain_name=alternative.domain_name.lower() + )) + # lower users + for user in connection.execute(user_table.select()): + connection.execute(user_table.update().where( + user_table.c.email == user.email + ).values( + email=user.email.lower(), + localpart=user.localpart.lower(), + domain_name=user.domain_name.lower() + )) + # lower aliases + for alias in connection.execute(alias_table.select()): + connection.execute(alias_table.update().where( + alias_table.c.email == alias.email + ).values( + email=alias.email.lower(), + localpart=alias.localpart.lower(), + domain_name=alias.domain_name.lower() + )) + # 
lower fetches + for fetch in connection.execute(fetch_table.select()): + connection.execute(fetch_table.update().where( + fetch_table.c.id == fetch.id + ).values( + user_email=fetch.user_email.lower() + )) + # lower tokens + for token in connection.execute(token_table.select()): + connection.execute(token_table.update().where( + token_table.c.id == token.id + ).values( + user_email=token.user_email.lower() + )) + # lower relays + for relay in connection.execute(relay_table.select()): + connection.execute(relay_table.update().where( + relay_tbale.c.name == relay.name + ).values( + name=relay.name.lower() + )) + # lower managers + for manager in connection.execute(manager_table.select()): + connection.execute(manager_table.update().where( + sa.and_( + manager_table.c.domain_name == manager.domain_name, + manager_table.c.user_email == manager.user_email + ) + ).values( + domain_name=manager.domain_name.lower(), + user_email=manager.user_email.lower() + )) + + # restore foreign key constraints + with op.batch_alter_table('alias') as batch_op: + batch_op.create_foreign_key('alias_domain_name_fkey', 'domain', ['domain_name'], ['name']) + with op.batch_alter_table('user') as batch_op: + batch_op.create_foreign_key('user_domain_name_fkey', 'domain', ['domain_name'], ['name']) + with op.batch_alter_table('alternative') as batch_op: + batch_op.create_foreign_key('alternative_domain_name_fkey', 'domain', ['domain_name'], ['name']) + with op.batch_alter_table('manager') as batch_op: + batch_op.create_foreign_key('manager_domain_name_fkey', 'domain', ['domain_name'], ['name']) + batch_op.create_foreign_key('manager_user_email_fkey', 'user', ['user_email'], ['email']) + with op.batch_alter_table('token') as batch_op: + batch_op.create_foreign_key('token_user_email_fkey', 'user', ['user_email'], ['email']) + with op.batch_alter_table('fetch') as batch_op: + batch_op.create_foreign_key('fetch_user_email_fkey', 'user', ['user_email'], ['email']) + + +def downgrade(): + pass diff 
--git a/core/admin/migrations/versions/9400a032eb1a_.py b/core/admin/migrations/versions/9400a032eb1a_.py index f629a7eb..18379b4d 100644 --- a/core/admin/migrations/versions/9400a032eb1a_.py +++ b/core/admin/migrations/versions/9400a032eb1a_.py @@ -23,8 +23,8 @@ def upgrade(): sa.Column('user_email', sa.String(length=255), nullable=False), sa.Column('password', sa.String(length=255), nullable=False), sa.Column('ip', sa.String(length=255), nullable=True), - sa.ForeignKeyConstraint(['user_email'], ['user.email'], ), - sa.PrimaryKeyConstraint('id') + sa.ForeignKeyConstraint(['user_email'], ['user.email'], name=op.f('token_user_email_fkey')), + sa.PrimaryKeyConstraint('id', name=op.f('token_pkey')) ) diff --git a/core/admin/migrations/versions/9c28df23f77e_.py b/core/admin/migrations/versions/9c28df23f77e_.py index edf8d8ef..ba3e0098 100644 --- a/core/admin/migrations/versions/9c28df23f77e_.py +++ b/core/admin/migrations/versions/9c28df23f77e_.py @@ -16,13 +16,13 @@ import sqlalchemy as sa def upgrade(): with op.batch_alter_table('user') as batch: - batch.alter_column('email', type_=sa.String(length=255, collation="NOCASE")) + batch.alter_column('email', type_=sa.String(length=255), nullable=False) with op.batch_alter_table('alias') as batch: - batch.alter_column('email', type_=sa.String(length=255, collation="NOCASE")) + batch.alter_column('email', type_=sa.String(length=255), nullable=False) def downgrade(): with op.batch_alter_table('user') as batch: - batch.alter_column('email', type_=sa.String(length=255)) + batch.alter_column('email', type_=sa.String(length=255), nullable=False) with op.batch_alter_table('alias') as batch: - batch.alter_column('email', type_=sa.String(length=255)) + batch.alter_column('email', type_=sa.String(length=255), nullable=False) diff --git a/core/admin/migrations/versions/c162ac88012a_.py b/core/admin/migrations/versions/c162ac88012a_.py index 914ba662..3a2fad06 100644 --- a/core/admin/migrations/versions/c162ac88012a_.py +++ 
b/core/admin/migrations/versions/c162ac88012a_.py @@ -21,7 +21,7 @@ def upgrade(): sa.Column('comment', sa.String(length=255), nullable=True), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('smtp', sa.String(length=80), nullable=True), - sa.PrimaryKeyConstraint('name') + sa.PrimaryKeyConstraint('name', name=op.f('relay_pkey')) ) diff --git a/core/admin/migrations/versions/c9a0b4e653cf_.py b/core/admin/migrations/versions/c9a0b4e653cf_.py index 8882d079..45058442 100644 --- a/core/admin/migrations/versions/c9a0b4e653cf_.py +++ b/core/admin/migrations/versions/c9a0b4e653cf_.py @@ -21,8 +21,8 @@ def upgrade(): sa.Column('comment', sa.String(length=255), nullable=True), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('domain_name', sa.String(length=80), nullable=True), - sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ), - sa.PrimaryKeyConstraint('name') + sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], name=op.f('alternative_domain_name_fkey')), + sa.PrimaryKeyConstraint('name', name=op.f('alternative_pkey')) ) diff --git a/core/admin/migrations/versions/cd79ed46d9c2_.py b/core/admin/migrations/versions/cd79ed46d9c2_.py index ccf210fe..72e68234 100644 --- a/core/admin/migrations/versions/cd79ed46d9c2_.py +++ b/core/admin/migrations/versions/cd79ed46d9c2_.py @@ -17,7 +17,7 @@ def upgrade(): op.create_table('config', sa.Column('name', sa.String(length=255), nullable=False), sa.Column('value', sa.String(length=255), nullable=True), - sa.PrimaryKeyConstraint('name') + sa.PrimaryKeyConstraint('name', name=op.f('config_pkey')) ) diff --git a/core/admin/migrations/versions/f1393877871d_.py b/core/admin/migrations/versions/f1393877871d_.py new file mode 100644 index 00000000..a692e48f --- /dev/null +++ b/core/admin/migrations/versions/f1393877871d_.py @@ -0,0 +1,28 @@ +""" Add default columns to the configuration table + +Revision ID: f1393877871d +Revises: 546b04c886f0 +Create Date: 2018-12-09 16:15:42.317104 + +""" 
+ +# revision identifiers, used by Alembic. +revision = 'f1393877871d' +down_revision = '546b04c886f0' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(): + with op.batch_alter_table('config') as batch_op: + batch_op.add_column(sa.Column('comment', sa.String(length=255), nullable=True)) + batch_op.add_column(sa.Column('created_at', sa.Date(), nullable=False, server_default='1900-01-01')) + batch_op.add_column(sa.Column('updated_at', sa.Date(), nullable=True)) + + +def downgrade(): + with op.batch_alter_table('config') as batch_op: + batch_op.drop_column('updated_at') + batch_op.drop_column('created_at') + batch_op.drop_column('comment') diff --git a/core/admin/migrations/versions/ff0417f4318f_.py b/core/admin/migrations/versions/ff0417f4318f_.py index ca3b6d5a..7c92f241 100644 --- a/core/admin/migrations/versions/ff0417f4318f_.py +++ b/core/admin/migrations/versions/ff0417f4318f_.py @@ -22,18 +22,18 @@ def upgrade(): sa.Column('name', sa.String(length=80), nullable=False), sa.Column('max_users', sa.Integer(), nullable=False), sa.Column('max_aliases', sa.Integer(), nullable=False), - sa.PrimaryKeyConstraint('name') + sa.PrimaryKeyConstraint('name', name=op.f('domain_pkey')) ) op.create_table('alias', sa.Column('created_at', sa.Date(), nullable=False), sa.Column('updated_at', sa.Date(), nullable=True), sa.Column('comment', sa.String(length=255), nullable=True), sa.Column('localpart', sa.String(length=80), nullable=False), - sa.Column('destination', sa.String(), nullable=False), + sa.Column('destination', sa.String(length=1023), nullable=False), sa.Column('domain_name', sa.String(length=80), nullable=False), sa.Column('email', sa.String(length=255), nullable=False), - sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ), - sa.PrimaryKeyConstraint('email') + sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], name=op.f('alias_domain_name_fkey')), + sa.PrimaryKeyConstraint('email', name=op.f('alias_pkey')) ) op.create_table('user', 
sa.Column('created_at', sa.Date(), nullable=False), @@ -55,8 +55,8 @@ def upgrade(): sa.Column('spam_threshold', sa.Numeric(), nullable=False), sa.Column('domain_name', sa.String(length=80), nullable=False), sa.Column('email', sa.String(length=255), nullable=False), - sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ), - sa.PrimaryKeyConstraint('email') + sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], name=op.f('user_domain_name_fkey')), + sa.PrimaryKeyConstraint('email', name=op.f('user_pkey')) ) op.create_table('fetch', sa.Column('created_at', sa.Date(), nullable=False), @@ -64,20 +64,20 @@ def upgrade(): sa.Column('comment', sa.String(length=255), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('user_email', sa.String(length=255), nullable=False), - sa.Column('protocol', sa.Enum('imap', 'pop3'), nullable=False), + sa.Column('protocol', sa.Enum('imap', 'pop3', name='enum_protocol'), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('port', sa.Integer(), nullable=False), sa.Column('tls', sa.Boolean(), nullable=False), sa.Column('username', sa.String(length=255), nullable=False), sa.Column('password', sa.String(length=255), nullable=False), - sa.ForeignKeyConstraint(['user_email'], ['user.email'], ), - sa.PrimaryKeyConstraint('id') + sa.ForeignKeyConstraint(['user_email'], ['user.email'], name=op.f('fetch_user_email_fkey')), + sa.PrimaryKeyConstraint('id', name=op.f('fetch_pkey')) ) op.create_table('manager', sa.Column('domain_name', sa.String(length=80), nullable=True), sa.Column('user_email', sa.String(length=255), nullable=True), - sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], ), - sa.ForeignKeyConstraint(['user_email'], ['user.email'], ) + sa.ForeignKeyConstraint(['domain_name'], ['domain.name'], name=op.f('manager_domain_name_fkey')), + sa.ForeignKeyConstraint(['user_email'], ['user.email'], name=op.f('manager_user_email_fkey')) ) diff --git 
a/core/admin/requirements-prod.txt b/core/admin/requirements-prod.txt index 3679b63f..6c55e192 100644 --- a/core/admin/requirements-prod.txt +++ b/core/admin/requirements-prod.txt @@ -28,7 +28,9 @@ Jinja2==2.10 limits==1.3 Mako==1.0.7 MarkupSafe==1.1.0 +mysqlclient==1.3.14 passlib==1.7.1 +psycopg2==2.7.6.1 pycparser==2.19 pyOpenSSL==18.0.0 python-dateutil==2.7.5 diff --git a/core/admin/requirements.txt b/core/admin/requirements.txt index 95a65bbe..0e37baf6 100644 --- a/core/admin/requirements.txt +++ b/core/admin/requirements.txt @@ -17,3 +17,6 @@ PyYAML PyOpenSSL dnspython bcrypt +tenacity +mysqlclient +psycopg2 diff --git a/docs/database.rst b/docs/database.rst new file mode 100644 index 00000000..c7b233e5 --- /dev/null +++ b/docs/database.rst @@ -0,0 +1,112 @@ +Changing the database back-end +============================== + +By default Mailu uses a SQLite database. Recently, we have changed the internals of Mailu +to enable the support of alternative database solutions as postgresql and mysql/mariadb. +This functionality should still be considered experimental! + +Mailu Postgresql +---------------- + +Mailu optionally comes with a pre-configured Postgresql image. +This images has the following features: + +- Automatic creation of users, db, extensions and password; +- TCP connections are only allowed from the mailu `SUBNET`; +- Automatic minutely *wal archiving* and weekly `pg_basebackup`; +- Automatic cleaning of *wal archives* and *base backups*; + Two versions always remain available; +- When `/data` is empty and backups are present, the backups are restored automatically; + Useful in swarm environments, since the /data directory should not be on any network + filesystem (performance). + +To make use of this functionality, just select *Postgresql* as database flavor. +Don't select the usage of an external database. The ``docker-compose.yml`` and ``mailu.env`` +will pull in ``mailu/postgresql``. 
This image and ``mailu/admin`` contain all the scripts +to automatically setup the database. + +After bringing up the service, it might be useful to check the logs with: + +.. code-block:: bash + + docker-compose logs -f admin database + +External Postgresql +------------------- + +It is also possible to use a Postgresql database server, hosted elsewhere. +In this case you'll have to take care to create an empty database for Mailu, corresponding user, +password and sufficient privileges on the database to ``CREATE TABLE``, ``DROP`` etc. +Usually making the user owner of the database would be the easiest thing to do. +Don't forget to set ``pg_hba.conf`` accordingly. + +The following commands can serve as an example on how to set up postgresql for Mailu usage. +Adjust this to your own liking. + +.. code-block:: bash + + $ sudo su - postgres + $ psql + psql (10.6) + Type "help" for help. + + postgres=# create user mailu; + CREATE ROLE + postgres=# alter user mailu password 'my_secure_pass'; + ALTER ROLE + postgres=# create database mailu owner mailu; + CREATE DATABASE + postgres=# \c mailu + You are now connected to database "mailu" as user "postgres". + mailu=# create extension citext; + CREATE EXTENSION + mailu=# \q + +In ``pg_hba.conf`` there should be a line like this: + +.. code-block:: bash + + host mailu mailu /32 md5 + +Note that this example is the bare-minimum to get Mailu working. It goes without saying that +the database admin will have to set up their own means of backups and TLS encrypted connections. + +External MySQL/Mariadb +---------------------- + +It is also possible to use a mysql/mariadb database server, hosted elsewhere. +In this case you'll have to take care to create an empty database for Mailu, corresponding user, +password and sufficient privileges on the database to ``CREATE TABLE``, ``DROP`` etc. +Usually making the user owner of the database would be the easiest thing to do.
+ +The following commands can serve as an example on how to set up mysql/mariadb for Mailu usage. +Adjust this to your own liking. + +.. code-block:: sql + + mysql> CREATE DATABASE mailu; + mysql> CREATE USER 'mailu'@'%' IDENTIFIED BY 'my-strong-password-here'; + mysql> GRANT ALL PRIVILEGES ON mailu.* TO 'mailu'@'%'; + mysql> FLUSH PRIVILEGES; + +Note that if you get any errors related to ``caching_sha2_password`` it can be solved by changing the encryption +of the password to ``mysql_native_password`` instead of the latest authentication plugin ``caching_sha2_password``. + +.. code-block:: sql + + mysql> SELECT host, user, plugin FROM mysql.user; + + +-----------+-------+-----------------------+ + | host | user | plugin | + +-----------+-------+-----------------------+ + | % | mailu | caching_sha2_password | + +-----------+-------+-----------------------+ + + mysql> update mysql.user set plugin = 'mysql_native_password' where user = 'mailu'; + mysql> SELECT host, user, plugin FROM mysql.user; + + +------+-------+-----------------------+ + | host | user | plugin | + +------+-------+-----------------------+ + | % | mailu | mysql_native_password | + +------+-------+-----------------------+ diff --git a/docs/index.rst b/docs/index.rst index e1f924d2..98825ab6 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -59,6 +59,7 @@ the version of Mailu that you are running. kubernetes/mailu/index dns reverse + database .. 
toctree:: :maxdepth: 2 diff --git a/optional/postgresql/Dockerfile b/optional/postgresql/Dockerfile new file mode 100644 index 00000000..22409597 --- /dev/null +++ b/optional/postgresql/Dockerfile @@ -0,0 +1,33 @@ +FROM alpine:3.8 +# python3 shared with most images +RUN apk add --no-cache \ + python3 py3-pip \ + && pip3 install --upgrade pip +# Shared layer between rspamd, postfix, dovecot, unbound and nginx +RUN pip3 install jinja2 +# Image specific layers under this line +RUN apk add --no-cache \ + postgresql postgresql-libs busybox-suid sudo tar \ + && apk add --virtual .build-deps gcc musl-dev postgresql-dev python3-dev \ + && pip3 install psycopg2 anosql==0.3.1 \ + && apk --purge del .build-deps + +COPY start.py /start.py +COPY basebackup.sh /basebackup.sh +COPY conf /conf + +COPY postgres_crontab /etc/postgres_crontab +RUN crontab /etc/postgres_crontab + +ENV LANG en_US.UTF-8 + +RUN mkdir -p /data /backup /run/postgresql \ + && chown -R postgres:postgres /run/postgresql \ + && chmod 2777 /run/postgresql + +VOLUME /data +VOLUME /backup +EXPOSE 5432 + +CMD /start.py +HEALTHCHECK CMD psql -h 127.0.0.1 -d postgres -U health -c "select 1 as ok;" || exit 1 diff --git a/optional/postgresql/basebackup.sh b/optional/postgresql/basebackup.sh new file mode 100755 index 00000000..907da2ef --- /dev/null +++ b/optional/postgresql/basebackup.sh @@ -0,0 +1,28 @@ +#!/bin/sh + +dest="/backup/base-$(date +%F-%H%M)" +last=$(ls -d /backup/base* | tail -n1) +mkdir $dest || exit $? + +pg_basebackup --wal-method=none --pgdata=$dest --format=tar --gzip --username=postgres || exit $? + +# Clean old base backups, keep the last and the current. +for d in /backup/base*; do + if [ "$d" == "$last" ] || [ "$d" == "$dest" ]; then + continue + fi + rm -r $d || exit $? +done + +# Clean the WAL archive +cd /backup/wal_archive || exit $?
+if [ $(ls *.*.backup | wc -l) -lt 2 ]; then + exit 0 +fi +# Find the single last wal.backup point +prev_wal_start="$(ls *.*.backup | tail -n2 | head -n1 | cut -d '.' -f 1)" +for f in $(ls) ; do + if [ "$f" \< "$prev_wal_start" ]; then + rm -v /backup/wal_archive/$f + fi +done diff --git a/optional/postgresql/conf/pg_hba.conf b/optional/postgresql/conf/pg_hba.conf new file mode 100644 index 00000000..883fa3ad --- /dev/null +++ b/optional/postgresql/conf/pg_hba.conf @@ -0,0 +1,90 @@ +# PostgreSQL Client Authentication Configuration File +# =================================================== +# +# Refer to the "Client Authentication" section in the PostgreSQL +# documentation for a complete description of this file. A short +# synopsis follows. +# +# This file controls: which hosts are allowed to connect, how clients +# are authenticated, which PostgreSQL user names they can use, which +# databases they can access. Records take one of these forms: +# +# local DATABASE USER METHOD [OPTIONS] +# host DATABASE USER ADDRESS METHOD [OPTIONS] +# hostssl DATABASE USER ADDRESS METHOD [OPTIONS] +# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS] +# +# (The uppercase items must be replaced by actual values.) +# +# The first field is the connection type: "local" is a Unix-domain +# socket, "host" is either a plain or SSL-encrypted TCP/IP socket, +# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a +# plain TCP/IP socket. +# +# DATABASE can be "all", "sameuser", "samerole", "replication", a +# database name, or a comma-separated list thereof. The "all" +# keyword does not match "replication". Access to replication +# must be enabled in a separate record (see example below). +# +# USER can be "all", a user name, a group name prefixed with "+", or a +# comma-separated list thereof. In both the DATABASE and USER fields +# you can also write a file name prefixed with "@" to include names +# from a separate file. 
+# +# ADDRESS specifies the set of hosts the record matches. It can be a +# host name, or it is made up of an IP address and a CIDR mask that is +# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that +# specifies the number of significant bits in the mask. A host name +# that starts with a dot (.) matches a suffix of the actual host name. +# Alternatively, you can write an IP address and netmask in separate +# columns to specify the set of hosts. Instead of a CIDR-address, you +# can write "samehost" to match any of the server's own IP addresses, +# or "samenet" to match any address in any subnet that the server is +# directly connected to. +# +# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256", +# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert". +# Note that "password" sends passwords in clear text; "md5" or +# "scram-sha-256" are preferred since they send encrypted passwords. +# +# OPTIONS are a set of options for the authentication in the format +# NAME=VALUE. The available options depend on the different +# authentication methods -- refer to the "Client Authentication" +# section in the documentation for a list of which options are +# available for which authentication methods. +# +# Database and user names containing spaces, commas, quotes and other +# special characters must be quoted. Quoting one of the keywords +# "all", "sameuser", "samerole" or "replication" makes the name lose +# its special character, and just match a database or username with +# that name. +# +# This file is read on server startup and when the server receives a +# SIGHUP signal. If you edit the file on a running system, you have to +# SIGHUP the server for the changes to take effect, run "pg_ctl reload", +# or execute "SELECT pg_reload_conf()". +# +# Put your actual configuration here +# ---------------------------------- +# +# If you want to allow non-local connections, you need to add more +# "host" records. 
In that case you will also need to make PostgreSQL +# listen on a non-local interface via the listen_addresses +# configuration parameter, or via the -i or -h command line switches. + + + +# TYPE DATABASE USER ADDRESS METHOD + +# "local" is for Unix domain socket connections only +local all all peer map=local +# IPv4 connections: +host mailu mailu {{ SUBNET }} md5 +host postgres health 127.0.0.1/32 trust +# IPv6 local connections: +host all all ::1/128 reject +# Allow replication connections from localhost, by a user with the +# replication privilege. +local replication all peer map=local +host replication all 127.0.0.1/32 reject +host replication all ::1/128 reject diff --git a/optional/postgresql/conf/pg_ident.conf b/optional/postgresql/conf/pg_ident.conf new file mode 100644 index 00000000..4538ff9c --- /dev/null +++ b/optional/postgresql/conf/pg_ident.conf @@ -0,0 +1,44 @@ +# PostgreSQL User Name Maps +# ========================= +# +# Refer to the PostgreSQL documentation, chapter "Client +# Authentication" for a complete description. A short synopsis +# follows. +# +# This file controls PostgreSQL user name mapping. It maps external +# user names to their corresponding PostgreSQL user names. Records +# are of the form: +# +# MAPNAME SYSTEM-USERNAME PG-USERNAME +# +# (The uppercase quantities must be replaced by actual values.) +# +# MAPNAME is the (otherwise freely chosen) map name that was used in +# pg_hba.conf. SYSTEM-USERNAME is the detected user name of the +# client. PG-USERNAME is the requested PostgreSQL user name. The +# existence of a record specifies that SYSTEM-USERNAME may connect as +# PG-USERNAME. +# +# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a +# regular expression. Optionally this can contain a capture (a +# parenthesized subexpression). The substring matching the capture +# will be substituted for \1 (backslash-one) if present in +# PG-USERNAME. 
+# +# Multiple maps may be specified in this file and used by pg_hba.conf. +# +# No map names are defined in the default configuration. If all +# system user names and PostgreSQL user names are the same, you don't +# need anything in this file. +# +# This file is read on server startup and when the postmaster receives +# a SIGHUP signal. If you edit the file on a running system, you have +# to SIGHUP the postmaster for the changes to take effect. You can +# use "pg_ctl reload" to do that. + +# Put your actual configuration here +# ---------------------------------- + +# MAPNAME SYSTEM-USERNAME PG-USERNAME +local postgres postgres +local root postgres diff --git a/optional/postgresql/conf/postgresql.conf b/optional/postgresql/conf/postgresql.conf new file mode 100644 index 00000000..97c5391f --- /dev/null +++ b/optional/postgresql/conf/postgresql.conf @@ -0,0 +1,658 @@ +# ----------------------------- +# PostgreSQL configuration file +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". 
Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: kB = kilobytes Time units: ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. +#external_pid_file = '' # write an extra PID file + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONNECTIONS AND AUTHENTICATION +#------------------------------------------------------------------------------ + +# - Connection Settings - + +listen_addresses = '*' # what IP address(es) to listen on; + # comma-separated list of addresses; + # defaults to 'localhost'; use '*' for all + # (change requires restart) +#port = 5432 # (change requires restart) +max_connections = 100 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +unix_socket_directories = '/run/postgresql,/tmp' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # 
(change requires restart) + +# - Security and Authentication - + +#authentication_timeout = 1min # 1s-600s +#ssl = off +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_dh_params_file = '' +#ssl_cert_file = 'server.crt' +#ssl_key_file = 'server.key' +#ssl_ca_file = '' +#ssl_crl_file = '' +#password_encryption = md5 # md5 or scram-sha-256 +#db_user_namespace = off +#row_security = on + +# GSSAPI using Kerberos +#krb_server_keyfile = '' +#krb_caseins_users = off + +# - TCP Keepalives - +# see "man 7 tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default + + +#------------------------------------------------------------------------------ +# RESOURCE USAGE (except WAL) +#------------------------------------------------------------------------------ + +# - Memory - + +shared_buffers = 128MB # min 128kB + # (change requires restart) +#huge_pages = try # on, off, or try + # (change requires restart) +#temp_buffers = 8MB # min 800kB +#max_prepared_transactions = 0 # zero disables the feature + # (change requires restart) +# Caution: it is not advisable to set max_prepared_transactions nonzero unless +# you actively intend to use prepared transactions. 
+#work_mem = 4MB # min 64kB +#maintenance_work_mem = 64MB # min 1MB +#replacement_sort_tuples = 150000 # limits use of replacement selection sort +#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem +#max_stack_depth = 2MB # min 100kB +dynamic_shared_memory_type = posix # the default is the first option + # supported by the operating system: + # posix + # sysv + # windows + # mmap + # use none to disable dynamic shared memory + # (change requires restart) + +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kB, or -1 for no limit + +# - Kernel Resource Usage - + +#max_files_per_process = 1000 # min 25 + # (change requires restart) +#shared_preload_libraries = '' # (change requires restart) + +# - Cost-Based Vacuum Delay - + +#vacuum_cost_delay = 0 # 0-100 milliseconds +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 10 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +#vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching +#max_worker_processes = 8 # (change requires restart) +#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers +#max_parallel_workers = 8 # maximum number of max_worker_processes that + # can be used in parallel queries +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) +#backend_flush_after = 0 # measured in pages, 0 disables + + +#------------------------------------------------------------------------------ +# WRITE AHEAD LOG +#------------------------------------------------------------------------------ + +# - Settings - + 
+wal_level = replica # minimal, replica, or logical + # (change requires restart) +#fsync = on # flush data to disk for crash safety + # (turning this off can cause + # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_compression = off # enable compression of full-page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables + +#commit_delay = 0 # range 0-100000, in microseconds +#commit_siblings = 5 # range 1-1000 + +# - Checkpoints - + +checkpoint_timeout = 60s # range 30s-1d +#max_wal_size = 1GB +#min_wal_size = 80MB +#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +checkpoint_warning = 15s # 0 disables + +# - Archiving - + +archive_mode = on # enables archiving; off, on, or always + # (change requires restart) +archive_command = 'test ! -f /backup/wal_archive/%f && gzip < %p > /backup/wal_archive/%f' # command to use to archive a logfile segment + # placeholders: %p = path of file to archive + # %f = file name only + # e.g. 'test ! 
-f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +archive_timeout = 60 # force a logfile segment switch after this + # number of seconds; 0 disables + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Server(s) - + +# Set these on the master and on any standby that will send replication data. + +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables + +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Master Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a master server. 
+ +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from master + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt + +# - Subscribers - + +# These settings are ignored on a publisher. + +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + + +#------------------------------------------------------------------------------ +# QUERY TUNING +#------------------------------------------------------------------------------ + +# - Planner Method Configuration - + +#enable_bitmapscan = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +#random_page_cost = 4.0 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above +#min_parallel_table_scan_size = 8MB +#min_parallel_index_scan_size = 512kB +#effective_cache_size = 4GB + 
+# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +#default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#force_parallel_mode = off + + +#------------------------------------------------------------------------------ +# ERROR REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. + # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 
0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (win32): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic + +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +#log_line_prefix = '%m [%p] ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %p = process ID + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # 
%x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. '<%u%%%d> ' +#log_lock_waits = off # log lock waits >= deadlock_timeout +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files +log_timezone = 'UTC' + + +# - Process Title - + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# RUNTIME STATISTICS +#------------------------------------------------------------------------------ + +# - Query/Index Statistics Collector - + +#track_activities = on +#track_counts = on +#track_io_timing = off +#track_functions = none # none, pl, all +#track_activity_query_size = 1024 # (change requires restart) +#stats_temp_directory = 'pg_stat_tmp' + + +# - Statistics Monitoring - + +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off +#log_statement_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM PARAMETERS +#------------------------------------------------------------------------------ + +#autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. +#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. 
+#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#search_path = '"$user", public' # schema names +#default_tablespace = '' # a tablespace name, '' uses the default +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled +#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_min_age = 50000000 +#vacuum_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_freeze_table_age = 150000000 +#bytea_output = 'hex' # hex, escape +#xmlbinary 
= 'base64' +#xmloption = 'content' +#gin_fuzzy_search_limit = 0 +#gin_pending_list_limit = 4MB + +# - Locale and Formatting - + +datestyle = 'iso, mdy' +#intervalstyle = 'postgres' +timezone = 'UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 0 # min -15, max 3 +#client_encoding = sql_ascii # actually, defaults to database + # encoding + +# These settings are initialized by initdb, but they can be changed. +lc_messages = 'C' # locale for system error message + # strings +lc_monetary = 'C' # locale for monetary formatting +lc_numeric = 'C' # locale for number formatting +lc_time = 'C' # locale for time formatting + +# default configuration for text search +default_text_search_config = 'pg_catalog.english' + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#local_preload_libraries = '' +#session_preload_libraries = '' + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +#max_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION/PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#default_with_oids = off +#escape_string_warning = on +#lo_compat_privileges = off 
+#operator_precedence_warning = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. + +#include_dir = 'conf.d' # include files ending in '.conf' from + # directory 'conf.d' +#include_if_exists = 'exists.conf' # include file only if it exists +#include = 'special.conf' # include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/optional/postgresql/conf/queries.sql b/optional/postgresql/conf/queries.sql new file mode 100644 index 00000000..4ed9c58d --- /dev/null +++ b/optional/postgresql/conf/queries.sql @@ -0,0 +1,42 @@ +-- name: create_mailu_user! +-- Create the mailu user if it does not exist. +do $$ +begin + create user mailu; + exception when others then + raise notice 'mailu user not created -- already exists'; +end +$$; + +-- name: create_health_user! +-- Create the health user if it does not exist. +do $$ +begin + create user health; + exception when others then + raise notice 'health user not created -- already exists'; +end +$$; + +-- name: grant_health!
+-- Grant connect permission for the health user +grant connect + on database postgres + to health; + +-- name: update_pw! +alter + user mailu + password :pw; + +-- name: check_db +-- check if the mailu db exists +select 1 + from pg_database + where datname = 'mailu'; + +-- name: create_db! +-- create the mailu db +create + database mailu + owner mailu; diff --git a/optional/postgresql/postgres_crontab b/optional/postgresql/postgres_crontab new file mode 100644 index 00000000..ce87e1e2 --- /dev/null +++ b/optional/postgresql/postgres_crontab @@ -0,0 +1 @@ +11 4 * * 7 /basebackup.sh > /proc/1/fd/1 2>/proc/1/fd/2 diff --git a/optional/postgresql/start.py b/optional/postgresql/start.py new file mode 100755 index 00000000..a715957f --- /dev/null +++ b/optional/postgresql/start.py @@ -0,0 +1,67 @@ +#!/usr/bin/python3 + +import anosql +import psycopg2 +import jinja2 +import glob +import os +import subprocess + +def setup(): + conn = psycopg2.connect(user='postgres') + queries = anosql.load_queries('postgres', '/conf/queries.sql') + # Mailu user + queries.create_mailu_user(conn) + queries.update_pw(conn, pw=os.environ.get("DB_PW")) + # Healthcheck user + queries.create_health_user(conn) + queries.grant_health(conn) + conn.commit() + # create db cannot be atomic. But this script is the only active connection, this is kinda safe. 
+    if not queries.check_db(conn):
+        conn.set_isolation_level(0)
+        queries.create_db(conn)
+        conn.set_isolation_level(1)
+    conn.close()
+
+# Check if /data is empty
+if not os.listdir("/data"):
+    os.system("chown -R postgres:postgres /data")
+    os.system("chmod 0700 /data")
+    base_backups=sorted(glob.glob("/backup/base-*"))
+    if base_backups:
+        # Restore the latest backup
+        subprocess.call(["tar", "--same-owner", "-zpxf", base_backups[-1] + "/base.tar.gz" , "-C", "/data"])
+        if os.listdir("/backup/wal_archive"):
+            with open("/data/recovery.conf", "w") as rec:
+                rec.write("restore_command = 'gunzip < /backup/wal_archive/%f > %p'\n")
+                rec.write("standby_mode = off\n")
+            os.system("chown postgres:postgres /data/recovery.conf")
+            #os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
+    else:
+        # Bootstrap the database
+        os.system("sudo -u postgres initdb -D /data")
+
+# Create backup directory structure, if it does not yet exist
+os.system("mkdir -p /backup/wal_archive")
+os.system("chown -R postgres:postgres /backup")
+
+# Render config files
+convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ))
+for pg_file in glob.glob("/conf/*.conf"):
+    convert(pg_file, os.path.join("/data", os.path.basename(pg_file)))
+
+# (Re)start postgresql locally for DB and user creation
+os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
+while os.path.isfile("/data/recovery.conf"):
+    pass
+os.system("sudo -u postgres pg_ctl -D /data promote")
+setup()
+os.system("sudo -u postgres pg_ctl stop -m smart -w -D /data")
+
+out=open("/proc/1/fd/1", "w")
+err=open("/proc/1/fd/2", "w")
+# Run the cron daemon
+subprocess.Popen(["crond", "-f"], stdout=out, stderr=err)
+# Run postgresql service
+os.system("sudo -u postgres postgres -D /data -h \*")
diff --git a/setup/flavors/compose/docker-compose.yml b/setup/flavors/compose/docker-compose.yml
index 579de56f..67408bee 100644
--- a/setup/flavors/compose/docker-compose.yml
+++
b/setup/flavors/compose/docker-compose.yml @@ -13,7 +13,7 @@ services: restart: always volumes: - "{{ root }}/redis:/data" - + # Core services front: image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}nginx:${MAILU_VERSION:-{{ version }}} @@ -148,6 +148,16 @@ services: - imap {% endif %} + {% if db_flavor == 'postgresql' and postgresql == 'internal' %} + database: + image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}postgresql:${MAILU_VERSION:-{{ version }}} + restart: always + env_file: {{ env }} + volumes: + - "{{ root }}/data/psql_db:/data" + - "{{ root }}/data/psql_backup:/backup" + {% endif %} + networks: default: driver: bridge diff --git a/setup/flavors/compose/mailu.env b/setup/flavors/compose/mailu.env index 2d2b8735..7d160011 100644 --- a/setup/flavors/compose/mailu.env +++ b/setup/flavors/compose/mailu.env @@ -163,3 +163,18 @@ REJECT_UNLISTED_RECIPIENT={{ reject_unlisted_recipient }} # Log level threshold in start.py (value: CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET) LOG_LEVEL=WARNING + +################################### +# Database settings +################################### +DB_FLAVOR={{ db_flavor }} +{% if db_flavor == 'postgresql' and postgresql == 'internal' %} +DB_PW={{ secret(16) }} +{% endif %} +{% if postgresql == 'external' or db_flavor == 'mysql' %} +DB_USER={{ db_user }} +DB_PW={{ db_pw }} +DB_URL={{ db_url }} +DB_NAME={{ db_name }} +{% endif %} + diff --git a/setup/flavors/stack/docker-compose.yml b/setup/flavors/stack/docker-compose.yml index 38c07a40..480aafeb 100644 --- a/setup/flavors/stack/docker-compose.yml +++ b/setup/flavors/stack/docker-compose.yml @@ -137,6 +137,14 @@ services: replicas: 1 {% endif %} + {% if db_flavor == 'postgresql' and postgresql == 'internal' %} + database: + image: ${DOCKER_ORG:-mailu}/postgresql:${MAILU_VERSION:-{{ version }}} + env_file: {{ env }} + volumes: + - "{{ root }}/data/psql_backup:/backup" + {% endif %} + networks: default: driver: overlay diff --git a/setup/server.py b/setup/server.py index 
4474cd6c..fea27ead 100644 --- a/setup/server.py +++ b/setup/server.py @@ -8,6 +8,7 @@ import uuid import string import random import ipaddress +import hashlib version = os.getenv("this_version") diff --git a/setup/static/render.js b/setup/static/render.js index a1c3fb0d..23afcbec 100644 --- a/setup/static/render.js +++ b/setup/static/render.js @@ -31,4 +31,58 @@ $(document).ready(function() { $("#admin_path").attr("value", ""); } }); -}); \ No newline at end of file +}); + +$(document).ready(function() { + if ($("#database").val() == 'sqlite') { + $("#postgres_db").hide(); + } else if ($("#database").val() == 'postgresql') { + $("#postgres_db").show(); + } else if ($("#database").val() == 'mysql') { + $("#external_db").show(); + } + if ($('#external_psql').prop('checked')) { + $("#external_db").show(); + } + $("#database").click(function() { + if (this.value == 'sqlite') { + $("#postgres_db").hide(); + $("#external_db").hide(); + } else if (this.value == 'postgresql') { + $("#postgres_db").show(); + $("#external_db").hide(); + $("#external_db").prop('checked', false); + } else if (this.value == 'mysql') { + $("#postgres_db").hide(); + $("#external_db").show(); + $("#db_user").prop('required',true); + $("#db_pw").prop('required',true); + $("#db_url").prop('required',true); + $("#db_name").prop('required',true); + } + }); + $("#external_psql").change(function() { + if ($(this).is(":checked")) { + $("#external_db").show(); + $("#db_user").prop('required',true); + $("#db_pw").prop('required',true); + $("#db_url").prop('required',true); + $("#db_name").prop('required',true); + } else { + $("#external_db").hide(); + } + }); + $("#internal_psql").change(function() { + if ($(this).is(":checked")) { + $("#external_db").hide(); + $("#db_user").val(""); + $("#db_pw").val(""); + $("#db_url").val(""); + $("#db_name").val(""); + $("#db_user").prop('required',false); + $("#db_pw").prop('required',false); + $("#db_url").prop('required',false); + 
$("#db_name").prop('required',false); + } + }); +}); diff --git a/setup/templates/steps/database.html b/setup/templates/steps/database.html new file mode 100644 index 00000000..af5334cc --- /dev/null +++ b/setup/templates/steps/database.html @@ -0,0 +1,40 @@ +{% call macros.panel("info", "Database preferences") %} + +
+ +
+ +

+ + +
+ +{% endcall %} \ No newline at end of file diff --git a/setup/templates/wizard.html b/setup/templates/wizard.html index 05de5a5c..e618b716 100644 --- a/setup/templates/wizard.html +++ b/setup/templates/wizard.html @@ -19,6 +19,7 @@ {%for file in steps %} {% include "steps/" + flavor + "/" + file %} {% endfor %} + {% include "steps/database.html" %} {% endif %} diff --git a/tests/build.yml b/tests/build.yml index e06327cd..dd56dc41 100644 --- a/tests/build.yml +++ b/tests/build.yml @@ -38,6 +38,10 @@ services: image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}admin:${MAILU_VERSION:-local} build: ../core/admin + postgresql: + image: ${DOCKER_ORG:-mailu}/postgresql:${MAILU_VERSION:-local} + build: ../optional/postgresql + roundcube: image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}roundcube:${MAILU_VERSION:-local} build: ../webmails/roundcube