Merge pull request #788 from usrpro/feat-psql-support
Multiple databases support and postgresql image
commit
c484c5326b
@ -0,0 +1,79 @@
|
|||||||
|
""" Fix constraint naming by addint a name to all constraints
|
||||||
|
|
||||||
|
Revision ID: 546b04c886f0
|
||||||
|
Revises: 5aeb5811408e
|
||||||
|
Create Date: 2018-12-08 16:33:37.757634
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
# revision identifiers, used by Alembic.
|
||||||
|
revision = '546b04c886f0'
|
||||||
|
down_revision = 'cd79ed46d9c2'
|
||||||
|
|
||||||
|
from alembic import op, context
|
||||||
|
import sqlalchemy as sa
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade():
|
||||||
|
# Only run this for somehow supported data types at the date we started naming constraints
|
||||||
|
# Among others, these will probably fail on MySQL
|
||||||
|
if context.get_bind().engine.name not in ('sqlite', 'postgresql'):
|
||||||
|
return
|
||||||
|
|
||||||
|
metadata = context.get_context().opts['target_metadata']
|
||||||
|
|
||||||
|
# Drop every constraint on every table
|
||||||
|
with op.batch_alter_table('alias', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.drop_constraint('alias_pkey', type_="primary")
|
||||||
|
batch_op.drop_constraint('alias_domain_name_fkey', type_="foreignkey")
|
||||||
|
with op.batch_alter_table('alternative', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.drop_constraint('alternative_pkey', type_="primary")
|
||||||
|
batch_op.drop_constraint('alternative_domain_name_fkey', type_="foreignkey")
|
||||||
|
with op.batch_alter_table('manager', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.drop_constraint('manager_domain_name_fkey', type_="foreignkey")
|
||||||
|
batch_op.drop_constraint('manager_user_email_fkey', type_="foreignkey")
|
||||||
|
with op.batch_alter_table('token', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.drop_constraint('token_pkey', type_="primary")
|
||||||
|
batch_op.drop_constraint('token_user_email_fkey', type_="foreignkey")
|
||||||
|
with op.batch_alter_table('fetch', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.drop_constraint('fetch_pkey', type_="primary")
|
||||||
|
batch_op.drop_constraint('fetch_user_email_fkey', type_="foreignkey")
|
||||||
|
with op.batch_alter_table('relay', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.drop_constraint('relay_pkey', type_="primary")
|
||||||
|
with op.batch_alter_table('config', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.drop_constraint('config_pkey', type_="primary")
|
||||||
|
with op.batch_alter_table('user', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.drop_constraint('user_pkey', type_="primary")
|
||||||
|
batch_op.drop_constraint('user_domain_name_fkey', type_="foreignkey")
|
||||||
|
with op.batch_alter_table('domain', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.drop_constraint('domain_pkey', type_="primary")
|
||||||
|
|
||||||
|
# Recreate constraints with proper names
|
||||||
|
with op.batch_alter_table('domain', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.create_primary_key('domain_pkey', ['name'])
|
||||||
|
with op.batch_alter_table('alias', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.create_primary_key('alias_pkey', ['email'])
|
||||||
|
batch_op.create_foreign_key('alias_domain_name_fkey', 'domain', ['domain_name'], ['name'])
|
||||||
|
with op.batch_alter_table('user', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.create_primary_key('user_pkey', ['email'])
|
||||||
|
batch_op.create_foreign_key('user_domain_name_fkey', 'domain', ['domain_name'], ['name'])
|
||||||
|
with op.batch_alter_table('alternative', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.create_primary_key('alternative_pkey', ['name'])
|
||||||
|
batch_op.create_foreign_key('alternative_domain_name_fkey', 'domain', ['domain_name'], ['name'])
|
||||||
|
with op.batch_alter_table('manager', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.create_foreign_key('manager_domain_name_fkey', 'domain', ['domain_name'], ['name'])
|
||||||
|
batch_op.create_foreign_key('manager_user_email_fkey', 'user', ['user_email'], ['email'])
|
||||||
|
with op.batch_alter_table('token', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.create_primary_key('token_pkey', ['id'])
|
||||||
|
batch_op.create_foreign_key('token_user_email_fkey', 'user', ['user_email'], ['email'])
|
||||||
|
with op.batch_alter_table('fetch', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.create_primary_key('fetch_pkey', ['id'])
|
||||||
|
batch_op.create_foreign_key('fetch_user_email_fkey', 'user', ['user_email'], ['email'])
|
||||||
|
with op.batch_alter_table('relay', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.create_primary_key('relay_pkey', ['name'])
|
||||||
|
with op.batch_alter_table('config', naming_convention=metadata.naming_convention) as batch_op:
|
||||||
|
batch_op.create_primary_key('config_pkey', ['name'])
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade():
|
||||||
|
pass
|
@ -0,0 +1,142 @@
|
|||||||
|
""" Convert all domains and emails to lowercase
|
||||||
|
|
||||||
|
Revision ID: 5aeb5811408e
|
||||||
|
Revises: cd79ed46d9c2
|
||||||
|
Create Date: 2018-12-06 16:07:23.380579
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
# revision identifiers, used by Alembic.
|
||||||
|
revision = '5aeb5811408e'
|
||||||
|
down_revision = 'f1393877871d'
|
||||||
|
|
||||||
|
from alembic import op, config
|
||||||
|
import sqlalchemy as sa
|
||||||
|
|
||||||
|
|
||||||
|
name_column = lambda: sa.Column('name', sa.String(80), primary_key=True)
|
||||||
|
domain_name_column = lambda: sa.Column('domain_name', sa.String(80))
|
||||||
|
user_email_column = lambda: sa.Column('user_email', sa.String(255))
|
||||||
|
email_columns = lambda: [
|
||||||
|
sa.Column('email', sa.String(255), primary_key=True),
|
||||||
|
sa.Column('localpart', sa.String(80)),
|
||||||
|
domain_name_column()
|
||||||
|
]
|
||||||
|
id_columns = lambda: [
|
||||||
|
sa.Column('id', sa.Integer(), primary_key=True),
|
||||||
|
user_email_column()
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
domain_table = sa.Table('domain', sa.MetaData(), name_column())
|
||||||
|
relay_table = sa.Table('relay', sa.MetaData(), name_column())
|
||||||
|
alternative_table = sa.Table('alternative', sa.MetaData(), name_column(), domain_name_column())
|
||||||
|
user_table = sa.Table('user', sa.MetaData(), *email_columns())
|
||||||
|
alias_table = sa.Table('alias', sa.MetaData(), *email_columns())
|
||||||
|
fetch_table = sa.Table('fetch', sa.MetaData(), *id_columns())
|
||||||
|
token_table = sa.Table('token', sa.MetaData(), *id_columns())
|
||||||
|
manager_table = sa.Table('manager', sa.MetaData(), domain_name_column(), user_email_column())
|
||||||
|
|
||||||
|
|
||||||
|
def upgrade():
|
||||||
|
connection = op.get_bind()
|
||||||
|
|
||||||
|
# drop foreign key constraints
|
||||||
|
with op.batch_alter_table('alias') as batch_op:
|
||||||
|
batch_op.drop_constraint('alias_domain_name_fkey', type_='foreignkey')
|
||||||
|
with op.batch_alter_table('alternative') as batch_op:
|
||||||
|
batch_op.drop_constraint('alternative_domain_name_fkey', type_='foreignkey')
|
||||||
|
with op.batch_alter_table('manager') as batch_op:
|
||||||
|
batch_op.drop_constraint('manager_domain_name_fkey', type_='foreignkey')
|
||||||
|
batch_op.drop_constraint('manager_user_email_fkey', type_='foreignkey')
|
||||||
|
with op.batch_alter_table('token') as batch_op:
|
||||||
|
batch_op.drop_constraint('token_user_email_fkey', type_='foreignkey')
|
||||||
|
with op.batch_alter_table('fetch') as batch_op:
|
||||||
|
batch_op.drop_constraint('fetch_user_email_fkey', type_='foreignkey')
|
||||||
|
with op.batch_alter_table('user') as batch_op:
|
||||||
|
batch_op.drop_constraint('user_domain_name_fkey', type_='foreignkey')
|
||||||
|
|
||||||
|
# lower domain names
|
||||||
|
for domain in connection.execute(domain_table.select()):
|
||||||
|
connection.execute(domain_table.update().where(
|
||||||
|
domain_table.c.name == domain.name
|
||||||
|
).values(
|
||||||
|
name=domain.name.lower()
|
||||||
|
))
|
||||||
|
# lower alternatives
|
||||||
|
for alternative in connection.execute(alternative_table.select()):
|
||||||
|
connection.execute(alternative_table.update().where(
|
||||||
|
alternative_table.c.name == alternative.name
|
||||||
|
).values(
|
||||||
|
name=alternative.name.lower(),
|
||||||
|
domain_name=alternative.domain_name.lower()
|
||||||
|
))
|
||||||
|
# lower users
|
||||||
|
for user in connection.execute(user_table.select()):
|
||||||
|
connection.execute(user_table.update().where(
|
||||||
|
user_table.c.email == user.email
|
||||||
|
).values(
|
||||||
|
email=user.email.lower(),
|
||||||
|
localpart=user.localpart.lower(),
|
||||||
|
domain_name=user.domain_name.lower()
|
||||||
|
))
|
||||||
|
# lower aliases
|
||||||
|
for alias in connection.execute(alias_table.select()):
|
||||||
|
connection.execute(alias_table.update().where(
|
||||||
|
alias_table.c.email == alias.email
|
||||||
|
).values(
|
||||||
|
email=alias.email.lower(),
|
||||||
|
localpart=alias.localpart.lower(),
|
||||||
|
domain_name=alias.domain_name.lower()
|
||||||
|
))
|
||||||
|
# lower fetches
|
||||||
|
for fetch in connection.execute(fetch_table.select()):
|
||||||
|
connection.execute(fetch_table.update().where(
|
||||||
|
fetch_table.c.id == fetch.id
|
||||||
|
).values(
|
||||||
|
user_email=fetch.user_email.lower()
|
||||||
|
))
|
||||||
|
# lower tokens
|
||||||
|
for token in connection.execute(token_table.select()):
|
||||||
|
connection.execute(token_table.update().where(
|
||||||
|
token_table.c.id == token.id
|
||||||
|
).values(
|
||||||
|
user_email=token.user_email.lower()
|
||||||
|
))
|
||||||
|
# lower relays
|
||||||
|
for relay in connection.execute(relay_table.select()):
|
||||||
|
connection.execute(relay_table.update().where(
|
||||||
|
relay_table.c.name == relay.name
|
||||||
|
).values(
|
||||||
|
name=relay.name.lower()
|
||||||
|
))
|
||||||
|
# lower managers
|
||||||
|
for manager in connection.execute(manager_table.select()):
|
||||||
|
connection.execute(manager_table.update().where(
|
||||||
|
sa.and_(
|
||||||
|
manager_table.c.domain_name == manager.domain_name,
|
||||||
|
manager_table.c.user_email == manager.user_email
|
||||||
|
)
|
||||||
|
).values(
|
||||||
|
domain_name=manager.domain_name.lower(),
|
||||||
|
user_email=manager.user_email.lower()
|
||||||
|
))
|
||||||
|
|
||||||
|
# restore foreign key constraints
|
||||||
|
with op.batch_alter_table('alias') as batch_op:
|
||||||
|
batch_op.create_foreign_key('alias_domain_name_fkey', 'domain', ['domain_name'], ['name'])
|
||||||
|
with op.batch_alter_table('user') as batch_op:
|
||||||
|
batch_op.create_foreign_key('user_domain_name_fkey', 'domain', ['domain_name'], ['name'])
|
||||||
|
with op.batch_alter_table('alternative') as batch_op:
|
||||||
|
batch_op.create_foreign_key('alternative_domain_name_fkey', 'domain', ['domain_name'], ['name'])
|
||||||
|
with op.batch_alter_table('manager') as batch_op:
|
||||||
|
batch_op.create_foreign_key('manager_domain_name_fkey', 'domain', ['domain_name'], ['name'])
|
||||||
|
batch_op.create_foreign_key('manager_user_email_fkey', 'user', ['user_email'], ['email'])
|
||||||
|
with op.batch_alter_table('token') as batch_op:
|
||||||
|
batch_op.create_foreign_key('token_user_email_fkey', 'user', ['user_email'], ['email'])
|
||||||
|
with op.batch_alter_table('fetch') as batch_op:
|
||||||
|
batch_op.create_foreign_key('fetch_user_email_fkey', 'user', ['user_email'], ['email'])
|
||||||
|
|
||||||
|
|
||||||
|
def downgrade():
|
||||||
|
pass
|
@ -0,0 +1,28 @@
""" Add default columns to the configuration table

Revision ID: f1393877871d
Revises: 546b04c886f0
Create Date: 2018-12-09 16:15:42.317104

"""

# revision identifiers, used by Alembic.
revision = 'f1393877871d'
down_revision = '546b04c886f0'

from alembic import op
import sqlalchemy as sa


def upgrade():
    with op.batch_alter_table('config') as batch_op:
        batch_op.add_column(sa.Column('comment', sa.String(length=255), nullable=True))
        batch_op.add_column(sa.Column('created_at', sa.Date(), nullable=False, server_default='1900-01-01'))
        batch_op.add_column(sa.Column('updated_at', sa.Date(), nullable=True))


def downgrade():
    with op.batch_alter_table('config') as batch_op:
        batch_op.drop_column('updated_at')
        batch_op.drop_column('created_at')
        batch_op.drop_column('comment')

@ -0,0 +1,112 @@
Changing the database back-end
==============================

By default, Mailu uses a SQLite database. Recently, we have changed the internals of Mailu
to enable support for alternative database back-ends such as PostgreSQL and MySQL/MariaDB.
This functionality should still be considered experimental!

Mailu PostgreSQL
----------------

Mailu optionally comes with a pre-configured PostgreSQL image.
This image has the following features:

- Automatic creation of users, database, extensions and password;
- TCP connections are only allowed from the Mailu ``SUBNET``;
- Automatic minutely *WAL archiving* and weekly ``pg_basebackup``;
- Automatic cleaning of *WAL archives* and *base backups*;
  two versions always remain available;
- When ``/data`` is empty and backups are present, the backups are restored automatically;
  useful in swarm environments, since the ``/data`` directory should not be on any network
  filesystem (for performance reasons).

To make use of this functionality, just select *postgresql* as database flavor and don't
select the usage of an external database. The ``docker-compose.yml`` and ``mailu.env`` will
then pull in ``mailu/postgresql``. This image and ``mailu/admin`` contain all the scripts
needed to set up the database automatically.

After bringing up the service, it might be useful to check the logs with:

.. code-block:: bash

  docker-compose logs -f admin database
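
For reference, this is roughly what the database part of the generated ``mailu.env`` looks
like when the bundled image is selected. The exact variable names (``DB_FLAVOR``, ``DB_HOST``,
``DB_USER``, ``DB_PW``, ``DB_NAME``) are an assumption based on this changeset, not
authoritative documentation; verify them against the file produced by the setup utility.

.. code-block:: bash

  # Hypothetical sketch of the mailu.env database settings
  DB_FLAVOR=postgresql
  DB_HOST=database
  DB_NAME=mailu
  DB_USER=mailu
  DB_PW=my_secure_pass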

External PostgreSQL
-------------------

It is also possible to use a PostgreSQL database server hosted elsewhere.
In this case you'll have to take care to create an empty database for Mailu, a corresponding
user and password, and sufficient privileges on the database to ``CREATE TABLE``, ``DROP`` etc.
Usually, making the user owner of the database is the easiest way to achieve this.
Don't forget to set ``pg_hba.conf`` accordingly.

The following commands can serve as an example on how to set up PostgreSQL for Mailu usage.
Adjust them to your own liking.

.. code-block:: bash

  $ sudo su - postgres
  $ psql
  psql (10.6)
  Type "help" for help.

  postgres=# create user mailu;
  CREATE ROLE
  postgres=# alter user mailu password 'my_secure_pass';
  ALTER ROLE
  postgres=# create database mailu owner mailu;
  CREATE DATABASE
  postgres=# \c mailu
  You are now connected to database "mailu" as user "postgres".
  mailu=# create extension citext;
  CREATE EXTENSION
  mailu=# \q

In ``pg_hba.conf`` there should be a line like this:

.. code-block:: bash

  host    mailu    mailu    <mailu_host>/32    md5

Note that this example is the bare minimum to get Mailu working. It goes without saying that
the database admin will have to set up their own means of backups and TLS-encrypted connections.
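
Once the grants are in place, a quick way to verify the setup is a standard PostgreSQL
connection URI; the host name and password below are placeholders for your own values. The
same user, password, URL and database name are what the setup form asks for under
"Set external database parameters".

.. code-block:: bash

  psql "postgresql://mailu:my_secure_pass@<mailu_host>:5432/mailu" -c "select 1 as ok;"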

External MySQL/MariaDB
----------------------

It is also possible to use a MySQL/MariaDB database server hosted elsewhere.
In this case you'll have to take care to create an empty database for Mailu, a corresponding
user and password, and sufficient privileges on the database to ``CREATE TABLE``, ``DROP`` etc.
Usually, making the user owner of the database is the easiest way to achieve this.

The following commands can serve as an example on how to set up MySQL/MariaDB for Mailu usage.
Adjust them to your own liking.

.. code-block:: sql

    mysql> CREATE DATABASE mailu;
    mysql> CREATE USER 'mailu'@'%' IDENTIFIED BY 'my-strong-password-here';
    mysql> GRANT ALL PRIVILEGES ON mailu.* TO 'mailu'@'%';
    mysql> FLUSH PRIVILEGES;

Note that if you get any errors related to ``caching_sha2_password``, they can be solved by
switching the ``mailu`` user from the newer ``caching_sha2_password`` authentication plugin
to ``mysql_native_password``:

.. code-block:: sql

    mysql> SELECT host, user, plugin FROM mysql.user;

    +-----------+-------+-----------------------+
    | host      | user  | plugin                |
    +-----------+-------+-----------------------+
    | %         | mailu | caching_sha2_password |
    +-----------+-------+-----------------------+

    mysql> update mysql.user set plugin = 'mysql_native_password' where user = 'mailu';
    mysql> SELECT host, user, plugin FROM mysql.user;

    +------+-------+-----------------------+
    | host | user  | plugin                |
    +------+-------+-----------------------+
    | %    | mailu | mysql_native_password |
    +------+-------+-----------------------+
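
On MySQL 8.0, the same switch can alternatively be made with ``ALTER USER`` instead of
updating ``mysql.user`` directly; this is a sketch, adjust the user and password to your
own setup:

.. code-block:: sql

    mysql> ALTER USER 'mailu'@'%' IDENTIFIED WITH mysql_native_password BY 'my-strong-password-here';
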
@ -0,0 +1,33 @@
|
|||||||
|
FROM alpine:3.8
|
||||||
|
# python3 shared with most images
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
python3 py3-pip \
|
||||||
|
&& pip3 install --upgrade pip
|
||||||
|
# Shared layer between rspamd, postfix, dovecot, unbound and nginx
|
||||||
|
RUN pip3 install jinja2
|
||||||
|
# Image specific layers under this line
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
postgresql postgresql-libs busybox-suid sudo tar \
|
||||||
|
&& apk add --virtual .build-deps gcc musl-dev postgresql-dev python3-dev \
|
||||||
|
&& pip3 install psycopg2 anosql==0.3.1 \
|
||||||
|
&& apk --purge del .build-deps
|
||||||
|
|
||||||
|
COPY start.py /start.py
|
||||||
|
COPY basebackup.sh /basebackup.sh
|
||||||
|
COPY conf /conf
|
||||||
|
|
||||||
|
COPY postgres_crontab /etc/postgres_crontab
|
||||||
|
RUN crontab /etc/postgres_crontab
|
||||||
|
|
||||||
|
ENV LANG en_US.UTF-8
|
||||||
|
|
||||||
|
RUN mkdir -p /data /backup /run/postgresql \
|
||||||
|
&& chown -R postgres:postgres /run/postgresql \
|
||||||
|
&& chmod 2777 /run/postgresql
|
||||||
|
|
||||||
|
VOLUME /data
|
||||||
|
VOLUME /backup
|
||||||
|
EXPOSE 5432
|
||||||
|
|
||||||
|
CMD /start.py
|
||||||
|
HEALTHCHECK CMD psql -h 127.0.0.1 -d postgres -U health -c "select 1 as ok;" || exit 1
|
@ -0,0 +1,28 @@
#!/bin/sh

dest="/backup/base-$(date +%F-%H%M)"
last=$(ls -d /backup/base* | tail -n1)
mkdir $dest || exit $?

pg_basebackup --wal-method=none --pgdata=$dest --format=tar --gzip --username=postgres || exit $?

# Clean old base backups, keep the last and the current.
for d in /backup/base*; do
    if [ "$d" == "$last" ] || [ "$d" == "$dest" ]; then
        continue
    fi
    rm -r $d || exit $?
done

# Clean the wal archive
cd /backup/wal_archive || exit $?
if [ $(ls *.*.backup | wc -l) -lt 2 ]; then
    exit 0
fi
# Find the single last wal.backup point
prev_wal_start="$(ls *.*.backup | tail -n2 | head -n1 | cut -d '.' -f 1)"
for f in $(ls) ; do
    if [ "$f" \< "$prev_wal_start" ]; then
        rm -v /backup/wal_archive/$f
    fi
done
@ -0,0 +1,90 @@
|
|||||||
|
# PostgreSQL Client Authentication Configuration File
|
||||||
|
# ===================================================
|
||||||
|
#
|
||||||
|
# Refer to the "Client Authentication" section in the PostgreSQL
|
||||||
|
# documentation for a complete description of this file. A short
|
||||||
|
# synopsis follows.
|
||||||
|
#
|
||||||
|
# This file controls: which hosts are allowed to connect, how clients
|
||||||
|
# are authenticated, which PostgreSQL user names they can use, which
|
||||||
|
# databases they can access. Records take one of these forms:
|
||||||
|
#
|
||||||
|
# local DATABASE USER METHOD [OPTIONS]
|
||||||
|
# host DATABASE USER ADDRESS METHOD [OPTIONS]
|
||||||
|
# hostssl DATABASE USER ADDRESS METHOD [OPTIONS]
|
||||||
|
# hostnossl DATABASE USER ADDRESS METHOD [OPTIONS]
|
||||||
|
#
|
||||||
|
# (The uppercase items must be replaced by actual values.)
|
||||||
|
#
|
||||||
|
# The first field is the connection type: "local" is a Unix-domain
|
||||||
|
# socket, "host" is either a plain or SSL-encrypted TCP/IP socket,
|
||||||
|
# "hostssl" is an SSL-encrypted TCP/IP socket, and "hostnossl" is a
|
||||||
|
# plain TCP/IP socket.
|
||||||
|
#
|
||||||
|
# DATABASE can be "all", "sameuser", "samerole", "replication", a
|
||||||
|
# database name, or a comma-separated list thereof. The "all"
|
||||||
|
# keyword does not match "replication". Access to replication
|
||||||
|
# must be enabled in a separate record (see example below).
|
||||||
|
#
|
||||||
|
# USER can be "all", a user name, a group name prefixed with "+", or a
|
||||||
|
# comma-separated list thereof. In both the DATABASE and USER fields
|
||||||
|
# you can also write a file name prefixed with "@" to include names
|
||||||
|
# from a separate file.
|
||||||
|
#
|
||||||
|
# ADDRESS specifies the set of hosts the record matches. It can be a
|
||||||
|
# host name, or it is made up of an IP address and a CIDR mask that is
|
||||||
|
# an integer (between 0 and 32 (IPv4) or 128 (IPv6) inclusive) that
|
||||||
|
# specifies the number of significant bits in the mask. A host name
|
||||||
|
# that starts with a dot (.) matches a suffix of the actual host name.
|
||||||
|
# Alternatively, you can write an IP address and netmask in separate
|
||||||
|
# columns to specify the set of hosts. Instead of a CIDR-address, you
|
||||||
|
# can write "samehost" to match any of the server's own IP addresses,
|
||||||
|
# or "samenet" to match any address in any subnet that the server is
|
||||||
|
# directly connected to.
|
||||||
|
#
|
||||||
|
# METHOD can be "trust", "reject", "md5", "password", "scram-sha-256",
|
||||||
|
# "gss", "sspi", "ident", "peer", "pam", "ldap", "radius" or "cert".
|
||||||
|
# Note that "password" sends passwords in clear text; "md5" or
|
||||||
|
# "scram-sha-256" are preferred since they send encrypted passwords.
|
||||||
|
#
|
||||||
|
# OPTIONS are a set of options for the authentication in the format
|
||||||
|
# NAME=VALUE. The available options depend on the different
|
||||||
|
# authentication methods -- refer to the "Client Authentication"
|
||||||
|
# section in the documentation for a list of which options are
|
||||||
|
# available for which authentication methods.
|
||||||
|
#
|
||||||
|
# Database and user names containing spaces, commas, quotes and other
|
||||||
|
# special characters must be quoted. Quoting one of the keywords
|
||||||
|
# "all", "sameuser", "samerole" or "replication" makes the name lose
|
||||||
|
# its special character, and just match a database or username with
|
||||||
|
# that name.
|
||||||
|
#
|
||||||
|
# This file is read on server startup and when the server receives a
|
||||||
|
# SIGHUP signal. If you edit the file on a running system, you have to
|
||||||
|
# SIGHUP the server for the changes to take effect, run "pg_ctl reload",
|
||||||
|
# or execute "SELECT pg_reload_conf()".
|
||||||
|
#
|
||||||
|
# Put your actual configuration here
|
||||||
|
# ----------------------------------
|
||||||
|
#
|
||||||
|
# If you want to allow non-local connections, you need to add more
|
||||||
|
# "host" records. In that case you will also need to make PostgreSQL
|
||||||
|
# listen on a non-local interface via the listen_addresses
|
||||||
|
# configuration parameter, or via the -i or -h command line switches.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# TYPE DATABASE USER ADDRESS METHOD
|
||||||
|
|
||||||
|
# "local" is for Unix domain socket connections only
|
||||||
|
local all all peer map=local
|
||||||
|
# IPv4 connections:
|
||||||
|
host mailu mailu {{ SUBNET }} md5
|
||||||
|
host postgres health 127.0.0.1/32 trust
|
||||||
|
# IPv6 local connections:
|
||||||
|
host all all ::1/128 reject
|
||||||
|
# Allow replication connections from localhost, by a user with the
|
||||||
|
# replication privilege.
|
||||||
|
local replication all peer map=local
|
||||||
|
host replication all 127.0.0.1/32 reject
|
||||||
|
host replication all ::1/128 reject
|
@ -0,0 +1,44 @@
|
|||||||
|
# PostgreSQL User Name Maps
|
||||||
|
# =========================
|
||||||
|
#
|
||||||
|
# Refer to the PostgreSQL documentation, chapter "Client
|
||||||
|
# Authentication" for a complete description. A short synopsis
|
||||||
|
# follows.
|
||||||
|
#
|
||||||
|
# This file controls PostgreSQL user name mapping. It maps external
|
||||||
|
# user names to their corresponding PostgreSQL user names. Records
|
||||||
|
# are of the form:
|
||||||
|
#
|
||||||
|
# MAPNAME SYSTEM-USERNAME PG-USERNAME
|
||||||
|
#
|
||||||
|
# (The uppercase quantities must be replaced by actual values.)
|
||||||
|
#
|
||||||
|
# MAPNAME is the (otherwise freely chosen) map name that was used in
|
||||||
|
# pg_hba.conf. SYSTEM-USERNAME is the detected user name of the
|
||||||
|
# client. PG-USERNAME is the requested PostgreSQL user name. The
|
||||||
|
# existence of a record specifies that SYSTEM-USERNAME may connect as
|
||||||
|
# PG-USERNAME.
|
||||||
|
#
|
||||||
|
# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a
|
||||||
|
# regular expression. Optionally this can contain a capture (a
|
||||||
|
# parenthesized subexpression). The substring matching the capture
|
||||||
|
# will be substituted for \1 (backslash-one) if present in
|
||||||
|
# PG-USERNAME.
|
||||||
|
#
|
||||||
|
# Multiple maps may be specified in this file and used by pg_hba.conf.
|
||||||
|
#
|
||||||
|
# No map names are defined in the default configuration. If all
|
||||||
|
# system user names and PostgreSQL user names are the same, you don't
|
||||||
|
# need anything in this file.
|
||||||
|
#
|
||||||
|
# This file is read on server startup and when the postmaster receives
|
||||||
|
# a SIGHUP signal. If you edit the file on a running system, you have
|
||||||
|
# to SIGHUP the postmaster for the changes to take effect. You can
|
||||||
|
# use "pg_ctl reload" to do that.
|
||||||
|
|
||||||
|
# Put your actual configuration here
|
||||||
|
# ----------------------------------
|
||||||
|
|
||||||
|
# MAPNAME SYSTEM-USERNAME PG-USERNAME
|
||||||
|
local postgres postgres
|
||||||
|
local root postgres
|
@ -0,0 +1,658 @@
|
|||||||
|
# -----------------------------
|
||||||
|
# PostgreSQL configuration file
|
||||||
|
# -----------------------------
|
||||||
|
#
|
||||||
|
# This file consists of lines of the form:
|
||||||
|
#
|
||||||
|
# name = value
|
||||||
|
#
|
||||||
|
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
|
||||||
|
# "#" anywhere on a line. The complete list of parameter names and allowed
|
||||||
|
# values can be found in the PostgreSQL documentation.
|
||||||
|
#
|
||||||
|
# The commented-out settings shown in this file represent the default values.
|
||||||
|
# Re-commenting a setting is NOT sufficient to revert it to the default value;
|
||||||
|
# you need to reload the server.
|
||||||
|
#
|
||||||
|
# This file is read on server startup and when the server receives a SIGHUP
|
||||||
|
# signal. If you edit the file on a running system, you have to SIGHUP the
|
||||||
|
# server for the changes to take effect, run "pg_ctl reload", or execute
|
||||||
|
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
|
||||||
|
# require a server shutdown and restart to take effect.
|
||||||
|
#
|
||||||
|
# Any parameter can also be given as a command-line option to the server, e.g.,
|
||||||
|
# "postgres -c log_connections=on". Some parameters can be changed at run time
|
||||||
|
# with the "SET" SQL command.
|
||||||
|
#
|
||||||
|
# Memory units: kB = kilobytes Time units: ms = milliseconds
|
||||||
|
# MB = megabytes s = seconds
|
||||||
|
# GB = gigabytes min = minutes
|
||||||
|
# TB = terabytes h = hours
|
||||||
|
# d = days
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# FILE LOCATIONS
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# The default values of these variables are driven from the -D command-line
|
||||||
|
# option or PGDATA environment variable, represented here as ConfigDir.
|
||||||
|
|
||||||
|
#data_directory = 'ConfigDir' # use data in another directory
|
||||||
|
# (change requires restart)
|
||||||
|
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
|
||||||
|
# (change requires restart)
|
||||||
|
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
|
||||||
|
# (change requires restart)
|
||||||
|
|
||||||
|
# If external_pid_file is not explicitly set, no extra PID file is written.
|
||||||
|
#external_pid_file = '' # write an extra PID file
|
||||||
|
# (change requires restart)
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# CONNECTIONS AND AUTHENTICATION
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# - Connection Settings -
|
||||||
|
|
||||||
|
listen_addresses = '*' # what IP address(es) to listen on;
|
||||||
|
# comma-separated list of addresses;
|
||||||
|
# defaults to 'localhost'; use '*' for all
|
||||||
|
# (change requires restart)
|
||||||
|
#port = 5432 # (change requires restart)
|
||||||
|
max_connections = 100 # (change requires restart)
|
||||||
|
#superuser_reserved_connections = 3 # (change requires restart)
|
||||||
|
unix_socket_directories = '/run/postgresql,/tmp' # comma-separated list of directories
|
||||||
|
# (change requires restart)
|
||||||
|
#unix_socket_group = '' # (change requires restart)
|
||||||
|
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
|
||||||
|
# (change requires restart)
|
||||||
|
#bonjour = off # advertise server via Bonjour
|
||||||
|
# (change requires restart)
|
||||||
|
#bonjour_name = '' # defaults to the computer name
|
||||||
|
# (change requires restart)
|
||||||
|
|
||||||
|
# - Security and Authentication -
|
||||||
|
|
||||||
|
#authentication_timeout = 1min # 1s-600s
|
||||||
|
#ssl = off
|
||||||
|
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
|
||||||
|
#ssl_prefer_server_ciphers = on
|
||||||
|
#ssl_ecdh_curve = 'prime256v1'
|
||||||
|
#ssl_dh_params_file = ''
|
||||||
|
#ssl_cert_file = 'server.crt'
|
||||||
|
#ssl_key_file = 'server.key'
|
||||||
|
#ssl_ca_file = ''
|
||||||
|
#ssl_crl_file = ''
|
||||||
|
#password_encryption = md5 # md5 or scram-sha-256
|
||||||
|
#db_user_namespace = off
|
||||||
|
#row_security = on
|
||||||
|
|
||||||
|
# GSSAPI using Kerberos
|
||||||
|
#krb_server_keyfile = ''
|
||||||
|
#krb_caseins_users = off
|
||||||
|
|
||||||
|
# - TCP Keepalives -
|
||||||
|
# see "man 7 tcp" for details
|
||||||
|
|
||||||
|
#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
|
||||||
|
# 0 selects the system default
|
||||||
|
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
|
||||||
|
# 0 selects the system default
|
||||||
|
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
|
||||||
|
# 0 selects the system default
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# RESOURCE USAGE (except WAL)
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# - Memory -
|
||||||
|
|
||||||
|
shared_buffers = 128MB # min 128kB
|
||||||
|
# (change requires restart)
|
||||||
|
#huge_pages = try # on, off, or try
|
||||||
|
# (change requires restart)
|
||||||
|
#temp_buffers = 8MB # min 800kB
|
||||||
|
#max_prepared_transactions = 0 # zero disables the feature
|
||||||
|
# (change requires restart)
|
||||||
|
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
|
||||||
|
# you actively intend to use prepared transactions.
|
||||||
|
#work_mem = 4MB # min 64kB
|
||||||
|
#maintenance_work_mem = 64MB # min 1MB
|
||||||
|
#replacement_sort_tuples = 150000 # limits use of replacement selection sort
|
||||||
|
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
|
||||||
|
#max_stack_depth = 2MB # min 100kB
|
||||||
|
dynamic_shared_memory_type = posix # the default is the first option
|
||||||
|
# supported by the operating system:
|
||||||
|
# posix
|
||||||
|
# sysv
|
||||||
|
# windows
|
||||||
|
# mmap
|
||||||
|
# use none to disable dynamic shared memory
|
||||||
|
# (change requires restart)
|
||||||
|
|
||||||
|
# - Disk -
|
||||||
|
|
||||||
|
#temp_file_limit = -1 # limits per-process temp file space
|
||||||
|
# in kB, or -1 for no limit
|
||||||
|
|
||||||
|
# - Kernel Resource Usage -
|
||||||
|
|
||||||
|
#max_files_per_process = 1000 # min 25
|
||||||
|
# (change requires restart)
|
||||||
|
#shared_preload_libraries = '' # (change requires restart)
|
||||||
|
|
||||||
|
# - Cost-Based Vacuum Delay -
|
||||||
|
|
||||||
|
#vacuum_cost_delay = 0 # 0-100 milliseconds
|
||||||
|
#vacuum_cost_page_hit = 1 # 0-10000 credits
|
||||||
|
#vacuum_cost_page_miss = 10 # 0-10000 credits
|
||||||
|
#vacuum_cost_page_dirty = 20 # 0-10000 credits
|
||||||
|
#vacuum_cost_limit = 200 # 1-10000 credits
|
||||||
|
|
||||||
|
# - Background Writer -
|
||||||
|
|
||||||
|
#bgwriter_delay = 200ms # 10-10000ms between rounds
|
||||||
|
#bgwriter_lru_maxpages = 100 # 0-1000 max buffers written/round
|
||||||
|
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
|
||||||
|
#bgwriter_flush_after = 512kB # measured in pages, 0 disables
|
||||||
|
|
||||||
|
# - Asynchronous Behavior -
|
||||||
|
|
||||||
|
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
|
||||||
|
#max_worker_processes = 8 # (change requires restart)
|
||||||
|
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
|
||||||
|
#max_parallel_workers = 8 # maximum number of max_worker_processes that
|
||||||
|
# can be used in parallel queries
|
||||||
|
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
|
||||||
|
# (change requires restart)
|
||||||
|
#backend_flush_after = 0 # measured in pages, 0 disables
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# WRITE AHEAD LOG
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# - Settings -
|
||||||
|
|
||||||
|
wal_level = replica # minimal, replica, or logical
|
||||||
|
# (change requires restart)
|
||||||
|
#fsync = on # flush data to disk for crash safety
|
||||||
|
# (turning this off can cause
|
||||||
|
# unrecoverable data corruption)
|
||||||
|
#synchronous_commit = on # synchronization level;
|
||||||
|
# off, local, remote_write, remote_apply, or on
|
||||||
|
#wal_sync_method = fsync # the default is the first option
|
||||||
|
# supported by the operating system:
|
||||||
|
# open_datasync
|
||||||
|
# fdatasync (default on Linux)
|
||||||
|
# fsync
|
||||||
|
# fsync_writethrough
|
||||||
|
# open_sync
|
||||||
|
#full_page_writes = on # recover from partial page writes
|
||||||
|
#wal_compression = off # enable compression of full-page writes
|
||||||
|
#wal_log_hints = off # also do full page writes of non-critical updates
|
||||||
|
# (change requires restart)
|
||||||
|
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
|
||||||
|
# (change requires restart)
|
||||||
|
#wal_writer_delay = 200ms # 1-10000 milliseconds
|
||||||
|
#wal_writer_flush_after = 1MB # measured in pages, 0 disables
|
||||||
|
|
||||||
|
#commit_delay = 0 # range 0-100000, in microseconds
|
||||||
|
#commit_siblings = 5 # range 1-1000
|
||||||
|
|
||||||
|
# - Checkpoints -
|
||||||
|
|
||||||
|
checkpoint_timeout = 60s # range 30s-1d
|
||||||
|
#max_wal_size = 1GB
|
||||||
|
#min_wal_size = 80MB
|
||||||
|
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
|
||||||
|
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
|
||||||
|
checkpoint_warning = 15s # 0 disables
|
||||||
|
|
||||||
|
# - Archiving -
|
||||||
|
|
||||||
|
archive_mode = on # enables archiving; off, on, or always
|
||||||
|
# (change requires restart)
|
||||||
|
archive_command = 'test ! -f /backup/wal_archive/%f && gzip < %p > /backup/wal_archive/%f' # command to use to archive a logfile segment
|
||||||
|
# placeholders: %p = path of file to archive
|
||||||
|
# %f = file name only
|
||||||
|
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
|
||||||
|
archive_timeout = 60 # force a logfile segment switch after this
|
||||||
|
# number of seconds; 0 disables
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# REPLICATION
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# - Sending Server(s) -
|
||||||
|
|
||||||
|
# Set these on the master and on any standby that will send replication data.
|
||||||
|
|
||||||
|
#max_wal_senders = 10 # max number of walsender processes
|
||||||
|
# (change requires restart)
|
||||||
|
#wal_keep_segments = 0 # in logfile segments, 16MB each; 0 disables
|
||||||
|
#wal_sender_timeout = 60s # in milliseconds; 0 disables
|
||||||
|
|
||||||
|
#max_replication_slots = 10 # max number of replication slots
|
||||||
|
# (change requires restart)
|
||||||
|
#track_commit_timestamp = off # collect timestamp of transaction commit
|
||||||
|
# (change requires restart)
|
||||||
|
|
||||||
|
# - Master Server -
|
||||||
|
|
||||||
|
# These settings are ignored on a standby server.
|
||||||
|
|
||||||
|
#synchronous_standby_names = '' # standby servers that provide sync rep
|
||||||
|
# method to choose sync standbys, number of sync standbys,
|
||||||
|
# and comma-separated list of application_name
|
||||||
|
# from standby(s); '*' = all
|
||||||
|
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed
|
||||||
|
|
||||||
|
# - Standby Servers -
|
||||||
|
|
||||||
|
# These settings are ignored on a master server.
|
||||||
|
|
||||||
|
#hot_standby = on # "off" disallows queries during recovery
|
||||||
|
# (change requires restart)
|
||||||
|
#max_standby_archive_delay = 30s # max delay before canceling queries
|
||||||
|
# when reading WAL from archive;
|
||||||
|
# -1 allows indefinite delay
|
||||||
|
#max_standby_streaming_delay = 30s # max delay before canceling queries
|
||||||
|
# when reading streaming WAL;
|
||||||
|
# -1 allows indefinite delay
|
||||||
|
#wal_receiver_status_interval = 10s # send replies at least this often
|
||||||
|
# 0 disables
|
||||||
|
#hot_standby_feedback = off # send info from standby to prevent
|
||||||
|
# query conflicts
|
||||||
|
#wal_receiver_timeout = 60s # time that receiver waits for
|
||||||
|
# communication from master
|
||||||
|
# in milliseconds; 0 disables
|
||||||
|
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
|
||||||
|
# retrieve WAL after a failed attempt
|
||||||
|
|
||||||
|
# - Subscribers -
|
||||||
|
|
||||||
|
# These settings are ignored on a publisher.
|
||||||
|
|
||||||
|
#max_logical_replication_workers = 4 # taken from max_worker_processes
|
||||||
|
# (change requires restart)
|
||||||
|
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# QUERY TUNING
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# - Planner Method Configuration -
|
||||||
|
|
||||||
|
#enable_bitmapscan = on
|
||||||
|
#enable_hashagg = on
|
||||||
|
#enable_hashjoin = on
|
||||||
|
#enable_indexscan = on
|
||||||
|
#enable_indexonlyscan = on
|
||||||
|
#enable_material = on
|
||||||
|
#enable_mergejoin = on
|
||||||
|
#enable_nestloop = on
|
||||||
|
#enable_seqscan = on
|
||||||
|
#enable_sort = on
|
||||||
|
#enable_tidscan = on
|
||||||
|
|
||||||
|
# - Planner Cost Constants -
|
||||||
|
|
||||||
|
#seq_page_cost = 1.0 # measured on an arbitrary scale
|
||||||
|
#random_page_cost = 4.0 # same scale as above
|
||||||
|
#cpu_tuple_cost = 0.01 # same scale as above
|
||||||
|
#cpu_index_tuple_cost = 0.005 # same scale as above
|
||||||
|
#cpu_operator_cost = 0.0025 # same scale as above
|
||||||
|
#parallel_tuple_cost = 0.1 # same scale as above
|
||||||
|
#parallel_setup_cost = 1000.0 # same scale as above
|
||||||
|
#min_parallel_table_scan_size = 8MB
|
||||||
|
#min_parallel_index_scan_size = 512kB
|
||||||
|
#effective_cache_size = 4GB
|
||||||
|
|
||||||
|
# - Genetic Query Optimizer -
|
||||||
|
|
||||||
|
#geqo = on
|
||||||
|
#geqo_threshold = 12
|
||||||
|
#geqo_effort = 5 # range 1-10
|
||||||
|
#geqo_pool_size = 0 # selects default based on effort
|
||||||
|
#geqo_generations = 0 # selects default based on effort
|
||||||
|
#geqo_selection_bias = 2.0 # range 1.5-2.0
|
||||||
|
#geqo_seed = 0.0 # range 0.0-1.0
|
||||||
|
|
||||||
|
# - Other Planner Options -
|
||||||
|
|
||||||
|
#default_statistics_target = 100 # range 1-10000
|
||||||
|
#constraint_exclusion = partition # on, off, or partition
|
||||||
|
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
|
||||||
|
#from_collapse_limit = 8
|
||||||
|
#join_collapse_limit = 8 # 1 disables collapsing of explicit
|
||||||
|
# JOIN clauses
|
||||||
|
#force_parallel_mode = off
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# ERROR REPORTING AND LOGGING
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# - Where to Log -
|
||||||
|
|
||||||
|
#log_destination = 'stderr' # Valid values are combinations of
|
||||||
|
# stderr, csvlog, syslog, and eventlog,
|
||||||
|
# depending on platform. csvlog
|
||||||
|
# requires logging_collector to be on.
|
||||||
|
|
||||||
|
# This is used when logging to stderr:
|
||||||
|
#logging_collector = off # Enable capturing of stderr and csvlog
|
||||||
|
# into log files. Required to be on for
|
||||||
|
# csvlogs.
|
||||||
|
# (change requires restart)
|
||||||
|
|
||||||
|
# These are only used if logging_collector is on:
|
||||||
|
#log_directory = 'log' # directory where log files are written,
|
||||||
|
# can be absolute or relative to PGDATA
|
||||||
|
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
|
||||||
|
# can include strftime() escapes
|
||||||
|
#log_file_mode = 0600 # creation mode for log files,
|
||||||
|
# begin with 0 to use octal notation
|
||||||
|
#log_truncate_on_rotation = off # If on, an existing log file with the
|
||||||
|
# same name as the new log file will be
|
||||||
|
# truncated rather than appended to.
|
||||||
|
# But such truncation only occurs on
|
||||||
|
# time-driven rotation, not on restarts
|
||||||
|
# or size-driven rotation. Default is
|
||||||
|
# off, meaning append to existing files
|
||||||
|
# in all cases.
|
||||||
|
#log_rotation_age = 1d # Automatic rotation of logfiles will
|
||||||
|
# happen after that time. 0 disables.
|
||||||
|
#log_rotation_size = 10MB # Automatic rotation of logfiles will
|
||||||
|
# happen after that much log output.
|
||||||
|
# 0 disables.
|
||||||
|
|
||||||
|
# These are relevant when logging to syslog:
|
||||||
|
#syslog_facility = 'LOCAL0'
|
||||||
|
#syslog_ident = 'postgres'
|
||||||
|
#syslog_sequence_numbers = on
|
||||||
|
#syslog_split_messages = on
|
||||||
|
|
||||||
|
# This is only relevant when logging to eventlog (win32):
|
||||||
|
# (change requires restart)
|
||||||
|
#event_source = 'PostgreSQL'
|
||||||
|
|
||||||
|
# - When to Log -
|
||||||
|
|
||||||
|
#client_min_messages = notice # values in order of decreasing detail:
|
||||||
|
# debug5
|
||||||
|
# debug4
|
||||||
|
# debug3
|
||||||
|
# debug2
|
||||||
|
# debug1
|
||||||
|
# log
|
||||||
|
# notice
|
||||||
|
# warning
|
||||||
|
# error
|
||||||
|
|
||||||
|
#log_min_messages = warning # values in order of decreasing detail:
|
||||||
|
# debug5
|
||||||
|
# debug4
|
||||||
|
# debug3
|
||||||
|
# debug2
|
||||||
|
# debug1
|
||||||
|
# info
|
||||||
|
# notice
|
||||||
|
# warning
|
||||||
|
# error
|
||||||
|
# log
|
||||||
|
# fatal
|
||||||
|
# panic
|
||||||
|
|
||||||
|
#log_min_error_statement = error # values in order of decreasing detail:
|
||||||
|
# debug5
|
||||||
|
# debug4
|
||||||
|
# debug3
|
||||||
|
# debug2
|
||||||
|
# debug1
|
||||||
|
# info
|
||||||
|
# notice
|
||||||
|
# warning
|
||||||
|
# error
|
||||||
|
# log
|
||||||
|
# fatal
|
||||||
|
# panic (effectively off)
|
||||||
|
|
||||||
|
#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
|
||||||
|
# and their durations, > 0 logs only
|
||||||
|
# statements running at least this number
|
||||||
|
# of milliseconds
|
||||||
|
|
||||||
|
|
||||||
|
# - What to Log -
|
||||||
|
|
||||||
|
#debug_print_parse = off
|
||||||
|
#debug_print_rewritten = off
|
||||||
|
#debug_print_plan = off
|
||||||
|
#debug_pretty_print = on
|
||||||
|
#log_checkpoints = off
|
||||||
|
#log_connections = off
|
||||||
|
#log_disconnections = off
|
||||||
|
#log_duration = off
|
||||||
|
#log_error_verbosity = default # terse, default, or verbose messages
|
||||||
|
#log_hostname = off
|
||||||
|
#log_line_prefix = '%m [%p] ' # special values:
|
||||||
|
# %a = application name
|
||||||
|
# %u = user name
|
||||||
|
# %d = database name
|
||||||
|
# %r = remote host and port
|
||||||
|
# %h = remote host
|
||||||
|
# %p = process ID
|
||||||
|
# %t = timestamp without milliseconds
|
||||||
|
# %m = timestamp with milliseconds
|
||||||
|
# %n = timestamp with milliseconds (as a Unix epoch)
|
||||||
|
# %i = command tag
|
||||||
|
# %e = SQL state
|
||||||
|
# %c = session ID
|
||||||
|
# %l = session line number
|
||||||
|
# %s = session start timestamp
|
||||||
|
# %v = virtual transaction ID
|
||||||
|
# %x = transaction ID (0 if none)
|
||||||
|
# %q = stop here in non-session
|
||||||
|
# processes
|
||||||
|
# %% = '%'
|
||||||
|
# e.g. '<%u%%%d> '
|
||||||
|
#log_lock_waits = off # log lock waits >= deadlock_timeout
|
||||||
|
#log_statement = 'none' # none, ddl, mod, all
|
||||||
|
#log_replication_commands = off
|
||||||
|
#log_temp_files = -1 # log temporary files equal or larger
|
||||||
|
# than the specified size in kilobytes;
|
||||||
|
# -1 disables, 0 logs all temp files
|
||||||
|
log_timezone = 'UTC'
|
||||||
|
|
||||||
|
|
||||||
|
# - Process Title -
|
||||||
|
|
||||||
|
#cluster_name = '' # added to process titles if nonempty
|
||||||
|
# (change requires restart)
|
||||||
|
#update_process_title = on
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# RUNTIME STATISTICS
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# - Query/Index Statistics Collector -
|
||||||
|
|
||||||
|
#track_activities = on
|
||||||
|
#track_counts = on
|
||||||
|
#track_io_timing = off
|
||||||
|
#track_functions = none # none, pl, all
|
||||||
|
#track_activity_query_size = 1024 # (change requires restart)
|
||||||
|
#stats_temp_directory = 'pg_stat_tmp'
|
||||||
|
|
||||||
|
|
||||||
|
# - Statistics Monitoring -
|
||||||
|
|
||||||
|
#log_parser_stats = off
|
||||||
|
#log_planner_stats = off
|
||||||
|
#log_executor_stats = off
|
||||||
|
#log_statement_stats = off
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# AUTOVACUUM PARAMETERS
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#autovacuum = on # Enable autovacuum subprocess? 'on'
|
||||||
|
# requires track_counts to also be on.
|
||||||
|
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
|
||||||
|
# their durations, > 0 logs only
|
||||||
|
# actions running at least this number
|
||||||
|
# of milliseconds.
|
||||||
|
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
|
||||||
|
# (change requires restart)
|
||||||
|
#autovacuum_naptime = 1min # time between autovacuum runs
|
||||||
|
#autovacuum_vacuum_threshold = 50 # min number of row updates before
|
||||||
|
# vacuum
|
||||||
|
#autovacuum_analyze_threshold = 50 # min number of row updates before
|
||||||
|
# analyze
|
||||||
|
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
|
||||||
|
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
|
||||||
|
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
|
||||||
|
# (change requires restart)
|
||||||
|
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
|
||||||
|
# before forced vacuum
|
||||||
|
# (change requires restart)
|
||||||
|
#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for
|
||||||
|
# autovacuum, in milliseconds;
|
||||||
|
# -1 means use vacuum_cost_delay
|
||||||
|
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
|
||||||
|
# autovacuum, -1 means use
|
||||||
|
# vacuum_cost_limit
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# CLIENT CONNECTION DEFAULTS
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# - Statement Behavior -
|
||||||
|
|
||||||
|
#search_path = '"$user", public' # schema names
|
||||||
|
#default_tablespace = '' # a tablespace name, '' uses the default
|
||||||
|
#temp_tablespaces = '' # a list of tablespace names, '' uses
|
||||||
|
# only default tablespace
|
||||||
|
#check_function_bodies = on
|
||||||
|
#default_transaction_isolation = 'read committed'
|
||||||
|
#default_transaction_read_only = off
|
||||||
|
#default_transaction_deferrable = off
|
||||||
|
#session_replication_role = 'origin'
|
||||||
|
#statement_timeout = 0 # in milliseconds, 0 is disabled
|
||||||
|
#lock_timeout = 0 # in milliseconds, 0 is disabled
|
||||||
|
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
|
||||||
|
#vacuum_freeze_min_age = 50000000
|
||||||
|
#vacuum_freeze_table_age = 150000000
|
||||||
|
#vacuum_multixact_freeze_min_age = 5000000
|
||||||
|
#vacuum_multixact_freeze_table_age = 150000000
|
||||||
|
#bytea_output = 'hex' # hex, escape
|
||||||
|
#xmlbinary = 'base64'
|
||||||
|
#xmloption = 'content'
|
||||||
|
#gin_fuzzy_search_limit = 0
|
||||||
|
#gin_pending_list_limit = 4MB
|
||||||
|
|
||||||
|
# - Locale and Formatting -
|
||||||
|
|
||||||
|
datestyle = 'iso, mdy'
|
||||||
|
#intervalstyle = 'postgres'
|
||||||
|
timezone = 'UTC'
|
||||||
|
#timezone_abbreviations = 'Default' # Select the set of available time zone
|
||||||
|
# abbreviations. Currently, there are
|
||||||
|
# Default
|
||||||
|
# Australia (historical usage)
|
||||||
|
# India
|
||||||
|
# You can create your own file in
|
||||||
|
# share/timezonesets/.
|
||||||
|
#extra_float_digits = 0 # min -15, max 3
|
||||||
|
#client_encoding = sql_ascii # actually, defaults to database
|
||||||
|
# encoding
|
||||||
|
|
||||||
|
# These settings are initialized by initdb, but they can be changed.
|
||||||
|
lc_messages = 'C' # locale for system error message
|
||||||
|
# strings
|
||||||
|
lc_monetary = 'C' # locale for monetary formatting
|
||||||
|
lc_numeric = 'C' # locale for number formatting
|
||||||
|
lc_time = 'C' # locale for time formatting
|
||||||
|
|
||||||
|
# default configuration for text search
|
||||||
|
default_text_search_config = 'pg_catalog.english'
|
||||||
|
|
||||||
|
# - Other Defaults -
|
||||||
|
|
||||||
|
#dynamic_library_path = '$libdir'
|
||||||
|
#local_preload_libraries = ''
|
||||||
|
#session_preload_libraries = ''
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# LOCK MANAGEMENT
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#deadlock_timeout = 1s
|
||||||
|
#max_locks_per_transaction = 64 # min 10
|
||||||
|
# (change requires restart)
|
||||||
|
#max_pred_locks_per_transaction = 64 # min 10
|
||||||
|
# (change requires restart)
|
||||||
|
#max_pred_locks_per_relation = -2 # negative values mean
|
||||||
|
# (max_pred_locks_per_transaction
|
||||||
|
# / -max_pred_locks_per_relation) - 1
|
||||||
|
#max_pred_locks_per_page = 2 # min 0
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# VERSION/PLATFORM COMPATIBILITY
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# - Previous PostgreSQL Versions -
|
||||||
|
|
||||||
|
#array_nulls = on
|
||||||
|
#backslash_quote = safe_encoding # on, off, or safe_encoding
|
||||||
|
#default_with_oids = off
|
||||||
|
#escape_string_warning = on
|
||||||
|
#lo_compat_privileges = off
|
||||||
|
#operator_precedence_warning = off
|
||||||
|
#quote_all_identifiers = off
|
||||||
|
#standard_conforming_strings = on
|
||||||
|
#synchronize_seqscans = on
|
||||||
|
|
||||||
|
# - Other Platforms and Clients -
|
||||||
|
|
||||||
|
#transform_null_equals = off
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# ERROR HANDLING
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
#exit_on_error = off # terminate session on any error?
|
||||||
|
#restart_after_crash = on # reinitialize after backend crash?
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# CONFIG FILE INCLUDES
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# These options allow settings to be loaded from files other than the
|
||||||
|
# default postgresql.conf.
|
||||||
|
|
||||||
|
#include_dir = 'conf.d' # include files ending in '.conf' from
|
||||||
|
# directory 'conf.d'
|
||||||
|
#include_if_exists = 'exists.conf' # include file only if it exists
|
||||||
|
#include = 'special.conf' # include file
|
||||||
|
|
||||||
|
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
# CUSTOMIZED OPTIONS
|
||||||
|
#------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
# Add settings for extensions here
|
@ -0,0 +1,42 @@
-- name: create_mailu_user!
-- Create the mailu user if it does not exist.
do $$
begin
  create user mailu;
exception when others then
  raise notice 'mailu user not created -- already exists';
end
$$;

-- name: create_health_user!
-- Create the health user if it does not exist.
do $$
begin
  create user health;
exception when others then
  raise notice 'health user not created -- already exists';
end
$$;

-- name: grant_health!
-- Grant connect permission for the health user
grant connect
  on database postgres
  to health;

-- name: update_pw!
alter
  user mailu
  password :pw;

-- name: check_db
-- check if the mailu db exists
select 1
  from pg_database
  where datname = 'mailu';

-- name: create_db!
-- create the mailu db
create
  database mailu
  owner mailu;
@ -0,0 +1 @@
|
|||||||
|
11 4 * * 7 /basebackup.sh > /proc/1/fd/1 2>/proc/1/fd/2
|
@ -0,0 +1,67 @@
#!/usr/bin/python3

import anosql
import psycopg2
import jinja2
import glob
import os
import subprocess

def setup():
    conn = psycopg2.connect(user='postgres')
    queries = anosql.load_queries('postgres', '/conf/queries.sql')
    # Mailu user
    queries.create_mailu_user(conn)
    queries.update_pw(conn, pw=os.environ.get("DB_PW"))
    # Healthcheck user
    queries.create_health_user(conn)
    queries.grant_health(conn)
    conn.commit()
    # CREATE DATABASE cannot run inside a transaction. Since this script is the
    # only active connection, switching to autocommit here is reasonably safe.
    if not queries.check_db(conn):
        conn.set_isolation_level(0)
        queries.create_db(conn)
        conn.set_isolation_level(1)
    conn.close()

# Check if /data is empty
if not os.listdir("/data"):
    os.system("chown -R postgres:postgres /data")
    os.system("chmod 0700 /data")
    base_backups = sorted(glob.glob("/backup/base-*"))
    if base_backups:
        # Restore the latest backup
        subprocess.call(["tar", "--same-owner", "-zpxf", base_backups[-1] + "/base.tar.gz", "-C", "/data"])
        if os.listdir("/backup/wal_archive"):
            with open("/data/recovery.conf", "w") as rec:
                rec.write("restore_command = 'gunzip < /backup/wal_archive/%f > %p'\n")
                rec.write("standby_mode = off\n")
            os.system("chown postgres:postgres /data/recovery.conf")
            #os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
    else:
        # Bootstrap the database
        os.system("sudo -u postgres initdb -D /data")

# Create backup directory structure, if it does not yet exist
os.system("mkdir -p /backup/wal_archive")
os.system("chown -R postgres:postgres /backup")

# Render config files
convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ))
for pg_file in glob.glob("/conf/*.conf"):
    convert(pg_file, os.path.join("/data", os.path.basename(pg_file)))

# (Re)start postgresql locally for DB and user creation
os.system("sudo -u postgres pg_ctl start -D /data -o '-h \"''\" '")
# Wait until a possible restore has finished; PostgreSQL renames
# /data/recovery.conf to recovery.done once recovery is complete.
while os.path.isfile("/data/recovery.conf"):
    pass
os.system("sudo -u postgres pg_ctl -D /data promote")
setup()
os.system("sudo -u postgres pg_ctl stop -m smart -w -D /data")

out = open("/proc/1/fd/1", "w")
err = open("/proc/1/fd/2", "w")
# Run the cron daemon
subprocess.Popen(["crond", "-f"], stdout=out, stderr=err)
# Run the postgresql service
os.system("sudo -u postgres postgres -D /data -h \*")
@ -0,0 +1,40 @@
|
|||||||
|
{% call macros.panel("info", "Database preferences") %}
|
||||||
|
|
||||||
|
<div class="form-group">
|
||||||
|
<label>Which database back end would you like to use?</label>
|
||||||
|
<br/>
|
||||||
|
<select class="btn btn-primary dropdown-toggle" name="db_flavor" id="database">
|
||||||
|
{% for dbflavor in ["sqlite", "postgresql", "mysql"] %}
|
||||||
|
<option value="{{ dbflavor }}" >{{ dbflavor }}</option>
|
||||||
|
{% endfor %}
|
||||||
|
</select>
|
||||||
|
<p></p>
|
||||||
|
<div id="postgres_db" style="display: none">
|
||||||
|
<div class="form-check">
|
||||||
|
<input class="form-check-input" type="radio" name="postgresql" id="internal_psql" value="internal" checked>
|
||||||
|
<label class="form-check-label" for="internal_psql">
|
||||||
|
Use the Postgresql from Mailu
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<div class="form-check">
|
||||||
|
<input class="form-check-input" type="radio" name="postgresql" value="external" id="external_psql" >
|
||||||
|
<label class="form-check-label" for="external_psql">
|
||||||
|
I want to connect to an external database
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
<br/>
|
||||||
|
</div>
|
||||||
|
<div class="form-group" id="external_db" style="display: none">
|
||||||
|
<p>Set external database parameters</p>
|
||||||
|
<label>DB User</label>
|
||||||
|
<input class="form-control" type="text" name="db_user" placeholder="Username" id="db_user">
|
||||||
|
<label>Db Password</label>
|
||||||
|
<input class="form-control" type="password" name="db_pw" placeholder="Password" id="db_pw">
|
||||||
|
<label>Db URL</label>
|
||||||
|
<input class="form-control" type="text" name="db_url" placeholder="URL" id="db_url">
|
||||||
|
<label>Db Name</label>
|
||||||
|
<input class="form-control" type="text" name="db_name" placeholder="Database Name" id="db_name">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{% endcall %}
|