Merge branch 'master' into feat-reply-startdate

Branch: master
hoellen, 6 years ago, committed via GitHub
commit 857ad50509

@@ -12,7 +12,7 @@ import docker
 import socket
 import uuid
-from werkzeug.contrib import fixers
+from werkzeug.contrib import fixers, profiler

 # Create application
 app = flask.Flask(__name__)
@@ -62,7 +62,10 @@ default_config = {
     'HOST_IMAP': 'imap',
     'HOST_POP3': 'imap',
     'HOST_SMTP': 'smtp',
+    'HOST_WEBMAIL': 'webmail',
+    'HOST_FRONT': 'front',
     'HOST_AUTHSMTP': os.environ.get('HOST_SMTP', 'smtp'),
+    'POD_ADDRESS_RANGE': None
 }

 # Load configuration from the environment if available
@ -80,6 +83,10 @@ if app.config.get("DEBUG"):
import flask_debugtoolbar import flask_debugtoolbar
toolbar = flask_debugtoolbar.DebugToolbarExtension(app) toolbar = flask_debugtoolbar.DebugToolbarExtension(app)
# Profiler
if app.config.get("DEBUG"):
app.wsgi_app = profiler.ProfilerMiddleware(app.wsgi_app, restrictions=[30])
# Manager commnad # Manager commnad
manager = flask_script.Manager(app) manager = flask_script.Manager(app)
manager.add_command('db', flask_migrate.MigrateCommand) manager.add_command('db', flask_migrate.MigrateCommand)
@@ -129,4 +136,5 @@ class PrefixMiddleware(object):
             environ['SCRIPT_NAME'] = prefix
         return self.app(environ, start_response)

 app.wsgi_app = PrefixMiddleware(fixers.ProxyFix(app.wsgi_app))

@@ -1,14 +1,24 @@
-from mailu import db, models
+from mailu import db, models, app
 from mailu.internal import internal

 import flask
+import socket


 @internal.route("/dovecot/passdb/<user_email>")
 def dovecot_passdb_dict(user_email):
     user = models.User.query.get(user_email) or flask.abort(404)
+    allow_nets = []
+    allow_nets.append(
+        app.config.get("POD_ADDRESS_RANGE") or
+        socket.gethostbyname(app.config["HOST_FRONT"])
+    )
+    allow_nets.append(socket.gethostbyname(app.config["HOST_WEBMAIL"]))
+    print(allow_nets)
     return flask.jsonify({
-        "password": user.password,
+        "password": None,
+        "nopassword": "Y",
+        "allow_nets": ",".join(allow_nets)
     })
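As a quick sanity check after this change, one could query the endpoint from inside the Docker network. This is only a sketch: it assumes the `internal` blueprint is served under `/internal` on the `admin` container and that the mailbox exists.
```bash
# Illustrative only: hostname, URL prefix and mailbox are assumptions.
curl -s http://admin/internal/dovecot/passdb/user@example.com
# Expected shape, per the handler above (allow_nets values illustrative):
# {"allow_nets": "10.0.1.0/24,10.0.1.23", "nopassword": "Y", "password": null}
```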

@@ -278,6 +278,7 @@ class User(Base, Email):
         else:
             return self.email

     @property
     def reply_active(self):
         now = date.today()
@ -287,7 +288,8 @@ class User(Base, Email):
self.reply_enddate > now self.reply_enddate > now
) )
scheme_dict = {'BLF-CRYPT': "bcrypt", scheme_dict = {'PBKDF2': "pbkdf2_sha512",
'BLF-CRYPT': "bcrypt",
'SHA512-CRYPT': "sha512_crypt", 'SHA512-CRYPT': "sha512_crypt",
'SHA256-CRYPT': "sha256_crypt", 'SHA256-CRYPT': "sha256_crypt",
'MD5-CRYPT': "md5_crypt", 'MD5-CRYPT': "md5_crypt",
@@ -298,8 +300,14 @@ class User(Base, Email):
         )

     def check_password(self, password):
+        context = User.pw_context
         reference = re.match('({[^}]+})?(.*)', self.password).group(2)
-        return User.pw_context.verify(password, reference)
+        result = context.verify(password, reference)
+        if result and context.identify(reference) != context.default_scheme():
+            self.set_password(password)
+            db.session.add(self)
+            db.session.commit()
+        return result

     def set_password(self, password, hash_scheme=app.config['PASSWORD_SCHEME'], raw=False):
         """Set password for user with specified encryption scheme

@@ -2,7 +2,7 @@ FROM alpine:3.8

 RUN apk add --no-cache \
     dovecot dovecot-pigeonhole-plugin dovecot-fts-lucene rspamd-client \
-    python3 py3-pip \
+    bash python3 py3-pip \
  && pip3 install --upgrade pip \
  && pip3 install jinja2 podop tenacity

@@ -0,0 +1,4 @@
#!/bin/bash
tee >(rspamc -h antispam:11334 -P mailu learn_ham /dev/stdin) \
| rspamc -h antispam:11334 -P mailu -f 13 fuzzy_add /dev/stdin

@@ -1,3 +0,0 @@
#!/bin/sh
rspamc -h antispam:11334 -P mailu "learn_$1" /dev/stdin <&0

@@ -0,0 +1,4 @@
#!/bin/bash
tee >(rspamc -h antispam:11334 -P mailu learn_spam /dev/stdin) \
>(rspamc -h antispam:11334 -P mailu -f 11 fuzzy_add /dev/stdin)

@@ -136,7 +136,8 @@ service managesieve {
 }

 plugin {
-  sieve = dict:proxy:/tmp/podop.socket:sieve
+  sieve = file:~/sieve;active=~/.dovecot.sieve
+  sieve_before = dict:proxy:/tmp/podop.socket:sieve
   sieve_plugins = sieve_imapsieve sieve_extprograms
   sieve_extensions = +spamtest +spamtestplus +editheader
   sieve_global_extensions = +vnd.dovecot.execute

@@ -8,4 +8,4 @@ if string "${mailbox}" "Trash" {
   stop;
 }

-execute :pipe "mailtrain" "ham";
+execute :pipe "ham";

@@ -1,3 +1,3 @@
 require "vnd.dovecot.execute";

-execute :pipe "mailtrain" "spam";
+execute :pipe "spam";

@@ -36,5 +36,5 @@ for dovecot_file in glob.glob("/conf/*.conf"):

 # Run Podop, then postfix
 multiprocessing.Process(target=start_podop).start()
-os.system("chown -R mail:mail /mail /var/lib/dovecot")
+os.system("chown -R mail:mail /mail /var/lib/dovecot /conf")
 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])

@@ -91,8 +91,10 @@ http {
         {% endif %}

         location {{ WEB_WEBMAIL }} {
+            {% if WEB_WEBMAIL != '/' %}
             rewrite ^({{ WEB_WEBMAIL }})$ $1/ permanent;
             rewrite ^{{ WEB_WEBMAIL }}/(.*) /$1 break;
+            {% endif %}
             include /etc/nginx/proxy.conf;
             client_max_body_size {{ MESSAGE_SIZE_LIMIT|int + 8388608 }};
             proxy_pass http://$webmail;

@@ -32,7 +32,7 @@ relayhost = {{ RELAYHOST }}
 recipient_delimiter = {{ RECIPIENT_DELIMITER }}

 # Only the front server is allowed to perform xclient
-smtpd_authorized_xclient_hosts={{ FRONT_ADDRESS }}
+smtpd_authorized_xclient_hosts={{ FRONT_ADDRESS }} {{ POD_ADDRESS_RANGE }}

 ###############
 # TLS

@@ -130,8 +130,8 @@ LOG_DRIVER=json-file
 COMPOSE_PROJECT_NAME=mailu

 # Default password scheme used for newly created accounts and changed passwords
-# (value: BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT, MD5-CRYPT, CRYPT)
-PASSWORD_SCHEME=BLF-CRYPT
+# (value: PBKDF2, BLF-CRYPT, SHA512-CRYPT, SHA256-CRYPT)
+PASSWORD_SCHEME=PBKDF2

 # Header to take the real ip from
 REAL_IP_HEADER=

@@ -0,0 +1,252 @@
# Install Mailu on a docker swarm
## Prerequisites
### Swarm
In order to deploy Mailu on a swarm, you will first need to initialize the swarm. The main command is:
```bash
docker swarm init --advertise-addr <IP_ADDR>
```
See https://docs.docker.com/engine/swarm/swarm-tutorial/create-swarm/
If you want to add other managers or workers, please use:
```bash
docker swarm join --token xxxxx
```
See https://docs.docker.com/engine/swarm/join-nodes/
You now have a working swarm, and you can check its status with:
```bash
core@coreos-01 ~/git/Mailu/docs/swarm/1.5 $ docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
xhgeekkrlttpmtgmapt5hyxrb black-pearl Ready Active 18.06.0-ce
sczlqjgfhehsfdjhfhhph1nvb * coreos-01 Ready Active Leader 18.03.1-ce
mzrm9nbdggsfz4sgq6dhs5i6n flying-dutchman Ready Active 18.06.0-ce
```
### Volume definition
For data persistence (the Mailu services might be launched or relaunched on any of the swarm nodes), Mailu data must be stored in a way that is accessible to every manager or worker in the swarm.
Hereafter we will assume that the Mailu data is available on every node under "$ROOT" (mounted as, for example, "$ROOT/certs:/certs"); GlusterFS and NFS shares have been used successfully.
## Networking mode
In this example, we are using:
- the mesh routing mode (the default mode). With this mode, each service is given a virtual IP address, and Docker manages the routing between that virtual IP and the container(s) providing the service.
- the default ingress mode.
### Allow authentication with the mesh routing
In order to allow every front and webmail container to access the other services, we will use the variable POD_ADDRESS_RANGE.
Let's create the mailu_default network:
```bash
core@coreos-01 ~ $ docker network create -d overlay --attachable mailu_default
core@coreos-01 ~ $ docker network inspect mailu_default | grep Subnet
"Subnet": "10.0.1.0/24",
```
In the docker-compose.yml file, we will then use POD_ADDRESS_RANGE = 10.0.1.0/24
In fact, imap & smtp logs doesn't show the IPs from the front(s) container(s), but the IP of "mailu_default-endpoint". So it is sufficient to set POD_ADDRESS_RANGE to this specific ip (which can be found by inspecting mailu_default network). The issue is that this endpoint is created while the stack is created, I did'nt figure a way to determine this IP before the stack creation...
### Limitation with the ingress mode
With the default ingress mode, the front container(s) will see all origin IPs as 10.255.0.x (the ingress endpoint, which can be found by inspecting the ingress network).
This issue is known and discussed here:
https://github.com/moby/moby/issues/25526
A workaround (using network host mode and global deployment) is discussed here:
https://github.com/moby/moby/issues/25526#issuecomment-336363408
### Don't create an open relay!
As a side effect of this ingress-mode "feature", make sure that the ingress subnet is not in your RELAYHOST, otherwise you would be running an SMTP open relay.
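To double-check which subnet the ingress network actually uses, a one-liner such as this can help (assuming the default network name `ingress`):
```bash
# Print the ingress overlay subnet (10.255.0.0/16 by default).
docker network inspect ingress --format '{{range .IPAM.Config}}{{.Subnet}} {{end}}'
```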
## Scalability
- smtp and imap are scalable
- front and webmail are scalable (provided POD_ADDRESS_RANGE is used), although the Let's Encrypt magic might not like it (a race condition? or the risk of being banned by the Let's Encrypt servers if too many front containers attempt to renew the certificates at the same time)
- redis, antispam, antivirus, fetchmail, admin and webdav have not been tested (hence replicas=1 in the docker-compose.yml file below)
## Variable substitution and docker-compose.yml
The docker stack deploy command doesn't support variable substitution in the .yml file itself.
As a consequence, we cannot simply use:
```bash
docker stack deploy -c docker-compose.yml mailu
```
Instead, we will use the following workaround:
```bash
echo "$(docker-compose -f /mnt/docker/apps/mailu/docker-compose.yml config 2>/dev/null)" | docker stack deploy -c- mailu
```
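You can also run the substitution step on its own first, to review what will actually be deployed (the path is this guide's example; adjust it to your set-up):
```bash
# Preview the stack file after variable substitution, without deploying anything.
docker-compose -f /mnt/docker/apps/mailu/docker-compose.yml config 2>/dev/null | less
```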
We also need to:
- add a deploy section for every service
- modify the way the ports are defined for the front service
- add the POD_ADDRESS_RANGE definition for the imap, smtp and antispam services
## Docker compose
An example docker-compose-stack.yml file is shown below:
```yaml
version: '3.2'

services:
  front:
    image: mailu/nginx:$VERSION
    restart: always
    env_file: .env
    ports:
      - target: 80
        published: 80
      - target: 443
        published: 443
      - target: 110
        published: 110
      - target: 143
        published: 143
      - target: 993
        published: 993
      - target: 995
        published: 995
      - target: 25
        published: 25
      - target: 465
        published: 465
      - target: 587
        published: 587
    volumes:
      - "$ROOT/certs:/certs"
    deploy:
      replicas: 2

  redis:
    image: redis:alpine
    restart: always
    volumes:
      - "$ROOT/redis:/data"
    deploy:
      replicas: 1

  imap:
    image: mailu/dovecot:$VERSION
    restart: always
    env_file: .env
    environment:
      - POD_ADDRESS_RANGE=10.0.1.0/24
    volumes:
      - "$ROOT/mail:/mail"
      - "$ROOT/overrides:/overrides"
    depends_on:
      - front
    deploy:
      replicas: 2

  smtp:
    image: mailu/postfix:$VERSION
    restart: always
    env_file: .env
    environment:
      - POD_ADDRESS_RANGE=10.0.1.0/24
    volumes:
      - "$ROOT/overrides:/overrides"
    depends_on:
      - front
    deploy:
      replicas: 2

  antispam:
    image: mailu/rspamd:$VERSION
    restart: always
    env_file: .env
    environment:
      - POD_ADDRESS_RANGE=10.0.1.0/24
    volumes:
      - "$ROOT/filter:/var/lib/rspamd"
      - "$ROOT/dkim:/dkim"
      - "$ROOT/overrides/rspamd:/etc/rspamd/override.d"
    depends_on:
      - front
    deploy:
      replicas: 1

  antivirus:
    image: mailu/none:$VERSION
    restart: always
    env_file: .env
    volumes:
      - "$ROOT/filter:/data"
    deploy:
      replicas: 1

  webdav:
    image: mailu/none:$VERSION
    restart: always
    env_file: .env
    volumes:
      - "$ROOT/dav:/data"
    deploy:
      replicas: 1

  admin:
    image: mailu/admin:$VERSION
    restart: always
    env_file: .env
    volumes:
      - "$ROOT/data:/data"
      - "$ROOT/dkim:/dkim"
      - /var/run/docker.sock:/var/run/docker.sock:ro
    depends_on:
      - redis
    deploy:
      replicas: 1

  webmail:
    image: mailu/roundcube:$VERSION
    restart: always
    env_file: .env
    volumes:
      - "$ROOT/webmail:/data"
    depends_on:
      - imap
    deploy:
      replicas: 2

  fetchmail:
    image: mailu/fetchmail:$VERSION
    restart: always
    env_file: .env
    deploy:
      replicas: 1

networks:
  default:
    external:
      name: mailu_default
```
## Deploy Mailu on the docker swarm
Run the following command:
```bash
echo "$(docker-compose -f /mnt/docker/apps/mailu/docker-compose.yml config 2>/dev/null)" | docker stack deploy -c- mailu
```
See how the services are being deployed:
```bash
core@coreos-01 ~ $ docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
ywnsetmtkb1l mailu_antivirus replicated 1/1 mailu/none:master
pqokiaz0q128 mailu_fetchmail replicated 1/1 mailu/fetchmail:master
```
Check a specific service:
```bash
core@coreos-01 ~ $ docker service ps mailu_fetchmail
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
tbu8ppgsdffj mailu_fetchmail.1 mailu/fetchmail:master coreos-01 Running Running 11 days ago
```
You might also have a look at the logs:
```bash
core@coreos-01 ~ $ docker service logs -f mailu_fetchmail
```
## Remove the stack
Run the following command:
```bash
core@coreos-01 ~ $ docker stack rm mailu
```

@@ -0,0 +1,357 @@
# Install Mailu on a docker swarm
## Prerequisites
### Swarm
In order to deploy Mailu on a swarm, you will first need to initialize the swarm. The main command is:
```bash
docker swarm init --advertise-addr <IP_ADDR>
```
See https://docs.docker.com/engine/swarm/swarm-tutorial/create-swarm/
If you want to add other managers or workers, please use:
```bash
docker swarm join --token xxxxx
```
See https://docs.docker.com/engine/swarm/join-nodes/
You now have a working swarm, and you can check its status with:
```bash
core@coreos-01 ~/git/Mailu/docs/swarm/1.5 $ docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
xhgeekkrlttpmtgmapt5hyxrb black-pearl Ready Active 18.06.0-ce
sczlqjgfhehsfdjhfhhph1nvb * coreos-01 Ready Active Leader 18.03.1-ce
mzrm9nbdggsfz4sgq6dhs5i6n flying-dutchman Ready Active 18.06.0-ce
```
### Volume definition
For data persistence (the Mailu services might be launched or relaunched on any of the swarm nodes), Mailu data must be stored in a way that is accessible to every manager or worker in the swarm.
Hereafter we will use an NFS share:
```bash
core@coreos-01 ~ $ showmount -e 192.168.0.30
Export list for 192.168.0.30:
/mnt/Pool1/pv 192.168.0.0
```
On the NFS server, I am using the following /etc/exports:
```bash
$more /etc/exports
/mnt/Pool1/pv -alldirs -mapall=root -network 192.168.0.0 -mask 255.255.255.0
```
On the NFS server, I created the Mailu directory (in fact, I copied a working Mailu set-up):
```bash
$mkdir /mnt/Pool1/pv/mailu
```
On your manager node, mount the NFS share to check that it is available:
```bash
core@coreos-01 ~ $ sudo mount -t nfs 192.168.0.30:/mnt/Pool1/pv/mailu /mnt/local/
```
If this is OK, you can unmount it:
```bash
core@coreos-01 ~ $ sudo umount /mnt/local/
```
## Networking mode
In this example, we are using:
- the mesh routing mode (the default mode). With this mode, each service is given a virtual IP address, and Docker manages the routing between that virtual IP and the container(s) providing the service.
- the default ingress mode.
### Allow authentication with the mesh routing
In order to allow every front and webmail container to access the other services, we will use the variable POD_ADDRESS_RANGE.
Let's create the mailu_default network:
```bash
core@coreos-01 ~ $ docker network create -d overlay --attachable mailu_default
core@coreos-01 ~ $ docker network inspect mailu_default | grep Subnet
"Subnet": "10.0.1.0/24",
```
In the docker-compose.yml file, we will then use POD_ADDRESS_RANGE = 10.0.1.0/24
In fact, imap & smtp logs doesn't show the IPs from the front(s) container(s), but the IP of "mailu_default-endpoint". So it is sufficient to set POD_ADDRESS_RANGE to this specific ip (which can be found by inspecting mailu_default network). The issue is that this endpoint is created while the stack is created, I did'nt figure a way to determine this IP before the stack creation...
### Limitation with the ingress mode
With the default ingress mode, the front container(s) will see all origin IPs as 10.255.0.x (the ingress endpoint, which can be found by inspecting the ingress network).
This issue is known and discussed here:
https://github.com/moby/moby/issues/25526
A workaround (using network host mode and global deployment) is discussed here:
https://github.com/moby/moby/issues/25526#issuecomment-336363408
### Don't create an open relay!
As a side effect of this ingress-mode "feature", make sure that the ingress subnet is not in your RELAYHOST, otherwise you would be running an SMTP open relay.
## Scalability
- smtp and imap are scalable
- front and webmail are scalable (provided POD_ADDRESS_RANGE is used), although the Let's Encrypt magic might not like it (a race condition? or the risk of being banned by the Let's Encrypt servers if too many front containers attempt to renew the certificates at the same time)
- redis, antispam, antivirus, fetchmail, admin and webdav have not been tested (hence replicas=1 in the docker-compose.yml file below)
## Variable substitution and docker-compose.yml
The docker stack deploy command doesn't support variable substitution in the .yml file itself. As a consequence, we need to use the following workaround:
```bash
echo "$(docker-compose -f /mnt/docker/apps/mailu/docker-compose.yml config 2>/dev/null)" | docker stack deploy -c- mailu
```
We also need to:
- change the way we define the volumes (an NFS share in our case)
- add a deploy section for every service
- modify the way the ports are defined for the front service
## Docker compose
An example docker-compose-stack.yml file is shown below:
```yaml
version: '3.2'

services:
  front:
    image: mailu/nginx:$VERSION
    restart: always
    env_file: .env
    ports:
      - target: 80
        published: 80
      - target: 443
        published: 443
      - target: 110
        published: 110
      - target: 143
        published: 143
      - target: 993
        published: 993
      - target: 995
        published: 995
      - target: 25
        published: 25
      - target: 465
        published: 465
      - target: 587
        published: 587
    volumes:
      # - "$ROOT/certs:/certs"
      - type: volume
        source: mailu_certs
        target: /certs
    deploy:
      replicas: 2

  redis:
    image: redis:alpine
    restart: always
    volumes:
      # - "$ROOT/redis:/data"
      - type: volume
        source: mailu_redis
        target: /data
    deploy:
      replicas: 1

  imap:
    image: mailu/dovecot:$VERSION
    restart: always
    env_file: .env
    environment:
      - POD_ADDRESS_RANGE=10.0.1.0/24
    volumes:
      # - "$ROOT/mail:/mail"
      - type: volume
        source: mailu_mail
        target: /mail
      # - "$ROOT/overrides:/overrides"
      - type: volume
        source: mailu_overrides
        target: /overrides
    depends_on:
      - front
    deploy:
      replicas: 2

  smtp:
    image: mailu/postfix:$VERSION
    restart: always
    env_file: .env
    environment:
      - POD_ADDRESS_RANGE=10.0.1.0/24
    volumes:
      # - "$ROOT/overrides:/overrides"
      - type: volume
        source: mailu_overrides
        target: /overrides
    depends_on:
      - front
    deploy:
      replicas: 2

  antispam:
    image: mailu/rspamd:$VERSION
    restart: always
    env_file: .env
    environment:
      - POD_ADDRESS_RANGE=10.0.1.0/24
    depends_on:
      - front
    volumes:
      # - "$ROOT/filter:/var/lib/rspamd"
      - type: volume
        source: mailu_filter
        target: /var/lib/rspamd
      # - "$ROOT/dkim:/dkim"
      - type: volume
        source: mailu_dkim
        target: /dkim
      # - "$ROOT/overrides/rspamd:/etc/rspamd/override.d"
      - type: volume
        source: mailu_overrides_rspamd
        target: /etc/rspamd/override.d
    deploy:
      replicas: 1

  antivirus:
    image: mailu/none:$VERSION
    restart: always
    env_file: .env
    volumes:
      # - "$ROOT/filter:/data"
      - type: volume
        source: mailu_filter
        target: /data
    deploy:
      replicas: 1

  webdav:
    image: mailu/none:$VERSION
    restart: always
    env_file: .env
    volumes:
      # - "$ROOT/dav:/data"
      - type: volume
        source: mailu_dav
        target: /data
    deploy:
      replicas: 1

  admin:
    image: mailu/admin:$VERSION
    restart: always
    env_file: .env
    volumes:
      # - "$ROOT/data:/data"
      - type: volume
        source: mailu_data
        target: /data
      # - "$ROOT/dkim:/dkim"
      - type: volume
        source: mailu_dkim
        target: /dkim
      - /var/run/docker.sock:/var/run/docker.sock:ro
    depends_on:
      - redis
    deploy:
      replicas: 1

  webmail:
    image: mailu/roundcube:$VERSION
    restart: always
    env_file: .env
    volumes:
      # - "$ROOT/webmail:/data"
      - type: volume
        source: mailu_data
        target: /data
    depends_on:
      - imap
    deploy:
      replicas: 2

  fetchmail:
    image: mailu/fetchmail:$VERSION
    restart: always
    env_file: .env
    deploy:
      replicas: 1

networks:
  default:
    external:
      name: mailu_default

volumes:
  mailu_filter:
    driver_opts:
      type: "nfs"
      o: "addr=192.168.0.30,soft,rw"
      device: ":/mnt/Pool1/pv/mailu/filter"
  mailu_dkim:
    driver_opts:
      type: "nfs"
      o: "addr=192.168.0.30,soft,rw"
      device: ":/mnt/Pool1/pv/mailu/dkim"
  mailu_overrides_rspamd:
    driver_opts:
      type: "nfs"
      o: "addr=192.168.0.30,soft,rw"
      device: ":/mnt/Pool1/pv/mailu/overrides/rspamd"
  mailu_data:
    driver_opts:
      type: "nfs"
      o: "addr=192.168.0.30,soft,rw"
      device: ":/mnt/Pool1/pv/mailu/data"
  mailu_mail:
    driver_opts:
      type: "nfs"
      o: "addr=192.168.0.30,soft,rw"
      device: ":/mnt/Pool1/pv/mailu/mail"
  mailu_overrides:
    driver_opts:
      type: "nfs"
      o: "addr=192.168.0.30,soft,rw"
      device: ":/mnt/Pool1/pv/mailu/overrides"
  mailu_dav:
    driver_opts:
      type: "nfs"
      o: "addr=192.168.0.30,soft,rw"
      device: ":/mnt/Pool1/pv/mailu/dav"
  mailu_certs:
    driver_opts:
      type: "nfs"
      o: "addr=192.168.0.30,soft,rw"
      device: ":/mnt/Pool1/pv/mailu/certs"
  mailu_redis:
    driver_opts:
      type: "nfs"
      o: "addr=192.168.0.30,soft,rw"
      device: ":/mnt/Pool1/pv/mailu/redis"
```
## Deploy Mailu on the docker swarm
Run the following command:
```bash
echo "$(docker-compose -f /mnt/docker/apps/mailu/docker-compose.yml config 2>/dev/null)" | docker stack deploy -c- mailu
```
See how the services are being deployed:
```bash
core@coreos-01 ~ $ docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
ywnsetmtkb1l mailu_antivirus replicated 1/1 mailu/none:master
pqokiaz0q128 mailu_fetchmail replicated 1/1 mailu/fetchmail:master
```
Check a specific service:
```bash
core@coreos-01 ~ $ docker service ps mailu_fetchmail
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
tbu8ppgsdffj mailu_fetchmail.1 mailu/fetchmail:master coreos-01 Running Running 11 days ago
```
## Remove the stack
Run the following command:
```bash
core@coreos-01 ~ $ docker stack rm mailu
```

@@ -1,6 +1,6 @@
 FROM alpine:3.8

-RUN apk add --no-cache python py-jinja2 rspamd rspamd-controller rspamd-proxy ca-certificates py-pip \
+RUN apk add --no-cache python py-jinja2 rspamd rspamd-controller rspamd-proxy rspamd-fuzzy ca-certificates py-pip \
  && pip install --upgrade pip \
  && pip install tenacity
@@ -9,10 +9,7 @@ RUN mkdir /run/rspamd
 COPY conf/ /conf
 COPY start.py /start.py

-# Temporary fix to remove references to rspamd-fuzzy for now
-RUN sed -i '/fuzzy/,$d' /etc/rspamd/rspamd.conf
-
-EXPOSE 11332/tcp 11334/tcp
+EXPOSE 11332/tcp 11334/tcp 11335/tcp

 VOLUME ["/var/lib/rspamd"]

@@ -0,0 +1,34 @@
rule "local" {
  # Fuzzy storage server list
  servers = "localhost:11335";
  # Default symbol for unknown flags
  symbol = "LOCAL_FUZZY_UNKNOWN";
  # Additional mime types to store/check
  mime_types = ["application/*"];
  # Hash weight threshold for all maps
  max_score = 20.0;
  # Whether we can learn this storage
  read_only = no;
  # Ignore unknown flags
  skip_unknown = yes;
  # Hash generation algorithm
  algorithm = "mumhash";
  # Map flags to symbols
  fuzzy_map = {
    LOCAL_FUZZY_DENIED {
      # Local threshold
      max_score = 20.0;
      # Flag to match
      flag = 11;
    }
    LOCAL_FUZZY_PROB {
      max_score = 10.0;
      flag = 12;
    }
    LOCAL_FUZZY_WHITE {
      max_score = 2.0;
      flag = 13;
    }
  }
}

@@ -0,0 +1,19 @@
group "fuzzy" {
  max_score = 12.0;
  symbol "LOCAL_FUZZY_UNKNOWN" {
    weight = 5.0;
    description = "Generic fuzzy hash match";
  }
  symbol "LOCAL_FUZZY_DENIED" {
    weight = 12.0;
    description = "Denied fuzzy hash";
  }
  symbol "LOCAL_FUZZY_PROB" {
    weight = 5.0;
    description = "Probable fuzzy hash";
  }
  symbol "LOCAL_FUZZY_WHITE" {
    weight = -2.1;
    description = "Whitelisted fuzzy hash";
  }
}

@@ -1,3 +1,4 @@
+type = "controller";
 bind_socket = "*:11334";
 password = "mailu";
-secure_ip = "{{ FRONT_ADDRESS }}";
+secure_ip = "{% if POD_ADDRESS_RANGE %}{{ POD_ADDRESS_RANGE }}{% else %}{{ FRONT_ADDRESS }}{% endif %}";

@@ -0,0 +1,6 @@
type = "fuzzy";
bind_socket = "*:11335";
count = 1;
backend = "redis";
expire = 90d;
allow_update = ["127.0.0.1"];
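Once the stack is running, a quick way to confirm that the fuzzy worker is alive is to query rspamd's runtime stats through the controller; this is only a sketch, reusing the host and password already used by the mailtrain scripts above:
```bash
# Ask the rspamd controller for stats and keep the fuzzy-related lines.
rspamc -h antispam:11334 -P mailu stat | grep -i fuzzy
```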

@@ -1 +1,2 @@
+type = "normal";
 enabled = false;
