Merge remote-tracking branch 'nextgens/dynamic-resolution' into dynamic-resolution

commit b59e4bbd91
@@ -0,0 +1,22 @@
# syntax=docker/dockerfile-upstream:1.4.3

FROM node:16-alpine3.16

WORKDIR /work

COPY package.json ./

RUN set -euxo pipefail \
  ; npm config set update-notifier false \
  ; npm install --no-audit --no-fund \
  ; sed -i 's/#007bff/#55a5d9/' node_modules/admin-lte/build/scss/_bootstrap-variables.scss \
  ; mkdir assets \
  ; for l in ca da de:de-DE en:en-GB es:es-ES eu fr:fr-FR he hu is it:it-IT ja nb_NO:no-NB nl:nl-NL pl pt:pt-PT ru sv:sv-SE zh; do \
      cp node_modules/datatables.net-plugins/i18n/${l#*:}.json assets/${l%:*}.json; \
    done

COPY assets/ ./assets/
COPY webpack.config.js ./

RUN set -euxo pipefail \
  ; node_modules/.bin/webpack-cli --color
@@ -1,79 +0,0 @@
require('./app.css');

import logo from './mailu.png';
import modules from "./*.json";

// TODO: conditionally (or lazy) load select2 and dataTable
$('document').ready(function() {

  // intercept anchors with data-clicked attribute and open alternate location instead
  $('[data-clicked]').click(function(e) {
    e.preventDefault();
    window.location.href = $(this).data('clicked');
  });

  // use post for language selection
  $('#mailu-languages > a').click(function(e) {
    e.preventDefault();
    $.post({
      url: $(this).attr('href'),
      success: function() {
        window.location = window.location.href;
      },
    });
  });

  // allow en-/disabling of inputs in fieldset with checkbox in legend
  $('fieldset legend input[type=checkbox]').change(function() {
    var fieldset = $(this).parents('fieldset');
    if (this.checked) {
      fieldset.removeAttr('disabled');
      fieldset.find('input,textarea').not(this).removeAttr('disabled');
    } else {
      fieldset.attr('disabled', '');
      fieldset.find('input,textarea').not(this).attr('disabled', '');
    }
  });

  // display of range input value
  $('input[type=range]').each(function() {
    var value_element = $('#'+this.id+'_value');
    if (value_element.length) {
      value_element = $(value_element[0]);
      var infinity = $(this).data('infinity');
      var step = $(this).attr('step');
      $(this).on('input', function() {
        var num = (infinity && this.value == 0) ? '∞' : (this.value/step).toFixed(2);
        if (num.endsWith('.00')) num = num.substr(0, num.length - 3);
        value_element.text(num);
      }).trigger('input');
    }
  });

  // init select2
  $('.mailselect').select2({
    tags: true,
    tokenSeparators: [',', ' '],
  });

  // init dataTable
  var d = $(document.documentElement);
  $('.dataTable').DataTable({
    'responsive': true,
    language: {
      url: d.data('static') + d.attr('lang') + '.json',
    },
  });

  // init clipboard.js
  new ClipboardJS('.btn-clip');

  // disable login if not possible
  var l = $('#login_needs_https');
  if (l.length && window.location.protocol != 'https:') {
    l.removeClass("d-none");
    $('form :input').prop('disabled', true);
  }

});
@@ -0,0 +1,136 @@
// Inspired from https://github.com/mehdibo/hibp-js/blob/master/hibp.js
function sha1(string) {
  var buffer = new TextEncoder("utf-8").encode(string);
  return crypto.subtle.digest("SHA-1", buffer).then(function (buffer) {
    // Get the hex code
    var hexCodes = [];
    var view = new DataView(buffer);
    for (var i = 0; i < view.byteLength; i += 4) {
      // Using getUint32 reduces the number of iterations needed (we process 4 bytes each time)
      var value = view.getUint32(i);
      // toString(16) will give the hex representation of the number without padding
      var stringValue = value.toString(16);
      // We use concatenation and slice for padding
      var padding = '00000000';
      var paddedValue = (padding + stringValue).slice(-padding.length);
      hexCodes.push(paddedValue);
    }
    // Join all the hex strings into one
    return hexCodes.join("");
  });
}

function hibpCheck(pwd) {
  // We hash the pwd first
  sha1(pwd).then(function(hash){
    // We send the first 5 chars of the hash to hibp's API
    const req = new XMLHttpRequest();
    req.open('GET', 'https://api.pwnedpasswords.com/range/'+hash.substr(0, 5));
    req.setRequestHeader('Add-Padding', 'true');
    req.addEventListener("load", function(){
      // When we get back a response from the server
      // We create an array of lines and loop through them
      const lines = this.responseText.split("\n");
      const hashSub = hash.slice(5).toUpperCase();
      for (var i in lines){
        // Check if the line matches the rest of the hash
        if (lines[i].substring(0, 35) == hashSub){
          const val = parseInt(lines[i].trimEnd("\r").split(":")[1]);
          if (val > 0) {
            $("#pwned").val(val);
          }
          return; // If found no need to continue the loop
        }
      }
      $("#pwned").val(0);
    });
    req.send();
  });
}

// TODO: conditionally (or lazy) load select2 and dataTable
$('document').ready(function() {

  // intercept anchors with data-clicked attribute and open alternate location instead
  $('[data-clicked]').click(function(e) {
    e.preventDefault();
    window.location.href = $(this).data('clicked');
  });

  // use post for language selection
  $('#mailu-languages > a').click(function(e) {
    e.preventDefault();
    $.post({
      url: $(this).attr('href'),
      success: function() {
        window.location = window.location.href;
      },
    });
  });

  // allow en-/disabling of inputs in fieldset with checkbox in legend
  $('fieldset legend input[type=checkbox]').change(function() {
    var fieldset = $(this).parents('fieldset');
    if (this.checked) {
      fieldset.removeAttr('disabled');
      fieldset.find('input,textarea').not(this).removeAttr('disabled');
    } else {
      fieldset.attr('disabled', '');
      fieldset.find('input,textarea').not(this).attr('disabled', '');
    }
  });

  // display of range input value
  $('input[type=range]').each(function() {
    var value_element = $('#'+this.id+'_value');
    if (value_element.length) {
      value_element = $(value_element[0]);
      var infinity = $(this).data('infinity');
      var step = $(this).attr('step');
      $(this).on('input', function() {
        var num = (infinity && this.value == 0) ? '∞' : (this.value/step).toFixed(2);
        if (num.endsWith('.00')) num = num.substr(0, num.length - 3);
        value_element.text(num);
      }).trigger('input');
    }
  });

  // init select2
  $('.mailselect').select2({
    tags: true,
    tokenSeparators: [',', ' '],
  });

  // init dataTable
  var d = $(document.documentElement);
  $('.dataTable').DataTable({
    'responsive': true,
    language: {
      url: d.data('static') + d.attr('lang') + '.json',
    },
  });

  // init clipboard.js
  new ClipboardJS('.btn-clip');

  // disable login if not possible
  var l = $('#login_needs_https');
  if (l.length && window.location.protocol != 'https:') {
    l.removeClass("d-none");
    $('form :input').prop('disabled', true);
  }

  if (window.isSecureContext) {
    $("#pw").on("change paste", function(){
      hibpCheck($(this).val());
      return true;
    });
    $("#pw").closest("form").submit(function(event){
      if (parseInt($("#pwned").val()) < 0) {
        hibpCheck($("#pw").val());
      }
    });
  }

});
[binary image changed: 4.8 KiB before, 4.8 KiB after]
@@ -0,0 +1,22 @@
""" Add user.allow_spoofing

Revision ID: 7ac252f2bbbf
Revises: f4f0f89e0047
Create Date: 2022-11-20 08:57:16.879152

"""

# revision identifiers, used by Alembic.
revision = '7ac252f2bbbf'
down_revision = 'f4f0f89e0047'

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.add_column('user', sa.Column('allow_spoofing', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()))


def downgrade():
    op.drop_column('user', 'allow_spoofing')
@@ -0,0 +1,25 @@
""" Add fetch.scan and fetch.folders

Revision ID: f4f0f89e0047
Revises: 8f9ea78776f4
Create Date: 2022-11-13 16:29:01.246509

"""

# revision identifiers, used by Alembic.
revision = 'f4f0f89e0047'
down_revision = '8f9ea78776f4'

from alembic import op
import sqlalchemy as sa
import mailu


def upgrade():
    with op.batch_alter_table('fetch') as batch:
        batch.add_column(sa.Column('scan', sa.Boolean(), nullable=False, server_default=sa.sql.expression.false()))
        batch.add_column(sa.Column('folders', mailu.models.CommaSeparatedList(), nullable=True))


def downgrade():
    with op.batch_alter_table('fetch') as batch:
        batch.drop_column('folders')
        batch.drop_column('scan')
@@ -1,78 +0,0 @@
alembic==1.7.4
appdirs==1.4.4
Babel==2.9.1
bcrypt==3.2.0
blinker==1.4
CacheControl==0.12.9
certifi==2021.10.8
# cffi==1.15.0
chardet==4.0.0
click==8.0.3
colorama==0.4.4
contextlib2==21.6.0
cryptography==35.0.0
decorator==5.1.0
# distlib==0.3.1
# distro==1.5.0
dnspython==2.1.0
dominate==2.6.0
email-validator==1.1.3
Flask==2.0.2
Flask-Babel==2.0.0
Flask-Bootstrap==3.3.7.1
Flask-DebugToolbar==0.11.0
Flask-Limiter==1.4
Flask-Login==0.5.0
flask-marshmallow==0.14.0
Flask-Migrate==3.1.0
Flask-Script==2.0.6
Flask-SQLAlchemy==2.5.1
Flask-WTF==0.15.1
greenlet==1.1.2
gunicorn==20.1.0
html5lib==1.1
idna==3.3
infinity==1.5
intervals==0.9.2
itsdangerous==2.0.1
Jinja2==3.0.2
limits==1.5.1
lockfile==0.12.2
Mako==1.1.5
MarkupSafe==2.0.1
marshmallow==3.14.0
marshmallow-sqlalchemy==0.26.1
msgpack==1.0.2
# mysqlclient==2.0.3
mysql-connector-python==8.0.25
ordered-set==4.0.2
# packaging==20.9
passlib==1.7.4
# pep517==0.10.0
progress==1.6
#psycopg2==2.9.1
psycopg2-binary==2.9.3
pycparser==2.20
Pygments==2.10.0
pyOpenSSL==21.0.0
pyparsing==3.0.4
pytz==2021.3
PyYAML==6.0
redis==3.5.3
requests==2.26.0
retrying==1.3.3
# six==1.15.0
socrate==0.2.0
SQLAlchemy==1.4.26
srslib==0.1.4
tabulate==0.8.9
tenacity==8.0.1
toml==0.10.2
urllib3==1.26.7
validators==0.18.2
visitor==0.1.3
webencodings==0.5.1
Werkzeug==2.0.2
WTForms==2.3.3
WTForms-Components==0.10.5
xmltodict==0.12.0
@@ -1,28 +0,0 @@
Flask
Flask-Login
Flask-SQLAlchemy
Flask-bootstrap
Flask-Babel
Flask-migrate
Flask-script
Flask-wtf
Flask-debugtoolbar
limits
redis
WTForms-Components
socrate
passlib
gunicorn
tabulate
PyYAML
PyOpenSSL
Pygments
dnspython
tenacity
mysql-connector-python
idna
srslib
marshmallow
flask-marshmallow
marshmallow-sqlalchemy
xmltodict
@@ -0,0 +1,141 @@
#!/usr/bin/env bash

set -euo pipefail

### CONFIG

DEV_NAME="${DEV_NAME:-mailu-dev}"
DEV_DB="${DEV_DB:-}"
DEV_PROFILER="${DEV_PROFILER:-false}"
DEV_LISTEN="${DEV_LISTEN:-127.0.0.1:8080}"
[[ "${DEV_LISTEN}" == *:* ]] || DEV_LISTEN="127.0.0.1:${DEV_LISTEN}"
DEV_ADMIN="${DEV_ADMIN:-admin@example.com}"
DEV_PASSWORD="${DEV_PASSWORD:-letmein}"

### MAIN

[[ -n "${DEV_DB}" ]] && {
  [[ -f "${DEV_DB}" ]] || {
    echo "Sorry, can't find DEV_DB: '${DEV_DB}'"
    exit 1
  }
  DEV_DB="$(realpath "${DEV_DB}")"
}

docker="$(command -v podman || command -v docker || echo false)"
[[ "${docker}" == "false" ]] && {
  echo "Sorry, you'll need podman or docker to run this."
  exit 1
}

tmp="$(mktemp -d)"
[[ -n "${tmp}" && -d "${tmp}" ]] || {
  echo "Sorry, can't create temporary folder."
  exit 1
}
trap "rm -rf '${tmp}'" INT TERM EXIT

admin="$(realpath "$(pwd)/${0%/*}")"
base="${admin}/../base"
assets="${admin}/assets"

cd "${tmp}"

# base
cp "${base}"/requirements-* .
cp -r "${base}"/libs .
sed -E '/^#/d;s:^FROM system$:FROM system AS base:' "${base}/Dockerfile" >Dockerfile

# assets
cp "${assets}/package.json" .
cp -r "${assets}/assets" ./assets
awk '/new compress/{f=1}!f{print}/}),/{f=0}' <"${assets}/webpack.config.js" >webpack.config.js
sed -E '/^#/d;s:^(FROM [^ ]+$):\1 AS assets:' "${assets}/Dockerfile" >>Dockerfile

# admin
sed -E '/^#/d;/^(COPY|EXPOSE|HEALTHCHECK|VOLUME|CMD) /d; s:^(.* )[^ ]*pybabel[^\\]*(.*):\1true \2:' "${admin}/Dockerfile" >>Dockerfile

# development
cat >>Dockerfile <<EOF
COPY --from=assets /work/static/ ./static/

RUN set -euxo pipefail \
  ; mkdir /data \
  ; ln -s /app/audit.py / \
  ; ln -s /app/start.py /

ENV \
    FLASK_DEBUG="true" \
    MEMORY_SESSIONS="true" \
    RATELIMIT_STORAGE_URL="memory://" \
    SESSION_COOKIE_SECURE="false" \
    \
    DEBUG="true" \
    DEBUG_PROFILER="${DEV_PROFILER}" \
    DEBUG_ASSETS="/app/static" \
    DEBUG_TB_INTERCEPT_REDIRECTS=False \
    \
    ADMIN_ADDRESS="127.0.0.1" \
    FRONT_ADDRESS="127.0.0.1" \
    SMTP_ADDRESS="127.0.0.1" \
    IMAP_ADDRESS="127.0.0.1" \
    REDIS_ADDRESS="127.0.0.1" \
    ANTIVIRUS_ADDRESS="127.0.0.1" \
    ANTISPAM_ADDRESS="127.0.0.1" \
    WEBMAIL_ADDRESS="127.0.0.1" \
    WEBDAV_ADDRESS="127.0.0.1"

CMD ["/bin/bash", "-c", "flask db upgrade &>/dev/null && flask mailu admin '${DEV_ADMIN/@*}' '${DEV_ADMIN#*@}' '${DEV_PASSWORD}' --mode ifmissing >/dev/null; flask --debug run --host=0.0.0.0 --port=8080"]
EOF

# build
chmod -R u+rwX,go+rX .
"${docker}" build --tag "${DEV_NAME}:latest" .

# gather volumes to map into container
volumes=()

[[ -n "${DEV_DB}" ]] && volumes+=( --volume "${DEV_DB}:/data/main.db" )

for vol in audit.py start.py mailu/ migrations/; do
  volumes+=( --volume "${admin}/${vol}:/app/${vol}" )
done

for file in "${assets}/assets"/*; do
  [[ ! -f "${file}" || "${file}" == */vendor.js ]] && continue
  volumes+=( --volume "${file}:/app/static/${file/*\//}" )
done

# show configuration
cat <<EOF

=============================================================================
The "${DEV_NAME}" container was built using this configuration:

DEV_NAME="${DEV_NAME}"
DEV_DB="${DEV_DB}"
DEV_PROFILER="${DEV_PROFILER}"
DEV_LISTEN="${DEV_LISTEN}"
DEV_ADMIN="${DEV_ADMIN}"
DEV_PASSWORD="${DEV_PASSWORD}"
=============================================================================

=============================================================================
You can start the container later using this commandline:

${docker/*\/} run --rm -it --name "${DEV_NAME}" --publish ${DEV_LISTEN}:8080$(printf " %q" "${volumes[@]}") "${DEV_NAME}"
=============================================================================

=============================================================================
The Mailu UI can be found here: http://${DEV_LISTEN}/sso/login
EOF
[[ -z "${DEV_DB}" ]] && echo "You can log in with user ${DEV_ADMIN} and password ${DEV_PASSWORD}"
cat <<EOF
=============================================================================

Starting mailu dev environment...
EOF

# run
"${docker}" run --rm -it --name "${DEV_NAME}" --publish "${DEV_LISTEN}:8080" "${volumes[@]}" "${DEV_NAME}"
@@ -0,0 +1,86 @@
# syntax=docker/dockerfile-upstream:1.4.3

# base system image (intermediate)
ARG DISTRO=alpine:3.16.3
FROM $DISTRO as system

ENV TZ=Etc/UTC LANG=C.UTF-8

ARG MAILU_UID=1000
ARG MAILU_GID=1000

RUN set -euxo pipefail \
  ; addgroup -Sg ${MAILU_GID} mailu \
  ; adduser -Sg ${MAILU_UID} -G mailu -h /app -g "mailu app" -s /bin/bash mailu \
  ; apk add --no-cache bash ca-certificates curl python3 tzdata libcap \
  ; machine="$(uname -m)" \
  ; ! [[ "${machine}" == x86_64 ]] \
    || apk add --no-cache --repository=http://dl-cdn.alpinelinux.org/alpine/edge/testing hardened-malloc==11-r0

ENV LD_PRELOAD=/usr/lib/libhardened_malloc.so
ENV CXXFLAGS="-g -O2 -fdebug-prefix-map=/app=. -fstack-protector-strong -Wformat -Werror=format-security -fstack-clash-protection -fexceptions"
ENV CFLAGS="-g -O2 -fdebug-prefix-map=/app=. -fstack-protector-strong -Wformat -Werror=format-security -fstack-clash-protection -fexceptions"
ENV CPPFLAGS="-Wdate-time -D_FORTIFY_SOURCE=2"
ENV LDFLAGS="-Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now"

WORKDIR /app

CMD /bin/bash


# build virtual env (intermediate)
FROM system as build

ARG MAILU_DEPS=prod

ENV VIRTUAL_ENV=/app/venv

COPY requirements-build.txt ./

RUN set -euxo pipefail \
  ; apk add --no-cache py3-pip \
  ; python3 -m venv ${VIRTUAL_ENV} \
  ; ${VIRTUAL_ENV}/bin/pip install --no-cache-dir -r requirements-build.txt \
  ; apk del -r py3-pip \
  ; rm -f /tmp/*.pem

ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"

COPY requirements-${MAILU_DEPS}.txt ./
COPY libs/ libs/

ARG SNUFFLEUPAGUS_VERSION=0.8.3
ENV SNUFFLEUPAGUS_URL https://github.com/jvoisin/snuffleupagus/archive/refs/tags/v$SNUFFLEUPAGUS_VERSION.tar.gz

RUN set -euxo pipefail \
  ; machine="$(uname -m)" \
  ; deps="build-base gcc libffi-dev python3-dev" \
  ; [[ "${machine}" != x86_64 ]] && \
    deps="${deps} cargo git libressl-dev mariadb-connector-c-dev postgresql-dev" \
  ; apk add --virtual .build-deps ${deps} \
  ; [[ "${machine}" == armv7* ]] && \
    mkdir -p /root/.cargo/registry/index && \
    git clone --bare https://github.com/rust-lang/crates.io-index.git /root/.cargo/registry/index/github.com-1285ae84e5963aae \
  ; pip install -r requirements-${MAILU_DEPS}.txt \
  ; curl -sL ${SNUFFLEUPAGUS_URL} | tar xz \
  ; cd snuffleupagus-$SNUFFLEUPAGUS_VERSION \
  ; rm -rf src/tests/*php7*/ src/tests/*session*/ src/tests/broken_configuration/ src/tests/*cookie* src/tests/upload_validation/ \
  ; apk add --virtual .build-deps php81-dev php81-cgi php81-simplexml php81-xml pcre-dev build-base php81-pear php81-openssl re2c \
  ; ln -s /usr/bin/phpize81 /usr/bin/phpize \
  ; ln -s /usr/bin/pecl81 /usr/bin/pecl \
  ; ln -s /usr/bin/php-config81 /usr/bin/php-config \
  ; ln -s /usr/bin/php81 /usr/bin/php \
  ; pecl install vld-beta \
  ; make -j $(grep -c processor /proc/cpuinfo) release \
  ; cp src/.libs/snuffleupagus.so /app \
  ; rm -rf /root/.cargo /tmp/*.pem /root/.cache

# base mailu image
FROM system

COPY --from=build /app/venv/ /app/venv/
COPY --chown=root:root --from=build /app/snuffleupagus.so /usr/lib/php81/modules/
RUN setcap 'cap_net_bind_service=+ep' /app/venv/bin/gunicorn

ENV VIRTUAL_ENV=/app/venv
ENV PATH="${VIRTUAL_ENV}/bin:${PATH}"
@@ -0,0 +1,20 @@
.DS_Store
.idea
tmp

*.bak
*~
.*.swp

__pycache__/
*.pyc
*.pyo
*.egg-info/

.build
.env*
.venv

*.code-workspace

build/
@@ -0,0 +1,7 @@
This project is open source, and your contributions are all welcome. There are mainly three ways to contribute to the project:

1. use Podop, either on test or on production servers, and report meaningful bugs when you find some;
2. write and publish, or contribute to, mail distributions based on Podop, like Mailu;
3. contribute code and/or configuration to the repository (see [the development guidelines](https://mailu.io/contributors/guide.html) for details);

Either way, keep in mind that the code you write must be licensed under the same conditions as the project itself. Additionally, all contributors are considered equal co-authors of the project.
@@ -0,0 +1,25 @@
MIT License

Copyright (c) 2018 All Podop contributors at the date

This software consists of voluntary contributions made by multiple individuals.
For exact contribution history, see the revision history available at
https://github.com/Mailu/podop.git

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,2 @@
include README.md
include LICENSE.md
@@ -0,0 +1,112 @@
Podop is a piece of middleware designed to run between Postfix or Dovecot
on one side and any Python implementation of a table lookup protocol on the
other side.

It is thus able to forward Postfix maps and Dovecot dicts to the same
(or multiple) backends, so that a single, more flexible backend can serve
a mail distribution.

Examples
========

- Connect Postfix to a DNS lookup so that every domain with a proper MX
  record pointing to your Postfix is accepted as a local domain
- Connect both Postfix and Dovecot to an HTTP microservice to run a highly
  available, microservice-based mail service
- Use a single database server running any Python-compatible API for both
  your Postfix and Dovecot servers

Configure Podop tables
======================

Podop tables are configured through CLI arguments when running the server.
You must provide a ``--name`` for the table, a ``--type`` for the table and
a ``--param`` that parametrizes the map.

URL table
---------

The URL table will initiate an HTTP GET request for read access and an HTTP
POST request for write access to a table. The table is parametrized with
a template URL containing ``§`` (or ``{}``) for inserting the table key.

```
--name test --type url --param http://microservice/api/v1/map/tests/§
```
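
For context, a full invocation would combine such a table definition with the
``--socket`` and ``--mode`` options of the ``podop`` script; the sketch below is
only illustrative (the socket path and URL are placeholders):

```
podop --socket /tmp/podop.socket --mode postfix \
      --name test --type url --param http://microservice/api/v1/map/tests/§
```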

GET requests should return ``200`` and a JSON-encoded object
that will be passed either to Postfix or Dovecot. They should return ``4XX``
for access issues that will result in a lookup miss, and ``5XX`` for backend
issues that will result in a temporary failure.

POST requests will contain a JSON-encoded object in the request body, which
will be saved in the table.
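
As an illustration of this contract, here is a minimal sketch of a backend that
a ``url`` table could point at. It is not part of Podop: it assumes the
``aiohttp`` web framework, and the route and in-memory store are hypothetical.

```python
# Hypothetical backend for a Podop "url" table (illustrative sketch only).
from aiohttp import web

STORE = {"example.com": {"postmaster": "admin@example.com"}}  # made-up data

async def get_entry(request):
    key = request.match_info["key"]
    if key not in STORE:
        return web.Response(status=404)   # 4XX -> Podop treats it as a lookup miss
    return web.json_response(STORE[key])  # 200 + JSON -> passed back to Postfix/Dovecot

async def set_entry(request):
    # The POST body is the JSON-encoded value to store under the key
    STORE[request.match_info["key"]] = await request.json()
    return web.Response()

app = web.Application()
app.add_routes([
    web.get("/api/v1/map/tests/{key}", get_entry),
    web.post("/api/v1/map/tests/{key}", set_entry),
])

if __name__ == "__main__":
    web.run_app(app, port=8000)
```

With such a service running locally, the table parameter above would become
``http://localhost:8000/api/v1/map/tests/§``.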

Postfix usage
=============

In order to access Podop tables from Postfix, you should set up ``socketmap``
Postfix maps. For instance, in order to access the ``test`` table on a Podop
socket at ``/tmp/podop.socket``, use the following setup:

```
virtual_alias_maps = socketmap:unix:/tmp/podop.socket:test
```

Multiple maps or identical maps can be configured for various usages.

```
virtual_alias_maps = socketmap:unix:/tmp/podop.socket:alias
virtual_mailbox_domains = socketmap:unix:/tmp/podop.socket:domain
virtual_mailbox_maps = socketmap:unix:/tmp/podop.socket:alias
```

In order to simplify the configuration, you can set up a shortcut.

```
podop = socketmap:unix:/tmp/podop.socket
virtual_alias_maps = ${podop}:alias
virtual_mailbox_domains = ${podop}:domain
virtual_mailbox_maps = ${podop}:alias
```

Dovecot usage
=============

In order to access Podop tables from Dovecot, you should set up a ``proxy``
Dovecot dictionary. For instance, in order to access the ``test`` table on
a Podop socket at ``/tmp/podop.socket``, use the following setup:

```
mail_attribute_dict = proxy:/tmp/podop.socket:test
```

Multiple maps or identical maps can be configured for various usages.

```
mail_attribute_dict = proxy:/tmp/podop.socket:meta

passdb {
  driver = dict
  args = /etc/dovecot/auth.conf
}

userdb {
  driver = dict
  args = /etc/dovecot/auth.conf
}

# then in auth.conf
uri = proxy:/tmp/podop.socket:auth
iterate_disable = yes
default_pass_scheme = plain
password_key = passdb/%u
user_key = userdb/%u
```

Contributing
============

Podop is free software, open to suggestions and contributions. All
components are free software and compatible with the MIT license. All
the code is placed under the MIT license.
@@ -0,0 +1,46 @@
""" Podop is a *Po*stfix and *Do*vecot proxy

It is able to proxify postfix maps and dovecot dicts to any table
"""

import asyncio
import logging
import sys

from podop import postfix, dovecot, table


SERVER_TYPES = dict(
    postfix=postfix.SocketmapProtocol,
    dovecot=dovecot.DictProtocol
)

TABLE_TYPES = dict(
    url=table.UrlTable
)


def run_server(verbosity, server_type, socket, tables):
    """ Run the server, given its type, socket path and table list

    The table list must be a list of tuples (name, type, param)
    """
    # Prepare the maps
    table_map = {
        name: TABLE_TYPES[table_type](param)
        for name, table_type, param in tables
    }
    # Run the main loop
    logging.basicConfig(stream=sys.stderr, level=max(3 - verbosity, 0) * 10,
                        format='%(name)s (%(levelname)s): %(message)s')
    loop = asyncio.get_event_loop()
    server = loop.run_until_complete(loop.create_unix_server(
        SERVER_TYPES[server_type].factory(table_map), socket
    ))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    server.close()
    loop.run_until_complete(server.wait_closed())
    loop.close()
@@ -0,0 +1,202 @@
""" Dovecot dict proxy implementation
"""

import asyncio
import logging
import json


class DictProtocol(asyncio.Protocol):
    """ Protocol to answer Dovecot dict requests, as implemented in Dict proxy.

    Only a subset of operations is handled properly by this proxy: hello,
    lookup and transaction-based set.

    There is very little documentation about the protocol, most of it was
    reverse-engineered from:

    https://github.com/dovecot/core/blob/master/src/dict/dict-connection.c
    https://github.com/dovecot/core/blob/master/src/dict/dict-commands.c
    https://github.com/dovecot/core/blob/master/src/lib-dict/dict-client.h
    """

    DATA_TYPES = {0: str, 1: int}

    def __init__(self, table_map):
        self.table_map = table_map
        # Minor and major versions are not properly checked yet, but stored
        # anyway
        self.major_version = None
        self.minor_version = None
        # Every connection starts with specifying which table is used, dovecot
        # tables are called dicts
        self.dict = None
        # Dictionary of active transaction lists per transaction id
        self.transactions = {}
        # Dictionary of user per transaction id
        self.transactions_user = {}
        super(DictProtocol, self).__init__()

    def connection_made(self, transport):
        logging.info('Connect {}'.format(transport.get_extra_info('peername')))
        self.transport = transport
        self.transport_lock = asyncio.Lock()

    def data_received(self, data):
        logging.debug("Received {}".format(data))
        results = []
        # Every command is separated by "\n"
        for line in data.split(b"\n"):
            # A command must at least have a type and one argument
            if len(line) < 2:
                continue
            # The command function will handle the command itself
            command = DictProtocol.COMMANDS.get(line[0])
            if command is None:
                logging.warning('Unknown command {}'.format(line[0]))
                return self.transport.abort()
            # Args are separated by "\t"
            args = line[1:].strip().split(b"\t")
            try:
                future = command(self, *args)
                if future:
                    results.append(future)
            except Exception:
                logging.exception("Error when processing request")
                return self.transport.abort()
        # For asyncio consistency, wait for all results to fire before
        # actually returning control
        return asyncio.gather(*results)

    def process_hello(self, major, minor, value_type, user, dict_name):
        """ Process a dict protocol hello message
        """
        self.major, self.minor = int(major), int(minor)
        self.value_type = DictProtocol.DATA_TYPES[int(value_type)]
        self.user = user.decode("utf8")
        self.dict = self.table_map[dict_name.decode("ascii")]
        logging.debug("Client {}.{} type {}, user {}, dict {}".format(
            self.major, self.minor, self.value_type, self.user, dict_name))

    async def process_lookup(self, key, user=None, is_iter=False):
        """ Process a dict lookup message
        """
        logging.debug("Looking up {} for {}".format(key, user))
        orig_key = key
        # Priv and shared keys are handled slightly differently
        key_type, key = key.decode("utf8").split("/", 1)
        try:
            result = await self.dict.get(
                key, ns=((user.decode("utf8") if user else self.user) if key_type == "priv" else None)
            )
            if type(result) is str:
                response = result.encode("utf8")
            elif type(result) is bytes:
                response = result
            else:
                response = json.dumps(result).encode("ascii")
            return await (self.reply(b"O", orig_key, response) if is_iter else self.reply(b"O", response))
        except KeyError:
            return await self.reply(b"N")

    async def process_iterate(self, flags, max_rows, path, user=None):
        """ Process an iterate command
        """
        logging.debug("Iterate flags {} max_rows {} on {} for {}".format(flags, max_rows, path, user))
        # Priv and shared keys are handled slightly differently
        key_type, key = path.decode("utf8").split("/", 1)
        max_rows = int(max_rows.decode("utf-8"))
        flags = int(flags.decode("utf-8"))
        if flags != 0:  # not implemented
            return await self.reply(b"F")
        rows = []
        try:
            result = await self.dict.iter(key)
            logging.debug("Found {} entries: {}".format(len(result), result))
            for i, k in enumerate(result):
                if max_rows > 0 and i >= max_rows:
                    break
                rows.append(self.process_lookup((path.decode("utf8") + k).encode("utf8"), user, is_iter=True))
            await asyncio.gather(*rows)
            async with self.transport_lock:
                self.transport.write(b"\n")  # ITER_FINISHED
            return
        except KeyError:
            return await self.reply(b"F")
        except Exception as e:
            for task in rows:
                task.cancel()
            raise e

    def process_begin(self, transaction_id, user=None):
        """ Process a dict begin message
        """
        self.transactions[transaction_id] = {}
        self.transactions_user[transaction_id] = user.decode("utf8") if user else self.user

    def process_set(self, transaction_id, key, value):
        """ Process a dict set message
        """
        # Nothing is actually set until everything is committed
        self.transactions[transaction_id][key] = value

    async def process_commit(self, transaction_id):
        """ Process a dict commit message
        """
        # Actually handle all set operations from the transaction store
        results = []
        for key, value in self.transactions[transaction_id].items():
            logging.debug("Storing {}={}".format(key, value))
            key_type, key = key.decode("utf8").split("/", 1)
            result = await self.dict.set(
                key, json.loads(value),
                ns=(self.transactions_user[transaction_id] if key_type == "priv" else None)
            )
        # Remove stored transaction
        del self.transactions[transaction_id]
        del self.transactions_user[transaction_id]
        return await self.reply(b"O", transaction_id)

    async def reply(self, command, *args):
        async with self.transport_lock:
            logging.debug("Replying {} with {}".format(command, args))
            self.transport.write(command)
            self.transport.write(b"\t".join(map(tabescape, args)))
            self.transport.write(b"\n")

    @classmethod
    def factory(cls, table_map):
        """ Provide a protocol factory for a given map instance.
        """
        return lambda: cls(table_map)

    COMMANDS = {
        ord("H"): process_hello,
        ord("L"): process_lookup,
        ord("I"): process_iterate,
        ord("B"): process_begin,
        ord("C"): process_commit,
        ord("S"): process_set
    }


def tabescape(unescaped):
    """ Escape a string using the specific Dovecot tabescape
    See: https://github.com/dovecot/core/blob/master/src/lib/strescape.c
    """
    return unescaped.replace(b"\x01", b"\x011")\
                    .replace(b"\x00", b"\x010")\
                    .replace(b"\t", b"\x01t")\
                    .replace(b"\n", b"\x01n")\
                    .replace(b"\r", b"\x01r")


def tabunescape(escaped):
    """ Unescape a string using the specific Dovecot tabescape
    See: https://github.com/dovecot/core/blob/master/src/lib/strescape.c
    """
    return escaped.replace(b"\x01r", b"\r")\
                  .replace(b"\x01n", b"\n")\
                  .replace(b"\x01t", b"\t")\
                  .replace(b"\x010", b"\x00")\
                  .replace(b"\x011", b"\x01")
@@ -0,0 +1,116 @@
""" Postfix map proxy implementation
"""

import asyncio
import logging


class NetstringProtocol(asyncio.Protocol):
    """ Netstring asyncio protocol implementation.

    For protocol details, see https://cr.yp.to/proto/netstrings.txt
    """

    # Length of the smallest allocated buffer, larger buffers will be
    # allocated dynamically
    BASE_BUFFER = 1024

    # Maximum length of a buffer, will crash when exceeded
    MAX_BUFFER = 65535

    def __init__(self):
        super(NetstringProtocol, self).__init__()
        self.init_buffer()

    def init_buffer(self):
        self.len = None      # None when waiting for a length to be sent
        self.separator = -1  # -1 when not yet detected (str.find)
        self.index = 0       # relative to the buffer
        self.buffer = bytearray(NetstringProtocol.BASE_BUFFER)

    def data_received(self, data):
        # Manage the buffer
        missing = len(data) - len(self.buffer) + self.index
        if missing > 0:
            if len(self.buffer) + missing > NetstringProtocol.MAX_BUFFER:
                raise IOError("Not enough space when decoding netstring")
            self.buffer.extend(bytearray(missing + 1))
        new_index = self.index + len(data)
        self.buffer[self.index:new_index] = data
        self.index = new_index
        # Try to detect a length at the beginning of the string
        if self.len is None:
            self.separator = self.buffer.find(0x3a)
            if self.separator != -1 and self.buffer[:self.separator].isdigit():
                self.len = int(self.buffer[:self.separator], 10)
        # Then get the complete string
        if self.len is not None:
            if self.index - self.separator == self.len + 2:
                string = self.buffer[self.separator + 1:self.index - 1]
                self.init_buffer()
                self.string_received(string)

    def string_received(self, string):
        """ A new netstring was received
        """
        pass

    def send_string(self, string):
        """ Send a netstring
        """
        logging.debug("Replying {}".format(string))
        self.transport.write(str(len(string)).encode('ascii'))
        self.transport.write(b':')
        self.transport.write(string)
        self.transport.write(b',')


class SocketmapProtocol(NetstringProtocol):
    """ Protocol to answer Postfix socketmap and proxify lookups to
    an outside object.

    See http://www.postfix.org/socketmap_table.5.html for details on the
    protocol.

    A table map must be provided as a dictionary to lookup tables.
    """

    def __init__(self, table_map):
        self.table_map = table_map
        super(SocketmapProtocol, self).__init__()

    def connection_made(self, transport):
        logging.info('Connect {}'.format(transport.get_extra_info('peername')))
        self.transport = transport

    def string_received(self, string):
        # The postfix format contains a space for separating the map name and
        # the key
        logging.debug("Received {}".format(string))
        space = string.find(0x20)
        if space != -1:
            name = string[:space].decode('ascii')
            key = string[space+1:].decode('utf8')
            return asyncio.ensure_future(self.process_request(name, key))

    async def process_request(self, name, key):
        """ Process a request by querying the provided map.
        """
        logging.debug("Request {}/{}".format(name, key))
        try:
            table = self.table_map[name]
        except KeyError:
            return self.send_string(b'TEMP no such map')
        try:
            result = await table.get(key)
            return self.send_string(b'OK ' + str(result).encode('utf8'))
        except KeyError:
            return self.send_string(b'NOTFOUND ')
        except Exception:
            logging.exception("Error when processing request")
            return self.send_string(b'TEMP unknown error')

    @classmethod
    def factory(cls, table_map):
        """ Provide a protocol factory for a given map instance.
        """
        return lambda: cls(table_map)
@@ -0,0 +1,55 @@
""" Table lookup backends for podop
"""

import aiohttp
import logging

from urllib.parse import quote


class UrlTable(object):
    """ Resolve an entry by querying a parametrized GET URL.
    """

    def __init__(self, url_pattern):
        """ url_pattern must contain a format ``{}`` so the key is injected in
        the url before the query, the ``§`` character will be replaced with
        ``{}`` for easier setup.
        """
        self.url_pattern = url_pattern.replace('§', '{}')

    async def get(self, key, ns=None):
        """ Get the given key in the provided namespace
        """
        logging.debug("Table get {}".format(key))
        if ns is not None:
            key += "/" + ns
        async with aiohttp.ClientSession() as session:
            quoted_key = quote(key)
            async with session.get(self.url_pattern.format(quoted_key)) as request:
                if request.status == 200:
                    result = await request.json()
                    logging.debug("Table get {} is {}".format(key, result))
                    return result
                elif request.status == 404:
                    raise KeyError()
                else:
                    raise Exception(request.status)

    async def set(self, key, value, ns=None):
        """ Set a value for the given key in the provided namespace
        """
        logging.debug("Table set {} to {}".format(key, value))
        if ns is not None:
            key += "/" + ns
        async with aiohttp.ClientSession() as session:
            quoted_key = quote(key)
            await session.post(self.url_pattern.format(quoted_key), json=value)

    async def iter(self, cat):
        """ Iterate the given key (experimental)
        """
        logging.debug("Table iter {}".format(cat))
        async with aiohttp.ClientSession() as session:
            async with session.get(self.url_pattern.format(cat)) as request:
                if request.status == 200:
                    result = await request.json()
                    return result
@@ -0,0 +1,33 @@
#!/usr/bin/env python

import argparse

from podop import run_server, SERVER_TYPES, TABLE_TYPES


def main():
    """ Run a podop server based on CLI arguments
    """
    parser = argparse.ArgumentParser("Postfix and Dovecot proxy")
    parser.add_argument("--socket", required=True,
                        help="path to the listening unix socket")
    parser.add_argument("--mode", choices=SERVER_TYPES.keys(), required=True,
                        help="select which server will connect to Podop")
    parser.add_argument("--name", action="append",
                        help="name of each configured table")
    parser.add_argument("--type", choices=TABLE_TYPES.keys(), action="append",
                        help="type of each configured table")
    parser.add_argument("--param", action="append",
                        help="mandatory param for each table configured")
    parser.add_argument("-v", "--verbose", dest="verbosity",
                        action="count", default=0,
                        help="increases log verbosity for each occurrence.")
    args = parser.parse_args()
    run_server(
        args.verbosity, args.mode, args.socket,
        zip(args.name, args.type, args.param) if args.name else []
    )


if __name__ == "__main__":
    main()
@@ -0,0 +1,23 @@
#!/usr/bin/env python

from setuptools import setup

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="podop",
    version="0.2.5",
    description="Postfix and Dovecot proxy",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Pierre Jaury",
    author_email="pierre@jaury.eu",
    url="https://github.com/mailu/podop.git",
    packages=["podop"],
    include_package_data=True,
    scripts=["scripts/podop"],
    install_requires=[
        "aiohttp"
    ]
)
@@ -0,0 +1,22 @@
.DS_Store
.idea
tmp

*.bak
*~
.*.swp

__pycache__/
*.pyc
*.pyo
*.egg-info/

.build
.env*
.venv

*.code-workspace

venv/
build/
dist/
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2019 Mailu

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,2 @@
include README.md
include LICENSE.md
@@ -0,0 +1,24 @@
Socrate is a simple Python module providing a set of utility functions for
Python daemon applications.

The scope includes:
- configuration utilities (configuration parsing, etc.)
- system utilities (access to DNS, stats, etc.)

Setup
=====

Socrate is available on PyPI, simply run:

```
pip install socrate
```

Contributing
============

Socrate is free software, open to suggestions and contributions. All
components are free software and compatible with the MIT license. All
the code is placed under the MIT license.
@@ -0,0 +1,24 @@
#!/usr/bin/env python

import setuptools
from distutils.core import setup

with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name="socrate",
    version="0.2.0",
    description="Socrate daemon utilities",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Pierre Jaury",
    author_email="pierre@jaury.eu",
    url="https://github.com/mailu/socrate.git",
    packages=["socrate"],
    include_package_data=True,
    install_requires=[
        "jinja2",
        "tenacity"
    ]
)
Some files were not shown because too many files have changed in this diff.