Compare commits

..

199 Commits

Author SHA1 Message Date
7aed24bfff Merge branch 'contabo' 2026-04-03 16:16:01 -05:00
2ebe0484a4 create topic, subscription and queues per Domain 2026-04-03 16:15:20 -05:00
61fce745af moving certs 2026-03-24 20:23:34 -05:00
b732cebd94 updated spam corrections 2026-03-20 10:33:33 -05:00
36c122bf53 new spam config 2026-03-19 18:18:42 -05:00
6e2a061cf3 add ip 2026-03-15 15:07:01 -05:00
688d49e218 remove python worker 2026-03-13 20:12:52 -05:00
6016fbe13d remove version 2026-03-13 20:11:35 -05:00
369be75066 fix 2026-03-13 17:52:54 -05:00
2192f146ea remove SRV Records 2026-03-13 17:03:17 -05:00
a11ed8c526 fix 2026-03-12 19:27:07 -05:00
4caa51991f update for mobile 2026-03-12 19:11:36 -05:00
386be31671 update autodiscover 2026-03-12 18:56:18 -05:00
bd8efc867a whitelist feature 2026-03-11 19:47:37 -05:00
d331bd13b5 no buffer 2026-03-11 19:38:02 -05:00
610b01eee7 whitelist helper 2026-03-11 19:26:32 -05:00
c2d4903bc9 ENABLE_FAIL2BAN 0 2026-03-11 09:38:00 -05:00
5e4859a5c4 changes from main branch 2026-03-08 16:32:16 -05:00
613aa30493 logs 2026-03-08 16:15:41 -05:00
29f360ece8 logger console + file 2026-03-08 16:09:30 -05:00
62221e8121 fix 2026-03-08 14:54:33 -05:00
74c4f5801e Prometheus, Grafana, blackbox_exporter 2026-03-08 14:50:43 -05:00
285ffffb3a add missing import 2026-03-07 17:08:50 -06:00
90b120957d add missing import 2026-03-07 17:07:50 -06:00
99ab2a07d8 send mail even if if parsing fails 2026-03-07 17:06:03 -06:00
cd44449067 send mail even in case of parsing error ... 2026-03-07 17:04:50 -06:00
757855866c printstats 2026-03-07 16:44:53 -06:00
d9a91c13ed printstats 2026-03-07 16:41:51 -06:00
12af8577f3 changes 2026-03-07 15:47:14 -06:00
1d53f2d357 pino 2026-03-07 15:34:15 -06:00
9586869c0c neue Ports 2026-03-07 15:26:56 -06:00
d1426afec5 new structure 2026-03-07 15:16:14 -06:00
3ab46f163a ipadresses 2026-03-07 15:05:44 -06:00
56c7b51e35 changed blocked sender list 2026-03-07 15:01:56 -06:00
c826d4c299 move and imports changed 2026-03-07 14:59:41 -06:00
908bb76c3a ip address change 2026-03-07 12:05:06 -06:00
41514a7f51 Merge branch 'contabo' of git.bizmatch.net:aknuth/email-amazon into contabo 2026-03-07 12:02:34 -06:00
4324a5785f Merge branch 'main' into contabo 2026-03-07 12:02:22 -06:00
a70ae78a93 Patch for blocklist 2026-03-07 11:56:54 -06:00
6db032bd4c fail2ban, ignoreip 2026-03-06 16:55:13 -06:00
206ef9b20c executable 2026-03-05 15:34:56 -06:00
f1b2c33996 ENABLE_FAIL2BAN=1 2026-03-05 14:28:39 -06:00
726df19a76 ignoreip for fail2ban 2026-03-05 11:03:32 -06:00
Andreas Knuth
f6601501c0 disabled fail2ban 2026-03-05 17:01:48 +00:00
22d937ddfd imapsync 2026-03-02 18:07:16 -06:00
c56cae16d6 sdfsdf 2026-03-02 16:58:26 -06:00
a090e940f1 sdfsdf 2026-03-02 16:55:47 -06:00
282298c361 change 2026-03-02 16:55:02 -06:00
d91152c035 autodiscover entfernt 2026-03-02 16:49:31 -06:00
80596ab347 autodiscover 2026-03-02 16:48:55 -06:00
7173da31d4 fix 2026-03-02 15:43:54 -06:00
8995cede7d flags SKIP_CLIENT_DNS and SKIP_DMARC 2026-03-02 15:40:55 -06:00
a077b38998 outlook adoptions 2026-02-25 16:43:12 -06:00
73dd442596 sdfsdf 2026-02-23 10:30:16 -06:00
7920ab07b8 gfhgfh 2026-02-23 10:00:36 -06:00
98c78d8dce dfgdfg 2026-02-23 09:51:46 -06:00
3381fd68c2 sdfsdf 2026-02-22 22:30:33 -06:00
3f91936098 dfgdfg 2026-02-22 22:26:49 -06:00
ee02d505c6 sdfsdf 2026-02-22 22:22:28 -06:00
eea0fcc35d sdfsdf 2026-02-22 22:16:22 -06:00
7bc8cbb9f7 sdfsdf 2026-02-22 21:58:27 -06:00
69fbb670f1 move 2026-02-22 21:51:09 -06:00
39e862cdd5 dfgdfg 2026-02-22 21:47:03 -06:00
b2d41e2baa sdfsd 2026-02-22 21:44:13 -06:00
552dd73f0a sdfsd 2026-02-22 21:28:07 -06:00
51405a3ec5 sdfsdf 2026-02-22 21:18:06 -06:00
bd3b2db235 sdfsdf 2026-02-22 20:59:52 -06:00
bbc24cbb63 sdfsd 2026-02-22 20:57:58 -06:00
06e25b33e0 asdasd 2026-02-22 18:33:37 -06:00
a5a7096cc7 sdfsdf 2026-02-22 18:30:04 -06:00
c20d471036 removed 2026-02-22 18:20:35 -06:00
0b0b7ddb82 dfgdfg 2026-02-22 18:06:23 -06:00
42d16063a1 sdfdsf 2026-02-22 18:03:29 -06:00
bf96810d09 sdfsdf 2026-02-22 18:00:42 -06:00
4452dae34c dfgdfg 2026-02-22 17:58:08 -06:00
b1a295df85 sdfsdf 2026-02-22 16:06:54 -06:00
7956d2d6f5 dgdfg 2026-02-22 16:06:30 -06:00
915b0e59be sdfsdf 2026-02-22 16:00:09 -06:00
b90c8aec9e dfgdfg 2026-02-22 15:51:33 -06:00
dd41497f0b asdasd 2026-02-22 15:45:28 -06:00
8f0a899b66 sdsdf 2026-02-22 15:41:21 -06:00
4ac32f43d0 xvcxv 2026-02-22 15:39:02 -06:00
a1c7fecc27 sdf 2026-02-22 15:31:33 -06:00
173b3f382f dfgdf 2026-02-22 15:24:42 -06:00
a84bb23af0 update 2026-02-22 15:10:20 -06:00
3e656dacfa update 2026-02-22 14:55:00 -06:00
ce26d864b5 wildcard instead of * 2026-02-22 14:28:52 -06:00
f9723b2b68 mail-certs 2026-02-22 13:36:01 -06:00
956214f8c9 mail_network 2026-02-22 13:30:38 -06:00
aee2335c48 import mail_certs 2026-02-22 13:19:58 -06:00
8808d81113 update 2026-02-22 13:00:24 -06:00
ee19b5b659 changes 2026-02-22 12:58:24 -06:00
b072083318 caddy 2026-02-22 12:19:34 -06:00
b321e6d2ec BugFixes 2026-02-12 17:48:06 -06:00
16469de068 new node.js impl., removed old stuff 2026-02-12 17:03:00 -06:00
4343aefb76 lifecycle-configuration 14 days 2026-02-12 15:42:30 -06:00
68f00e3873 log output 2026-02-12 10:11:14 -06:00
e0555181a1 backup mails 2026-02-12 10:03:32 -06:00
b3fd560ee1 backup emails 2026-02-12 10:00:36 -06:00
9bb327eada cleanup 2026-02-11 18:33:44 -06:00
67c2440f4a change home folder for dovecot 2026-02-11 17:47:44 -06:00
94ec589a32 fix2 2026-02-10 18:50:19 -06:00
ec6bb989f2 Fix 2026-02-10 18:41:55 -06:00
63d12f8c7c requeue 2026-02-10 18:35:45 -06:00
663faa6a08 better logging 2026-02-10 18:22:31 -06:00
494bfd6a10 lowercase 2026-02-10 17:51:41 -06:00
665c1e611a runnable 2026-02-10 17:23:16 -06:00
9b8217cbd8 changes 2026-02-10 17:17:43 -06:00
07e2449d04 cloudflare next 2026-02-10 14:26:02 -06:00
3bd1ed14cf Forward-Rule mit smtp_override → Mail geht nur zum alten Provider, keine DMS-Delivery
Forward-Rule ohne smtp_override → normaler Forward + DMS-Delivery (bestehendes Verhalten)
Keine Rule → nur DMS-Delivery (bestehendes Verhalten)
2026-02-10 11:57:10 -06:00
994cf9055c FROM_ADDR changed 2026-02-09 18:10:33 -06:00
3849e3fc2d fix for logger 2026-02-09 15:59:29 -06:00
88d526aa00 log rotate 2026-02-09 15:47:24 -06:00
d9b6399dc7 forward raw email 2026-02-09 14:04:00 -06:00
a593db160b DMS_CONTAINER="mailserver" 2026-02-09 13:33:02 -06:00
c8bb77886a message in english 2026-02-09 13:25:35 -06:00
b3f84e91a8 executable 2026-02-09 13:14:51 -06:00
6bfe33d3af Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-02-09 13:13:39 -06:00
8c3db9db95 new scripts & new mail from 2026-02-09 13:13:30 -06:00
6fccc0b2f9 Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-02-09 10:58:31 -06:00
1d66485068 new method _send_via_legacy_smtp 2026-02-09 10:58:03 -06:00
38fcf8c4d8 script 2026-01-30 17:12:26 -06:00
2026e6afcd automated whitelist 2026-01-30 16:52:26 -06:00
fd3c9bedda whitelist based on Domains 2026-01-30 16:39:57 -06:00
d74eb93763 ip instead of radix 2026-01-30 16:22:09 -06:00
c3d992a479 moved 2026-01-30 16:14:29 -06:00
aaec33365e type = "radix"; 2026-01-30 15:43:29 -06:00
8d8b227f6b next fix 2026-01-30 15:33:28 -06:00
7fb7f33e72 try next fix 2026-01-30 14:18:01 -06:00
4d22969238 IP Whitelisting 2026-01-30 09:56:02 -06:00
81c62446f5 whitelisting 2026-01-29 21:23:59 -06:00
1cb469b49d move location 2026-01-29 21:10:56 -06:00
51c5cf673c rspamd pw 2026-01-29 20:59:57 -06:00
4687b8e582 RSPAMD Port only localhost 2026-01-29 17:16:07 -06:00
8dfb5d2aa6 Spam first version 2026-01-29 17:15:05 -06:00
f5c479feb4 Erweiterung fuer iPhones 2026-01-28 14:20:56 -06:00
041b58d4ae add region 2026-01-28 13:43:18 -06:00
98b9306290 autodiscover 2026-01-28 13:31:39 -06:00
96a1815b6a wait for dovecot 2026-01-27 21:21:43 -06:00
1d5e24f541 neuer cron 2026-01-27 21:05:16 -06:00
90e294de82 lang=en_US 2026-01-27 17:28:37 -06:00
e183d2ea2c Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-01-27 17:26:50 -06:00
b7b7142914 add certificate mounting from caddy volume 2026-01-27 17:26:39 -06:00
af4def0874 LANGUAGE=en_US 2026-01-25 21:18:22 -06:00
5acce1a75b wrong tab 2026-01-25 17:54:27 -06:00
406cce6270 _create_forward_message fixed for multipart messages 2026-01-25 17:23:08 -06:00
046111e267 set always header X-SES-Worker-Processed 2026-01-25 16:45:28 -06:00
5108ad5a7d catch exception 2026-01-25 15:36:56 -06:00
cf9bd4c9fb DISABLE_MANAGESIEVE 2026-01-25 14:43:08 -06:00
247d66ef8f removed version 2026-01-25 14:00:36 -06:00
d574f85efb container rename 2026-01-25 13:59:43 -06:00
db5d9e618f just python support 2026-01-25 13:58:41 -06:00
472ac97c56 add cron job 2026-01-25 13:43:26 -06:00
6e83c9e5ad INTERNAL_SMTP_PORT=25 2026-01-25 13:22:54 -06:00
2d9aba7e04 moved 2026-01-25 13:20:58 -06:00
3884abc695 sdfsdf 2026-01-24 16:56:37 -06:00
f9e866d948 dfgdfg 2026-01-24 16:54:33 -06:00
404168143a ASas 2026-01-24 16:49:54 -06:00
30e928c6e6 sdfdsf 2026-01-24 16:46:49 -06:00
adad46ce7d html content 2026-01-24 16:37:46 -06:00
424175fe72 dfgdfg 2026-01-24 16:32:12 -06:00
d42f59533e sdfsdf 2026-01-24 16:27:15 -06:00
92d3a1a858 asdasd 2026-01-24 16:24:11 -06:00
56c83ec182 sdfsdf 2026-01-24 16:20:00 -06:00
d516979b12 symlink 2026-01-24 16:15:58 -06:00
e7eb1eedaa sdfsd 2026-01-24 15:12:34 -06:00
c522c4fa73 activate sieve path 2026-01-24 15:09:07 -06:00
8e187985ea check for inbox folder 2026-01-24 14:56:00 -06:00
d1677a656c sieve generation 2026-01-24 14:51:52 -06:00
182598c402 enable managesieve tmp. 2026-01-24 14:03:26 -06:00
7e5ceae907 updates 2026-01-23 16:41:44 -06:00
8a04151bd2 changed dir 2026-01-23 13:13:31 -06:00
df37f59ff9 modular worker 2026-01-23 13:03:31 -06:00
e5188a064c moved 2026-01-18 18:12:09 -06:00
a616848787 no whitespace 2026-01-17 16:54:07 -06:00
054f894e7d too restrictive 2026-01-17 16:32:16 -06:00
4b08d0d40e roundcude Port 8888 2026-01-17 16:21:42 -06:00
44b792f8d3 port 8080 fuer roundcube 2026-01-17 16:15:26 -06:00
a8fb73c00c --break-system-packages 2026-01-17 15:48:00 -06:00
8ad8cdf1d4 Certificates 2026-01-17 15:39:02 -06:00
2c8963f638 AWS_ACCESS & custom image 2026-01-17 14:59:48 -06:00
049ed91d3d rename 2026-01-17 14:47:03 -06:00
826d4eab7b dummy content removed 2026-01-17 14:36:01 -06:00
23506403e6 new roundcube plugin 2026-01-17 14:35:26 -06:00
f7fe285200 cleanup 2026-01-16 22:16:09 -06:00
5122082914 updates 2026-01-16 21:53:34 -06:00
deed33c0cf forward/reply solution for internal mails 2026-01-16 17:55:54 -06:00
7f9ac1c9e6 avoid loops 2026-01-13 21:47:54 -06:00
397a2f7d98 Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-01-12 17:19:54 -06:00
be9c5b4ceb cleanwork 2026-01-12 17:19:44 -06:00
85d5eface6 new logging ... 2026-01-11 21:14:49 -06:00
87e00ae867 ConnectionResetError Spam, Waiting for messages all 300sec 2026-01-11 18:12:31 -06:00
7dfad647e9 sdfsdf 2026-01-10 18:42:31 -06:00
afe33ef381 logging fix 2026-01-10 18:28:10 -06:00
c27e4dff80 guard 2026-01-10 18:23:55 -06:00
f95461ad75 hot reload & better pooling 2026-01-10 18:03:08 -06:00
06195b9a60 reworked 2026-01-10 17:17:00 -06:00
f372082512 Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-01-10 16:48:36 -06:00
990218ba95 cleanup + unified worker 2026-01-10 16:48:05 -06:00
90 changed files with 8426 additions and 2304 deletions

3
.gitignore vendored
View File

@@ -1 +1,2 @@
.env .env
node_modules

3
DMS/.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
mail-data
mail-logs
mail-state

31
DMS/Dockerfile Normal file
View File

@@ -0,0 +1,31 @@
FROM docker.io/mailserver/docker-mailserver:latest
LABEL maintainer="andreas.knuth@bayarea-cc.com"
LABEL description="Custom DMS with Python3 support and Sieve Sync"
# 1. Install Python, pip and the script dependencies (boto3 for AWS, croniter for cron parsing)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3 \
python3-pip \
&& pip3 install --break-system-packages --no-cache-dir boto3 croniter \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# 2. Create the working directory for helper scripts
WORKDIR /scripts
# 3. Copy the DynamoDB-to-Sieve sync script
COPY sync_dynamodb_to_sieve.py /scripts/sync.py
RUN chmod +x /scripts/sync.py
# 4. Copy the schedule configuration
COPY sieve-schedule /etc/sieve-schedule
# 5. Copy the supervisor configuration for the sync job
COPY sieve-supervisor.conf /etc/supervisor/conf.d/sieve-sync.conf
# 6. Copy the dynamic whitelist script and its supervisor config
COPY dynamic_whitelist.py /scripts/dynamic_whitelist.py
RUN chmod +x /scripts/dynamic_whitelist.py
COPY whitelist-supervisor.conf /etc/supervisor/conf.d/dynamic-whitelist.conf

View File

@@ -1,18 +1,26 @@
services: services:
mailserver: mailserver:
image: docker.io/mailserver/docker-mailserver:latest build:
container_name: mailserver-new context: .
hostname: mail.email-srvr.com dockerfile: Dockerfile
domainname: email-srvr.com image: dms-custom:latest
container_name: mailserver
# Node-spezifischer Hostname - A-Record zeigt auf DIESEN Server.
# email-srvr.com selbst zeigt auf einen anderen Server und wird hier NICHT verwendet.
hostname: node1.email-srvr.com
ports: ports:
- "25:25" # SMTP (parallel zu MailCow auf Port 25) - "25:25"
- "587:587" # SMTP Submission - "587:587"
- "465:465" # SMTP SSL - "465:465"
- "143:143" # IMAP - "143:143"
- "993:993" # IMAP SSL - "993:993"
- "110:110" # POP3 - "110:110"
- "995:995" # POP3 SSL - "995:995"
- "127.0.0.1:11334:11334"
volumes: volumes:
- ./docker-data/dms/mail-data/:/var/mail/ - ./docker-data/dms/mail-data/:/var/mail/
- ./docker-data/dms/mail-state/:/var/mail-state/ - ./docker-data/dms/mail-state/:/var/mail-state/
@@ -20,45 +28,90 @@ services:
- ./docker-data/dms/config/:/tmp/docker-mailserver/ - ./docker-data/dms/config/:/tmp/docker-mailserver/
- ./docker-data/dms/config/dovecot/conf.d/95-sieve-redirect.conf:/etc/dovecot/conf.d/95-sieve-redirect.conf:ro - ./docker-data/dms/config/dovecot/conf.d/95-sieve-redirect.conf:/etc/dovecot/conf.d/95-sieve-redirect.conf:ro
- /etc/localtime:/etc/localtime:ro - /etc/localtime:/etc/localtime:ro
- ./sync_dynamodb_to_sieve.py:/scripts/sync.py:ro
- ./sieve-cron:/etc/cron.d/sieve-sync:ro
# -------------------------------------------------------
# Caddy Zertifikate: gesamtes Cert-Verzeichnis mounten.
#
# Caddy legt Wildcard-Certs so ab:
# *.andreasknuth.de/
# *.andreasknuth.de.crt
# *.andreasknuth.de.key
# node1.email-srvr.com/
# node1.email-srvr.com.crt
# node1.email-srvr.com.key
#
# setup-dms-tls.sh referenziert per:
# /etc/mail/certs/*.domain/*.domain.crt|.key
# -------------------------------------------------------
# - /var/lib/docker/volumes/caddy_data/_data/caddy/certificates/acme-v02.api.letsencrypt.org-directory:/etc/mail/certs:ro
- /home/aknuth/git/email-amazon/caddy/caddy-data/caddy/certificates/acme-v02.api.letsencrypt.org-directory:/etc/mail/certs:ro
# -------------------------------------------------------
# Dovecot SNI Konfiguration (generiert von setup-dms-tls.sh)
# DMS lädt /tmp/docker-mailserver/dovecot-sni.cf automatisch.
# -------------------------------------------------------
- ./docker-data/dms/config/dovecot-sni.cf:/etc/dovecot/conf.d/99-sni.conf:ro
environment: environment:
# Wichtig: Rspamd und andere Services deaktivieren für ersten Test # -------------------------------------------------------
- ENABLE_RSPAMD=0 # SSL Default-Cert: node1.email-srvr.com
# Das ist das Fallback-Cert wenn kein SNI-Match gefunden wird
# (z.B. bei direktem IP-Connect ohne Hostname).
# Kundendomain-SNI wird über postfix-main.cf + dovecot-sni.cf gesteuert.
# -------------------------------------------------------
- SSL_TYPE=manual
- SSL_CERT_PATH=/etc/mail/certs/node1.email-srvr.com/node1.email-srvr.com.crt
- SSL_KEY_PATH=/etc/mail/certs/node1.email-srvr.com/node1.email-srvr.com.key
# SPAM / Rspamd
- ENABLE_OPENDKIM=1 - ENABLE_OPENDKIM=1
- ENABLE_OPENDMARC=0 - ENABLE_OPENDMARC=0
- ENABLE_POLICYD_SPF=0 - ENABLE_POLICYD_SPF=0
- ENABLE_AMAVIS=0 - ENABLE_RSPAMD=1
- ENABLE_SPAMASSASSIN=0 - ENABLE_POSTGREY=0
- RSPAMD_GREYLISTING=0 - RSPAMD_GREYLISTING=0
- RSPAMD_CHECK_AUTHENTICATED=0
- RSPAMD_HFILTER=1
- MOVE_SPAM_TO_JUNK=1
- ENABLE_AMAVIS=0
- ENABLE_SPAMASSASSIN=0
- ENABLE_POSTGREY=0
- ENABLE_CLAMAV=0 - ENABLE_CLAMAV=0
#- ENABLE_FAIL2BAN=1
- ENABLE_FAIL2BAN=0 # Sicherheit
- ENABLE_MANAGESIEVE=1 - ENABLE_FAIL2BAN=1
- ENABLE_UNBOUND=1
# Sonstige
- ENABLE_MANAGESIEVE=0
- ENABLE_POP3=1 - ENABLE_POP3=1
- RSPAMD_LEARN=1 - RSPAMD_LEARN=1
- MOVE_SPAM_TO_JUNK=1
- RSPAMD_CHECK_AUTHENTICATED=0
- RSPAMD_HFILTER=0
- ONE_DIR=1 - ONE_DIR=1
- ENABLE_UPDATE_CHECK=0 - ENABLE_UPDATE_CHECK=0
- PERMIT_DOCKER=network - PERMIT_DOCKER=network
# - PERMIT_DOCKER=empty - SPOOF_PROTECTION=0
- SSL_TYPE=manual - ENABLE_SRS=0
- SSL_CERT_PATH=/tmp/docker-mailserver/ssl/cert.pem - LOG_LEVEL=info
- SSL_KEY_PATH=/tmp/docker-mailserver/ssl/key.pem
# Amazon SES SMTP Relay # Amazon SES Relay
- RELAY_HOST=email-smtp.us-east-2.amazonaws.com - RELAY_HOST=email-smtp.us-east-2.amazonaws.com
- RELAY_PORT=587 - RELAY_PORT=587
- RELAY_USER=${SES_SMTP_USER} - RELAY_USER=${SES_SMTP_USER}
- RELAY_PASSWORD=${SES_SMTP_PASSWORD} - RELAY_PASSWORD=${SES_SMTP_PASSWORD}
# Weitere Einstellungen
- POSTFIX_OVERRIDE_HOSTNAME=email-srvr.com # AWS Credentials
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
- AWS_REGION=us-east-2
# Postfix
# POSTFIX_OVERRIDE_HOSTNAME: Was Postfix im EHLO/HELO Banner sendet.
# node1.email-srvr.com passt zum TLS-Cert und ist der echte Hostname.
- POSTFIX_OVERRIDE_HOSTNAME=node1.email-srvr.com
- POSTFIX_MYNETWORKS=172.16.0.0/12 172.17.0.0/12 172.18.0.0/12 [::1]/128 [fe80::]/64 - POSTFIX_MYNETWORKS=172.16.0.0/12 172.17.0.0/12 172.18.0.0/12 [::1]/128 [fe80::]/64
- POSTFIX_MAILBOX_SIZE_LIMIT=0 - POSTFIX_MAILBOX_SIZE_LIMIT=0
- POSTFIX_MESSAGE_SIZE_LIMIT=0 - POSTFIX_MESSAGE_SIZE_LIMIT=0
- SPOOF_PROTECTION=0
- ENABLE_SRS=0
# Debug-Einstellungen
- LOG_LEVEL=info
cap_add: cap_add:
- NET_ADMIN - NET_ADMIN
- SYS_PTRACE - SYS_PTRACE
@@ -66,38 +119,48 @@ services:
networks: networks:
mail_network: mail_network:
aliases: aliases:
- mail.email-srvr.com
- mailserver - mailserver
- node1.email-srvr.com
roundcube: roundcube:
image: roundcube/roundcubemail:latest image: roundcube/roundcubemail:latest
container_name: roundcube-new container_name: roundcube
depends_on: depends_on:
- roundcube-db - roundcube-db
- mailserver - mailserver
environment: environment:
- ROUNDCUBEMAIL_DEFAULT_LANGUAGE=en_US
- ROUNDCUBEMAIL_DB_TYPE=pgsql - ROUNDCUBEMAIL_DB_TYPE=pgsql
- ROUNDCUBEMAIL_DB_HOST=roundcube-db - ROUNDCUBEMAIL_DB_HOST=roundcube-db
- ROUNDCUBEMAIL_DB_NAME=roundcube - ROUNDCUBEMAIL_DB_NAME=roundcube
- ROUNDCUBEMAIL_DB_USER=roundcube - ROUNDCUBEMAIL_DB_USER=roundcube
- ROUNDCUBEMAIL_DB_PASSWORD=${ROUNDCUBE_DB_PASSWORD} - ROUNDCUBEMAIL_DB_PASSWORD=${ROUNDCUBE_DB_PASSWORD}
# Einfache Konfiguration ohne SSL-Probleme (für ersten Test) # Roundcube verbindet intern über den Docker-Alias
- ROUNDCUBEMAIL_DEFAULT_HOST=ssl://mail.email-srvr.com - ROUNDCUBEMAIL_DEFAULT_HOST=ssl://node1.email-srvr.com
- ROUNDCUBEMAIL_DEFAULT_PORT=993 - ROUNDCUBEMAIL_DEFAULT_PORT=993
- ROUNDCUBEMAIL_SMTP_SERVER=tls://mail.email-srvr.com # Interner Traffic ohne TLS
- ROUNDCUBEMAIL_SMTP_PORT=587 - ROUNDCUBEMAIL_SMTP_SERVER=ssl://node1.email-srvr.com
- ROUNDCUBEMAIL_PLUGINS=password,managesieve - ROUNDCUBEMAIL_SMTP_PORT=465
# WICHTIG: Variablen LEER lassen, damit Roundcube keine Authentifizierung versucht!
- ROUNDCUBEMAIL_SMTP_USER=
- ROUNDCUBEMAIL_SMTP_PASSWORD=
- ROUNDCUBEMAIL_PLUGINS=password,email_config
# NEU: Schaltet die strikte PHP-Zertifikatsprüfung für interne Verbindungen ab
- ROUNDCUBEMAIL_IMAP_CONN_OPTIONS={"ssl":{"verify_peer":false,"verify_peer_name":false}}
- ROUNDCUBEMAIL_SMTP_CONN_OPTIONS={"ssl":{"verify_peer":false,"verify_peer_name":false}}
ports:
- "8888:80"
volumes: volumes:
- ./docker-data/roundcube/config:/var/roundcube/config # - ./docker-data/roundcube/config:/var/www/html/config
# ENTFERNEN Sie diese Zeile: - ./docker-data/roundcube/plugins/email_config:/var/www/html/plugins/email_config:ro
# - ./roundcube-config.php:/var/www/html/config/config.inc.php:ro
networks: networks:
- mail_network - mail_network
restart: unless-stopped restart: unless-stopped
roundcube-db: roundcube-db:
image: postgres:15 image: postgres:15
container_name: roundcube-db-new container_name: roundcube-db
environment: environment:
- POSTGRES_DB=roundcube - POSTGRES_DB=roundcube
- POSTGRES_USER=roundcube - POSTGRES_USER=roundcube
@@ -112,4 +175,4 @@ services:
networks: networks:
mail_network: mail_network:
external: true external: true

View File

@@ -0,0 +1,3 @@
# Custom Dovecot tweaks for Outlook clients: allow more parallel IMAP
# connections per user/IP and enable common client workarounds.
mail_max_userip_connections = 50
imap_client_workarounds = delay-newmail tb-extra-mailbox-sep tb-lsub-flags

View File

@@ -0,0 +1,11 @@
[DEFAULT]
# Never ban: localhost, private Docker/RFC1918 networks and the Budd Electric office IP.
# NOTE(review): 24.155.193.233 appears twice in this list — the duplicate is
# harmless to fail2ban but should be removed.
ignoreip = 127.0.0.1/8 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 24.155.193.233 69.223.70.143 24.155.193.233
[dovecot]
# Raise the allowed number of failed attempts to 20
maxretry = 20
[postfix]
# Raise the allowed number of failed attempts to 20
maxretry = 20

View File

@@ -1,13 +0,0 @@
# persistente Overrides
smtp_host_lookup = dns
smtp_tls_security_level = encrypt
smtp_tls_note_starttls_offer = yes
# smtp_sasl_auth_enable = yes
# smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd
# smtp_sasl_security_options = noanonymous
# transport_maps = hash:/etc/postfix/transport
header_checks = pcre:/etc/postfix/header_checks
smtp_tls_loglevel = 1

View File

@@ -1,11 +0,0 @@
# X-SES-CONFIGURATION-SET für ausgehende Mails
/^Subject:/ PREPEND X-SES-CONFIGURATION-SET: relay-outbound
# === DEBUG SECTION - Logging für Weitergeleitete Mails ===
/^From:/ WARN Debugging: Original From Header
/^To:/ WARN Debugging: To Header
/^Return-Path:/ WARN Debugging: Return-Path
/^X-Forwarded/ WARN Debugging: Forwarding detected
# Entferne doppelte Delivered-To Headers bei Weiterleitungen
/^Delivered-To:/ IGNORE

View File

@@ -1 +0,0 @@
[email-smtp.us-east-2.amazonaws.com]:587 AKIAU6G......../ARbpotim1m...........

View File

@@ -1,22 +0,0 @@
# 1. EIGENE DOMAINS SCHÜTZEN (Whitelist)
# Wenn der Absender @bayarea-cc.com oder @email-srvr.com ist, tue NICHTS (DUNNO).
# Das Postfix bricht die Prüfung hier ab, die Mail bleibt original.
/.*@bayarea-cc\.com/ DUNNO
/.*@email-srvr\.com/ DUNNO
/.*@andreasknuth\.de/ DUNNO
# 2. FREMDE DOMAINS UMSCHREIBEN (Rewriting)
# Nur wenn wir hier ankommen (also keine eigene Domain), schreiben wir um.
# Ersetzt den Absender durch eine generische Adresse deiner Domain.
# Fall A: Mit Name -> "Name (original@email)" <relay@deine-domain>
/^From:(.*)\s+<(.*)>/ REPLACE From: "$1 ($2)" <ses@email-srvr.com>
# Fall B: Ohne Name -> "original@email" <relay@deine-domain>
/^From:\s*([^<>\s]+)$/ REPLACE From: "$1" <ses@email-srvr.com>
# 3. AUFRÄUMEN
# Return-Path im Header entfernen (verwirrt manche Clients, da SRS den Envelope regelt)
/^Return-Path:/ IGNORE
# Entferne Sieve-spezifische Headers bei Weiterleitungen
/^\s*Delivered-To:/ IGNORE

View File

@@ -1,10 +0,0 @@
outlook.com smtp:[email-smtp.us-east-2.amazonaws.com]:587
.outlook.com smtp:[email-smtp.us-east-2.amazonaws.com]:587
live.com smtp:[email-smtp.us-east-2.amazonaws.com]:587
.live.com smtp:[email-smtp.us-east-2.amazonaws.com]:587
msn.com smtp:[email-smtp.us-east-2.amazonaws.com]:587
.msn.com smtp:[email-smtp.us-east-2.amazonaws.com]:587
hotmail.com smtp:[email-smtp.us-east-2.amazonaws.com]:587
.hotmail.com smtp:[email-smtp.us-east-2.amazonaws.com]:587
iitwelders.com smtp:[email-smtp.us-east-2.amazonaws.com]:587
.iitwelderstp:[email-smtp.us-east-2.amazonaws.com]:587

View File

@@ -0,0 +1,8 @@
# Multimap rule: mark mail whose From: domain appears in the map file with the
# DOCKER_WHITELIST symbol at a strongly negative score (-50), effectively
# whitelisting it. The map file is generated at container start from the
# postfix accounts plus a static domain list — presumably by user-patches.sh;
# confirm the generator if the map path changes.
DOCKER_WL {
type = "from";
filter = "email:domain";
map = "/etc/rspamd/override.d/docker_whitelist.map";
symbol = "DOCKER_WHITELIST";
description = "Whitelist fuer eigene Domains";
score = -50.0;
}

View File

@@ -0,0 +1,4 @@
# Override rspamd's default action thresholds
reject = 500; # reject only from 500 points (effectively never happens)
add_header = 6; # from 6 points on: set the X-Spam header
greylist = 500; # threshold unreachably high -> greylisting effectively disabled

View File

@@ -0,0 +1,6 @@
# Force the final action to "no action" whenever the DOCKER_WHITELIST symbol
# fired, regardless of the total score — complements the multimap whitelist.
rules {
DOCKER_WHITELIST_FORCE {
action = "no action";
expression = "DOCKER_WHITELIST";
}
}

View File

@@ -0,0 +1 @@
enabled = false;

View File

@@ -0,0 +1 @@
password = "$2$eitni68mkfaaq957jx3bcx57sg1mmd9c$d9xyhjmmih7sjm3fpfu7r7tshhfm4ud93km65w5dkh151f5phiiy";

57
DMS/docker-data/dms/config/user-patches.sh Normal file → Executable file
View File

@@ -1,24 +1,47 @@
#!/bin/bash #!/bin/bash
set -euo pipefail # user-patches.sh laeuft bei jedem Start von DMS automatisch
CFG_ROOT="/tmp/docker-mailserver" ACCOUNTS_FILE="/tmp/docker-mailserver/postfix-accounts.cf"
SRC_DIR="$CFG_ROOT/postfix" WHITELIST_FILE="/etc/rspamd/override.d/docker_whitelist.map"
DST_DIR="/etc/postfix"
# Dateien nach /etc/postfix kopieren (oder aktualisieren) # --- Rspamd Whitelist generieren ---
# install -D -m 0644 "$SRC_DIR/transport" "$DST_DIR/transport" STATIC_DOMAINS=(
# install -D -m 0600 "$SRC_DIR/sasl_passwd" "$DST_DIR/sasl_passwd" "bayarea-cc.com"
install -D -m 0644 "$SRC_DIR/header_checks" "$DST_DIR/header_checks" "ruehrgedoens.de"
install -D -m 0644 "$SRC_DIR/smtp_header_checks" "$DST_DIR/maps/sender_header_filter.pcre" "annavillesda.org"
"bizmatch.net"
"biz-match.com"
"qrmaster.net"
"nqsltd.com"
"iitwelders.com"
)
# Maps bauen echo "Patching: Generiere Rspamd Whitelist aus Accounts + statischen Domains..."
# postmap "$DST_DIR/transport"
# postmap "$DST_DIR/sasl_passwd"
# Rechte auf die .db-Helferdatei {
# chmod 600 "$DST_DIR/sasl_passwd.db" || true for domain in "${STATIC_DOMAINS[@]}"; do
echo "$domain"
done
if [ -f "$ACCOUNTS_FILE" ]; then
awk -F'|' '{print $1}' "$ACCOUNTS_FILE" | cut -d'@' -f2
fi
} | sort | uniq > "$WHITELIST_FILE"
# rm -f /etc/dovecot/conf.d/95-sieve-redirect.conf chmod 644 "$WHITELIST_FILE"
chown _rspamd:_rspamd "$WHITELIST_FILE" 2>/dev/null || true
echo "Whitelist erstellt:"
cat "$WHITELIST_FILE"
# Postfix neu laden (nachdem docker-mailserver seine eigene Konfig geladen hat) # --- local.d configs manuell kopieren (DMS kopiert local.d nicht automatisch) ---
postfix reload || true echo "Patching: Kopiere custom rspamd local.d configs..."
SRC="/tmp/docker-mailserver/rspamd/local.d"
DST="/etc/rspamd/local.d"
if [ -d "$SRC" ]; then
for f in "$SRC"/*; do
[ -f "$f" ] || continue
cp "$f" "$DST/$(basename "$f")"
chown root:root "$DST/$(basename "$f")"
chmod 644 "$DST/$(basename "$f")"
echo " Kopiert: $(basename "$f") -> $DST/"
done
fi

View File

@@ -0,0 +1,16 @@
<?php
// Roundcube override config: force SMTP over implicit TLS (port 465) to the
// internal "mailserver" container alias.
// NOTE(review): a previous comment claimed the container-generated config is
// "included first" — no include statement exists here. Roundcube merges its
// config files itself; these values override earlier ones. Confirm load order.
$config['smtp_server'] = 'ssl://mailserver';
$config['smtp_port'] = 465;
// Internal Docker hop: the certificate is not issued for the "mailserver"
// alias, so peer verification is disabled for this connection only.
$config['smtp_conn_options'] = array(
'ssl' => array(
'verify_peer' => false,
'verify_peer_name' => false,
'allow_self_signed' => true,
),
);
$config['language'] = 'en_US';

View File

@@ -8,4 +8,4 @@
"php": ">=7.0.0", "php": ">=7.0.0",
"roundcube/plugin-installer": ">=0.1.3" "roundcube/plugin-installer": ">=0.1.3"
} }
} }

View File

@@ -0,0 +1,20 @@
/**
 * Email Configuration Plugin - Client Side
 *
 * Registers a command that asks the server-side plugin for a signed
 * configuration URL and opens it in a new browser tab.
 */
if (window.rcmail) {
rcmail.addEventListener('init', function(evt) {
// set_busy() returns a lock handle; passing it to http_post keeps the
// "loading" indicator visible until the response arrives.
rcmail.register_command('email_config_open', function() {
rcmail.http_post('plugin.email_config_generate_url', {},
rcmail.set_busy(true, 'loading'));
}, true);
});
// NOTE(review): Roundcube fires 'responseafter' + request name for plugin
// actions; confirm the server reply carries a top-level 'url' field — the
// handler silently shows an error otherwise.
rcmail.addEventListener('responseafterplugin.email_config_generate_url', function(response) {
rcmail.set_busy(false);
if (response && response.url) {
window.open(response.url, '_blank');
} else {
rcmail.display_message('Failed to generate configuration URL', 'error');
}
});
}

View File

@@ -34,7 +34,7 @@ class email_config extends rcube_plugin
$rcmail = rcube::get_instance(); $rcmail = rcube::get_instance();
$email = $rcmail->user->get_username(); $email = $rcmail->user->get_username();
$secret_key = 'SHARED_SECRET_KEY_987654321'; $secret_key = 'SHARED_SECRET_KEY_987654321';
$config_url = 'http://localhost:3008'; $config_url = 'https://config.email-bayarea.com';
$expires = time() + 3600; $expires = time() + 3600;
$data = $email . '|' . $expires; $data = $email . '|' . $expires;
$signature = hash_hmac('sha256', $data, $secret_key); $signature = hash_hmac('sha256', $data, $secret_key);
@@ -72,4 +72,4 @@ class email_config extends rcube_plugin
return $out; return $out;
} }
} }

87
DMS/dynamic_whitelist.py Normal file
View File

@@ -0,0 +1,87 @@
#!/usr/bin/env python3
import os
import re
import time
import subprocess
import threading
from datetime import datetime
try:
from croniter import croniter
except ImportError:
print("Bitte 'croniter' via pip installieren!")
exit(1)
# Mail log written by the mail stack; tailed by follow_log() below.
LOG_FILE = '/var/log/mail/mail.log'
WHITELIST_DURATION_SEC = 24 * 60 * 60  # whitelist entries expire after 24 hours
CRON_SCHEDULE = "0 * * * *"  # cleanup runs at the top of every hour
# Maps whitelisted IP -> unix timestamp of the last successful login seen.
active_ips = {}
# Matches successful Dovecot IMAP/POP3 logins; captures the remote IP (rip=...).
LOGIN_REGEX = re.compile(r"dovecot: (?:imap|pop3)-login: Login: user=<[^>]+>.*rip=([0-9]{1,3}(?:\.[0-9]{1,3}){3}),")
# Private/local prefixes (Docker bridge, RFC1918, loopback) are never whitelisted.
IGNORE_REGEX = re.compile(r"^(172\.|10\.|192\.168\.|127\.)")
def run_command(cmd):
    """Run *cmd* (a simple command string) and swallow any failure.

    The string is split with ``shlex.split`` and executed WITHOUT a shell,
    so values interpolated from log lines (IP addresses) are never subject
    to shell interpretation — the original ``shell=True`` would have
    executed anything a crafted log line smuggled into the command.
    Failures are printed but never propagated: whitelist maintenance is
    best-effort and must not kill the monitor loop.
    """
    try:
        subprocess.run(
            shlex.split(cmd),
            check=True,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
    except Exception as e:
        print(f"Fehler bei: {cmd} - {e}")
def cleanup_job():
    """Hourly cleanup thread: drop whitelist entries older than 24 hours.

    Sleeps until the next CRON_SCHEDULE tick, then removes every IP whose
    last successful login is older than WHITELIST_DURATION_SEC from both
    fail2ban jails and from the in-memory ``active_ips`` map. Runs forever
    (started as a daemon thread from __main__).
    """
    # Renamed from 'iter' — the original name shadowed the builtin.
    schedule = croniter(CRON_SCHEDULE, datetime.now())
    while True:
        next_run = schedule.get_next(datetime)
        sleep_seconds = (next_run - datetime.now()).total_seconds()
        if sleep_seconds > 0:
            time.sleep(sleep_seconds)
        print(f"[{datetime.now()}] Starte stündlichen Whitelist-Cleanup...")
        now = time.time()
        expired_ips = [ip for ip, timestamp in active_ips.items() if now - timestamp > WHITELIST_DURATION_SEC]
        for ip in expired_ips:
            print(f"[{datetime.now()}] Whitelist für {ip} abgelaufen. Entferne...")
            run_command(f"fail2ban-client set dovecot delignoreip {ip}")
            run_command(f"fail2ban-client set postfix delignoreip {ip}")
            del active_ips[ip]
def follow_log():
    """Tail the mail log forever and whitelist IPs with successful logins.

    Uses the system ``tail -F`` (capital F) because it transparently follows
    log rotation, which a plain Python file handle would not.
    """
    print(f"[{datetime.now()}] Dynamic Whitelist Monitor gestartet...")
    # The log file may not exist yet right after container start; poll for it.
    while not os.path.exists(LOG_FILE):
        time.sleep(2)
    process = subprocess.Popen(['tail', '-F', LOG_FILE], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
    for line in process.stdout:
        match = LOGIN_REGEX.search(line)
        if match:
            ip = match.group(1)
            # Never whitelist private/local ranges (Docker bridge, RFC1918, loopback).
            if IGNORE_REGEX.match(ip):
                continue
            now = time.time()
            # First sighting: add the IP to both fail2ban jails' ignore lists.
            if ip not in active_ips:
                print(f"[{datetime.now()}] Neuer erfolgreicher Login von {ip}. Setze auf Whitelist...")
                run_command(f"fail2ban-client set dovecot addignoreip {ip}")
                run_command(f"fail2ban-client set postfix addignoreip {ip}")
            # Refresh the last-seen timestamp so active clients stay whitelisted.
            active_ips[ip] = now
if __name__ == '__main__':
    # Give fail2ban time to come up after a container (re)start before we
    # start issuing fail2ban-client commands.
    time.sleep(15)
    # Hourly expiry cleanup runs in a background daemon thread.
    threading.Thread(target=cleanup_job, daemon=True).start()
    # Blocks forever tailing the mail log.
    follow_log()

41
DMS/run_sync.sh Executable file
View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Interactive wrapper around imapsync: migrates one mailbox from $HOST1 to $HOST2.
# 1. Check that the HOST1 and HOST2 environment variables are set
if [ -z "$HOST1" ] || [ -z "$HOST2" ]; then
echo "Fehler: Die Umgebungsvariablen HOST1 und/oder HOST2 sind nicht gesetzt."
echo "Bitte setze diese vor dem Ausführen des Skripts, zum Beispiel mit:"
echo 'export HOST1="65.254.254.50"'
echo 'export HOST2="147.93.132.244"'
exit 1
fi
# 2. Prompt for the e-mail address interactively
read -p "Bitte E-Mail-Adresse eingeben: " EMAIL
# 3. Prompt for the password interactively and hidden (-s)
read -s -p "Bitte Passwort eingeben: " PASSWORD
echo "" # newline so output after the hidden password prompt stays clean
# 4. Build a log file name from the e-mail address and a timestamp
LOGFILE="imapsync_${EMAIL}_$(date +%Y%m%d_%H%M%S).log"
echo "Starte imapsync für $EMAIL..."
echo "Quell-Host (HOST1): $HOST1"
echo "Ziel-Host (HOST2): $HOST2"
echo "Logs werden gespeichert in: $LOGFILE"
echo "---------------------------------------------------"
# 5. Run the imapsync container and capture all output via 'tee'
docker run --rm -i gilleslamiral/imapsync imapsync \
--host1 "$HOST1" \
--user1 "$EMAIL" \
--password1 "$PASSWORD" \
--ssl1 \
--host2 "$HOST2" \
--user2 "$EMAIL" \
--password2 "$PASSWORD" \
--ssl2 \
--automap 2>&1 | tee "$LOGFILE"
echo "---------------------------------------------------"
echo "Sync abgeschlossen. Das vollständige Log findest du in: $LOGFILE"

207
DMS/setup-dms-tls.sh Executable file
View File

@@ -0,0 +1,207 @@
#!/bin/bash
# setup-dms-tls.sh
# Lives in the DMS root directory (next to docker-compose.yml).
#
# Generates Dovecot and Postfix SNI configuration for multi-domain TLS.
# Reads the domains from the running DMS container and writes:
# - docker-data/dms/config/dovecot-sni.cf
# - docker-data/dms/config/postfix-main.cf
# - docker-data/dms/config/postfix-sni.map (new, for Postfix SNI)
#
# Certificate convention (Caddy wildcard):
# Caddy stores *.domain.tld under: wildcard_.domain.tld/wildcard_.domain.tld.crt
# Inside the container (mounted at /etc/mail/certs):
# /etc/mail/certs/wildcard_.domain.tld/wildcard_.domain.tld.crt
# /etc/mail/certs/wildcard_.domain.tld/wildcard_.domain.tld.key
#
# Usage:
# ./setup-dms-tls.sh
# DMS_CONTAINER=mailserver NODE_HOSTNAME=node1.email-srvr.com ./setup-dms-tls.sh
# Abort on the first failing command.
set -e
DMS_CONTAINER=${DMS_CONTAINER:-"mailserver"}
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
CONFIG_DIR="$SCRIPT_DIR/docker-data/dms/config"
CERTS_BASE_PATH=${CERTS_BASE_PATH:-"/etc/mail/certs"}
# Node hostname: fallback certificate for DMS (a direct cert, not a wildcard).
# Must match 'hostname' in docker-compose.yml.
NODE_HOSTNAME=${NODE_HOSTNAME:-"node1.email-srvr.com"}
echo "============================================================"
echo " 🔐 DMS TLS SNI Setup (Multi-Domain)"
echo " DMS Container: $DMS_CONTAINER"
echo " Config Dir: $CONFIG_DIR"
echo " Certs Base: $CERTS_BASE_PATH"
echo " Node Hostname: $NODE_HOSTNAME"
echo "============================================================"
# --- Read the domains from the running DMS ---
echo ""
echo "📋 Lese Domains aus DMS..."
# Extract the part after '@' of every configured account, deduplicated.
DOMAINS=$(docker exec "$DMS_CONTAINER" setup email list 2>/dev/null \
| grep -oP '(?<=@)[^\s]+' \
| sort -u)
if [ -z "$DOMAINS" ]; then
echo "❌ Keine Accounts im DMS gefunden!"
echo " Bitte zuerst anlegen: ./manage_mail_user.sh add user@domain.com PW"
exit 1
fi
echo " Gefundene Domains:"
for d in $DOMAINS; do echo " - $d"; done
# --- Cert path helpers ---
# Print the Caddy wildcard certificate path for the domain given as $1.
wildcard_cert_path() {
    local domain="$1"
    printf '%s/wildcard_.%s/wildcard_.%s.crt\n' "$CERTS_BASE_PATH" "$domain" "$domain"
}
# Print the Caddy wildcard private-key path for the domain given as $1.
wildcard_key_path() {
    local domain="$1"
    printf '%s/wildcard_.%s/wildcard_.%s.key\n' "$CERTS_BASE_PATH" "$domain" "$domain"
}
# --- Check certificate availability inside the container ---
echo ""
echo "🔍 Prüfe Zertifikat-Verfügbarkeit..."
DOMAINS_OK=""
DOMAINS_MISSING=""
for domain in $DOMAINS; do
    CERT_PATH=$(wildcard_cert_path "$domain")
    if docker exec "$DMS_CONTAINER" test -f "$CERT_PATH" 2>/dev/null; then
        # FIX: the success line printed "$domain$CERT_PATH" run together with
        # no separator or marker; format it like the other branches.
        echo "   ✅ $domain → $CERT_PATH"
        DOMAINS_OK="$DOMAINS_OK $domain"
    else
        echo " ⚠️ $domain → KEIN Cert unter $CERT_PATH"
        echo " → update-caddy-certs.sh ausführen + caddy reload!"
        DOMAINS_MISSING="$DOMAINS_MISSING $domain"
    fi
done
# Check the node-hostname certificate (direct cert, not a wildcard)
NODE_CERT_PATH="$CERTS_BASE_PATH/$NODE_HOSTNAME/$NODE_HOSTNAME.crt"
NODE_KEY_PATH="$CERTS_BASE_PATH/$NODE_HOSTNAME/$NODE_HOSTNAME.key"
if docker exec "$DMS_CONTAINER" test -f "$NODE_CERT_PATH" 2>/dev/null; then
    # FIX: add the ✅ marker for consistency with the domain loop above.
    echo "   ✅ $NODE_HOSTNAME → Cert vorhanden (Node Default)"
else
    echo " ⚠️ $NODE_HOSTNAME → KEIN Cert! Caddy-Block im Caddyfile prüfen."
fi
if [ -n "$DOMAINS_MISSING" ]; then
    echo ""
    echo " ⚠️ Fehlende Certs:$DOMAINS_MISSING"
    echo " Diese Domains werden NICHT in SNI-Config eingetragen."
fi
# Without at least one customer cert there is nothing to configure.
if [ -z "$DOMAINS_OK" ]; then
    echo "❌ Kein einziges Kundendomain-Cert gefunden!"
    echo " Bitte zuerst update-caddy-certs.sh ausführen + caddy reload abwarten."
    exit 1
fi
# ================================================================
# DOVECOT SNI configuration
# ================================================================
DOVECOT_CFG="$CONFIG_DIR/dovecot-sni.cf"
echo ""
echo "📝 Generiere: $DOVECOT_CFG"
# Quoted 'HEADER' delimiter: no variable expansion inside this header block.
cat > "$DOVECOT_CFG" << 'HEADER'
# dovecot-sni.cf - Automatisch generiert von setup-dms-tls.sh
# SNI-basierte Zertifikat-Auswahl für Dovecot (IMAP/POP3).
# Dovecot liest dieses File über den Volume-Mount in /tmp/docker-mailserver/
# und wendet es automatisch an.
HEADER
# One local_name block per service hostname (mail/imap/smtp/pop) per domain.
for domain in $DOMAINS_OK; do
CERT_PATH=$(wildcard_cert_path "$domain")
KEY_PATH=$(wildcard_key_path "$domain")
cat >> "$DOVECOT_CFG" << EOF
# $domain
local_name mail.$domain {
ssl_cert = <$CERT_PATH
ssl_key = <$KEY_PATH
}
local_name imap.$domain {
ssl_cert = <$CERT_PATH
ssl_key = <$KEY_PATH
}
local_name smtp.$domain {
ssl_cert = <$CERT_PATH
ssl_key = <$KEY_PATH
}
local_name pop.$domain {
ssl_cert = <$CERT_PATH
ssl_key = <$KEY_PATH
}
EOF
done
echo " ✅ Dovecot SNI: $(echo $DOMAINS_OK | wc -w) Domain(s)"
# ================================================================
# POSTFIX SNI configuration (rewritten for real SNI maps)
# ================================================================
POSTFIX_CFG="$CONFIG_DIR/postfix-main.cf"
POSTFIX_MAP="$CONFIG_DIR/postfix-sni.map"
echo ""
echo "📝 Generiere: $POSTFIX_CFG und $POSTFIX_MAP"
# Keep a timestamped backup of any existing postfix-main.cf override.
if [ -f "$POSTFIX_CFG" ]; then
cp "$POSTFIX_CFG" "${POSTFIX_CFG}.bak.$(date +%Y%m%d%H%M%S)"
fi
# 1. Write postfix-main.cf (fallback cert + SNI map activation)
cat > "$POSTFIX_CFG" << POSTFIX_EOF
# postfix-main.cf - Automatisch generiert von setup-dms-tls.sh
#
# 1. Fallback-Zertifikat (Wird genutzt, wenn kein SNI-Match gefunden wird)
smtpd_tls_chain_files = ${NODE_KEY_PATH}, ${NODE_CERT_PATH}
# 2. SNI-Mapping aktivieren
# Wir nutzen 'texthash', damit Postfix die Map direkt lesen kann,
# ohne dass 'postmap' ausgeführt werden muss!
tls_server_sni_maps = texthash:/tmp/docker-mailserver/postfix-sni.map
POSTFIX_EOF
# 2. Write postfix-sni.map (one line per SNI hostname: host key cert)
echo "# postfix-sni.map - Automatisch generiert (Format: host key_pfad cert_pfad)" > "$POSTFIX_MAP"
for domain in $DOMAINS_OK; do
KEY_PATH=$(wildcard_key_path "$domain")
CERT_PATH=$(wildcard_cert_path "$domain")
cat >> "$POSTFIX_MAP" << EOF
mail.${domain} ${KEY_PATH} ${CERT_PATH}
smtp.${domain} ${KEY_PATH} ${CERT_PATH}
imap.${domain} ${KEY_PATH} ${CERT_PATH}
pop.${domain} ${KEY_PATH} ${CERT_PATH}
${domain} ${KEY_PATH} ${CERT_PATH}
EOF
done
echo " ✅ Postfix SNI: $(echo $DOMAINS_OK | wc -w) Domain(s) konfiguriert"
# ================================================================
# Summary and zero-downtime reload
# ================================================================
echo ""
echo "============================================================"
echo "✅ Konfigurationen generiert."
echo ""
echo "🔄 Lade Postfix und Dovecot neu (ohne Downtime)..."
docker exec "$DMS_CONTAINER" postfix reload || echo "⚠️ Postfix Reload fehlgeschlagen"
docker exec "$DMS_CONTAINER" dovecot reload || echo "⚠️ Dovecot Reload fehlgeschlagen"
echo ""
echo "📋 Nächste Schritte:"
echo ""
echo "1. TLS testen (SNI):"
for domain in $DOMAINS_OK; do
echo " openssl s_client -connect mail.$domain:993 -servername mail.$domain 2>/dev/null | grep 'subject\|issuer'"
done
echo "============================================================"

1
DMS/sieve-schedule Normal file
View File

@@ -0,0 +1 @@
*/5 * * * *

10
DMS/sieve-supervisor.conf Normal file
View File

@@ -0,0 +1,10 @@
[program:sieve-sync]
# "-u" (unbuffered stdout/stderr) is essential so log lines appear immediately.
command=/usr/bin/python3 -u /scripts/sync.py
# Forward all output to the container's stdout/stderr (no rotation).
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true
autorestart=true
# Consider the process "started" only after 5 seconds of uptime.
startsecs=5

View File

@@ -0,0 +1,225 @@
#!/usr/bin/env python3
import boto3
import os
import shutil
from pathlib import Path
import json
import time
from datetime import datetime
try:
from croniter import croniter
except ImportError:
print("Bitte 'croniter' via pip installieren!")
exit(1)
# Config
REGION = 'us-east-2'
TABLE = 'email-rules'
VMAIL_BASE = '/var/mail'
dynamodb = boto3.resource('dynamodb', region_name=REGION)
table = dynamodb.Table(TABLE)
def generate_sieve(email, rules):
    """Build a Dovecot Sieve script for *email* from a DynamoDB rules item.

    Emits, in order: a guard that keeps+stops mail already handled by the
    SES worker, one ``redirect :copy`` per entry in ``rules['forwards']``,
    and a vacation (out-of-office) block when ``rules['ooo_active']`` is
    truthy. Returns the full script text, newline-terminated.
    """
    out = ['require ["copy","vacation","variables"];', '']

    # Guard: stop processing for mail the worker has already handled.
    out += [
        '# Skip if already processed by worker',
        'if header :contains "X-SES-Worker-Processed" "" {',
        ' keep;',
        ' stop;',
        '}',
        '',
    ]

    # Forwarding rules.
    targets = rules.get('forwards', [])
    if targets:
        out.append('# rule:[forward]')
        out.extend(f'redirect :copy "{t}";' for t in targets)
        out.append('')

    # Out-of-office auto-reply.
    if rules.get('ooo_active'):
        reply = rules.get('ooo_message', 'I am away')
        out.append('# rule:[reply]')
        if rules.get('ooo_content_type', 'text') == 'html':
            # HTML reply: MIME body with explicit Content-Type header.
            out += [
                f'vacation :days 1 :from "{email}" :mime text:',
                'Content-Type: text/html; charset=utf-8',
                '',
                reply,
                '.',
                ';',
            ]
        else:
            # Plain text: json.dumps produces a correctly quoted/escaped
            # Sieve string literal.
            quoted = json.dumps(reply, ensure_ascii=False)
            out.append(f'vacation :days 1 :from "{email}" {quoted};')

    return '\n'.join(out) + '\n'
def deactivate_sieve(email, mailbox_home):
    """Safely disable a user's Sieve rules without deleting any files.

    Overwrites ``<mailbox_home>/.dovecot.sieve`` with a bare ``keep;``
    script (mail is still delivered, nothing else happens) instead of
    removing it. Used for mailboxes that exist on disk but have no entry
    in DynamoDB.

    Args:
        email: Mailbox address, used only for log output.
        mailbox_home: ``pathlib.Path`` of the user's Dovecot home dir.
    """
    sieve_path = mailbox_home / '.dovecot.sieve'

    # Replacement script: keep the mail, do nothing else.
    safe_content = (
        '# Script deactivated by DynamoDB Sync (User not in DB)\n'
        'keep;\n'
    )

    # Avoid needless writes: if the script is already the deactivation
    # stub, there is nothing to do.
    if sieve_path.exists() and not sieve_path.is_symlink():
        try:
            if "Script deactivated" in sieve_path.read_text():
                return  # already deactivated
        except (OSError, UnicodeDecodeError):
            # FIX: was a bare `except:` — only swallow read failures and
            # fall through to overwrite the unreadable script.
            pass

    # Never write through a symlink (it could point at a system file);
    # remove the link and write a regular file instead.
    if sieve_path.is_symlink():
        try:
            os.unlink(sieve_path)
        except OSError:
            pass

    try:
        sieve_path.write_text(safe_content)
        # Compile so Dovecot picks up the change immediately.
        os.system(f'sievec {sieve_path}')
        # Keep ownership consistent with the mail store.
        os.system(f'chown docker:docker {sieve_path}')
        print(f'{email} (Regeln deaktiviert/geleert)')
    except Exception as e:
        print(f"Fehler beim Deaktivieren von {email}: {e}")
def sync():
    """One full reconciliation pass: DynamoDB rules -> Sieve scripts on disk.

    Users present in the DB get their Sieve script regenerated; users that
    exist on disk but are missing from the DB get their rules deactivated
    (never deleted) via deactivate_sieve().
    """
    # 1. Fetch the full rule set from DynamoDB, keyed by mailbox address.
    try:
        response = table.scan()
        db_users = {item['email_address']: item for item in response.get('Items', [])}
    except Exception as e:
        # If the DB is unreachable, do NOT touch anything on disk —
        # otherwise every user's rules would be deactivated.
        print(f"FATAL: Konnte DynamoDB nicht lesen ({e}). Breche ab, um keine Regeln zu löschen.")
        return
    # 2. Scan the maildir tree: /var/mail/<domain>/<user>/
    base_path = Path(VMAIL_BASE)
    if not base_path.exists():
        print("Warnung: /var/mail existiert nicht.")
        return
    # Iterate over domains
    for domain_dir in base_path.iterdir():
        if not domain_dir.is_dir(): continue
        # Iterate over users
        for user_dir in domain_dir.iterdir():
            if not user_dir.is_dir(): continue
            user = user_dir.name
            domain = domain_dir.name
            email = f"{user}@{domain}"
            # IMPORTANT: only the 'home' subfolder is touched here.
            # The mail itself lives in user_dir/cur etc. and is left alone.
            mailbox_home = user_dir / 'home'
            # --- Case A: user exists in the DB (regenerate rules) ---
            if email in db_users:
                item = db_users[email]
                if not mailbox_home.exists():
                    mailbox_home.mkdir(exist_ok=True)
                    os.system(f'chown docker:docker {mailbox_home}')
                sieve_path = mailbox_home / '.dovecot.sieve'
                script = generate_sieve(email, item)
                sieve_path.write_text(script)
                # Compile so Dovecot applies the change immediately.
                os.system(f'sievec {sieve_path}')
                # Ownership
                os.system(f'chown docker:docker {sieve_path}')
                # (Optional) mirror into the sieve/ folder for Roundcube compatibility
                sieve_dir = mailbox_home / 'sieve'
                if sieve_dir.exists():
                    managed_script = sieve_dir / 'default.sieve'
                    managed_script.write_text(script)
                    os.system(f'sievec {managed_script}')
                    os.system(f'chown -R docker:docker {sieve_dir}')
                print(f'{email}')
            # --- Case B: user NOT in the DB (deactivate rules) ---
            else:
                # Only when the home folder exists — don't create leftovers
                # for users that never had any rules.
                if mailbox_home.exists():
                    deactivate_sieve(email, mailbox_home)
def wait_for_dovecot():
    """Block until Dovecot's userdb auth socket appears (poll every 5 s)."""
    socket_path = '/var/run/dovecot/auth-userdb'
    print("⏳ Warte auf Dovecot Start...")
    while True:
        if os.path.exists(socket_path):
            break
        time.sleep(5)
    print("✅ Dovecot ist bereit!")
if __name__ == '__main__':
    # Wait until Dovecot is up, then sync forever on a cron schedule.
    wait_for_dovecot()

    # Schedule: default every 5 minutes, overridable via /etc/sieve-schedule
    # (a single cron expression; empty files and '#' comments are ignored).
    CRON_FILE = '/etc/sieve-schedule'
    cron_string = "*/5 * * * *"
    if os.path.exists(CRON_FILE):
        with open(CRON_FILE, 'r') as f:
            content = f.read().strip()
        if content and not content.startswith('#'):
            cron_string = content

    print(f"DynamoDB Sieve Sync (Safe Mode) gestartet. Zeitplan: {cron_string}")
    # Run once immediately at startup.
    sync()

    # FIX: renamed `iter` -> `schedule`; the original shadowed the builtin.
    schedule = croniter(cron_string, datetime.now())
    while True:
        next_run = schedule.get_next(datetime)
        sleep_seconds = (next_run - datetime.now()).total_seconds()
        if sleep_seconds > 0:
            time.sleep(sleep_seconds)
        try:
            print(f"[{datetime.now()}] Starte Sync...")
            sync()
        except Exception as e:
            # Keep the loop alive; a failed pass is retried on the next tick.
            print(f"Fehler beim Sync: {e}")

View File

@@ -1,91 +0,0 @@
#!/bin/bash
# update_dms_config.sh
# Adds a new domain to the local DMS configuration:
# 1. Extends SRS_EXCLUDE_DOMAINS in docker-compose.yml
# 2. Adds a whitelist entry to smtp_header_checks
set -e
DOMAIN=$1
DOCKER_COMPOSE_FILE="./docker-compose.yml"
HEADER_CHECKS_FILE="./docker-data/dms/config/postfix/smtp_header_checks"
if [ -z "$DOMAIN" ]; then
echo "Usage: $0 <domain>"
echo "Example: $0 cielectrical.com"
exit 1
fi
echo "=== Aktualisiere lokale Konfiguration für $DOMAIN ==="
# ---------------------------------------------
# 1. Update docker-compose.yml (SRS exclude)
# ---------------------------------------------
if [ -f "$DOCKER_COMPOSE_FILE" ]; then
echo "-> Prüfe docker-compose.yml..."
# Skip if the domain already appears on the SRS_EXCLUDE_DOMAINS line.
if grep -q "SRS_EXCLUDE_DOMAINS=.*$DOMAIN" "$DOCKER_COMPOSE_FILE"; then
echo " Domain bereits in SRS_EXCLUDE_DOMAINS vorhanden."
else
# Back up, then append ",<domain>" to the SRS_EXCLUDE_DOMAINS line.
cp "$DOCKER_COMPOSE_FILE" "${DOCKER_COMPOSE_FILE}.bak"
sed -i "s/SRS_EXCLUDE_DOMAINS=.*/&,$DOMAIN/" "$DOCKER_COMPOSE_FILE"
echo "$DOMAIN zu SRS_EXCLUDE_DOMAINS hinzugefügt."
fi
else
echo "❌ Fehler: $DOCKER_COMPOSE_FILE nicht gefunden!"
exit 1
fi
# ---------------------------------------------
# 2. Update smtp_header_checks (PCRE whitelist)
# ---------------------------------------------
if [ -f "$HEADER_CHECKS_FILE" ]; then
echo "-> Prüfe smtp_header_checks..."
# Escape the domain for the regex (dots become \.).
ESCAPED_DOMAIN="${DOMAIN//./\\.}"
NEW_LINE="/.*@${ESCAPED_DOMAIN}/ DUNNO"
# Skip if an entry already exists.
if grep -Fq "@$ESCAPED_DOMAIN/" "$HEADER_CHECKS_FILE"; then
echo " Domain bereits in smtp_header_checks vorhanden."
else
# Back up first.
cp "$HEADER_CHECKS_FILE" "${HEADER_CHECKS_FILE}.bak"
# PCRE tables are order-sensitive: whitelist (DUNNO) rules must come
# before any rewrite rules, so the safest placement for the new entry
# is the very first line of the file.
sed -i "1i $NEW_LINE" "$HEADER_CHECKS_FILE"
echo "$DOMAIN zu smtp_header_checks hinzugefügt (ganz oben)."
fi
else
echo "⚠️ Warnung: $HEADER_CHECKS_FILE nicht gefunden. Überspringe."
fi
echo "========================================================"
echo "Konfiguration aktualisiert."
echo "HINWEIS: Damit die Änderungen wirksam werden, führen Sie bitte aus:"
echo " docker compose up -d --force-recreate"
echo "========================================================"

View File

@@ -0,0 +1,6 @@
[program:dynamic-whitelist]
# Run the whitelist watcher unbuffered (-u) so log lines flush immediately.
command=/usr/bin/python3 -u /scripts/dynamic_whitelist.py
autostart=true
autorestart=true
stderr_logfile=/var/log/supervisor/dynamic-whitelist.err.log
stdout_logfile=/var/log/supervisor/dynamic-whitelist.out.log

View File

@@ -1,20 +0,0 @@
# Worker bauen
docker-compose build
# Worker starten
docker-compose up -d
# Logs ansehen
docker-compose logs -f
# Logs nur für eine Domain
docker-compose logs -f worker-andreasknuth
# Status prüfen
docker-compose ps
# Worker neu starten
docker-compose restart
# Worker stoppen
docker-compose down

View File

@@ -1,26 +0,0 @@
FROM python:3.11-slim
# Metadata
LABEL maintainer="your-email@example.com"
LABEL description="Domain-specific email worker for SMTP delivery"
# Non-root user for security
RUN useradd -m -u 1000 worker && \
mkdir -p /app && \
chown -R worker:worker /app
# Install boto3 (the only runtime dependency)
RUN pip install --no-cache-dir boto3
# Worker code
COPY --chown=worker:worker worker.py /app/worker.py
WORKDIR /app
USER worker
# Healthcheck: the worker process must be running
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
CMD pgrep -f worker.py || exit 1
# Start worker with unbuffered output so logs appear immediately
CMD ["python", "-u", "worker.py"]

View File

@@ -1,35 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:ListBucket"
],
"Resource": [
"arn:aws:s3:::*-emails",
"arn:aws:s3:::*-emails/*"
]
},
{
"Effect": "Allow",
"Action": [
"sqs:SendMessage",
"sqs:GetQueueUrl"
],
"Resource": "arn:aws:sqs:eu-central-1:123456789:*-queue"
},
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "arn:aws:logs:::*"
}
]
}

13
TESTS
View File

@@ -1,13 +0,0 @@
# Via AWS SES CLI
aws ses send-email \
--from "sender@example.com" \
--destination "ToAddresses=test@andreasknuth.de" \
--message "Subject={Data='Test',Charset=utf-8},Body={Text={Data='Test message',Charset=utf-8}}" \
--region us-east-2
# Mail an mehrere Domains
aws ses send-email \
--from "sender@example.com" \
--destination "ToAddresses=test@andreasknuth.de,test@bizmatch.net" \
--message "Subject={Data='Multi-Domain Test',Charset=utf-8},Body={Text={Data='Testing multiple domains',Charset=utf-8}}" \
--region us-east-2

92
backup_mail.sh Executable file
View File

@@ -0,0 +1,92 @@
#!/bin/bash
# ==============================================================================
# CONFIGURATION — rclone backup of one domain's maildir to an S3 bucket.
# Usage: sudo ./backup_mail.sh <DOMAIN> <RCLONE_REMOTE> <BUCKET>
# ==============================================================================
BASE_MAIL_PATH="/home/aknuth/git/email-amazon/DMS/docker-data/dms/mail-data"
RCLONE_CONFIG="/home/aknuth/.config/rclone/rclone.conf"
LOGFILE="/var/log/mail_backup.log"
# ==============================================================================
# HELPER: LOGGING (screen + file)
# ==============================================================================
# Write "<timestamp> | <message>" to stdout AND append it to $LOGFILE.
log() {
    local stamp
    stamp=$(date '+%Y-%m-%d %H:%M:%S')
    echo "$stamp | $1" | tee -a "$LOGFILE"
}
# ==============================================================================
# PARAMETERS & VALIDATION
# ==============================================================================
DOMAIN=$1
REMOTE_NAME=$2
BUCKET_NAME=$3

if [[ -z "$DOMAIN" || -z "$REMOTE_NAME" || -z "$BUCKET_NAME" ]]; then
    echo "FEHLER: Fehlende Parameter."
    echo "Benutzung: sudo $0 <DOMAIN> <RCLONE_REMOTE> <BUCKET>"
    echo "Beispiel: sudo $0 buddelectric.net buddelectric buddelectric-mails"
    exit 1
fi

SOURCE_PATH="${BASE_MAIL_PATH}/${DOMAIN}"

if [ ! -d "$SOURCE_PATH" ]; then
    log "FEHLER: Quell-Ordner existiert nicht: $SOURCE_PATH"
    exit 1
fi

# Root is required: the maildir tree and /var/log are root-owned.
if [[ $EUID -ne 0 ]]; then
    echo "FEHLER: Dieses Skript muss als root ausgeführt werden (sudo)."
    exit 1
fi

# ==============================================================================
# LOCKING — at most one backup per domain at a time
# ==============================================================================
LOCKFILE_PATH="/var/run/rclone_mail_${DOMAIN}.lock"

# Abort only if the lock exists AND its PID is still alive (stale locks from
# dead processes are taken over). FIX: quoted expansions and $(...) instead
# of backticks so unusual domain names cannot break the test.
if [ -e "$LOCKFILE_PATH" ] && kill -0 "$(cat "$LOCKFILE_PATH")" 2>/dev/null; then
    log "ABBRUCH: Backup für $DOMAIN läuft bereits."
    exit 1
fi
# Remove the lock on normal exit and on INT/TERM.
trap 'rm -f "$LOCKFILE_PATH"; exit' INT TERM EXIT
echo $$ > "$LOCKFILE_PATH"

# ==============================================================================
# BACKUP
# ==============================================================================
log "----------------------------------------------------------------"
log "START Backup"
log "Domain: $DOMAIN"
log "Quelle: $SOURCE_PATH"
log "Ziel: $REMOTE_NAME:$BUCKET_NAME"

# --progress draws the bar on the terminal only (not captured in the log);
# --log-file keeps the detailed rclone output in the log file only;
# --min-age 15m skips files still being delivered; lock/tmp files excluded.
/usr/bin/rclone sync "$SOURCE_PATH" "$REMOTE_NAME:$BUCKET_NAME" \
    --config "$RCLONE_CONFIG" \
    --exclude "**/tmp/**" \
    --exclude "*.lock" \
    --exclude "dovecot-uidlist.lock" \
    --exclude ".dovecot.lda-dupes" \
    --transfers 32 \
    --checkers 32 \
    --fast-list \
    --min-age 15m \
    --log-file "$LOGFILE" \
    --log-level INFO \
    --progress
EXIT_CODE=$?

if [ $EXIT_CODE -eq 0 ]; then
    log "STATUS: ERFOLG - $DOMAIN erfolgreich gesichert."
else
    log "STATUS: FEHLER - Exit Code $EXIT_CODE. Details siehe $LOGFILE"
fi
log "----------------------------------------------------------------"
exit $EXIT_CODE

View File

@@ -43,7 +43,7 @@ aws s3api put-bucket-lifecycle-configuration \
"ID": "DeleteOldEmails", "ID": "DeleteOldEmails",
"Status": "Enabled", "Status": "Enabled",
"Expiration": { "Expiration": {
"Days": 30 "Days": 14
}, },
"Filter": { "Filter": {
"Prefix": "" "Prefix": ""

View File

@@ -1,7 +1,16 @@
#!/bin/bash #!/bin/bash
# awsses_lambda_global.sh - SES Setup mit S3 + Global Lambda Shim -> SQS # awsses.sh - SES Setup mit S3 + Global Lambda Shim -> SQS
# Dieses Skript ist idempotent: Es kann sicher mehrfach ausgeführt werden. # Dieses Skript ist idempotent: Es kann sicher mehrfach ausgeführt werden.
# Globale Lambda für alle Domains. # Globale Lambda für alle Domains.
#
# MAIL FROM Subdomain:
# Standard: mail.${DOMAIN_NAME}
# Override: export MAIL_FROM_SUBDOMAIN="mailfrom" (nur der Prefix, ohne Domain)
#
# Beispiel:
# export DOMAIN_NAME="buddelectric.net"
# export MAIL_FROM_SUBDOMAIN="mailfrom" # → mailfrom.buddelectric.net
# ./awsses.sh
set -e set -e
@@ -21,6 +30,10 @@ AWS_REGION=${AWS_REGION:-"us-east-2"}
EMAIL_PREFIX=${EMAIL_PREFIX:-""} EMAIL_PREFIX=${EMAIL_PREFIX:-""}
CONFIGURATION_SET_NAME="relay-outbound" CONFIGURATION_SET_NAME="relay-outbound"
# MAIL FROM Subdomain (konfigurierbar)
MAIL_FROM_SUBDOMAIN=${MAIL_FROM_SUBDOMAIN:-"mail"}
MAIL_FROM_DOMAIN="${MAIL_FROM_SUBDOMAIN}.${DOMAIN_NAME}"
# Bucket Name generieren falls leer # Bucket Name generieren falls leer
if [ -z "$S3_BUCKET_NAME" ]; then if [ -z "$S3_BUCKET_NAME" ]; then
S3_BUCKET_NAME=$(echo "$DOMAIN_NAME" | tr '.' '-' | awk '{print $0 "-emails"}') S3_BUCKET_NAME=$(echo "$DOMAIN_NAME" | tr '.' '-' | awk '{print $0 "-emails"}')
@@ -35,6 +48,7 @@ LAMBDA_ROLE_NAME="SesShimGlobalRole"
echo "==========================================================" echo "=========================================================="
echo " SES Setup (S3 -> Global Lambda Shim -> SQS) für $DOMAIN_NAME" echo " SES Setup (S3 -> Global Lambda Shim -> SQS) für $DOMAIN_NAME"
echo " MAIL FROM: $MAIL_FROM_DOMAIN"
echo "==========================================================" echo "=========================================================="
# --------------------------------------------------------- # ---------------------------------------------------------
@@ -46,9 +60,11 @@ if ! aws sesv2 get-email-identity --email-identity ${DOMAIN_NAME} --region ${AWS
fi fi
# Update Attributes (Idempotent) # Update Attributes (Idempotent)
aws sesv2 put-email-identity-dkim-attributes --email-identity ${DOMAIN_NAME} --signing-enabled --region ${AWS_REGION} aws sesv2 put-email-identity-dkim-attributes --email-identity ${DOMAIN_NAME} --signing-enabled --region ${AWS_REGION}
aws sesv2 put-email-identity-mail-from-attributes --email-identity ${DOMAIN_NAME} --mail-from-domain "mail.${DOMAIN_NAME}" --behavior-on-mx-failure USE_DEFAULT_VALUE --region ${AWS_REGION} aws sesv2 put-email-identity-mail-from-attributes --email-identity ${DOMAIN_NAME} --mail-from-domain "${MAIL_FROM_DOMAIN}" --behavior-on-mx-failure USE_DEFAULT_VALUE --region ${AWS_REGION}
aws sesv2 put-email-identity-configuration-set-attributes --email-identity ${DOMAIN_NAME} --configuration-set-name "$CONFIGURATION_SET_NAME" --region ${AWS_REGION} aws sesv2 put-email-identity-configuration-set-attributes --email-identity ${DOMAIN_NAME} --configuration-set-name "$CONFIGURATION_SET_NAME" --region ${AWS_REGION}
echo " -> MAIL FROM Domain: ${MAIL_FROM_DOMAIN}"
# --------------------------------------------------------- # ---------------------------------------------------------
# 2. SQS Queue holen (nur zur Validierung, Lambda holt dynamisch) # 2. SQS Queue holen (nur zur Validierung, Lambda holt dynamisch)
# --------------------------------------------------------- # ---------------------------------------------------------
@@ -149,7 +165,7 @@ RULE_JSON=$(jq -n \
--arg larn "$LAMBDA_ARN" \ --arg larn "$LAMBDA_ARN" \
--arg rule "$RULE_NAME" \ --arg rule "$RULE_NAME" \
--arg domain "$DOMAIN_NAME" \ --arg domain "$DOMAIN_NAME" \
--arg subdomain "mail.$DOMAIN_NAME" \ --arg subdomain "${MAIL_FROM_DOMAIN}" \
'{ '{
Name: $rule, Name: $rule,
Enabled: true, Enabled: true,
@@ -181,7 +197,12 @@ else
fi fi
# Aktivieren # Aktivieren
aws ses set-active-receipt-rule-set --rule-set-name "bizmatch-ruleset" --region ${AWS_REGION} aws ses set-active-receipt-rule-set --rule-set-name "bizmatch-ruleset" --region ${AWS_REGION}
echo "========================================================" echo "========================================================"
echo "✅ Setup erfolgreich. Globale Lambda ($LAMBDA_NAME) für alle Domains." echo "✅ Setup erfolgreich."
echo " S3 -> Lambda -> Domain-spezifische SQS" echo " Domain: $DOMAIN_NAME"
echo " MAIL FROM: $MAIL_FROM_DOMAIN"
echo " S3 Bucket: $S3_BUCKET_NAME"
echo " SQS Queue: $QUEUE_NAME"
echo " Lambda: $LAMBDA_NAME (global)"
echo "========================================================" echo "========================================================"

View File

@@ -1,156 +1,171 @@
#!/bin/bash #!/bin/bash
# Cloudflare API-Konfiguration # ==========================================
# Setze deine API-Schlüssel und Zone-ID als Umgebungsvariablen oder ersetze sie direkt # KONFIGURATION
# ==========================================
AWS_REGION="us-east-2"
# CADDY_SERVER_IP="DEINE_CADDY_IP_HIER" # WICHTIG: IP deines Caddy Servers eintragen
# MAIL_SERVER_HOSTNAME="mail.email-srvr.com" # Der Server, mit dem sich Outlook/iPhone verbinden
# CF_ZONE_ID="1b7756cee93ed8ba8c05bdc3cb0a5da8" # Die Zone-ID deiner Domain bei Cloudflare
AWS_REGION="us-east-2" # AWS-Region
if [ -z "$DOMAIN_NAME" ]; then if [ -z "$DOMAIN_NAME" ]; then
echo "Fehler: DOMAIN_NAME ist nicht gesetzt." echo "Fehler: DOMAIN_NAME ist nicht gesetzt (z.B. export DOMAIN_NAME='bayarea-cc.com')."
echo "Bitte setzen Sie die Variable mit: export DOMAIN_NAME='IhreDomain.de'" exit 1
exit 1 # Skript mit Fehlercode beenden
fi fi
# Überprüfen, ob der erforderliche API-Token gesetzt ist
if [ -z "$CF_API_TOKEN" ]; then if [ -z "$CF_API_TOKEN" ]; then
echo "Fehler: Bitte setze CF_API_TOKEN als Umgebungsvariable oder im Skript." echo "Fehler: CF_API_TOKEN fehlt."
exit 1
fi
if [ -z "$CADDY_SERVER_IP" ]; then
echo "Fehler: CADDY_SERVER_IP fehlt. Bitte im Skript eintragen."
exit 1 exit 1
fi fi
# Zone ID basierend auf Domain-Namen abrufen # Fallback für Mailserver Variable
if [ -z "$MAIL_SERVER_HOSTNAME" ]; then
MAIL_SERVER_HOSTNAME="mail.email-srvr.com"
fi
# ==========================================
# ZONE ID ERMITTELN
# ==========================================
echo "Zone ID für $DOMAIN_NAME abrufen..." echo "Zone ID für $DOMAIN_NAME abrufen..."
ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN_NAME" \ ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN_NAME" \
-H "Authorization: Bearer $CF_API_TOKEN" \ -H "Authorization: Bearer $CF_API_TOKEN" \
-H "Content-Type: application/json") -H "Content-Type: application/json")
# Überprüfen, ob die Antwort erfolgreich war
if [ "$(echo $ZONE_RESPONSE | jq -r '.success')" != "true" ]; then if [ "$(echo $ZONE_RESPONSE | jq -r '.success')" != "true" ]; then
echo "Fehler beim Abrufen der Zone ID:" echo "Fehler beim Abrufen der Zone ID:"
echo $ZONE_RESPONSE | jq . echo $ZONE_RESPONSE | jq .
exit 1 exit 1
fi fi
# Zone ID extrahieren
CF_ZONE_ID=$(echo $ZONE_RESPONSE | jq -r '.result[0].id') CF_ZONE_ID=$(echo $ZONE_RESPONSE | jq -r '.result[0].id')
echo "Zone ID: $CF_ZONE_ID"
# Überprüfen, ob eine Zone ID gefunden wurde # ==========================================
if [ -z "$CF_ZONE_ID" ] || [ "$CF_ZONE_ID" = "null" ]; then # FUNKTIONEN
echo "Keine Zone ID für $DOMAIN_NAME gefunden. Bitte stelle sicher, dass die Domain bei Cloudflare registriert ist." # ==========================================
exit 1
fi
echo "Zone ID für $DOMAIN_NAME: $CF_ZONE_ID"
# Hilfsfunktion für DNS-Einträge anlegen
create_dns_record() { create_dns_record() {
local TYPE=$1 local TYPE=$1
local NAME=$2 local NAME=$2
local CONTENT=$3 local CONTENT=$3
local PROXIED=$4 local PROXIED=$4
local TTL=$5 local TTL=$5
local PRIORITY=$6 # Neu: MX-Priority local PRIORITY=$6
# Standardwerte für Proxied und TTL setzen, falls nicht angegeben if [ -z "$PROXIED" ]; then PROXIED="false"; fi
if [ -z "$PROXIED" ]; then if [ -z "$TTL" ]; then TTL=3600; fi
PROXIED="false"
fi
if [ -z "$TTL" ]; then
TTL=3600 # 1 Stunde
fi
echo "Erstelle $TYPE-Eintrag für $NAME mit Inhalt $CONTENT..." echo "Erstelle $TYPE-Eintrag für $NAME..."
# Json Payload vorbereiten abhängig vom Record-Typ
local JSON_DATA="" local JSON_DATA=""
if [ "$TYPE" = "MX" ]; then if [ "$TYPE" = "MX" ]; then
# Bei MX-Einträgen müssen wir die Priority separat angeben if [ -z "$PRIORITY" ]; then PRIORITY=10; fi
if [ -z "$PRIORITY" ]; then
PRIORITY=10 # Standard-Priority, falls nicht angegeben
fi
JSON_DATA="{ JSON_DATA="{
\"type\": \"$TYPE\", \"type\": \"$TYPE\", \"name\": \"$NAME\", \"content\": \"$CONTENT\",
\"name\": \"$NAME\", \"ttl\": $TTL, \"priority\": $PRIORITY, \"proxied\": $PROXIED
\"content\": \"$CONTENT\",
\"ttl\": $TTL,
\"priority\": $PRIORITY,
\"proxied\": $PROXIED
}" }"
elif [ "$TYPE" = "TXT" ]; then elif [ "$TYPE" = "TXT" ]; then
# Bei TXT-Einträgen müssen wir sicherstellen, dass der Inhalt in Anführungszeichen steht
# Aber Anführungszeichen innerhalb von JSON müssen escaped werden
# Wir entfernen zuerst alle vorhandenen Anführungszeichen und fügen sie dann korrekt hinzu
CONTENT=$(echo "$CONTENT" | sed 's/"//g') CONTENT=$(echo "$CONTENT" | sed 's/"//g')
JSON_DATA="{ JSON_DATA="{
\"type\": \"$TYPE\", \"type\": \"$TYPE\", \"name\": \"$NAME\", \"content\": \"\\\"$CONTENT\\\"\",
\"name\": \"$NAME\", \"ttl\": $TTL, \"proxied\": $PROXIED
\"content\": \"\\\"$CONTENT\\\"\",
\"ttl\": $TTL,
\"proxied\": $PROXIED
}" }"
else else
# Für alle anderen Record-Typen (z.B. CNAME)
JSON_DATA="{ JSON_DATA="{
\"type\": \"$TYPE\", \"type\": \"$TYPE\", \"name\": \"$NAME\", \"content\": \"$CONTENT\",
\"name\": \"$NAME\", \"ttl\": $TTL, \"proxied\": $PROXIED
\"content\": \"$CONTENT\",
\"ttl\": $TTL,
\"proxied\": $PROXIED
}" }"
fi fi
# API-Aufruf an Cloudflare
curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records" \ curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records" \
-H "Authorization: Bearer $CF_API_TOKEN" \ -H "Authorization: Bearer $CF_API_TOKEN" \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
--data "$JSON_DATA" | jq . --data "$JSON_DATA" | jq -r '.success'
} }
# DKIM-Einträge abrufen und bei Cloudflare eintragen create_srv_record() {
echo "DKIM-Tokens abrufen von AWS SES..." local SERVICE=$1 # z.B. _imap
DKIM_TOKENS=$(aws ses get-identity-dkim-attributes \ local PROTO=$2 # z.B. _tcp
--identities ${DOMAIN_NAME} \ local PORT=$3 # z.B. 993
--region ${AWS_REGION} \ local TARGET=$4 # z.B. mail.email-srvr.com
--query "DkimAttributes.\"${DOMAIN_NAME}\".DkimTokens" \ local NAME="${SERVICE}.${PROTO}.${DOMAIN_NAME}"
--output text)
# Überprüfen, ob DKIM-Tokens abgerufen wurden echo "Erstelle SRV-Eintrag für $NAME -> $TARGET:$PORT..."
if [ -z "$DKIM_TOKENS" ]; then
echo "Fehler: Konnte DKIM-Tokens nicht abrufen. Ist die Domain bei AWS SES verifiziert?" local JSON_DATA="{
exit 1 \"type\": \"SRV\",
\"name\": \"$NAME\",
\"data\": {
\"service\": \"$SERVICE\",
\"proto\": \"$PROTO\",
\"name\": \"$DOMAIN_NAME\",
\"priority\": 0,
\"weight\": 1,
\"port\": $PORT,
\"target\": \"$TARGET\"
},
\"ttl\": 3600
}"
curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records" \
-H "Authorization: Bearer $CF_API_TOKEN" \
-H "Content-Type: application/json" \
--data "$JSON_DATA" | jq -r '.success'
}
# ==========================================
# 1. AWS SES Setup (DKIM & Verifizierung)
# ==========================================
echo "--- AWS SES Konfiguration ---"
DKIM_TOKENS=$(aws ses get-identity-dkim-attributes \
--identities ${DOMAIN_NAME} --region ${AWS_REGION} \
--query "DkimAttributes.\"${DOMAIN_NAME}\".DkimTokens" --output text)
VERIFICATION_TOKEN=$(aws ses get-identity-verification-attributes \
--identities ${DOMAIN_NAME} --region ${AWS_REGION} \
--query "VerificationAttributes.\"${DOMAIN_NAME}\".VerificationToken" --output text)
if [ -n "$DKIM_TOKENS" ]; then
for TOKEN in ${DKIM_TOKENS}; do
create_dns_record "CNAME" "${TOKEN}._domainkey.${DOMAIN_NAME}" "${TOKEN}.dkim.amazonses.com" "false"
done
fi fi
# Domain-Verifizierungstoken abrufen if [ -n "$VERIFICATION_TOKEN" ]; then
VERIFICATION_TOKEN=$(aws ses get-identity-verification-attributes \ create_dns_record "TXT" "_amazonses.${DOMAIN_NAME}" "${VERIFICATION_TOKEN}" "false"
--identities ${DOMAIN_NAME} \ fi
--region ${AWS_REGION} \
--query "VerificationAttributes.\"${DOMAIN_NAME}\".VerificationToken" \
--output text)
# DKIM-Einträge anlegen # ==========================================
echo "DKIM-Einträge anlegen bei Cloudflare..." # 2. MX Records (AWS SES Ingest)
for TOKEN in ${DKIM_TOKENS}; do # ==========================================
create_dns_record "CNAME" "${TOKEN}._domainkey.${DOMAIN_NAME}" "${TOKEN}.dkim.amazonses.com" "false" 3600 echo "--- MX Records (AWS SES) ---"
done # Hier leiten wir eingehende Mails an Amazon S3/SQS Pipeline
# Domain-Verifizierungs-TXT-Eintrag anlegen
echo "Domain-Verifizierungs-TXT-Eintrag anlegen bei Cloudflare..."
create_dns_record "TXT" "_amazonses.${DOMAIN_NAME}" "${VERIFICATION_TOKEN}" "false" 3600
# MX-Einträge anlegen
echo "MX-Einträge anlegen bei Cloudflare..."
create_dns_record "MX" "${DOMAIN_NAME}" "inbound-smtp.${AWS_REGION}.amazonaws.com" "false" 3600 10 create_dns_record "MX" "${DOMAIN_NAME}" "inbound-smtp.${AWS_REGION}.amazonaws.com" "false" 3600 10
create_dns_record "MX" "mail.${DOMAIN_NAME}" "feedback-smtp.${AWS_REGION}.amazonses.com" "false" 3600 10
# SPF-Eintrag anlegen
echo "SPF-Eintrag anlegen bei Cloudflare..."
create_dns_record "TXT" "mail.${DOMAIN_NAME}" "v=spf1 include:amazonses.com ~all" "false" 3600
# DMARC-Eintrag anlegen # ==========================================
echo "DMARC-Eintrag anlegen bei Cloudflare..." # 3. Autodiscover & Caddy (Client Access)
create_dns_record "TXT" "_dmarc.${DOMAIN_NAME}" "v=DMARC1; p=none; pct=100; rua=mailto:postmaster@${DOMAIN_NAME}" "false" 3600 # ==========================================
echo "--- Autodiscover & Caddy Konfiguration ---"
echo "DNS-Einrichtung abgeschlossen." # A-Records: Autodiscover Domains zeigen auf deinen Caddy
echo "Es kann bis zu 72 Stunden dauern, bis AWS SES die Domain verifiziert hat." create_dns_record "A" "autodiscover.${DOMAIN_NAME}" "$CADDY_SERVER_IP" "false"
create_dns_record "A" "autoconfig.${DOMAIN_NAME}" "$CADDY_SERVER_IP" "false"
# ==========================================
# 4. SPF & DMARC
# ==========================================
echo "--- E-Mail Sicherheit (SPF & DMARC) ---"
# SPF: Nur Amazon SES erlaubt (Versand läuft darüber)
create_dns_record "TXT" "${DOMAIN_NAME}" "v=spf1 include:amazonses.com ~all" "false"
create_dns_record "TXT" "mail.${DOMAIN_NAME}" "v=spf1 include:amazonses.com ~all" "false"
# DMARC: Standard Policy
create_dns_record "TXT" "_dmarc.${DOMAIN_NAME}" "v=DMARC1; p=none; pct=100; rua=mailto:postmaster@${DOMAIN_NAME}" "false"
echo "Fertig. Konfiguration für $DOMAIN_NAME abgeschlossen."

View File

@@ -0,0 +1,329 @@
#!/bin/bash
# cloudflareMigrationDns.sh
# Sets DNS records for an Amazon SES migration via the Cloudflare API.
# Supports: DKIM, SPF (merged into an existing record), DMARC, MX, Autodiscover.
# Also sets mail/imap/smtp/pop subdomains for domain-specific mailserver access.
#
# MIGRATION FLAGS:
# SKIP_CLIENT_DNS=true → skip section 8 (imap/smtp/pop/webmail) + 10 (SRV)
# use case: client subdomains stay with the old provider
# SKIP_DMARC=true → skip section 7 (DMARC)
# use case: leave an existing DMARC record untouched
#
# Typical migration flow:
# Phase 0 (preparation): SKIP_CLIENT_DNS=true SKIP_DMARC=true → SES + SPF only
# Phase 1 (MX cutover): switch MX manually
# Phase 2 (client switch): run without SKIP flags → set all records
set -e
# --- CONFIGURATION (all overridable via environment) ---
AWS_REGION=${AWS_REGION:-"us-east-2"}
DRY_RUN=${DRY_RUN:-"false"}
# Migration flags (see header for semantics)
SKIP_CLIENT_DNS=${SKIP_CLIENT_DNS:-"false"}
SKIP_DMARC=${SKIP_DMARC:-"false"}
# Mailserver IP - REQUIRED unless a CNAME chain to an external target is desired
MAIL_SERVER_IP=${MAIL_SERVER_IP:-""}
# Target host for mail clients. Default: mail.<customer-domain>
TARGET_MAIL_SERVER=${TARGET_MAIL_SERVER:-"mail.${DOMAIN_NAME}"}
# --- PRECONDITION CHECKS: required env vars and CLI tools ---
if [ -z "$DOMAIN_NAME" ]; then echo "❌ Fehler: DOMAIN_NAME fehlt."; exit 1; fi
if [ -z "$CF_API_TOKEN" ]; then echo "❌ Fehler: CF_API_TOKEN fehlt."; exit 1; fi
if ! command -v jq &> /dev/null; then echo "❌ Fehler: 'jq' fehlt."; exit 1; fi
if ! command -v aws &> /dev/null; then echo "❌ Fehler: 'aws' CLI fehlt."; exit 1; fi
if [ -z "$MAIL_SERVER_IP" ] && [ "$TARGET_MAIL_SERVER" == "mail.$DOMAIN_NAME" ]; then
echo "⚠️ WARNUNG: MAIL_SERVER_IP ist nicht gesetzt!"
echo " mail.$DOMAIN_NAME braucht einen A-Record."
echo " Setze: export MAIL_SERVER_IP=<deine-server-ip>"
# Deliberately no exit here - section 8 is simply skipped later if the IP is missing
fi
echo "============================================================"
echo " 🛡️ DNS Migration Setup für: $DOMAIN_NAME"
echo " 🌍 Region: $AWS_REGION"
echo " 📬 Mail-Server Target: $TARGET_MAIL_SERVER"
[ -n "$MAIL_SERVER_IP" ] && echo " 🖥️ Server IP: $MAIL_SERVER_IP"
[ "$DRY_RUN" = "true" ] && echo " ⚠️ DRY RUN MODE - Keine Änderungen!"
[ "$SKIP_CLIENT_DNS" = "true" ] && echo " ⏭️ SKIP: Client-Subdomains (imap/smtp/pop/webmail/SRV)"
[ "$SKIP_DMARC" = "true" ] && echo " ⏭️ SKIP: DMARC Record"
echo "============================================================"
# 1. FETCH ZONE ID (required by every Cloudflare record call below)
echo "🔍 Suche Cloudflare Zone ID..."
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN_NAME" \
-H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" | jq -r '.result[0].id')
if [ "$ZONE_ID" == "null" ] || [ -z "$ZONE_ID" ]; then
echo "❌ Zone nicht gefunden."
exit 1
fi
echo " ✅ Zone ID: $ZONE_ID"
# ------------------------------------------------------------------
# FUNCTION: ensure_record
# Idempotent create-or-update of one DNS record via the Cloudflare API.
#   $1 type     - record type (A, CNAME, MX, TXT, ...)
#   $2 name     - fully qualified record name
#   $3 content  - record content
#   $4 proxied  - Cloudflare proxy flag (JSON bool, default false)
#   $5 priority - MX priority (only used when type is MX)
# Honors DRY_RUN=true (prints the intended action, changes nothing).
# Reads ZONE_ID, CF_API_TOKEN, DOMAIN_NAME from the surrounding script.
# ------------------------------------------------------------------
ensure_record() {
local type=$1
local name=$2
local content=$3
local proxied=${4:-false}
local priority=$5 # Optional, MX only
echo " ⚙️ Prüfe $type $name..."
# Look up any existing record with this type/name
local search_res=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?type=$type&name=$name" \
-H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json")
local rec_id
local rec_content
if [ "$type" == "TXT" ] && [ "$name" == "$DOMAIN_NAME" ] && [[ "$content" == v=spf1* ]]; then
# Special case root-domain SPF: filter specifically for the SPF entry
# so that e.g. a Google site-verification TXT is not overwritten.
rec_id=$(echo "$search_res" | jq -r '.result[] | select(.content | contains("v=spf1")) | .id' | head -n 1)
rec_content=$(echo "$search_res" | jq -r '.result[] | select(.content | contains("v=spf1")) | .content' | head -n 1)
else
# Default behavior for all other records (A, CNAME, MX, etc.)
rec_id=$(echo "$search_res" | jq -r '.result[0].id')
rec_content=$(echo "$search_res" | jq -r '.result[0].content')
fi
# Fallback so the comparisons below work even when jq produced nothing
[ -z "$rec_id" ] && rec_id="null"
[ -z "$rec_content" ] && rec_content="null"
# Build the request payload; MX additionally carries a numeric priority
if [ "$type" == "MX" ]; then
json_data=$(jq -n --arg t "$type" --arg n "$name" --arg c "$content" --argjson p "$proxied" --argjson prio "$priority" \
'{type: $t, name: $n, content: $c, ttl: 3600, proxied: $p, priority: $prio}')
else
json_data=$(jq -n --arg t "$type" --arg n "$name" --arg c "$content" --argjson p "$proxied" \
'{type: $t, name: $n, content: $c, ttl: 3600, proxied: $p}')
fi
if [ "$rec_id" == "null" ]; then
# No existing record → create it
if [ "$DRY_RUN" = "true" ]; then
echo " [DRY] Würde ERSTELLEN: $content"
else
res=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
-H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" --data "$json_data")
if [ "$(echo $res | jq -r .success)" == "true" ]; then
echo " ✅ Erstellt."
else
echo " ❌ Fehler beim Erstellen: $(echo $res | jq -r '.errors[0].message')"
fi
fi
else
if [ "$rec_content" == "$content" ]; then
echo " 🆗 Identisch. Überspringe."
else
# Migration safety: never auto-change a differing MX on the root domain
if [ "$type" == "MX" ] && [ "$name" == "$DOMAIN_NAME" ]; then
echo " ⛔ Root-MX existiert aber ist anders: $rec_content"
echo " → Wird NICHT automatisch geändert (Migrations-Schutz)"
return
fi
if [ "$DRY_RUN" = "true" ]; then
echo " [DRY] Würde UPDATEN: '$rec_content' → '$content'"
else
res=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$rec_id" \
-H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" --data "$json_data")
if [ "$(echo $res | jq -r .success)" == "true" ]; then
echo " ✅ Aktualisiert."
else
echo " ❌ Fehler beim Updaten: $(echo $res | jq -r '.errors[0].message')"
fi
fi
fi
fi
}
# ------------------------------------------------------------------
# STEP 1: MAIL FROM domain (read from SES; empty → dependent steps skipped)
# ------------------------------------------------------------------
echo ""
echo "--- 1. MAIL FROM Domain ---"
MAIL_FROM_DOMAIN=$(aws sesv2 get-email-identity \
--email-identity "$DOMAIN_NAME" \
--region "$AWS_REGION" \
--query 'MailFromAttributes.MailFromDomain' \
--output text 2>/dev/null || echo "NONE")
if [ "$MAIL_FROM_DOMAIN" == "NONE" ] || [ "$MAIL_FROM_DOMAIN" == "None" ] || [ -z "$MAIL_FROM_DOMAIN" ]; then
echo " Keine MAIL FROM Domain in SES konfiguriert."
echo " → Überspringe MAIL FROM DNS Setup."
MAIL_FROM_DOMAIN=""
fi
# ------------------------------------------------------------------
# STEP 2: DKIM records (one CNAME per SES DKIM token)
# ------------------------------------------------------------------
echo ""
echo "--- 2. DKIM Records ---"
DKIM_TOKENS=$(aws sesv2 get-email-identity \
--email-identity "$DOMAIN_NAME" \
--region "$AWS_REGION" \
--query 'DkimAttributes.Tokens' \
--output text 2>/dev/null || echo "")
if [ -n "$DKIM_TOKENS" ] && [ "$DKIM_TOKENS" != "None" ]; then
for TOKEN in $DKIM_TOKENS; do
ensure_record "CNAME" "${TOKEN}._domainkey.${DOMAIN_NAME}" "${TOKEN}.dkim.amazonses.com" false
done
else
echo " ⚠️ Keine DKIM Tokens gefunden. SES Identity angelegt?"
fi
# ------------------------------------------------------------------
# STEP 3: SES domain verification TXT record
# ------------------------------------------------------------------
echo ""
echo "--- 3. SES Verification TXT ---"
VERIFICATION_TOKEN=$(aws ses get-identity-verification-attributes \
--identities "$DOMAIN_NAME" \
--region "$AWS_REGION" \
--query "VerificationAttributes.\"${DOMAIN_NAME}\".VerificationToken" \
--output text 2>/dev/null || echo "")
if [ -n "$VERIFICATION_TOKEN" ] && [ "$VERIFICATION_TOKEN" != "None" ]; then
ensure_record "TXT" "_amazonses.${DOMAIN_NAME}" "$VERIFICATION_TOKEN" false
else
echo " ⚠️ Kein Verification Token. SES Identity angelegt?"
fi
# ------------------------------------------------------------------
# STEP 4: MAIL FROM subdomain (MX + SPF)
# ------------------------------------------------------------------
echo ""
echo "--- 4. MAIL FROM Subdomain (${MAIL_FROM_DOMAIN:-'nicht konfiguriert'}) ---"
if [ -n "$MAIL_FROM_DOMAIN" ]; then
# Abort on a CNAME conflict: MX + TXT cannot coexist with a CNAME on the same name
CNAME_CHECK=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?type=CNAME&name=$MAIL_FROM_DOMAIN" \
-H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" | jq -r '.result[0].content')
if [ "$CNAME_CHECK" != "null" ] && [ -n "$CNAME_CHECK" ]; then
echo " ⛔ CNAME-Konflikt! $MAIL_FROM_DOMAIN hat CNAME → $CNAME_CHECK"
echo " MX + TXT können nicht neben CNAME existieren."
echo " → awsses.sh mit anderem MAIL_FROM_SUBDOMAIN erneut ausführen"
exit 1
fi
ensure_record "MX" "$MAIL_FROM_DOMAIN" "feedback-smtp.${AWS_REGION}.amazonses.com" false 10
ensure_record "TXT" "$MAIL_FROM_DOMAIN" "v=spf1 include:amazonses.com ~all" false
else
echo " Übersprungen (keine MAIL FROM Domain konfiguriert)."
fi
# ------------------------------------------------------------------
# STEP 5: Root domain SPF (merged with the old provider's record)
# ------------------------------------------------------------------
echo ""
echo "--- 5. Root Domain SPF ---"
# Read the current SPF record.
# Cloudflare sometimes returns TXT content wrapped in quotes, so fetch
# all TXT records first and then filter.
CURRENT_SPF=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?type=TXT&name=$DOMAIN_NAME" \
-H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" \
| jq -r '[.result[] | select(.content | gsub("^\"|\"$";"") | startswith("v=spf1"))][0].content // ""')
# Strip any surrounding quotes immediately
CURRENT_SPF=$(echo "$CURRENT_SPF" | tr -d '"')
if [ -n "$CURRENT_SPF" ]; then
echo " 📋 Aktueller SPF: $CURRENT_SPF"
# Check whether amazonses.com is already included
if echo "$CURRENT_SPF" | grep -q "include:amazonses.com"; then
echo " 🆗 SPF enthält bereits include:amazonses.com"
else
# Insert amazonses.com right after v=spf1
NEW_SPF=$(echo "$CURRENT_SPF" | sed 's/v=spf1 /v=spf1 include:amazonses.com /')
# Upgrade ?all → ~all
NEW_SPF=$(echo "$NEW_SPF" | sed 's/?all/~all/')
echo " 📝 Neuer SPF: $NEW_SPF"
ensure_record "TXT" "$DOMAIN_NAME" "$NEW_SPF" false
fi
else
echo " Kein SPF Record vorhanden. Erstelle neuen."
ensure_record "TXT" "$DOMAIN_NAME" "v=spf1 include:amazonses.com ~all" false
fi
# ------------------------------------------------------------------
# STEP 6: Root domain MX (informational only, never changed here)
# ------------------------------------------------------------------
echo ""
echo "--- 6. Root Domain MX (nur Info, wird nicht geändert) ---"
CURRENT_MX=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?type=MX&name=$DOMAIN_NAME" \
-H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" \
| jq -r '.result[0].content // "keiner"')
echo " MX vorhanden: $CURRENT_MX (wird nicht geändert)"
# ------------------------------------------------------------------
# STEP 7: DMARC (optional, skipped via SKIP_DMARC=true)
# ------------------------------------------------------------------
echo ""
echo "--- 7. DMARC ---"
if [ "$SKIP_DMARC" = "true" ]; then
echo " ⏭️ Übersprungen (SKIP_DMARC=true)"
echo " Bestehender DMARC-Record bleibt unverändert."
else
ensure_record "TXT" "_dmarc.$DOMAIN_NAME" "v=DMARC1; p=none; rua=mailto:postmaster@$DOMAIN_NAME" false
fi
# ------------------------------------------------------------------
# STEP 8: Mail-client subdomains (A + CNAME), skipped via SKIP_CLIENT_DNS=true
# ------------------------------------------------------------------
echo ""
echo "--- 8. Mailclient Subdomains (A + CNAME) ---"
if [ "$SKIP_CLIENT_DNS" = "true" ]; then
echo " ⏭️ Übersprungen (SKIP_CLIENT_DNS=true)"
echo " imap/smtp/pop/webmail bleiben beim alten Provider."
echo " Setze SKIP_CLIENT_DNS=false nach MX-Cutover + Client-Umstellung."
else
if [ -n "$MAIL_SERVER_IP" ]; then
# A record: mail.<domain> points directly at the server IP
ensure_record "A" "mail.$DOMAIN_NAME" "$MAIL_SERVER_IP" false
else
# CNAME to the external target host (only when it actually differs)
if [ "$TARGET_MAIL_SERVER" != "mail.$DOMAIN_NAME" ]; then
ensure_record "CNAME" "mail.$DOMAIN_NAME" "$TARGET_MAIL_SERVER" false
fi
fi
# imap, smtp, pop, webmail → CNAME onto mail.<domain>
ensure_record "CNAME" "imap.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
ensure_record "CNAME" "smtp.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
ensure_record "CNAME" "pop.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
ensure_record "CNAME" "webmail.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
fi
# ------------------------------------------------------------------
# STEP 9: Autodiscover / Autoconfig (client auto-configuration endpoints)
# ------------------------------------------------------------------
echo ""
echo "--- 9. Autodiscover / Autoconfig ---"
ensure_record "CNAME" "autodiscover.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
ensure_record "CNAME" "autoconfig.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
# --- Final summary for the operator ---
echo ""
echo "============================================================"
echo "✅ Fertig für Domain: $DOMAIN_NAME"
if [ "$SKIP_CLIENT_DNS" = "true" ]; then
echo ""
echo " ⚠️ Client-Subdomains wurden NICHT geändert."
echo " Nach MX-Cutover + Worker-Validierung erneut ausführen mit:"
echo " SKIP_CLIENT_DNS=false SKIP_DMARC=false ./cloudflareMigrationDns.sh"
fi
echo ""
echo " Mailclient-Konfiguration für Kunden:"
echo " IMAP: imap.$DOMAIN_NAME Port 993 (SSL)"
echo " SMTP: smtp.$DOMAIN_NAME Port 587 (STARTTLS) oder 465 (SSL)"
echo " POP3: pop.$DOMAIN_NAME Port 995 (SSL)"
echo " Webmail: webmail.$DOMAIN_NAME"
echo "============================================================"

View File

@@ -1,55 +1,58 @@
#!/bin/bash #!/bin/bash
# create-queue.sh # create-queue.sh (v2 — mit SNS Fan-Out + Standby Queue)
# Usage: DOMAIN=andreasknuth.de ./create-queue.sh # Usage: DOMAIN=andreasknuth.de ./create-queue.sh
#
# Erstellt pro Domain:
# - Primary Queue + DLQ (wie bisher, für Contabo)
# - Standby Queue + DLQ (NEU, für Office-VM)
# - SNS Topic (NEU, Fan-Out)
# - 2 SNS Subscriptions (NEU, Topic → Primary + Standby)
set -e set -e
AWS_REGION="us-east-2" AWS_REGION="us-east-2"
# Domain aus Environment Variable
if [ -z "$DOMAIN" ]; then if [ -z "$DOMAIN" ]; then
echo "Error: DOMAIN environment variable not set" echo "Error: DOMAIN environment variable not set"
echo "Usage: DOMAIN=andreasknuth.de $0" echo "Usage: DOMAIN=andreasknuth.de $0"
exit 1 exit 1
fi fi
QUEUE_NAME="${DOMAIN//./-}-queue" DOMAIN_SLUG="${DOMAIN//./-}"
QUEUE_NAME="${DOMAIN_SLUG}-queue"
DLQ_NAME="${QUEUE_NAME}-dlq" DLQ_NAME="${QUEUE_NAME}-dlq"
STANDBY_QUEUE_NAME="${DOMAIN_SLUG}-standby-queue"
STANDBY_DLQ_NAME="${STANDBY_QUEUE_NAME}-dlq"
TOPIC_NAME="${DOMAIN_SLUG}-topic"
ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
echo "========================================" echo "========================================"
echo "Creating SQS Queue for Email Delivery" echo "Creating SQS + SNS for Email Delivery"
echo "========================================" echo "========================================"
echo "" echo ""
echo "📧 Domain: $DOMAIN" echo "📧 Domain: $DOMAIN"
echo " Region: $AWS_REGION" echo " Region: $AWS_REGION"
echo " Account: $ACCOUNT_ID"
echo "" echo ""
# Dead Letter Queue erstellen # ============================================================
# 1. Primary DLQ + Queue (wie bisher)
# ============================================================
echo "━━━ Primary Queue (Contabo) ━━━"
echo "Creating DLQ: $DLQ_NAME" echo "Creating DLQ: $DLQ_NAME"
DLQ_URL=$(aws sqs create-queue \ DLQ_URL=$(aws sqs create-queue \
--queue-name "${DLQ_NAME}" \ --queue-name "${DLQ_NAME}" \
--region "${AWS_REGION}" \ --region "${AWS_REGION}" \
--attributes '{ --attributes '{"MessageRetentionPeriod": "1209600"}' \
"MessageRetentionPeriod": "1209600" --query 'QueueUrl' --output text 2>/dev/null \
}' \ || aws sqs get-queue-url --queue-name "${DLQ_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
--query 'QueueUrl' \ DLQ_ARN=$(aws sqs get-queue-attributes --queue-url "${DLQ_URL}" --region "${AWS_REGION}" \
--output text 2>/dev/null || aws sqs get-queue-url --queue-name "${DLQ_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text) --attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ DLQ: ${DLQ_ARN}"
echo " ✓ DLQ URL: ${DLQ_URL}" echo "Creating Queue: $QUEUE_NAME"
# DLQ ARN ermitteln
DLQ_ARN=$(aws sqs get-queue-attributes \
--queue-url "${DLQ_URL}" \
--region "${AWS_REGION}" \
--attribute-names QueueArn \
--query 'Attributes.QueueArn' \
--output text)
echo " ✓ DLQ ARN: ${DLQ_ARN}"
echo ""
# Haupt-Queue erstellen mit Redrive Policy
echo "Creating Main Queue: $QUEUE_NAME"
QUEUE_URL=$(aws sqs create-queue \ QUEUE_URL=$(aws sqs create-queue \
--queue-name "${QUEUE_NAME}" \ --queue-name "${QUEUE_NAME}" \
--region "${AWS_REGION}" \ --region "${AWS_REGION}" \
@@ -59,18 +62,146 @@ QUEUE_URL=$(aws sqs create-queue \
\"ReceiveMessageWaitTimeSeconds\": \"20\", \"ReceiveMessageWaitTimeSeconds\": \"20\",
\"RedrivePolicy\": \"{\\\"deadLetterTargetArn\\\":\\\"${DLQ_ARN}\\\",\\\"maxReceiveCount\\\":\\\"3\\\"}\" \"RedrivePolicy\": \"{\\\"deadLetterTargetArn\\\":\\\"${DLQ_ARN}\\\",\\\"maxReceiveCount\\\":\\\"3\\\"}\"
}" \ }" \
--query 'QueueUrl' \ --query 'QueueUrl' --output text 2>/dev/null \
--output text 2>/dev/null || aws sqs get-queue-url --queue-name "${QUEUE_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text) || aws sqs get-queue-url --queue-name "${QUEUE_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
QUEUE_ARN=$(aws sqs get-queue-attributes --queue-url "${QUEUE_URL}" --region "${AWS_REGION}" \
--attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ Queue: ${QUEUE_ARN}"
echo ""
echo " ✓ Queue URL: ${QUEUE_URL}" # ============================================================
# 2. Standby DLQ + Queue (NEU)
# ============================================================
echo "━━━ Standby Queue (Office-VM) ━━━"
echo "Creating Standby DLQ: $STANDBY_DLQ_NAME"
STANDBY_DLQ_URL=$(aws sqs create-queue \
--queue-name "${STANDBY_DLQ_NAME}" \
--region "${AWS_REGION}" \
--attributes '{"MessageRetentionPeriod": "1209600"}' \
--query 'QueueUrl' --output text 2>/dev/null \
|| aws sqs get-queue-url --queue-name "${STANDBY_DLQ_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
STANDBY_DLQ_ARN=$(aws sqs get-queue-attributes --queue-url "${STANDBY_DLQ_URL}" --region "${AWS_REGION}" \
--attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ Standby DLQ: ${STANDBY_DLQ_ARN}"
echo "Creating Standby Queue: $STANDBY_QUEUE_NAME"
STANDBY_QUEUE_URL=$(aws sqs create-queue \
--queue-name "${STANDBY_QUEUE_NAME}" \
--region "${AWS_REGION}" \
--attributes "{
\"VisibilityTimeout\": \"300\",
\"MessageRetentionPeriod\": \"86400\",
\"ReceiveMessageWaitTimeSeconds\": \"20\",
\"RedrivePolicy\": \"{\\\"deadLetterTargetArn\\\":\\\"${STANDBY_DLQ_ARN}\\\",\\\"maxReceiveCount\\\":\\\"3\\\"}\"
}" \
--query 'QueueUrl' --output text 2>/dev/null \
|| aws sqs get-queue-url --queue-name "${STANDBY_QUEUE_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
STANDBY_QUEUE_ARN=$(aws sqs get-queue-attributes --queue-url "${STANDBY_QUEUE_URL}" --region "${AWS_REGION}" \
--attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ Standby Queue: ${STANDBY_QUEUE_ARN}"
echo "" echo ""
# ============================================================
# 3. SNS Topic (NEU)
# ============================================================
echo "━━━ SNS Topic (Fan-Out) ━━━"
echo "Creating Topic: $TOPIC_NAME"
TOPIC_ARN=$(aws sns create-topic \
--name "${TOPIC_NAME}" \
--region "${AWS_REGION}" \
--query 'TopicArn' --output text)
echo " ✓ Topic: ${TOPIC_ARN}"
echo ""
# ============================================================
# 4. SNS → SQS Subscriptions (NEU)
# ============================================================
echo "━━━ Subscriptions ━━━"
# SNS braucht Berechtigung, in die SQS Queues zu schreiben
# Policy für Primary Queue
POLICY_PRIMARY="{
\"Version\": \"2012-10-17\",
\"Statement\": [{
\"Effect\": \"Allow\",
\"Principal\": {\"Service\": \"sns.amazonaws.com\"},
\"Action\": \"sqs:SendMessage\",
\"Resource\": \"${QUEUE_ARN}\",
\"Condition\": {\"ArnEquals\": {\"aws:SourceArn\": \"${TOPIC_ARN}\"}}
}]
}"
aws sqs set-queue-attributes \
--queue-url "${QUEUE_URL}" \
--region "${AWS_REGION}" \
--attributes "{\"Policy\": $(echo "$POLICY_PRIMARY" | jq -c '.' | jq -Rs '.')}" \
> /dev/null
echo " ✓ Primary Queue Policy gesetzt"
# Policy für Standby Queue
POLICY_STANDBY="{
\"Version\": \"2012-10-17\",
\"Statement\": [{
\"Effect\": \"Allow\",
\"Principal\": {\"Service\": \"sns.amazonaws.com\"},
\"Action\": \"sqs:SendMessage\",
\"Resource\": \"${STANDBY_QUEUE_ARN}\",
\"Condition\": {\"ArnEquals\": {\"aws:SourceArn\": \"${TOPIC_ARN}\"}}
}]
}"
aws sqs set-queue-attributes \
--queue-url "${STANDBY_QUEUE_URL}" \
--region "${AWS_REGION}" \
--attributes "{\"Policy\": $(echo "$POLICY_STANDBY" | jq -c '.' | jq -Rs '.')}" \
> /dev/null
echo " ✓ Standby Queue Policy gesetzt"
# Subscription: Topic → Primary Queue
SUB_PRIMARY=$(aws sns subscribe \
--topic-arn "${TOPIC_ARN}" \
--protocol sqs \
--notification-endpoint "${QUEUE_ARN}" \
--region "${AWS_REGION}" \
--attributes '{"RawMessageDelivery": "true"}' \
--query 'SubscriptionArn' --output text)
echo " ✓ Subscription Primary: ${SUB_PRIMARY}"
# Subscription: Topic → Standby Queue
SUB_STANDBY=$(aws sns subscribe \
--topic-arn "${TOPIC_ARN}" \
--protocol sqs \
--notification-endpoint "${STANDBY_QUEUE_ARN}" \
--region "${AWS_REGION}" \
--attributes '{"RawMessageDelivery": "true"}' \
--query 'SubscriptionArn' --output text)
echo " ✓ Subscription Standby: ${SUB_STANDBY}"
echo ""
# ============================================================
# Zusammenfassung
# ============================================================
echo "========================================" echo "========================================"
echo "✅ Queue created successfully!" echo "✅ Setup complete for $DOMAIN"
echo "========================================" echo "========================================"
echo "" echo ""
echo "Configuration:" echo "Primary (Contabo):"
echo " Domain: $DOMAIN" echo " Queue: $QUEUE_URL"
echo " Queue: $QUEUE_NAME" echo " DLQ: $DLQ_URL"
echo " Queue URL: $QUEUE_URL" echo ""
echo " DLQ: $DLQ_NAME" echo "Standby (Office-VM):"
echo " Region: $AWS_REGION" echo " Queue: $STANDBY_QUEUE_URL"
echo " DLQ: $STANDBY_DLQ_URL"
echo ""
echo "SNS Fan-Out:"
echo " Topic: $TOPIC_ARN"
echo " → Primary: $SUB_PRIMARY"
echo " → Standby: $SUB_STANDBY"
echo ""
echo "⚠️ Nächste Schritte:"
echo " 1. Lambda-Funktion updaten: sns.publish() statt sqs.send_message()"
echo " 2. Lambda IAM Role: sns:Publish Berechtigung hinzufügen"
echo " 3. Worker auf Office-VM: QUEUE_SUFFIX=-standby-queue konfigurieren"
echo " 4. Worker auf Office-VM: STANDBY_MODE=true setzen"

View File

@@ -14,7 +14,7 @@ EMAIL=$2
PASSWORD=$3 PASSWORD=$3
# Container Name deines DMS # Container Name deines DMS
DMS_CONTAINER="mailserver-new" DMS_CONTAINER="mailserver"
AWS_REGION="us-east-2" AWS_REGION="us-east-2"
# Hilfsfunktion: Usage # Hilfsfunktion: Usage

111
basic_setup/requeue_email.sh Executable file
View File

@@ -0,0 +1,111 @@
#!/bin/bash
# requeue_email.sh
# Takes an email that already exists in S3 and places a synthetic SES
# receipt event into the domain's SQS queue, so the worker reprocesses
# (delivers/forwards) the message again.
#
# Usage:   ./requeue_email.sh <domain> <recipient> <s3-message-id>
set -e
# --- Parameters ---
DOMAIN="$1"
RECIPIENT="$2"
MESSAGE_ID="$3" # This is the S3 key (the long string from the worker log)
AWS_REGION=${AWS_REGION:-"us-east-2"}
if [ -z "$DOMAIN" ] || [ -z "$RECIPIENT" ] || [ -z "$MESSAGE_ID" ]; then
echo "Usage: $0 <domain> <recipient> <s3-message-id>"
echo "Example: $0 buddelectric.net Tyler@buddelectric.net cn8j6j970atkh7n3fstdhgqr9imgrivegnm70jg1"
exit 1
fi
# --- Derived names (same "dots → dashes" slug convention as create-queue.sh) ---
BUCKET_NAME="${DOMAIN//./-}-emails"
QUEUE_NAME="${DOMAIN//./-}-queue"
TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
echo "============================================================"
echo " Requeue S3 Email"
echo "============================================================"
echo " Domain: $DOMAIN"
echo " Recipient: $RECIPIENT"
echo " Bucket: $BUCKET_NAME"
echo " Key (ID): $MESSAGE_ID"
echo ""
# --- Step 1: verify the S3 object exists ---
# NOTE(review): `aws s3 ls` matches by prefix, not exact key — a truncated
# MESSAGE_ID would still pass here but fail in the worker. Consider
# `aws s3api head-object` for an exact-key check.
echo "[1/2] Checking S3 object: s3://${BUCKET_NAME}/${MESSAGE_ID} ..."
if aws s3 ls "s3://${BUCKET_NAME}/${MESSAGE_ID}" --region "$AWS_REGION" > /dev/null 2>&1; then
echo " ✓ Object found."
else
echo " ❌ ERROR: Object s3://${BUCKET_NAME}/${MESSAGE_ID} does not exist!"
exit 1
fi
# --- Step 2: place the message into SQS ---
echo "[2/2] Placing message in SQS queue..."
# `|| true` so that under `set -e` a missing queue reaches the friendly
# error message below instead of aborting with a raw AWS CLI error.
QUEUE_URL=$(aws sqs get-queue-url \
--queue-name "$QUEUE_NAME" \
--region "$AWS_REGION" \
--output text \
--query 'QueueUrl' 2>/dev/null || true)
if [ -z "$QUEUE_URL" ] || [ "$QUEUE_URL" == "None" ]; then
echo " ❌ ERROR: Queue $QUEUE_NAME not found!"
exit 1
fi
# SES event payload (simulates the Lambda output).
# "requeue-admin" is used as the source address because the worker parses
# the real sender from the email headers in the S3 file anyway.
SES_DATA=$(jq -n \
--arg msgId "$MESSAGE_ID" \
--arg source "requeue-admin@${DOMAIN}" \
--arg recipient "$RECIPIENT" \
--arg ts "$TIMESTAMP" \
--arg bucket "$BUCKET_NAME" \
'{
  mail: {
    messageId: $msgId,
    source: $source,
    timestamp: $ts,
    destination: [$recipient]
  },
  receipt: {
    recipients: [$recipient],
    timestamp: $ts,
    action: {
      type: "S3",
      bucketName: $bucket,
      objectKey: $msgId
    }
  }
}')
# Fake SNS wrapper (same format as the Lambda shim produces)
SQS_BODY=$(jq -n \
--arg sesData "$SES_DATA" \
--arg ts "$TIMESTAMP" \
'{
  Type: "Notification",
  MessageId: "requeue-\(now | tostring)",
  TopicArn: "arn:aws:sns:ses-shim:global-topic",
  Subject: "Amazon SES Email Receipt Notification",
  Message: $sesData,
  Timestamp: $ts
}')
# Send the message
SQS_MSG_ID=$(aws sqs send-message \
--queue-url "$QUEUE_URL" \
--region "$AWS_REGION" \
--message-body "$SQS_BODY" \
--output text \
--query 'MessageId')
echo " ✓ Done (SQS MessageId: ${SQS_MSG_ID})"
echo ""
echo "============================================================"
echo " Email successfully requeued!"
echo " Worker should pick it up immediately."
echo "============================================================"

View File

@@ -1,38 +0,0 @@
#!/bin/bash
# setup_email_domain.sh - Wrapper that runs the three provisioning scripts
# (S3 bucket, SES configuration, IAM/SMTP credentials) in the correct order.
#
# Usage: ./setup_email_domain.sh domain.de [region]

# Guard clause: a domain argument is mandatory.
if [ -z "$1" ]; then
echo "Fehler: Keine Domain angegeben."
echo "Verwendung: ./setup_email_domain.sh domain.de [region]"
exit 1
fi

# Export the shared settings so the child scripts can read them.
export DOMAIN_NAME="$1"
export AWS_REGION="${2:-us-east-2}"

echo "=== AWS E-Mail-Infrastruktur für $DOMAIN_NAME einrichten ==="
echo "AWS-Region: $AWS_REGION"
echo

# Run the provisioning scripts one after another.
echo "1. S3-Bucket erstellen..."
./awss3.sh
echo

echo "2. SES-Konfiguration einrichten..."
# Bucket name: domain with dots replaced by dashes, plus "-emails" suffix.
export S3_BUCKET_NAME="${DOMAIN_NAME//./-}-emails"
./awsses.sh
echo

echo "3. IAM-Benutzer und SMTP-Zugangsdaten erstellen..."
./awsiam.sh
echo

echo "=== Setup abgeschlossen ==="
echo "Alle Schritte wurden abgeschlossen. Bitte überprüfen Sie die Ausgaben der einzelnen Skripte."
echo "Vergessen Sie nicht, die benötigten DNS-Einträge für Ihre Domain zu setzen, um die SES-Verifizierung abzuschließen."

View File

@@ -0,0 +1,168 @@
#!/bin/bash
# test_migration_email.sh - Places a test email into S3 + SQS
#
# Simulates the complete SES inbound flow: Mail goes to S3, metadata to SQS.
# The worker picks it up and processes it (Delivery or Forward).
#
# Usage:
#   ./test_migration_email.sh cielectrical.com carlosr@cielectrical.com
#   ./test_migration_email.sh buddelectric.net service@buddelectric.net
#
# Optional sender address:
#   ./test_migration_email.sh cielectrical.com carlosr@cielectrical.com sender@example.com
set -e

# --- Parameters ---
DOMAIN="$1"
RECIPIENT="$2"
FROM_ADDR="${3:-support@bayarea-cc.com}"
AWS_REGION=${AWS_REGION:-"us-east-2"}
if [ -z "$DOMAIN" ] || [ -z "$RECIPIENT" ]; then
    echo "Usage: $0 <domain> <recipient> [from-address]"
    echo "Example: $0 cielectrical.com carlosr@cielectrical.com"
    exit 1
fi

# --- Derived variables ---
# Per-domain naming convention: dots become dashes
# ("example.com" -> "example-com-emails" / "example-com-queue").
BUCKET_NAME=$(echo "$DOMAIN" | tr '.' '-')"-emails"
QUEUE_NAME=$(echo "$DOMAIN" | tr '.' '-')"-queue"
MESSAGE_ID="test-migration-$(date +%s)-$$"
TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
DATE_RFC2822=$(date -R)

echo "============================================================"
echo " Migration Test Email"
echo "============================================================"
echo " Domain: $DOMAIN"
echo " Recipient: $RECIPIENT"
echo " Sender: $FROM_ADDR"
echo " Bucket: $BUCKET_NAME"
echo " Queue: $QUEUE_NAME"
echo " Key: $MESSAGE_ID"
echo ""

# --- Step 1: Create RFC822 email ---
echo "[1/3] Creating test email..."
TMP_FILE=$(mktemp /tmp/test-mail-XXXXXX.eml)
# NOTE: the blank line after the headers is required by RFC 5322 - it
# separates the header block from the message body.
cat > "$TMP_FILE" << EOF
From: Migration Test <${FROM_ADDR}>
To: ${RECIPIENT}
Subject: Migration Test $(date '+%Y-%m-%d %H:%M:%S')
Date: ${DATE_RFC2822}
Message-ID: <${MESSAGE_ID}@test.email-srvr.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 7bit

Hello!
This is a test email to validate the email migration pipeline.
Sent: $(date)
Domain: ${DOMAIN}
Recipient: ${RECIPIENT}
Message-ID: ${MESSAGE_ID}
If you see this email in your inbox, the complete path is working:
S3 -> SQS -> Worker -> Forward/Delivery
--
Bay Area Affiliates - Migration Test
EOF
echo " Done ($(wc -c < "$TMP_FILE") bytes)"

# --- Step 2: Upload to S3 ---
echo "[2/3] Uploading to S3: s3://${BUCKET_NAME}/${MESSAGE_ID} ..."
aws s3 cp "$TMP_FILE" "s3://${BUCKET_NAME}/${MESSAGE_ID}" \
    --region "$AWS_REGION" \
    --quiet
echo " Done"

# --- Step 3: Place SQS message in fake-SNS format ---
echo "[3/3] Placing message in SQS queue..."
QUEUE_URL=$(aws sqs get-queue-url \
    --queue-name "$QUEUE_NAME" \
    --region "$AWS_REGION" \
    --output text \
    --query 'QueueUrl')
if [ -z "$QUEUE_URL" ]; then
    echo " ERROR: Queue $QUEUE_NAME not found!"
    rm -f "$TMP_FILE"
    exit 1
fi

# SES event payload (what the Lambda normally produces).
# FIX: bucketName must be the real bucket the mail was uploaded to
# (was hardcoded to "test"); the worker reads the object from here.
SES_DATA=$(jq -n \
    --arg msgId "$MESSAGE_ID" \
    --arg source "$FROM_ADDR" \
    --arg recipient "$RECIPIENT" \
    --arg bucket "$BUCKET_NAME" \
    --arg ts "$TIMESTAMP" \
    '{
    mail: {
    messageId: $msgId,
    source: $source,
    timestamp: $ts,
    destination: [$recipient]
    },
    receipt: {
    recipients: [$recipient],
    timestamp: $ts,
    action: {
    type: "S3",
    bucketName: $bucket,
    objectKey: $msgId
    }
    }
    }')
# Fake SNS wrapper (same format as ses_sns_shim_global.py)
SQS_BODY=$(jq -n \
    --arg sesData "$SES_DATA" \
    --arg ts "$TIMESTAMP" \
    '{
    Type: "Notification",
    MessageId: "test-\(now | tostring)",
    TopicArn: "arn:aws:sns:ses-shim:global-topic",
    Subject: "Amazon SES Email Receipt Notification",
    Message: $sesData,
    Timestamp: $ts
    }')
SQS_MSG_ID=$(aws sqs send-message \
    --queue-url "$QUEUE_URL" \
    --region "$AWS_REGION" \
    --message-body "$SQS_BODY" \
    --output text \
    --query 'MessageId')
echo " Done (SQS MessageId: ${SQS_MSG_ID})"

# --- Cleanup ---
rm -f "$TMP_FILE"
echo ""
echo "============================================================"
echo " Test email placed successfully!"
echo "============================================================"
echo ""
echo " Watch worker logs:"
echo " docker logs -f email-worker --tail 50"
echo ""
echo " Expected output:"
echo " Processing: ${MESSAGE_ID:0:20}... -> ${RECIPIENT}"
echo " Forwarded via legacy SMTP ... (if forward rule exists)"
echo " OR"
echo " Delivered to ${RECIPIENT} (if DMS mailbox exists)"
echo ""
echo " Check S3 object:"
echo " aws s3 ls s3://${BUCKET_NAME}/${MESSAGE_ID} --region ${AWS_REGION}"
echo "============================================================"

2
caddy/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
caddy-data/
caddy-config/

7
caddy/Caddyfile Normal file
View File

@@ -0,0 +1,7 @@
# Global options:
#   - ACME account email + Cloudflare DNS-01 challenge (enables wildcard certs)
#   - Let's Encrypt production CA
#   - debug logging enabled
{
    email {env.CLOUDFLARE_EMAIL}
    acme_dns cloudflare {env.CLOUDFLARE_API_TOKEN}
    acme_ca https://acme-v02.api.letsencrypt.org/directory
    debug
}
# Site blocks are generated by update-caddy-certs.sh into the mail_certs file.
import mail_certs

13
caddy/Dockerfile.caddy Normal file
View File

@@ -0,0 +1,13 @@
# Dockerfile.caddy
# Two-stage build of a custom Caddy image pinned to CADDY_VERSION with:
#   - caddy-dns/cloudflare       (DNS-01 ACME challenges via Cloudflare)
#   - caddyserver/replace-response (response body rewriting)
ARG CADDY_VERSION=2.9.1
FROM caddy:${CADDY_VERSION}-builder AS builder
# Build Caddy at exactly this version plus the plugins.
# NOTE(review): xcaddy version refs are normally git tags with a leading "v"
# (e.g. "v2.9.1") - confirm that the bare "2.9.1" resolves correctly here.
RUN xcaddy build ${CADDY_VERSION} \
--with github.com/caddy-dns/cloudflare \
--with github.com/caddyserver/replace-response
# Runtime stage: official image with the custom binary swapped in.
FROM caddy:${CADDY_VERSION}
COPY --from=builder /usr/bin/caddy /usr/bin/caddy
RUN mkdir -p /var/log/caddy

31
caddy/docker-compose.yml Normal file
View File

@@ -0,0 +1,31 @@
# docker-compose for the custom Caddy reverse proxy (TLS termination for
# webmail, autodiscover/autoconfig and the email-setup pages).
services:
  caddy:
    image: custom-caddy:2.9.1-rr1
    container_name: caddy
    build:
      context: .
      dockerfile: Dockerfile.caddy
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    extra_hosts:
      - 'host.docker.internal:host-gateway'
    networks:
      - mail_network
    volumes:
      - $PWD/Caddyfile:/etc/caddy/Caddyfile
      - $PWD/mail_certs:/etc/caddy/mail_certs
      # email_autodiscover removed - the snippet is now embedded in mail_certs
      # email.mobileconfig.html removed - its content is now inline in mail_certs
      - $PWD/email-setup:/var/www/email-setup
      - ./caddy-data:/data
      - ./caddy-config:/config
      - /home/aknuth/log/caddy:/var/log/caddy
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      - CLOUDFLARE_EMAIL=${CLOUDFLARE_EMAIL}
# External network shared with the mail stack (must already exist:
#   docker network create mail_network)
networks:
  mail_network:
    external: true

BIN
caddy/email-setup/logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 KiB

View File

@@ -0,0 +1,122 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Email Setup</title>
<script src="https://cdnjs.cloudflare.com/ajax/libs/qrcodejs/1.0.0/qrcode.min.js"></script>
<style>
body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif; background: #f2f2f7; display: flex; justify-content: center; align-items: center; min-height: 100vh; margin: 0; padding: 20px; box-sizing: border-box; }
.card { background: white; padding: 2.5rem; border-radius: 24px; box-shadow: 0 12px 30px rgba(0,0,0,0.1); width: 100%; max-width: 420px; text-align: center; transition: all 0.3s ease; }
.logo { width: 80px; height: 80px; margin-bottom: 1.5rem; }
h1 { margin: 0 0 1rem 0; color: #1a1a1a; font-size: 1.8rem; }
p { color: #666; line-height: 1.5; margin-bottom: 2rem; }
/* Input Section */
#input-section { transition: opacity 0.3s ease; }
input { width: 100%; padding: 16px; margin-bottom: 16px; border: 2px solid #eee; border-radius: 14px; font-size: 16px; box-sizing: border-box; transition: border-color 0.2s; outline: none; }
input:focus { border-color: #007AFF; }
button { width: 100%; padding: 16px; background: #007AFF; color: white; border: none; border-radius: 14px; font-size: 18px; font-weight: 600; cursor: pointer; transition: background 0.2s, transform 0.1s; }
button:hover { background: #0062cc; }
button:active { transform: scale(0.98); }
/* QR Section (initially hidden) */
#qr-section { display: none; opacity: 0; transition: opacity 0.5s ease; }
#qrcode { margin: 2rem auto; padding: 15px; background: white; border-radius: 16px; box-shadow: 0 4px 12px rgba(0,0,0,0.08); display: inline-block; }
#qrcode img { margin: auto; } /* Centers the generated QR code */
.hint { font-size: 0.9rem; color: #888; margin-top: 1.5rem; }
.hint strong { color: #333; }
.error { color: #d32f2f; background: #fde8e8; padding: 10px; border-radius: 8px; font-size: 0.9rem; display: none; margin-bottom: 16px; }
.back-btn { background: transparent; color: #007AFF; margin-top: 1rem; font-size: 16px; }
.back-btn:hover { background: #f0f8ff; }
</style>
</head>
<body>
<div class="card">
<img src="/logo.png" alt="Logo" class="logo">
<div id="input-section">
<h1>Email Setup</h1>
<p>Enter your email address to automatically configure your iPhone or iPad.</p>
<div id="error-msg" class="error">Please enter a valid email address.</div>
<input type="email" id="email" placeholder="name@company.com" required autocomplete="email">
<button onclick="generateQR()">Generate QR Code</button>
</div>
<div id="qr-section">
<h1>Scan me!</h1>
<p>Open the <strong>Camera app</strong> on your iPhone and point it at this code.</p>
<div id="qrcode"></div>
<p class="hint">
Tap the banner that appears at the top.<br>
Click <strong>"Allow"</strong> and then go to <strong>Settings</strong> to install the profile.
</p>
<button class="back-btn" onclick="resetForm()">Back</button>
</div>
</div>
<script>
const inputSection = document.getElementById('input-section');
const qrSection = document.getElementById('qr-section');
const emailInput = document.getElementById('email');
const errorMsg = document.getElementById('error-msg');
let qrcode = null;
// Validates the email, builds the autodiscover profile URL and shows it as a QR code.
function generateQR() {
    const email = emailInput.value.trim();
    if (!email || !email.includes('@') || email.split('@')[1].length < 3) {
        errorMsg.style.display = 'block';
        emailInput.focus();
        return;
    }
    errorMsg.style.display = 'none';
    const domain = email.split('@')[1];
    // The magic link. FIX: the email goes into a query string, so it must be
    // percent-encoded (addresses with "+" etc. would otherwise be mangled).
    const targetUrl = `https://autodiscover.${domain}/apple?email=${encodeURIComponent(email)}`;
    // Hide input, show QR
    inputSection.style.display = 'none';
    qrSection.style.display = 'block';
    setTimeout(() => qrSection.style.opacity = '1', 50);
    // Generate (or update) QR Code
    if (qrcode === null) {
        qrcode = new QRCode(document.getElementById("qrcode"), {
            text: targetUrl,
            width: 200,
            height: 200,
            colorDark : "#000000",
            colorLight : "#ffffff",
            correctLevel : QRCode.CorrectLevel.H
        });
    } else {
        qrcode.clear();
        qrcode.makeCode(targetUrl);
    }
}
// Fades the QR section out and returns to the (cleared) input form.
function resetForm() {
    qrSection.style.opacity = '0';
    setTimeout(() => {
        qrSection.style.display = 'none';
        inputSection.style.display = 'block';
        emailInput.value = '';
        emailInput.focus();
    }, 300);
}
emailInput.addEventListener("keypress", function(event) {
    if (event.key === "Enter") generateQR();
});
</script>
</body>
</html>

378
caddy/update-caddy-certs.sh Executable file
View File

@@ -0,0 +1,378 @@
#!/bin/bash
# update-caddy-certs.sh
# Belongs in the Caddy directory (next to the Caddyfile).
#
# Reads all domains from the DMS and generates the wildcard-cert blocks
# for Caddy into the file "mail_certs" (pulled in via "import mail_certs"
# in the Caddyfile).
#
# Generated per domain:
#   - Wildcard-cert block (*.domain + domain)
#   - Webmail block (reverse_proxy to Roundcube)
#   - Autodiscover/Autoconfig block (imports the email_settings snippet)
#   - Email-setup block (QR code page for iPhone)
#
# For new domains: run the script again + caddy reload.
#
# Usage:
#   ./update-caddy-certs.sh
#   DRY_RUN=true ./update-caddy-certs.sh
#   DMS_CONTAINER=mailserver CADDY_CONTAINER=caddy ./update-caddy-certs.sh
set -e
DMS_CONTAINER=${DMS_CONTAINER:-"mailserver"}
CADDY_CONTAINER=${CADDY_CONTAINER:-"caddy"}
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
OUTPUT_FILE="$SCRIPT_DIR/mail_certs"
DRY_RUN=${DRY_RUN:-"false"}
# Node hostname of the mail server (used for the default-cert block)
NODE_HOSTNAME=${NODE_HOSTNAME:-"node1.email-srvr.com"}
echo "============================================================"
echo " 📜 Caddy Wildcard-Cert Konfig Generator"
echo " DMS Container: $DMS_CONTAINER"
echo " Caddy Container: $CADDY_CONTAINER"
echo " Output: $OUTPUT_FILE"
echo " Node Hostname: $NODE_HOSTNAME"
[ "$DRY_RUN" = "true" ] && echo " ⚠️ DRY RUN - Keine Dateien werden geschrieben"
echo "============================================================"
# --- Read the domains from the DMS (one per mail account, deduplicated) ---
echo ""
echo "📋 Lese Domains aus DMS..."
DOMAINS=$(docker exec "$DMS_CONTAINER" setup email list 2>/dev/null \
| grep -oP '(?<=@)[^\s]+' \
| sort -u)
if [ -z "$DOMAINS" ]; then
echo "⚠️ Keine DMS-Accounts gefunden. Nur Node-Hostname wird eingetragen."
fi
if [ -n "$DOMAINS" ]; then
echo " Gefundene Domains:"
for d in $DOMAINS; do echo " - $d"; done
fi
# --- Generate the configuration (accumulated in $OUTPUT, written at the end) ---
echo ""
echo "📝 Generiere Caddy-Konfiguration..."
OUTPUT=""
OUTPUT="${OUTPUT}# mail_certs - Automatisch generiert von update-caddy-certs.sh\n"
OUTPUT="${OUTPUT}# Wildcard-Zertifikate + Webmail + Autodiscover für DMS-Domains.\n"
OUTPUT="${OUTPUT}# Einbinden im Caddyfile: import mail_certs\n"
OUTPUT="${OUTPUT}# Generiert: $(date)\n"
OUTPUT="${OUTPUT}\n"
# =====================================================================
# Autodiscover/Autoconfig snippet (dynamic)
# {labels.1}.{labels.0} = base domain derived from the request hostname
# =====================================================================
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
OUTPUT="${OUTPUT}# Autodiscover/Autoconfig Snippet (dynamisch)\n"
OUTPUT="${OUTPUT}# {labels.1}.{labels.0} = Basisdomain aus Hostname\n"
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
OUTPUT="${OUTPUT}(email_settings) {\n"
# --- 1. Outlook Classic Autodiscover (POST + GET XML) ---
OUTPUT="${OUTPUT} # Outlook Autodiscover (XML) - POST und GET\n"
OUTPUT="${OUTPUT} route /autodiscover/autodiscover.xml {\n"
OUTPUT="${OUTPUT} header Content-Type \"application/xml\"\n"
OUTPUT="${OUTPUT} respond \`<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"
OUTPUT="${OUTPUT}<Autodiscover xmlns=\"http://schemas.microsoft.com/exchange/autodiscover/responseschema/2006\">\n"
OUTPUT="${OUTPUT} <Response xmlns=\"http://schemas.microsoft.com/exchange/autodiscover/outlook/responseschema/2006a\">\n"
OUTPUT="${OUTPUT} <Account>\n"
OUTPUT="${OUTPUT} <AccountType>email</AccountType>\n"
OUTPUT="${OUTPUT} <Action>settings</Action>\n"
OUTPUT="${OUTPUT} <Protocol>\n"
OUTPUT="${OUTPUT} <Type>IMAP</Type>\n"
OUTPUT="${OUTPUT} <Server>imap.{labels.1}.{labels.0}</Server>\n"
OUTPUT="${OUTPUT} <Port>993</Port>\n"
OUTPUT="${OUTPUT} <DomainRequired>off</DomainRequired>\n"
OUTPUT="${OUTPUT} <LoginName></LoginName>\n"
OUTPUT="${OUTPUT} <SPA>off</SPA>\n"
OUTPUT="${OUTPUT} <SSL>on</SSL>\n"
OUTPUT="${OUTPUT} <AuthRequired>on</AuthRequired>\n"
OUTPUT="${OUTPUT} </Protocol>\n"
OUTPUT="${OUTPUT} <Protocol>\n"
OUTPUT="${OUTPUT} <Type>SMTP</Type>\n"
OUTPUT="${OUTPUT} <Server>smtp.{labels.1}.{labels.0}</Server>\n"
OUTPUT="${OUTPUT} <Port>465</Port>\n"
OUTPUT="${OUTPUT} <DomainRequired>off</DomainRequired>\n"
OUTPUT="${OUTPUT} <LoginName></LoginName>\n"
OUTPUT="${OUTPUT} <SPA>off</SPA>\n"
OUTPUT="${OUTPUT} <SSL>on</SSL>\n"
OUTPUT="${OUTPUT} <AuthRequired>on</AuthRequired>\n"
OUTPUT="${OUTPUT} </Protocol>\n"
OUTPUT="${OUTPUT} </Account>\n"
OUTPUT="${OUTPUT} </Response>\n"
OUTPUT="${OUTPUT}</Autodiscover>\` 200\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT}\n"
# --- 2. Outlook New / Microsoft 365 (JSON v2) ---
# Outlook New sends GET to /autodiscover/autodiscover.json?Protocol=AutodiscoverV1&...
# The response must point back to the XML endpoint.
OUTPUT="${OUTPUT} # Outlook New/365 (JSON → Redirect zu XML)\n"
OUTPUT="${OUTPUT} route /autodiscover/autodiscover.json {\n"
OUTPUT="${OUTPUT} header Content-Type \"application/json\"\n"
OUTPUT="${OUTPUT} respond \`{\"Protocol\":\"AutodiscoverV1\",\"Url\":\"https://autodiscover.{labels.1}.{labels.0}/autodiscover/autodiscover.xml\"}\` 200\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT}\n"
# --- 3. Thunderbird Autoconfig ---
OUTPUT="${OUTPUT} # Thunderbird Autoconfig\n"
OUTPUT="${OUTPUT} route /mail/config-v1.1.xml {\n"
OUTPUT="${OUTPUT} header Content-Type \"application/xml\"\n"
OUTPUT="${OUTPUT} respond \`<?xml version=\"1.0\"?>\n"
OUTPUT="${OUTPUT}<clientConfig version=\"1.1\">\n"
OUTPUT="${OUTPUT} <emailProvider id=\"{labels.1}.{labels.0}\">\n"
OUTPUT="${OUTPUT} <displayName>{labels.1}.{labels.0} Mail</displayName>\n"
OUTPUT="${OUTPUT} <domain>{labels.1}.{labels.0}</domain>\n"
OUTPUT="${OUTPUT} <incomingServer type=\"imap\">\n"
OUTPUT="${OUTPUT} <hostname>imap.{labels.1}.{labels.0}</hostname>\n"
OUTPUT="${OUTPUT} <port>993</port>\n"
OUTPUT="${OUTPUT} <socketType>SSL</socketType>\n"
OUTPUT="${OUTPUT} <authentication>password-cleartext</authentication>\n"
OUTPUT="${OUTPUT} <username>%%EMAILADDRESS%%</username>\n"
OUTPUT="${OUTPUT} </incomingServer>\n"
OUTPUT="${OUTPUT} <outgoingServer type=\"smtp\">\n"
OUTPUT="${OUTPUT} <hostname>smtp.{labels.1}.{labels.0}</hostname>\n"
OUTPUT="${OUTPUT} <port>465</port>\n"
OUTPUT="${OUTPUT} <socketType>SSL</socketType>\n"
OUTPUT="${OUTPUT} <authentication>password-cleartext</authentication>\n"
OUTPUT="${OUTPUT} <username>%%EMAILADDRESS%%</username>\n"
OUTPUT="${OUTPUT} </outgoingServer>\n"
OUTPUT="${OUTPUT} </emailProvider>\n"
OUTPUT="${OUTPUT}</clientConfig>\` 200\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT}\n"
# --- 4. Apple MobileConfig (inline, like Autodiscover/Autoconfig) ---
OUTPUT="${OUTPUT} # Apple MobileConfig (inline respond)\n"
OUTPUT="${OUTPUT} route /apple {\n"
OUTPUT="${OUTPUT} header Content-Type \"application/x-apple-aspen-config; charset=utf-8\"\n"
OUTPUT="${OUTPUT} header Content-Disposition \"attachment; filename=email.mobileconfig\"\n"
OUTPUT="${OUTPUT} respond \`<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
OUTPUT="${OUTPUT}<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
OUTPUT="${OUTPUT}<plist version=\"1.0\">\n"
OUTPUT="${OUTPUT}<dict>\n"
OUTPUT="${OUTPUT} <key>PayloadContent</key>\n"
OUTPUT="${OUTPUT} <array>\n"
OUTPUT="${OUTPUT} <dict>\n"
OUTPUT="${OUTPUT} <key>EmailAccountDescription</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>EmailAccountName</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>EmailAccountType</key>\n"
OUTPUT="${OUTPUT} <string>EmailTypeIMAP</string>\n"
OUTPUT="${OUTPUT} <key>EmailAddress</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerAuthentication</key>\n"
OUTPUT="${OUTPUT} <string>EmailAuthPassword</string>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerHostName</key>\n"
OUTPUT="${OUTPUT} <string>imap.{labels.1}.{labels.0}</string>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerPortNumber</key>\n"
OUTPUT="${OUTPUT} <integer>993</integer>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerUseSSL</key>\n"
OUTPUT="${OUTPUT} <true/>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerUsername</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerAuthentication</key>\n"
OUTPUT="${OUTPUT} <string>EmailAuthPassword</string>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerHostName</key>\n"
OUTPUT="${OUTPUT} <string>smtp.{labels.1}.{labels.0}</string>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerPortNumber</key>\n"
OUTPUT="${OUTPUT} <integer>465</integer>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerUseSSL</key>\n"
OUTPUT="${OUTPUT} <true/>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerUsername</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>OutgoingPasswordRequired</key>\n"
OUTPUT="${OUTPUT} <true/>\n"
OUTPUT="${OUTPUT} <key>PayloadDescription</key>\n"
OUTPUT="${OUTPUT} <string>E-Mail Konfiguration</string>\n"
OUTPUT="${OUTPUT} <key>PayloadDisplayName</key>\n"
OUTPUT="${OUTPUT} <string>{labels.1}.{labels.0} E-Mail</string>\n"
OUTPUT="${OUTPUT} <key>PayloadIdentifier</key>\n"
OUTPUT="${OUTPUT} <string>com.{labels.1}.{labels.0}.email.account</string>\n"
OUTPUT="${OUTPUT} <key>PayloadType</key>\n"
OUTPUT="${OUTPUT} <string>com.apple.mail.managed</string>\n"
OUTPUT="${OUTPUT} <key>PayloadUUID</key>\n"
OUTPUT="${OUTPUT} <string>A1B2C3D4-E5F6-7890-ABCD-EF1234567890</string>\n"
OUTPUT="${OUTPUT} <key>PayloadVersion</key>\n"
OUTPUT="${OUTPUT} <integer>1</integer>\n"
OUTPUT="${OUTPUT} </dict>\n"
OUTPUT="${OUTPUT} </array>\n"
OUTPUT="${OUTPUT} <key>PayloadDescription</key>\n"
OUTPUT="${OUTPUT} <string>E-Mail Einrichtung</string>\n"
OUTPUT="${OUTPUT} <key>PayloadDisplayName</key>\n"
OUTPUT="${OUTPUT} <string>{labels.1}.{labels.0} E-Mail</string>\n"
OUTPUT="${OUTPUT} <key>PayloadIdentifier</key>\n"
OUTPUT="${OUTPUT} <string>com.{labels.1}.{labels.0}.email.profile</string>\n"
OUTPUT="${OUTPUT} <key>PayloadOrganization</key>\n"
OUTPUT="${OUTPUT} <string>Bay Area Affiliates, Inc.</string>\n"
OUTPUT="${OUTPUT} <key>PayloadRemovalDisallowed</key>\n"
OUTPUT="${OUTPUT} <false/>\n"
OUTPUT="${OUTPUT} <key>PayloadType</key>\n"
OUTPUT="${OUTPUT} <string>Configuration</string>\n"
OUTPUT="${OUTPUT} <key>PayloadUUID</key>\n"
OUTPUT="${OUTPUT} <string>F0E1D2C3-B4A5-6789-0FED-CBA987654321</string>\n"
OUTPUT="${OUTPUT} <key>PayloadVersion</key>\n"
OUTPUT="${OUTPUT} <integer>1</integer>\n"
OUTPUT="${OUTPUT}</dict>\n"
OUTPUT="${OUTPUT}</plist>\` 200\n"
OUTPUT="${OUTPUT} }\n"
# --- 5. Samsung Email (also uses autoconfig, no extra block needed) ---
# The Samsung Email app tries:
#   1. https://autoconfig.<domain>/mail/config-v1.1.xml (= Thunderbird format, already covered)
#   2. Alternatively: Outlook Autodiscover XML
# → No separate block required.
OUTPUT="${OUTPUT}}\n\n"
# =====================================================================
# Email-setup snippet (QR code page for iPhone)
# =====================================================================
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
OUTPUT="${OUTPUT}# Email-Setup Snippet (QR-Code Seite)\n"
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
OUTPUT="${OUTPUT}(email_setup_page) {\n"
OUTPUT="${OUTPUT} route /email-setup* {\n"
OUTPUT="${OUTPUT} uri strip_prefix /email-setup\n"
OUTPUT="${OUTPUT} root * /var/www/email-setup\n"
OUTPUT="${OUTPUT} try_files {path} /setup.html\n"
OUTPUT="${OUTPUT} file_server\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT}}\n\n"
# Node hostname always comes first (default cert of the DMS)
echo " → Node-Hostname Block: $NODE_HOSTNAME"
OUTPUT="${OUTPUT}# Node-Hostname (Default-Cert für DMS Fallback)\n"
OUTPUT="${OUTPUT}${NODE_HOSTNAME} {\n"
OUTPUT="${OUTPUT} tls {\n"
OUTPUT="${OUTPUT} dns cloudflare {env.CLOUDFLARE_API_TOKEN}\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT} respond \"OK\" 200\n"
OUTPUT="${OUTPUT}}\n\n"
# Wildcard blocks + webmail + autodiscover per customer domain
for domain in $DOMAINS; do
echo " → Wildcard Block: *.${domain}"
echo " → Webmail Block: webmail.${domain}"
echo " → Autodiscover Block: autodiscover.${domain}, autoconfig.${domain}"
echo " → Email-Setup Block: webmail.${domain}/email-setup"
# Wildcard-cert block (for cert generation + fallback)
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
OUTPUT="${OUTPUT}# ${domain}\n"
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n\n"
OUTPUT="${OUTPUT}# Wildcard-Cert für $domain\n"
OUTPUT="${OUTPUT}*.${domain}, ${domain} {\n"
OUTPUT="${OUTPUT} tls {\n"
OUTPUT="${OUTPUT} dns cloudflare {env.CLOUDFLARE_API_TOKEN}\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT} respond \"OK\" 200\n"
OUTPUT="${OUTPUT}}\n\n"
# Webmail block (Roundcube + email setup)
OUTPUT="${OUTPUT}# Roundcube Webmail + Email-Setup für $domain\n"
OUTPUT="${OUTPUT}webmail.${domain} {\n"
OUTPUT="${OUTPUT} import email_setup_page\n"
OUTPUT="${OUTPUT} reverse_proxy roundcube:80\n"
OUTPUT="${OUTPUT} encode gzip\n"
OUTPUT="${OUTPUT} log {\n"
OUTPUT="${OUTPUT} output stderr\n"
OUTPUT="${OUTPUT} format console\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT}}\n\n"
# Autodiscover / Autoconfig block
OUTPUT="${OUTPUT}# Autodiscover/Autoconfig für $domain\n"
OUTPUT="${OUTPUT}autodiscover.${domain}, autoconfig.${domain} {\n"
OUTPUT="${OUTPUT} import email_settings\n"
OUTPUT="${OUTPUT} respond \"Autodiscover Service Online\" 200\n"
OUTPUT="${OUTPUT}}\n\n"
done
# --- Write the result (or preview it in dry-run mode) ---
if [ "$DRY_RUN" = "true" ]; then
echo ""
echo "--- VORSCHAU ---"
printf '%b' "$OUTPUT"
echo "--- ENDE ---"
else
printf '%b' "$OUTPUT" > "$OUTPUT_FILE"
echo " ✅ Geschrieben: $OUTPUT_FILE"
fi
# --- Check the import in the Caddyfile ---
CADDYFILE="$SCRIPT_DIR/Caddyfile"
if [ -f "$CADDYFILE" ]; then
if grep -q "import mail_certs" "$CADDYFILE"; then
echo " ✅ 'import mail_certs' bereits im Caddyfile vorhanden."
else
echo ""
echo "⚠️ AKTION: 'import mail_certs' fehlt noch im Caddyfile!"
echo " Bitte nach dem globalen {} Block eintragen:"
echo ""
echo " { ← globaler Block"
echo " email {env.CLOUDFLARE_EMAIL}"
echo " ..."
echo " }"
echo " import mail_certs ← hier einfügen"
fi
# Check whether the old email_autodiscover reference can be removed
if grep -q "import email_autodiscover" "$CADDYFILE"; then
echo ""
echo "⚠️ AUFRÄUMEN: 'import email_autodiscover' im Caddyfile gefunden!"
echo " Das Snippet (email_settings) ist jetzt in mail_certs eingebettet."
echo " Bitte 'import email_autodiscover' aus dem Caddyfile entfernen."
fi
fi
# --- Check whether obsolete files still exist ---
if [ -f "$SCRIPT_DIR/email_autodiscover" ]; then
echo ""
echo "⚠️ AUFRÄUMEN: Datei 'email_autodiscover' kann entfernt werden!"
echo " Das Snippet ist jetzt in mail_certs eingebettet."
fi
if [ -f "$SCRIPT_DIR/email-setup/autodiscover.xml" ]; then
echo ""
echo "⚠️ AUFRÄUMEN: 'email-setup/autodiscover.xml' kann entfernt werden!"
echo " Statische XML wird nicht mehr benötigt (dynamisch über Caddy)."
fi
echo ""
echo "============================================================"
echo "🔄 Nächste Schritte:"
echo ""
echo "1. Caddy Konfiguration validieren:"
echo " docker exec $CADDY_CONTAINER caddy validate --config /etc/caddy/Caddyfile"
echo ""
echo "2. Caddy neu laden (kein Downtime):"
echo " docker exec $CADDY_CONTAINER caddy reload --config /etc/caddy/Caddyfile"
echo ""
echo "3. Cert-Generierung verfolgen (~30s pro Domain):"
echo " docker logs -f $CADDY_CONTAINER 2>&1 | grep -i 'certificate\|acme\|tls\|error'"
echo ""
echo "4. Autodiscover testen:"
for domain in $DOMAINS; do
echo " # Thunderbird:"
echo " curl -s https://autoconfig.${domain}/mail/config-v1.1.xml | head -10"
echo " # Outlook:"
echo " curl -s https://autodiscover.${domain}/autodiscover/autodiscover.xml | head -10"
echo " # Apple (sollte .mobileconfig liefern):"
echo " curl -sI \"https://autodiscover.${domain}/apple?email=test@${domain}\""
echo ""
done
echo "5. iPhone Email-Setup QR-Code Seite:"
for domain in $DOMAINS; do
echo " https://webmail.${domain}/email-setup"
done
echo "============================================================"

View File

@@ -1,53 +0,0 @@
# docker-compose for the (legacy) per-domain Python worker.
services:
  worker:
    image: python:3.11-slim
    container_name: email-worker-${WORKER_DOMAIN}
    restart: unless-stopped
    network_mode: host # access to the local network for Postfix
    # Mount the worker code
    volumes:
      - ./worker.py:/app/worker.py:ro
    working_dir: /app
    # Install Python dependencies and start the worker
    command: >
      sh -c "apt-get update &&
      apt-get install -y --no-install-recommends procps &&
      rm -rf /var/lib/apt/lists/* &&
      pip install --no-cache-dir boto3 &&
      python -u worker.py"
    environment:
      # ⚠️ IMPORTANT: WORKER_DOMAIN must be set from the outside!
      - WORKER_DOMAIN=${WORKER_DOMAIN}
      # AWS Credentials
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
      # Worker Settings
      - POLL_INTERVAL=${POLL_INTERVAL:-20}
      - MAX_MESSAGES=${MAX_MESSAGES:-10}
      - VISIBILITY_TIMEOUT=${VISIBILITY_TIMEOUT:-300}
      # SMTP Configuration
      - SMTP_HOST=${SMTP_HOST:-localhost}
      - SMTP_PORT=${SMTP_PORT:-25}
      - SMTP_USE_TLS=${SMTP_USE_TLS:-false}
      - SMTP_USER=${SMTP_USER:-}
      - SMTP_PASS=${SMTP_PASS:-}
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "5"
    # Healthy as long as the worker process is alive (procps provides pgrep).
    healthcheck:
      test: ["CMD", "pgrep", "-f", "worker.py"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

View File

@@ -0,0 +1,38 @@
# Example environment file for the unified email worker.
# Copy next to docker-compose.yml as ".env" and adjust the values.
# AWS credentials (or use IAM role / instance profile)
AWS_REGION=us-east-2
# AWS_ACCESS_KEY_ID=
# AWS_SECRET_ACCESS_KEY=
# Domains: comma-separated list OR file path
# DOMAINS=andreasknuth.de,bizmatch.net
DOMAINS_FILE=/etc/email-worker/domains.txt
# SMTP (Docker Mail Server)
SMTP_HOST=localhost
SMTP_PORT=25
SMTP_USE_TLS=false
SMTP_USER=
SMTP_PASS=
SMTP_POOL_SIZE=5
# Internal SMTP port (bypass transport_maps)
INTERNAL_SMTP_PORT=25
# Worker settings
WORKER_THREADS=10
POLL_INTERVAL=20
MAX_MESSAGES=10
VISIBILITY_TIMEOUT=300
# DynamoDB tables
DYNAMODB_RULES_TABLE=email-rules
DYNAMODB_MESSAGES_TABLE=ses-outbound-messages
DYNAMODB_BLOCKED_TABLE=email-blocked-senders
# Bounce handling
BOUNCE_LOOKUP_RETRIES=3
BOUNCE_LOOKUP_DELAY=1.0
# Monitoring (Prometheus metrics / health endpoint ports inside the container)
METRICS_PORT=8000
HEALTH_PORT=8080

View File

@@ -0,0 +1,34 @@
# ── Build stage ──────────────────────────────────────────────────
FROM node:20-slim AS builder
WORKDIR /app
# Install ALL dependencies (incl. dev) so tsc is available for the build.
COPY package.json package-lock.json* ./
RUN npm ci
COPY tsconfig.json ./
COPY src/ ./src/
RUN npx tsc
# ── Run stage ────────────────────────────────────────────────────
FROM node:20-slim
WORKDIR /app
# Only production deps
COPY package.json package-lock.json* ./
RUN npm ci --omit=dev && npm cache clean --force
# Compiled JS from build stage
COPY --from=builder /app/dist ./dist
# Config directory (mount domains.txt here)
RUN mkdir -p /etc/email-worker /var/log/email-worker
# 8000 = Prometheus metrics, 8080 = health endpoint (see .env defaults)
EXPOSE 8000 8080
# Health probe uses Node's built-in global fetch (available on Node >= 18).
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
CMD node -e "fetch('http://localhost:8080').then(r => r.ok ? process.exit(0) : process.exit(1)).catch(() => process.exit(1))"
CMD ["node", "dist/main.js"]

View File

@@ -0,0 +1,21 @@
# docker-compose for the unified TypeScript email worker.
services:
  email-worker:
    build: .
    container_name: email-worker-ts
    restart: unless-stopped
    env_file: .env
    volumes:
      - ./domains.txt:/etc/email-worker/domains.txt:ro
      - ./logs:/var/log/email-worker
    ports:
      - "9000:8000" # Prometheus metrics (Host:Container)
      - "9090:8080" # Health check (Host:Container)
    # Connect to DMS on the host or Docker network
    extra_hosts:
      - "host.docker.internal:host-gateway"
    environment:
      - SMTP_HOST=host.docker.internal
      - SMTP_PORT=25
# NOTE(review): the named volume below is declared but never referenced by the
# service (logs are bind-mounted from ./logs) - confirm whether it can be removed.
volumes:
  worker-logs:

3190
email-worker-nodejs/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,37 @@
{
"name": "unified-email-worker",
"version": "2.0.0",
"description": "Unified multi-domain email worker (TypeScript)",
"main": "dist/main.js",
"scripts": {
"build": "tsc",
"start": "node dist/main.js",
"dev": "tsx src/main.ts",
"lint": "eslint src/",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@aws-sdk/client-dynamodb": "^3.700.0",
"@aws-sdk/client-s3": "^3.700.0",
"@aws-sdk/client-ses": "^3.700.0",
"@aws-sdk/client-sqs": "^3.700.0",
"@aws-sdk/lib-dynamodb": "^3.700.0",
"mailparser": "^3.7.1",
"nodemailer": "^6.9.16",
"picomatch": "^4.0.2",
"pino": "^9.5.0",
"pino-pretty": "^13.0.0",
"prom-client": "^15.1.3"
},
"devDependencies": {
"@types/mailparser": "^3.4.5",
"@types/nodemailer": "^6.4.17",
"@types/picomatch": "^3.0.1",
"@types/node": "^22.10.0",
"tsx": "^4.19.0",
"typescript": "^5.7.0"
},
"engines": {
"node": ">=20.0.0"
}
}

View File

@@ -0,0 +1,230 @@
/**
* DynamoDB operations handler
*
* Tables:
* - email-rules → OOO / Forward rules per address
* - ses-outbound-messages → Bounce info (MessageId → original sender)
* - email-blocked-senders → Blocked patterns per address
*/
import { DynamoDBClient } from '@aws-sdk/client-dynamodb';
import {
DynamoDBDocumentClient,
GetCommand,
BatchGetCommand,
} from '@aws-sdk/lib-dynamodb';
import { config } from '../config.js';
import { log } from '../logger.js';
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
/** A per-address rule row from the email-rules table. */
export interface EmailRule {
  /** Partition key: the mailbox address this rule applies to. */
  email_address: string;
  /** Whether an out-of-office auto-reply is currently active. */
  ooo_active?: boolean;
  /** Body of the out-of-office auto-reply. */
  ooo_message?: string;
  /** Content type of ooo_message (presumably a MIME type — confirm against writer). */
  ooo_content_type?: string;
  /** Addresses the incoming mail should be forwarded to. */
  forwards?: string[];
  /** The table may carry additional attributes; keep them accessible. */
  [key: string]: unknown;
}
/** Bounce metadata from the ses-outbound-messages table (MessageId → original sender). */
export interface BounceInfo {
  /** Original envelope sender of the message that bounced. */
  original_source: string;
  /** SES bounce classification — presumably "Permanent"/"Transient"; confirm against writer. */
  bounceType: string;
  /** SES bounce sub-classification. */
  bounceSubType: string;
  /** Recipient addresses that bounced. */
  bouncedRecipients: string[];
  /** When the bounce occurred (format set by the writer — TODO confirm ISO 8601). */
  timestamp: string;
}
// ---------------------------------------------------------------------------
// Handler
// ---------------------------------------------------------------------------
/**
 * DynamoDB access layer over the three worker tables (names from config):
 * rules, outbound messages (bounce lookups) and blocked senders.
 *
 * All read methods degrade gracefully: when `available` is false or a call
 * fails, they return null / empty results instead of throwing, so mail
 * processing can continue without DynamoDB.
 */
export class DynamoDBHandler {
  // Document client; removeUndefinedValues avoids marshalling errors on sparse items.
  private docClient: DynamoDBDocumentClient;
  // Soft availability flag; cleared when verifyTables() fails, checked by every query.
  public available = false;
  constructor() {
    const raw = new DynamoDBClient({ region: config.awsRegion });
    this.docClient = DynamoDBDocumentClient.from(raw, {
      marshallOptions: { removeUndefinedValues: true },
    });
    this.initialize();
  }
  // -----------------------------------------------------------------------
  // Init
  // -----------------------------------------------------------------------
  private initialize(): void {
    // We just mark as available; actual connectivity is tested on first call.
    // (The Python version checked table_status, but that is a DescribeTable
    // call, which is heavy and not needed — the first GetItem will tell us.)
    this.available = true;
    log('✓ DynamoDB client initialized');
  }
  /**
   * Verify tables exist by doing a cheap GetItem on each.
   * Called once during startup; updates `available` accordingly.
   */
  async verifyTables(): Promise<boolean> {
    try {
      // Probe all three tables in parallel with a key that will not exist.
      await Promise.all([
        this.docClient.send(
          new GetCommand({ TableName: config.rulesTable, Key: { email_address: '__probe__' } }),
        ),
        this.docClient.send(
          new GetCommand({ TableName: config.messagesTable, Key: { MessageId: '__probe__' } }),
        ),
        this.docClient.send(
          new GetCommand({ TableName: config.blockedTable, Key: { email_address: '__probe__' } }),
        ),
      ]);
      this.available = true;
      log('✓ DynamoDB tables connected successfully');
      return true;
    } catch (err: any) {
      log(`⚠ DynamoDB not fully available: ${err.message ?? err}`, 'WARNING');
      this.available = false;
      return false;
    }
  }
  // -----------------------------------------------------------------------
  // Email rules
  // -----------------------------------------------------------------------
  /**
   * Fetch the rule record for one address.
   * Returns null when DynamoDB is unavailable, the item is missing,
   * or the call errors.
   */
  async getEmailRules(emailAddress: string): Promise<EmailRule | null> {
    if (!this.available) return null;
    try {
      const resp = await this.docClient.send(
        new GetCommand({
          TableName: config.rulesTable,
          Key: { email_address: emailAddress },
        }),
      );
      return (resp.Item as EmailRule) ?? null;
    } catch (err: any) {
      // A missing table is treated as "no rules"; only log other errors.
      if (err.name !== 'ResourceNotFoundException') {
        log(`⚠ DynamoDB error for ${emailAddress}: ${err.message ?? err}`, 'ERROR');
      }
      return null;
    }
  }
  // -----------------------------------------------------------------------
  // Bounce info
  // -----------------------------------------------------------------------
  /**
   * Look up bounce details for a MessageId, retrying because the bounce
   * record may be written slightly after the notification arrives.
   * Retry count and delay come from config. Returns null when not found
   * after all attempts or on persistent errors.
   */
  async getBounceInfo(
    messageId: string,
    workerName = 'unified',
  ): Promise<BounceInfo | null> {
    if (!this.available) return null;
    for (let attempt = 0; attempt < config.bounceLookupRetries; attempt++) {
      try {
        const resp = await this.docClient.send(
          new GetCommand({
            TableName: config.messagesTable,
            Key: { MessageId: messageId },
          }),
        );
        if (resp.Item) {
          // Normalize the raw item into a fully-populated BounceInfo.
          return {
            original_source: (resp.Item.original_source as string) ?? '',
            bounceType: (resp.Item.bounceType as string) ?? 'Unknown',
            bounceSubType: (resp.Item.bounceSubType as string) ?? 'Unknown',
            bouncedRecipients: (resp.Item.bouncedRecipients as string[]) ?? [],
            timestamp: (resp.Item.timestamp as string) ?? '',
          };
        }
        if (attempt < config.bounceLookupRetries - 1) {
          log(
            ` Bounce record not found yet, retrying in ${config.bounceLookupDelay}s ` +
            `(attempt ${attempt + 1}/${config.bounceLookupRetries})...`,
            'INFO',
            workerName,
          );
          await sleep(config.bounceLookupDelay * 1000);
        } else {
          log(
            `⚠ No bounce record found after ${config.bounceLookupRetries} attempts ` +
            `for Message-ID: ${messageId}`,
            'WARNING',
            workerName,
          );
          return null;
        }
      } catch (err: any) {
        log(
          `⚠ DynamoDB Error (attempt ${attempt + 1}/${config.bounceLookupRetries}): ` +
          `${err.message ?? err}`,
          'ERROR',
          workerName,
        );
        if (attempt < config.bounceLookupRetries - 1) {
          await sleep(config.bounceLookupDelay * 1000);
        } else {
          return null;
        }
      }
    }
    return null;
  }
  // -----------------------------------------------------------------------
  // Blocked senders
  // -----------------------------------------------------------------------
  /**
   * Fetch the blocked sender patterns for one address.
   * Returns an empty list when unavailable or on error.
   */
  async getBlockedPatterns(emailAddress: string): Promise<string[]> {
    if (!this.available) return [];
    try {
      const resp = await this.docClient.send(
        new GetCommand({
          TableName: config.blockedTable,
          Key: { email_address: emailAddress },
        }),
      );
      return (resp.Item?.blocked_patterns as string[]) ?? [];
    } catch (err: any) {
      log(`⚠ Error getting block list for ${emailAddress}: ${err.message ?? err}`, 'ERROR');
      return [];
    }
  }
  /**
   * Fetch blocked patterns for many addresses with a single BatchGetItem.
   * Addresses without an item map to an empty pattern list; on any error
   * the all-empty map is returned (fail-open).
   */
  async batchGetBlockedPatterns(
    emailAddresses: string[],
  ): Promise<Record<string, string[]>> {
    // Pre-seed every requested address with an empty list.
    const empty: Record<string, string[]> = {};
    for (const a of emailAddresses) empty[a] = [];
    if (!this.available || emailAddresses.length === 0) return empty;
    try {
      const keys = emailAddresses.map((a) => ({ email_address: a }));
      const resp = await this.docClient.send(
        new BatchGetCommand({
          RequestItems: {
            [config.blockedTable]: { Keys: keys },
          },
        }),
      );
      const items = resp.Responses?.[config.blockedTable] ?? [];
      const result: Record<string, string[]> = { ...empty };
      for (const item of items) {
        const addr = item.email_address as string;
        result[addr] = (item.blocked_patterns as string[]) ?? [];
      }
      return result;
    } catch (err: any) {
      log(`⚠ Batch blocklist check error: ${err.message ?? err}`, 'ERROR');
      return empty;
    }
  }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/** Promise-based delay helper: resolves after `ms` milliseconds. */
function sleep(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}

View File

@@ -0,0 +1,202 @@
/**
* S3 operations handler
*
* Responsibilities:
* - Download raw email from domain-specific bucket
* - Mark email metadata (processed / all-invalid / blocked)
* - Delete blocked emails
*/
import {
S3Client,
GetObjectCommand,
HeadObjectCommand,
CopyObjectCommand,
DeleteObjectCommand,
type S3ClientConfig,
} from '@aws-sdk/client-s3';
import { config, domainToBucketName } from '../config.js';
import { log } from '../logger.js';
/**
 * S3 access layer for per-domain email buckets.
 *
 * Downloads raw emails and records processing status by rewriting object
 * metadata. S3 metadata cannot be updated in place, so updateMetadata()
 * performs a CopyObject onto itself with MetadataDirective=REPLACE.
 */
export class S3Handler {
  private client: S3Client;
  constructor() {
    const opts: S3ClientConfig = { region: config.awsRegion };
    this.client = new S3Client(opts);
  }
  // -------------------------------------------------------------------------
  // Download
  // -------------------------------------------------------------------------
  /**
   * Download raw email bytes from S3.
   * Returns `null` when the object does not exist yet (caller should retry).
   * Throws on permanent errors.
   *
   * @param receiveCount SQS ApproximateReceiveCount; after 5 attempts a
   *        missing object is treated as permanent and the error is rethrown.
   */
  async getEmail(
    domain: string,
    messageId: string,
    receiveCount: number,
  ): Promise<Buffer | null> {
    const bucket = domainToBucketName(domain);
    try {
      const resp = await this.client.send(
        new GetObjectCommand({ Bucket: bucket, Key: messageId }),
      );
      const bytes = await resp.Body?.transformToByteArray();
      return bytes ? Buffer.from(bytes) : null;
    } catch (err: any) {
      // S3 surfaces "missing object" either via err.name or err.Code.
      if (err.name === 'NoSuchKey' || err.Code === 'NoSuchKey') {
        if (receiveCount < 5) {
          log(`⏳ S3 Object not found yet (Attempt ${receiveCount}). Retrying...`, 'WARNING');
          return null;
        }
        log('❌ S3 Object missing permanently after retries.', 'ERROR');
        throw err;
      }
      log(`❌ S3 Download Error: ${err.message ?? err}`, 'ERROR');
      throw err;
    }
  }
  // -------------------------------------------------------------------------
  // Metadata helpers (copy-in-place with updated metadata)
  // -------------------------------------------------------------------------
  /**
   * Merge `patch` into the object's existing metadata, drop `removeKeys`,
   * and write it back via self-copy (S3 metadata is otherwise immutable).
   */
  private async updateMetadata(
    bucket: string,
    key: string,
    patch: Record<string, string>,
    removeKeys: string[] = [],
  ): Promise<void> {
    const head = await this.client.send(
      new HeadObjectCommand({ Bucket: bucket, Key: key }),
    );
    const metadata = { ...(head.Metadata ?? {}) };
    // Apply patch
    for (const [k, v] of Object.entries(patch)) {
      metadata[k] = v;
    }
    // Remove keys
    for (const k of removeKeys) {
      delete metadata[k];
    }
    await this.client.send(
      new CopyObjectCommand({
        Bucket: bucket,
        Key: key,
        CopySource: `${bucket}/${key}`,
        Metadata: metadata,
        MetadataDirective: 'REPLACE',
      }),
    );
  }
  // -------------------------------------------------------------------------
  // Mark helpers
  // -------------------------------------------------------------------------
  /**
   * Mark the email as successfully delivered. Optionally records inboxes
   * that turned out to be invalid. Failures are logged, not rethrown.
   */
  async markAsProcessed(
    domain: string,
    messageId: string,
    workerName: string,
    invalidInboxes?: string[],
  ): Promise<void> {
    const bucket = domainToBucketName(domain);
    try {
      const patch: Record<string, string> = {
        processed: 'true',
        processed_at: String(Math.floor(Date.now() / 1000)), // unix seconds
        processed_by: workerName,
        status: 'delivered',
      };
      if (invalidInboxes?.length) {
        patch['invalid_inboxes'] = invalidInboxes.join(',');
        log(`⚠ Invalid inboxes recorded: ${invalidInboxes.join(', ')}`, 'WARNING', workerName);
      }
      await this.updateMetadata(bucket, messageId, patch, [
        'processing_started',
        'queued_at',
      ]);
    } catch (err: any) {
      log(`Failed to mark as processed: ${err.message ?? err}`, 'WARNING', workerName);
    }
  }
  /**
   * Mark the email as failed because every recipient mailbox is invalid.
   * Failures are logged, not rethrown.
   */
  async markAsAllInvalid(
    domain: string,
    messageId: string,
    invalidInboxes: string[],
    workerName: string,
  ): Promise<void> {
    const bucket = domainToBucketName(domain);
    try {
      await this.updateMetadata(
        bucket,
        messageId,
        {
          processed: 'true',
          processed_at: String(Math.floor(Date.now() / 1000)),
          processed_by: workerName,
          status: 'failed',
          error: 'All recipients are invalid (mailboxes do not exist)',
          invalid_inboxes: invalidInboxes.join(','),
        },
        ['processing_started', 'queued_at'],
      );
    } catch (err: any) {
      log(`Failed to mark as all invalid: ${err.message ?? err}`, 'WARNING', workerName);
    }
  }
  /**
   * Mark the email as blocked (sender matched a blocklist pattern).
   * Rethrows on failure — callers depend on the marker being written.
   */
  async markAsBlocked(
    domain: string,
    messageId: string,
    blockedRecipients: string[],
    sender: string,
    workerName: string,
  ): Promise<void> {
    const bucket = domainToBucketName(domain);
    try {
      await this.updateMetadata(
        bucket,
        messageId,
        {
          processed: 'true',
          processed_at: String(Math.floor(Date.now() / 1000)),
          processed_by: workerName,
          status: 'blocked',
          blocked_recipients: blockedRecipients.join(','),
          blocked_sender: sender,
        },
        ['processing_started', 'queued_at'],
      );
      log('✓ Marked as blocked in S3 metadata', 'INFO', workerName);
    } catch (err: any) {
      log(`⚠ Failed to mark as blocked: ${err.message ?? err}`, 'ERROR', workerName);
      throw err;
    }
  }
  /**
   * Permanently delete a blocked email object. Rethrows on failure.
   */
  async deleteBlockedEmail(
    domain: string,
    messageId: string,
    workerName: string,
  ): Promise<void> {
    const bucket = domainToBucketName(domain);
    try {
      await this.client.send(
        new DeleteObjectCommand({ Bucket: bucket, Key: messageId }),
      );
      log('🗑 Deleted blocked email from S3', 'SUCCESS', workerName);
    } catch (err: any) {
      log(`⚠ Failed to delete blocked email: ${err.message ?? err}`, 'ERROR', workerName);
      throw err;
    }
  }
}

View File

@@ -0,0 +1,52 @@
/**
* SES operations handler
*
* Only used for:
* - Sending OOO replies to external addresses
* - Forwarding to external addresses
*/
import {
SESClient,
SendRawEmailCommand,
} from '@aws-sdk/client-ses';
import { config } from '../config.js';
import { log } from '../logger.js';
/**
 * Thin wrapper around the AWS SES client.
 *
 * Only used for outbound traffic that leaves our infrastructure:
 * OOO auto-replies and forwards to external addresses.
 */
export class SESHandler {
  private client: SESClient;

  constructor() {
    this.client = new SESClient({ region: config.awsRegion });
  }

  /**
   * Send a raw MIME message via SES.
   * Returns true on success, false on failure (never throws).
   */
  async sendRawEmail(
    source: string,
    destination: string,
    rawMessage: Buffer,
    workerName: string,
  ): Promise<boolean> {
    const command = new SendRawEmailCommand({
      Source: source,
      Destinations: [destination],
      RawMessage: { Data: rawMessage },
    });
    try {
      await this.client.send(command);
    } catch (err: any) {
      const errorCode = err.name ?? err.Code ?? 'Unknown';
      log(
        `⚠ SES send failed to ${destination} (${errorCode}): ${err.message ?? err}`,
        'ERROR',
        workerName,
      );
      return false;
    }
    return true;
  }
}

View File

@@ -0,0 +1,99 @@
/**
* SQS operations handler
*
* Responsibilities:
* - Resolve queue URL for a domain
* - Long-poll for messages
* - Delete processed messages
* - Report approximate queue size
*/
import {
SQSClient,
GetQueueUrlCommand,
ReceiveMessageCommand,
DeleteMessageCommand,
GetQueueAttributesCommand,
type Message,
} from '@aws-sdk/client-sqs';
import { config, domainToQueueName } from '../config.js';
import { log } from '../logger.js';
/**
 * SQS access layer: one queue per managed domain.
 *
 * Resolves queue URLs, long-polls for messages, deletes processed
 * messages, and reports approximate queue depth.
 */
export class SQSHandler {
  private client: SQSClient;
  constructor() {
    this.client = new SQSClient({ region: config.awsRegion });
  }
  /** Resolve queue URL for a domain. Returns null if queue does not exist. */
  async getQueueUrl(domain: string): Promise<string | null> {
    const queueName = domainToQueueName(domain);
    try {
      const resp = await this.client.send(
        new GetQueueUrlCommand({ QueueName: queueName }),
      );
      return resp.QueueUrl ?? null;
    } catch (err: any) {
      // SDK surfaces "no such queue" via err.name or the legacy err.Code.
      if (err.name === 'QueueDoesNotExist' ||
          err.Code === 'AWS.SimpleQueueService.NonExistentQueue') {
        log(`Queue not found for domain: ${domain}`, 'WARNING');
      } else {
        log(`Error getting queue URL for ${domain}: ${err.message ?? err}`, 'ERROR');
      }
      return null;
    }
  }
  /**
   * Long-poll for messages (uses configured poll interval as wait time).
   * Returns an empty array on error so the poll loop keeps running.
   */
  async receiveMessages(queueUrl: string): Promise<Message[]> {
    try {
      const resp = await this.client.send(
        new ReceiveMessageCommand({
          QueueUrl: queueUrl,
          MaxNumberOfMessages: config.maxMessages,
          WaitTimeSeconds: config.pollInterval,
          VisibilityTimeout: config.visibilityTimeout,
          // Needed downstream for retry counting and latency measurement.
          MessageSystemAttributeNames: ['ApproximateReceiveCount', 'SentTimestamp'],
        }),
      );
      return resp.Messages ?? [];
    } catch (err: any) {
      log(`Error receiving messages: ${err.message ?? err}`, 'ERROR');
      return [];
    }
  }
  /**
   * Delete a message from the queue after successful processing.
   * Rethrows on failure so the caller knows the message may reappear.
   */
  async deleteMessage(queueUrl: string, receiptHandle: string): Promise<void> {
    try {
      await this.client.send(
        new DeleteMessageCommand({
          QueueUrl: queueUrl,
          ReceiptHandle: receiptHandle,
        }),
      );
    } catch (err: any) {
      log(`Error deleting message: ${err.message ?? err}`, 'ERROR');
      throw err;
    }
  }
  /** Approximate number of messages in the queue. Returns 0 on error. */
  async getQueueSize(queueUrl: string): Promise<number> {
    try {
      const resp = await this.client.send(
        new GetQueueAttributesCommand({
          QueueUrl: queueUrl,
          AttributeNames: ['ApproximateNumberOfMessages'],
        }),
      );
      return parseInt(
        resp.Attributes?.ApproximateNumberOfMessages ?? '0',
        10,
      );
    } catch {
      return 0;
    }
  }
}

View File

@@ -0,0 +1,115 @@
/**
* Configuration management for unified email worker
*
* All settings are read from environment variables with sensible defaults.
* Domain helpers (bucket name, queue name, internal check) are co-located here
* so every module can import { config, domainToBucket, ... } from './config'.
*/
import { readFileSync, existsSync } from 'node:fs';
// ---------------------------------------------------------------------------
// Config object
// ---------------------------------------------------------------------------
/** Central configuration, read once from environment variables with defaults. */
export const config = {
  // AWS
  awsRegion: process.env.AWS_REGION ?? 'us-east-2',
  // Domains: comma-separated env var and/or one-domain-per-line file.
  domainsList: process.env.DOMAINS ?? '',
  domainsFile: process.env.DOMAINS_FILE ?? '/etc/email-worker/domains.txt',
  // Worker
  workerThreads: parseInt(process.env.WORKER_THREADS ?? '10', 10),
  // Seconds; used as SQS long-poll WaitTimeSeconds.
  pollInterval: parseInt(process.env.POLL_INTERVAL ?? '20', 10),
  // Max messages per SQS receive call (SQS caps this at 10).
  maxMessages: parseInt(process.env.MAX_MESSAGES ?? '10', 10),
  // Seconds a received message stays hidden from other consumers.
  visibilityTimeout: parseInt(process.env.VISIBILITY_TIMEOUT ?? '300', 10),
  // SMTP delivery (local DMS)
  smtpHost: process.env.SMTP_HOST ?? 'localhost',
  smtpPort: parseInt(process.env.SMTP_PORT ?? '25', 10),
  smtpUseTls: (process.env.SMTP_USE_TLS ?? 'false').toLowerCase() === 'true',
  smtpUser: process.env.SMTP_USER ?? '',
  smtpPass: process.env.SMTP_PASS ?? '',
  smtpPoolSize: parseInt(process.env.SMTP_POOL_SIZE ?? '5', 10),
  // Internal SMTP port (for OOO / forwards to managed domains)
  internalSmtpPort: parseInt(process.env.INTERNAL_SMTP_PORT ?? '25', 10),
  // DynamoDB tables
  rulesTable: process.env.DYNAMODB_RULES_TABLE ?? 'email-rules',
  messagesTable: process.env.DYNAMODB_MESSAGES_TABLE ?? 'ses-outbound-messages',
  blockedTable: process.env.DYNAMODB_BLOCKED_TABLE ?? 'email-blocked-senders',
  // Bounce handling: retry count and per-retry delay (seconds, may be fractional).
  bounceLookupRetries: parseInt(process.env.BOUNCE_LOOKUP_RETRIES ?? '3', 10),
  bounceLookupDelay: parseFloat(process.env.BOUNCE_LOOKUP_DELAY ?? '1.0'),
  // Monitoring (Prometheus metrics + health endpoint ports)
  metricsPort: parseInt(process.env.METRICS_PORT ?? '8000', 10),
  healthPort: parseInt(process.env.HEALTH_PORT ?? '8080', 10),
} as const;
/** Convenience alias for the (readonly) config shape. */
export type Config = typeof config;
// ---------------------------------------------------------------------------
// Managed domains (populated by loadDomains())
// ---------------------------------------------------------------------------
// Lowercased set of domains we manage; (re)populated by loadDomains().
const managedDomains = new Set<string>();
/**
* Load domains from env var and/or file, populate the internal set.
*/
/**
 * Load the managed-domain list from the DOMAINS env var and/or the
 * domains file, then rebuild the internal lowercase lookup set used by
 * isInternalAddress().
 *
 * Returns the deduplicated domain list in first-seen order, original case.
 */
export function loadDomains(): string[] {
  const collected: string[] = [];

  // Comma-separated env var first.
  if (config.domainsList) {
    config.domainsList
      .split(',')
      .map((entry) => entry.trim())
      .filter((entry) => entry.length > 0)
      .forEach((entry) => collected.push(entry));
  }

  // Then one domain per line from the file; '#' lines are comments.
  if (existsSync(config.domainsFile)) {
    const lines = readFileSync(config.domainsFile, 'utf-8').split('\n');
    for (const rawLine of lines) {
      const name = rawLine.trim();
      if (name && !name.startsWith('#')) collected.push(name);
    }
  }

  // Deduplicate while preserving insertion order.
  const unique = [...new Set(collected)];

  // Rebuild the case-insensitive lookup set.
  managedDomains.clear();
  unique.forEach((d) => managedDomains.add(d.toLowerCase()));

  return unique;
}
// ---------------------------------------------------------------------------
// Domain helpers
// ---------------------------------------------------------------------------
/** Check whether an email address belongs to one of our managed domains */
/** True when the address's domain part is one of our managed domains. */
export function isInternalAddress(email: string): boolean {
  const at = email.indexOf('@');
  if (at === -1) return false;
  const domain = email.slice(at + 1).toLowerCase();
  return managedDomains.has(domain);
}
/** Convert domain to SQS queue name: bizmatch.net → bizmatch-net-queue */
/** Convert domain to SQS queue name: bizmatch.net → bizmatch-net-queue */
export function domainToQueueName(domain: string): string {
  const slug = domain.split('.').join('-');
  return `${slug}-queue`;
}
/** Convert domain to S3 bucket name: bizmatch.net → bizmatch-net-emails */
/** Convert domain to S3 bucket name: bizmatch.net → bizmatch-net-emails */
export function domainToBucketName(domain: string): string {
  const slug = domain.split('.').join('-');
  return `${slug}-emails`;
}

View File

@@ -0,0 +1,62 @@
/**
* Sender blocklist checking with wildcard / glob support
*
* Uses picomatch for pattern matching (equivalent to Python's fnmatch).
* Patterns are stored per-recipient in DynamoDB.
*/
import picomatch from 'picomatch';
import type { DynamoDBHandler } from '../aws/dynamodb.js';
import { log } from '../logger.js';
/**
* Extract the bare email address from a From header value.
* "John Doe <john@example.com>" → "john@example.com"
*/
/**
 * Extract the bare email address from a From header value.
 * "John Doe <john@example.com>" → "john@example.com"
 * Falls back to the whole input when no angle brackets are present.
 */
function extractAddress(sender: string): string {
  const angle = /<([^>]+)>/.exec(sender);
  const bare = angle ? angle[1] : sender;
  return bare.trim().toLowerCase();
}
/**
 * Checks sender addresses against per-recipient blocklist patterns
 * (glob syntax, matched via picomatch).
 */
export class BlocklistChecker {
  constructor(private dynamodb: DynamoDBHandler) {}

  /**
   * Determine, for each recipient, whether any of the given sender
   * addresses matches one of that recipient's blocked patterns.
   * Uses a single batched DynamoDB lookup for efficiency.
   */
  async batchCheckBlockedSenders(
    recipients: string[],
    senders: string[],
    workerName: string,
  ): Promise<Record<string, boolean>> {
    const patternsByRecipient = await this.dynamodb.batchGetBlockedPatterns(recipients);
    // Normalize every candidate address to a bare lowercase email.
    const cleanSenders = senders.map((s) => extractAddress(s)).filter(Boolean);
    const verdicts: Record<string, boolean> = {};
    for (const recipient of recipients) {
      const patterns = patternsByRecipient[recipient] ?? [];
      verdicts[recipient] = this.anySenderBlocked(patterns, cleanSenders, recipient, workerName);
    }
    return verdicts;
  }

  /** True when any sender glob-matches any pattern; logs the first hit. */
  private anySenderBlocked(
    patterns: string[],
    cleanSenders: string[],
    recipient: string,
    workerName: string,
  ): boolean {
    for (const pattern of patterns) {
      for (const senderClean of cleanSenders) {
        if (picomatch.isMatch(senderClean, pattern.toLowerCase())) {
          log(
            `⛔ BLOCKED: Sender ${senderClean} matches pattern '${pattern}' for inbox ${recipient}`,
            'WARNING',
            workerName,
          );
          return true;
        }
      }
    }
    return false;
  }
}

View File

@@ -0,0 +1,190 @@
/**
* Bounce detection and header rewriting
*
* When Amazon SES returns a bounce, the From header is
* mailer-daemon@amazonses.com. We look up the original sender
* in DynamoDB and rewrite the headers so the bounce appears
* to come from the actual bounced recipient.
*/
import type { ParsedMail } from 'mailparser';
import type { DynamoDBHandler } from '../aws/dynamodb.js';
import { isSesBounceNotification, getHeader } from './parser.js';
import { log } from '../logger.js';
/** Outcome of BounceHandler.applyBounceLogic(). */
export interface BounceResult {
  /** Updated raw bytes (headers rewritten if bounce was detected) */
  rawBytes: Buffer;
  /** Whether bounce was detected and headers were modified */
  modified: boolean;
  /** Whether this email is a bounce notification at all */
  isBounce: boolean;
  /** The effective From address (rewritten or original) */
  fromAddr: string;
}
/**
 * Rewrites SES MAILER-DAEMON bounce notifications so they appear to come
 * from the actual bounced recipient, using the original-sender record
 * stored in DynamoDB at send time.
 */
export class BounceHandler {
  constructor(private dynamodb: DynamoDBHandler) {}
  /**
   * Detect SES bounce, look up original sender in DynamoDB,
   * and rewrite headers in the raw buffer.
   *
   * We operate on the raw Buffer because we need to preserve
   * the original MIME structure exactly, only swapping specific
   * header lines. mailparser's ParsedMail is read-only.
   *
   * Non-bounce messages and lookup failures return the input unchanged
   * (with `modified: false`); this method never throws on missing data.
   */
  async applyBounceLogic(
    parsed: ParsedMail,
    rawBytes: Buffer,
    subject: string,
    workerName = 'unified',
  ): Promise<BounceResult> {
    if (!isSesBounceNotification(parsed)) {
      return {
        rawBytes,
        modified: false,
        isBounce: false,
        fromAddr: parsed.from?.text ?? '',
      };
    }
    log('🔍 Detected SES MAILER-DAEMON bounce notification', 'INFO', workerName);
    // Extract Message-ID from the bounce notification header.
    // Strips angle brackets and keeps only the part before '@' —
    // presumably matching how ses-outbound-messages keys are written;
    // confirm against the outbound send path.
    const rawMessageId = getHeader(parsed, 'message-id')
      .replace(/^</, '')
      .replace(/>$/, '')
      .split('@')[0];
    if (!rawMessageId) {
      log('⚠ Could not extract Message-ID from bounce notification', 'WARNING', workerName);
      return {
        rawBytes,
        modified: false,
        isBounce: true,
        fromAddr: parsed.from?.text ?? '',
      };
    }
    log(` Looking up Message-ID: ${rawMessageId}`, 'INFO', workerName);
    const bounceInfo = await this.dynamodb.getBounceInfo(rawMessageId, workerName);
    if (!bounceInfo) {
      return {
        rawBytes,
        modified: false,
        isBounce: true,
        fromAddr: parsed.from?.text ?? '',
      };
    }
    // Log bounce details
    log(`✓ Found bounce info:`, 'INFO', workerName);
    log(` Original sender: ${bounceInfo.original_source}`, 'INFO', workerName);
    log(` Bounce type: ${bounceInfo.bounceType}/${bounceInfo.bounceSubType}`, 'INFO', workerName);
    log(` Bounced recipients: ${bounceInfo.bouncedRecipients}`, 'INFO', workerName);
    if (!bounceInfo.bouncedRecipients.length) {
      log('⚠ No bounced recipients found in bounce info', 'WARNING', workerName);
      return {
        rawBytes,
        modified: false,
        isBounce: true,
        fromAddr: parsed.from?.text ?? '',
      };
    }
    // Present the bounce as coming from the first bounced recipient.
    const newFrom = bounceInfo.bouncedRecipients[0];
    // Rewrite headers in raw bytes
    let modifiedBytes = rawBytes;
    const originalFrom = getHeader(parsed, 'from');
    // Replace From header
    modifiedBytes = replaceHeader(modifiedBytes, 'From', newFrom);
    // Add diagnostic headers so the original SES sender is traceable.
    modifiedBytes = addHeader(modifiedBytes, 'X-Original-SES-From', originalFrom);
    modifiedBytes = addHeader(
      modifiedBytes,
      'X-Bounce-Type',
      `${bounceInfo.bounceType}/${bounceInfo.bounceSubType}`,
    );
    // Add Reply-To if not present
    if (!getHeader(parsed, 'reply-to')) {
      modifiedBytes = addHeader(modifiedBytes, 'Reply-To', newFrom);
    }
    // Adjust subject for generic delivery status notifications
    const subjectLower = subject.toLowerCase();
    if (
      subjectLower.includes('delivery status notification') ||
      subjectLower.includes('thanks for your submission')
    ) {
      modifiedBytes = replaceHeader(
        modifiedBytes,
        'Subject',
        `Delivery Status: ${newFrom}`,
      );
    }
    log(`✓ Rewritten FROM: ${newFrom}`, 'SUCCESS', workerName);
    return {
      rawBytes: modifiedBytes,
      modified: true,
      isBounce: true,
      fromAddr: newFrom,
    };
  }
}
// ---------------------------------------------------------------------------
// Raw header manipulation helpers
// ---------------------------------------------------------------------------
/**
* Replace a header value in raw MIME bytes.
* Handles multi-line (folded) headers.
*/
/**
 * Replace the value of a header in raw MIME bytes.
 *
 * Matches the full header including folded (whitespace-indented)
 * continuation lines and substitutes a single-line `Name: value`.
 * Returns the buffer unchanged when the header is not present.
 *
 * Fix: removed the unused `regex` variable that was computed and then
 * discarded — only `foldedRegex` was ever used for matching.
 */
function replaceHeader(raw: Buffer, name: string, newValue: string): Buffer {
  const str = raw.toString('utf-8');
  // Header line plus any folded continuation lines (start with SP/TAB).
  const foldedRegex = new RegExp(
    `^${escapeRegex(name)}:[ \\t]*[^\\r\\n]*(?:\\r?\\n[ \\t]+[^\\r\\n]*)*`,
    'im',
  );
  const match = foldedRegex.exec(str);
  if (!match) return raw;
  const before = str.slice(0, match.index);
  const after = str.slice(match.index + match[0].length);
  return Buffer.from(`${before}${name}: ${newValue}${after}`, 'utf-8');
}
/**
* Add a new header line right before the header/body separator.
*/
/**
 * Insert a new header line immediately before the header/body separator
 * (the first blank line). Returns the buffer unchanged when no separator
 * exists.
 *
 * Fix: use the message's own newline convention (CRLF vs bare LF) for the
 * inserted line instead of always emitting CRLF, which produced mixed
 * line endings in LF-only messages.
 */
function addHeader(raw: Buffer, name: string, value: string): Buffer {
  const str = raw.toString('utf-8');
  // Find the header/body boundary (first blank line).
  const sep = str.match(/\r?\n\r?\n/);
  if (!sep || sep.index === undefined) return raw;
  // Match whatever newline style the separator itself uses.
  const newline = sep[0].includes('\r') ? '\r\n' : '\n';
  const before = str.slice(0, sep.index);
  const after = str.slice(sep.index);
  return Buffer.from(`${before}${newline}${name}: ${value}${after}`, 'utf-8');
}
/** Escape all characters that carry special meaning inside a RegExp. */
function escapeRegex(s: string): string {
  const specials = /[.*+?^${}()|[\]\\]/g;
  return s.replace(specials, '\\$&');
}

View File

@@ -0,0 +1,120 @@
/**
* Email parsing utilities
*
* Wraps `mailparser` for parsing raw MIME bytes and provides
* header sanitization (e.g. Microsoft's malformed Message-IDs).
*/
import { simpleParser, type ParsedMail } from 'mailparser';
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
/** Extracted message body: plain text (with fallback) plus optional HTML. */
export interface BodyParts {
  /** Plain-text body; never empty (see extractBodyParts fallback). */
  text: string;
  /** HTML body, or null when the message has none. */
  html: string | null;
}
// ---------------------------------------------------------------------------
// Parser
// ---------------------------------------------------------------------------
/**
* Parse raw email bytes into a ParsedMail object.
* Applies pre-sanitization for known malformed headers before parsing.
*/
/**
 * Parse raw email bytes into a ParsedMail object.
 * Known malformed headers (e.g. Microsoft's bracketed Message-IDs) are
 * repaired first so the strict parser does not choke on them.
 */
export async function parseEmail(raw: Buffer): Promise<ParsedMail> {
  return simpleParser(sanitizeRawHeaders(raw));
}
/**
* Extract text and HTML body parts from a parsed email.
*/
/**
 * Pull the plain-text and HTML bodies out of a parsed email.
 * Falls back to a placeholder when no (non-whitespace) text body exists.
 */
export function extractBodyParts(parsed: ParsedMail): BodyParts {
  const trimmedText = parsed.text?.trim();
  return {
    text: trimmedText || '(No body content)',
    html: parsed.html || null,
  };
}
/**
* Check if email was already processed by our worker (loop detection).
*/
/**
 * Loop detection: true when this message already carries our worker's
 * processing marker — either directly, or as one of our own auto-replies.
 */
export function isProcessedByWorker(parsed: ParsedMail): boolean {
  const workerMark = parsed.headers.get('x-ses-worker-processed');
  const autoSubmitted = parsed.headers.get('auto-submitted');
  const processedByUs = Boolean(workerMark);
  const ourAutoReply = autoSubmitted === 'auto-replied' && Boolean(workerMark);
  return processedByUs || ourAutoReply;
}
/**
* Check if email is a SES MAILER-DAEMON bounce notification.
*/
/** True when the From header identifies an SES MAILER-DAEMON bounce. */
export function isSesBounceNotification(parsed: ParsedMail): boolean {
  const sender = (parsed.from?.text ?? '').toLowerCase();
  return ['mailer-daemon@', 'amazonses.com'].every((needle) => sender.includes(needle));
}
/**
* Get a header value as string. Handles mailparser's headerlines Map.
*/
/**
 * Read a header value as a plain string, normalizing mailparser's mixed
 * value types (plain string, structured object with `.text`, other).
 * Returns '' when the header is absent.
 */
export function getHeader(parsed: ParsedMail, name: string): string {
  const value = parsed.headers.get(name.toLowerCase());
  if (value == null) return '';
  if (typeof value === 'string') return value;
  if (typeof value === 'object' && 'text' in value) {
    return (value as any).text ?? '';
  }
  return String(value);
}
// ---------------------------------------------------------------------------
// Raw header sanitization
// ---------------------------------------------------------------------------
/**
* Fix known problematic patterns in raw MIME headers BEFORE parsing.
*
* Specifically targets Microsoft's `Message-ID: <[uuid]@domain>` which
* causes strict parsers to crash.
*/
/**
 * Fix known problematic patterns in raw MIME headers BEFORE parsing.
 *
 * Specifically targets Microsoft's `Message-ID: <[uuid]@domain>` which
 * causes strict parsers to crash. Returns the input unchanged when no
 * fix is needed.
 */
function sanitizeRawHeaders(raw: Buffer): Buffer {
  // We only need to check/fix the header section (before first blank line).
  // For efficiency we work on the first ~8KB where headers live.
  const headerEnd = findDoubleNewline(raw);
  const headerLen = headerEnd === -1 ? Math.min(raw.length, 8192) : headerEnd;
  const headerStr = raw.subarray(0, headerLen).toString('utf-8');
  // Cheap pre-filter: only run the regex when brackets are present at all.
  if (headerStr.includes('[') || headerStr.includes(']')) {
    // Strip the square brackets from the Message-ID's local part.
    const fixed = headerStr.replace(
      /^(Message-ID:\s*<?)(\[.*?\])(@[^>]*>?\s*)$/im,
      (_match, prefix, bracketed, suffix) =>
        prefix + bracketed.replace(/\[/g, '').replace(/\]/g, '') + suffix,
    );
    if (fixed !== headerStr) {
      // Re-attach the untouched remainder (body) after the fixed header part.
      return Buffer.concat([
        Buffer.from(fixed, 'utf-8'),
        raw.subarray(headerLen),
      ]);
    }
  }
  return raw;
}
/**
 * Return the index where the header/body separator begins — the start of
 * the first "\r\n\r\n" or "\n\n" — or -1 when no blank line exists.
 *
 * Fix: the previous loop bound (`i < buf.length - 3`) stopped too early
 * and missed an "\n\n" at the very end of the buffer (e.g. "ab\n\n"
 * returned -1 even though a blank line exists at index 2).
 */
function findDoubleNewline(buf: Buffer): number {
  for (let i = 0; i < buf.length - 1; i++) {
    // CRLF CRLF — needs four bytes of lookahead.
    if (
      i + 3 < buf.length &&
      buf[i] === 0x0d && buf[i + 1] === 0x0a &&
      buf[i + 2] === 0x0d && buf[i + 3] === 0x0a
    ) {
      return i;
    }
    // Bare LF LF — two bytes.
    if (buf[i] === 0x0a && buf[i + 1] === 0x0a) {
      return i;
    }
  }
  return -1;
}

View File

@@ -0,0 +1,306 @@
/**
* Email rules processing (Auto-Reply / OOO and Forwarding)
 *
 * CLEANED UP & FIXED:
* - Uses MailComposer for ALL message generation (safer MIME handling)
* - Fixes broken attachment forwarding
* - Removed legacy SMTP forwarding
* - Removed manual string concatenation for MIME boundaries
*/
import { createTransport } from 'nodemailer';
import type { ParsedMail } from 'mailparser';
import type { SESHandler } from '../aws/ses.js';
import { extractBodyParts } from './parser.js';
import { log } from '../logger.js';
// We use MailComposer directly to build the raw message bytes
import MailComposer from 'nodemailer/lib/mail-composer/index.js';
import { DynamoDBHandler, EmailRule } from '../aws/dynamodb.js';
import { config, isInternalAddress } from '../config.js';
export type MetricsCallback = (action: 'autoreply' | 'forward', domain: string) => void;
export class RulesProcessor {
constructor(
private dynamodb: DynamoDBHandler,
private ses: SESHandler,
) {}
/**
* Process OOO and Forward rules for a single recipient.
*/
async processRulesForRecipient(
recipient: string,
parsed: ParsedMail,
rawBytes: Buffer,
domain: string,
workerName: string,
metricsCallback?: MetricsCallback,
): Promise<boolean> {
const rule = await this.dynamodb.getEmailRules(recipient.toLowerCase());
if (!rule) return false;
const originalFrom = parsed.from?.text ?? '';
const senderAddr = extractSenderAddress(originalFrom);
// OOO / Auto-Reply
if (rule.ooo_active) {
await this.handleOoo(
recipient,
parsed,
senderAddr,
rule,
domain,
workerName,
metricsCallback,
);
}
// Forwarding
const forwards = rule.forwards ?? [];
if (forwards.length > 0) {
await this.handleForwards(
recipient,
parsed,
originalFrom,
forwards,
domain,
workerName,
metricsCallback,
);
}
return false; // never skip local delivery
}
// -----------------------------------------------------------------------
// OOO
// -----------------------------------------------------------------------
private async handleOoo(
recipient: string,
parsed: ParsedMail,
senderAddr: string,
rule: EmailRule,
domain: string,
workerName: string,
metricsCallback?: MetricsCallback,
): Promise<void> {
// Don't reply to automatic messages
const autoSubmitted = parsed.headers.get('auto-submitted');
const precedence = String(parsed.headers.get('precedence') ?? '').toLowerCase();
if (autoSubmitted && autoSubmitted !== 'no') {
log(' ⏭ Skipping OOO for auto-submitted message', 'INFO', workerName);
return;
}
if (['bulk', 'junk', 'list'].includes(precedence)) {
log(` ⏭ Skipping OOO for ${precedence} message`, 'INFO', workerName);
return;
}
if (/noreply|no-reply|mailer-daemon/i.test(senderAddr)) {
log(' ⏭ Skipping OOO for noreply address', 'INFO', workerName);
return;
}
try {
const oooMsg = (rule.ooo_message as string) ?? 'I am out of office.';
const contentType = (rule.ooo_content_type as string) ?? 'text';
// FIX: Use MailComposer via await
const oooBuffer = await buildOooReply(parsed, recipient, oooMsg, contentType);
if (isInternalAddress(senderAddr)) {
const ok = await sendInternalEmail(recipient, senderAddr, oooBuffer, workerName);
if (ok) log(`✓ Sent OOO reply internally to ${senderAddr}`, 'SUCCESS', workerName);
else log(`⚠ Internal OOO reply failed to ${senderAddr}`, 'WARNING', workerName);
} else {
const ok = await this.ses.sendRawEmail(recipient, senderAddr, oooBuffer, workerName);
if (ok) log(`✓ Sent OOO reply externally to ${senderAddr} via SES`, 'SUCCESS', workerName);
}
metricsCallback?.('autoreply', domain);
} catch (err: any) {
log(`⚠ OOO reply failed to ${senderAddr}: ${err.message ?? err}`, 'ERROR', workerName);
}
}
// -----------------------------------------------------------------------
// Forwarding
// -----------------------------------------------------------------------
private async handleForwards(
  recipient: string,
  parsed: ParsedMail,
  originalFrom: string,
  forwards: string[],
  domain: string,
  workerName: string,
  metricsCallback?: MetricsCallback,
): Promise<void> {
  // Each target is handled independently — one failure must not stop the rest.
  for (const target of forwards) {
    try {
      const forwardBuffer = await buildForwardMessage(parsed, recipient, target, originalFrom);
      if (isInternalAddress(target)) {
        const delivered = await sendInternalEmail(recipient, target, forwardBuffer, workerName);
        if (delivered) {
          log(`✓ Forwarded internally to ${target}`, 'SUCCESS', workerName);
        } else {
          log(`⚠ Internal forward failed to ${target}`, 'WARNING', workerName);
        }
      } else {
        const delivered = await this.ses.sendRawEmail(recipient, target, forwardBuffer, workerName);
        if (delivered) {
          log(`✓ Forwarded externally to ${target} via SES`, 'SUCCESS', workerName);
        }
      }
      metricsCallback?.('forward', domain);
    } catch (err: any) {
      log(`⚠ Forward failed to ${target}: ${err.message ?? err}`, 'ERROR', workerName);
    }
  }
}
}
// ---------------------------------------------------------------------------
// Message building (Using Nodemailer MailComposer for Safety)
// ---------------------------------------------------------------------------
/**
 * Build an out-of-office MIME reply to `original`, sent from `recipient`.
 *
 * @param original    the parsed incoming message being replied to
 * @param recipient   the local mailbox with the OOO rule (used as From)
 * @param oooMsg      the configured OOO text (operator-controlled, may contain HTML)
 * @param contentType 'html' forces an HTML part; otherwise HTML is included only
 *                    when the original itself had an HTML body
 * @returns the composed raw RFC 2822 message
 */
async function buildOooReply(
  original: ParsedMail,
  recipient: string,
  oooMsg: string,
  contentType: string,
): Promise<Buffer> {
  // Escape HTML metacharacters so attacker-controlled header values
  // (From / Subject) cannot inject markup into the HTML part.
  const escapeHtml = (s: string): string =>
    s
      .replace(/&/g, '&amp;')
      .replace(/</g, '&lt;')
      .replace(/>/g, '&gt;')
      .replace(/"/g, '&quot;');
  const { text: textBody, html: htmlBody } = extractBodyParts(original);
  const originalSubject = original.subject ?? '(no subject)';
  const originalFrom = original.from?.text ?? 'unknown';
  const originalMsgId = original.messageId ?? '';
  const recipientDomain = recipient.split('@')[1];
  // Text version
  let textContent = `${oooMsg}\n\n--- Original Message ---\n`;
  textContent += `From: ${originalFrom}\n`;
  textContent += `Subject: ${originalSubject}\n\n`;
  textContent += textBody;
  // HTML version — oooMsg is operator-configured and intentionally left raw;
  // everything taken from the incoming message is escaped.
  let htmlContent = `<div>${oooMsg}</div><br><hr><br>`;
  htmlContent += '<strong>Original Message</strong><br>';
  htmlContent += `<strong>From:</strong> ${escapeHtml(originalFrom)}<br>`;
  htmlContent += `<strong>Subject:</strong> ${escapeHtml(originalSubject)}<br><br>`;
  htmlContent += htmlBody ? htmlBody : escapeHtml(textBody).replace(/\n/g, '<br>');
  const includeHtml = contentType === 'html' || !!htmlBody;
  // FIX: only emit threading headers when the original actually carried a
  // Message-ID — previously an empty In-Reply-To/References pair was written.
  const threadingHeaders = originalMsgId
    ? { inReplyTo: originalMsgId, references: [originalMsgId] } // Nodemailer wants array
    : {};
  const composer = new MailComposer({
    from: recipient,
    to: originalFrom,
    subject: `Out of Office: ${originalSubject}`,
    ...threadingHeaders,
    text: textContent,
    html: includeHtml ? htmlContent : undefined,
    headers: {
      'Auto-Submitted': 'auto-replied', // RFC 3834 marker so peers don't auto-reply back
      'X-SES-Worker-Processed': 'ooo-reply',
    },
    messageId: `<${Date.now()}.${Math.random().toString(36).slice(2)}@${recipientDomain}>`
  });
  return composer.compile().build();
}
/**
 * Build a forwarded copy of `original` addressed to `forwardTo`.
 *
 * The forward is sent from `recipient` (the mailbox owning the forwarding
 * rule) with Reply-To pointing back at the original sender, and carries all
 * original attachments. Header-derived values are HTML-escaped before being
 * embedded in the HTML part.
 *
 * @returns the composed raw RFC 2822 message
 */
async function buildForwardMessage(
  original: ParsedMail,
  recipient: string,
  forwardTo: string,
  originalFrom: string,
): Promise<Buffer> {
  // Escape HTML metacharacters so attacker-controlled header values
  // (From / Date / Subject) cannot inject markup into the HTML wrapper.
  const escapeHtml = (s: string): string =>
    s
      .replace(/&/g, '&amp;')
      .replace(/</g, '&lt;')
      .replace(/>/g, '&gt;')
      .replace(/"/g, '&quot;');
  const { text: textBody, html: htmlBody } = extractBodyParts(original);
  const originalSubject = original.subject ?? '(no subject)';
  const originalDate = original.date?.toUTCString() ?? 'unknown';
  // Text version
  let fwdText = '---------- Forwarded message ---------\n';
  fwdText += `From: ${originalFrom}\n`;
  fwdText += `Date: ${originalDate}\n`;
  fwdText += `Subject: ${originalSubject}\n`;
  fwdText += `To: ${recipient}\n\n`;
  fwdText += textBody;
  // HTML version — only when the original had an HTML part
  let fwdHtml: string | undefined;
  if (htmlBody) {
    fwdHtml = "<div style='border-left:3px solid #ccc;padding-left:10px;'>";
    fwdHtml += '<strong>---------- Forwarded message ---------</strong><br>';
    fwdHtml += `<strong>From:</strong> ${escapeHtml(originalFrom)}<br>`;
    fwdHtml += `<strong>Date:</strong> ${escapeHtml(originalDate)}<br>`;
    fwdHtml += `<strong>Subject:</strong> ${escapeHtml(originalSubject)}<br>`;
    fwdHtml += `<strong>To:</strong> ${escapeHtml(recipient)}<br><br>`;
    fwdHtml += htmlBody; // original HTML body is passed through unchanged
    fwdHtml += '</div>';
  }
  // Config object for MailComposer
  const mailOptions: any = {
    from: recipient,
    to: forwardTo,
    subject: `FWD: ${originalSubject}`,
    replyTo: originalFrom, // replies go to the original sender, not the forwarder
    text: fwdText,
    html: fwdHtml,
    headers: {
      'X-SES-Worker-Processed': 'forwarded', // loop-prevention marker
    },
  };
  // Attachments are carried over verbatim (content, type, inline CIDs).
  if (original.attachments && original.attachments.length > 0) {
    mailOptions.attachments = original.attachments.map((att) => ({
      filename: att.filename ?? 'attachment',
      content: att.content,
      contentType: att.contentType,
      cid: att.cid ?? undefined,
      contentDisposition: att.contentDisposition || 'attachment'
    }));
  }
  const composer = new MailComposer(mailOptions);
  return composer.compile().build();
}
// ---------------------------------------------------------------------------
// Internal SMTP delivery (port 25, bypasses transport_maps)
// ---------------------------------------------------------------------------
/**
 * Deliver a raw message to a local mailbox over the internal SMTP port
 * (bypasses transport_maps on the mailserver).
 *
 * @param from       envelope sender
 * @param to         single envelope recipient
 * @param rawMessage complete RFC 2822 message bytes
 * @param workerName logging context
 * @returns true on accepted delivery, false on any error (logged, not thrown)
 */
async function sendInternalEmail(
  from: string,
  to: string,
  rawMessage: Buffer,
  workerName: string,
): Promise<boolean> {
  let transport: ReturnType<typeof createTransport> | null = null;
  try {
    transport = createTransport({
      host: config.smtpHost,
      port: config.internalSmtpPort,
      secure: false,
      tls: { rejectUnauthorized: false }, // internal hop, self-signed certs OK
    });
    await transport.sendMail({
      envelope: { from, to: [to] },
      raw: rawMessage,
    });
    return true;
  } catch (err: any) {
    log(` ✗ Internal delivery failed to ${to}: ${err.message ?? err}`, 'ERROR', workerName);
    return false;
  } finally {
    // FIX: always release the connection — previously close() was skipped
    // when sendMail threw, leaking the transport on every failed delivery.
    transport?.close();
  }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Pull the bare address out of a From header.
 * "Name <user@host>" → "user@host"; a header without angle brackets is
 * assumed to already be a bare address and returned unchanged.
 */
function extractSenderAddress(fromHeader: string): string {
  const bracketed = /<([^>]+)>/.exec(fromHeader);
  return bracketed?.[1] ?? fromHeader;
}

View File

@@ -0,0 +1,48 @@
/**
* Health check HTTP server
*
* Provides a simple /health endpoint for Docker healthcheck
* and monitoring. Returns domain list and feature flags.
*/
import { createServer, type Server } from 'node:http';
import { log } from './logger.js';
/**
 * Start the /health HTTP server used by the Docker healthcheck and monitoring.
 * Every request (any path) gets a 200 JSON payload with the served domains,
 * static feature flags, live stats from `getStats`, and process uptime.
 */
export function startHealthServer(
  port: number,
  domains: string[],
  getStats?: () => any,
): Server {
  const server = createServer((_req, res) => {
    // Build the payload per request so stats and uptime stay current.
    const body = JSON.stringify(
      {
        status: 'healthy',
        worker: 'unified-email-worker-ts',
        version: '2.0.0',
        domains,
        domainCount: domains.length,
        features: {
          bounce_handling: true,
          ooo_replies: true,
          forwarding: true,
          blocklist: true,
          prometheus_metrics: true,
          lmtp: false,
          legacy_smtp_forward: false,
        },
        stats: getStats?.() ?? {},
        uptime: process.uptime(),
        timestamp: new Date().toISOString(),
      },
      null,
      2,
    );
    res.writeHead(200, { 'Content-Type': 'application/json' });
    res.end(body);
  });
  server.listen(port, () => log(`Health check on port ${port}`));
  return server;
}

View File

@@ -0,0 +1,166 @@
/**
* Structured logging for email worker with daily rotation AND retention
*
* Uses pino for high-performance JSON logging.
* Includes logic to delete logs older than X days.
*/
import pino from 'pino';
import {
existsSync,
mkdirSync,
createWriteStream,
type WriteStream,
readdirSync,
statSync,
unlinkSync
} from 'node:fs';
import { join } from 'node:path';
// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------
// Directory and filename prefix for the plain-text daily log files that
// log() writes in addition to pino's own transports.
const LOG_DIR = '/var/log/email-worker';
const LOG_FILE_PREFIX = 'worker';
const RETENTION_DAYS = 14; // delete log files older than 14 days
// ---------------------------------------------------------------------------
// File stream & Retention Logic
// ---------------------------------------------------------------------------
// Currently open daily file stream and the YYYY-MM-DD date it was opened
// for; rotated by ensureFileStream() when the date changes.
let fileStream: WriteStream | null = null;
let currentDateStr = '';
/** Today's date in ISO form (YYYY-MM-DD), taken from the UTC timestamp. */
function getDateStr(): string {
  return new Date().toISOString().substring(0, 10);
}
/**
 * Delete log files in LOG_DIR older than RETENTION_DAYS (by mtime).
 * Best-effort: per-file errors are ignored, directory-level errors are
 * reported on stderr.
 */
function cleanUpOldLogs(): void {
  try {
    if (!existsSync(LOG_DIR)) return;
    const nowMs = Date.now();
    const maxAgeMs = RETENTION_DAYS * 24 * 60 * 60 * 1000;
    for (const entry of readdirSync(LOG_DIR)) {
      // Only touch files matching our own naming scheme.
      if (!(entry.startsWith(LOG_FILE_PREFIX) && entry.endsWith('.log'))) continue;
      const fullPath = join(LOG_DIR, entry);
      try {
        if (nowMs - statSync(fullPath).mtimeMs > maxAgeMs) {
          unlinkSync(fullPath);
          // One-off stdout note so operators can see cleanup happened.
          process.stdout.write(`[INFO] Deleted old log file: ${entry}\n`);
        }
      } catch (err) {
        // File may have vanished concurrently or be inaccessible — ignore.
      }
    }
  } catch (err) {
    process.stderr.write(`[WARN] Failed to clean up old logs: ${err}\n`);
  }
}
/**
 * Return an append stream for today's log file, creating/rotating it on
 * date change. Returns null when file logging is unavailable (e.g. no
 * permission to create LOG_DIR).
 */
function ensureFileStream(): WriteStream | null {
  const today = getDateStr();
  // Fast path: a stream for today's file is already open.
  if (fileStream && currentDateStr === today) return fileStream;
  try {
    if (!existsSync(LOG_DIR)) mkdirSync(LOG_DIR, { recursive: true });
    // Date rolled over (or first call): prune expired log files.
    if (currentDateStr !== today) cleanUpOldLogs();
    // Close yesterday's stream before opening today's.
    fileStream?.end();
    const logPath = join(LOG_DIR, `${LOG_FILE_PREFIX}.${today}.log`);
    fileStream = createWriteStream(logPath, { flags: 'a' });
    currentDateStr = today;
    return fileStream;
  } catch {
    // Silently continue without file logging (e.g. permission issue).
    return null;
  }
}
// ---------------------------------------------------------------------------
// Pino logger
// ---------------------------------------------------------------------------
// Root pino instance with two simultaneous transport targets.
const logger = pino({
  level: 'info',
  transport: {
    targets: [
      {
        // 1. Pretty, colorized output on the console (for `docker compose logs -f`)
        target: 'pino-pretty',
        options: {
          colorize: true,
          translateTime: 'SYS:yyyy-mm-dd HH:MM:ss',
          ignore: 'pid,hostname',
          singleLine: true
        }
      },
      {
        // 2. Simultaneously write everything unformatted (JSON lines) to the file
        target: 'pino/file',
        options: {
          destination: '/var/log/email-worker/worker.log',
          mkdir: true
        }
      }
    ]
  }
});
// ---------------------------------------------------------------------------
// Log level mapping
// ---------------------------------------------------------------------------
// Worker log levels. SUCCESS has no pino equivalent; log() in this file maps
// it to `info` and prepends a "[SUCCESS] " tag.
type LogLevel = 'DEBUG' | 'INFO' | 'WARNING' | 'ERROR' | 'CRITICAL' | 'SUCCESS';
// Maps each worker level to the pino method that emits it.
const LEVEL_MAP: Record<LogLevel, keyof pino.Logger> = {
  DEBUG: 'debug',
  INFO: 'info',
  WARNING: 'warn',
  ERROR: 'error',
  CRITICAL: 'fatal',
  SUCCESS: 'info',
};
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
 * Emit one log line to both pino (console + pino/file targets) and the
 * plain-text daily file. SUCCESS entries get a "[SUCCESS] " prefix since
 * pino has no such level.
 */
export function log(
  message: string,
  level: LogLevel = 'INFO',
  workerName = 'unified-worker',
): void {
  const successTag = level === 'SUCCESS' ? '[SUCCESS] ' : '';
  const formatted = `[${workerName}] ${successTag}${message}`;
  // Structured output via pino.
  const pinoMethod = LEVEL_MAP[level] ?? 'info';
  (logger as any)[pinoMethod](formatted);
  // Plain-text daily file with its own timestamp prefix.
  const stream = ensureFileStream();
  if (!stream) return;
  const ts = new Date().toISOString().replace('T', ' ').slice(0, 19);
  stream.write(`[${ts}] [${level}] [${workerName}] ${successTag}${message}\n`);
}

View File

@@ -0,0 +1,89 @@
/**
* Main entry point for unified email worker
*
* Startup sequence:
* 1. Load configuration and domains
* 2. Start Prometheus metrics server
* 3. Start health check server
* 4. Initialize UnifiedWorker
* 5. Register signal handlers for graceful shutdown
*/
import { config, loadDomains } from './config.js';
import { log } from './logger.js';
import { startMetricsServer, type MetricsCollector } from './metrics.js';
import { startHealthServer } from './health.js';
import { UnifiedWorker } from './worker/unified-worker.js';
// ---------------------------------------------------------------------------
// Banner
// ---------------------------------------------------------------------------
/** Log the startup banner: version box, served domains, and key config. */
function printBanner(domains: string[]): void {
  const banner = [
    '╔══════════════════════════════════════════════════╗',
    '║ Unified Email Worker (TypeScript) ║',
    '║ Version 2.0.0 ║',
    '╚══════════════════════════════════════════════════╝',
  ];
  for (const line of banner) {
    log(line);
  }
  log('');
  log(`Domains (${domains.length}):`);
  domains.forEach((d) => log(`${d}`));
  log('');
  log(`SMTP: ${config.smtpHost}:${config.smtpPort}`);
  log(`Internal SMTP: port ${config.internalSmtpPort}`);
  log(`Poll interval: ${config.pollInterval}s`);
  log(`Metrics: port ${config.metricsPort}`);
  log(`Health: port ${config.healthPort}`);
  log('');
}
// ---------------------------------------------------------------------------
// Main
// ---------------------------------------------------------------------------
/**
 * Startup entry point. Order matters: metrics/health servers come up before
 * the worker so monitoring works during the (potentially slow) worker start,
 * and signal handlers are registered before polling begins.
 */
async function main(): Promise<void> {
  // 1. Load domains — without at least one domain there is nothing to poll.
  const domains = loadDomains();
  if (domains.length === 0) {
    log('❌ No domains configured. Set DOMAINS env var or provide DOMAINS_FILE.', 'ERROR');
    process.exit(1);
  }
  printBanner(domains);
  // 2. Metrics server (null when prom-client is unavailable)
  const metrics: MetricsCollector | null = await startMetricsServer(config.metricsPort);
  // 3. Unified worker
  const worker = new UnifiedWorker(domains, metrics);
  // 4. Health server — pulls live worker stats on each request
  startHealthServer(config.healthPort, domains, () => worker.getStats());
  // 5. Signal handlers
  // Guard so a second SIGINT/SIGTERM during shutdown doesn't re-enter stop().
  let shuttingDown = false;
  const shutdown = async (signal: string) => {
    if (shuttingDown) return;
    shuttingDown = true;
    log(`\n🛑 Received ${signal}. Shutting down gracefully...`);
    await worker.stop();
    log('👋 Goodbye.');
    process.exit(0);
  };
  process.on('SIGINT', () => shutdown('SIGINT'));
  process.on('SIGTERM', () => shutdown('SIGTERM'));
  // 6. Start
  await worker.start();
  // Keep alive (event loop stays open due to HTTP servers + SQS polling)
  log('✅ Worker is running. Press Ctrl+C to stop.');
}
// ---------------------------------------------------------------------------
// Last-resort handler: anything escaping main() is fatal — log message and
// stack at CRITICAL, then exit non-zero so the container restarts.
main().catch((err) => {
  log(`💥 Fatal startup error: ${err.message ?? err}`, 'CRITICAL');
  log(err.stack ?? '', 'CRITICAL');
  process.exit(1);
});

View File

@@ -0,0 +1,155 @@
/**
* Prometheus metrics collection
*
* Uses prom-client. Falls back gracefully if not available.
*/
import { log } from './logger.js';
import type * as PromClientTypes from 'prom-client';
// prom-client is optional — import dynamically
let promClient: typeof PromClientTypes | null = null;
try {
promClient = require('prom-client') as typeof PromClientTypes;
} catch {
// not installed
}
// ---------------------------------------------------------------------------
// Metric instances (created lazily if prom-client is available)
// ---------------------------------------------------------------------------
let emailsProcessed: any;
let emailsInFlight: any;
let processingTime: any;
let queueSize: any;
let bouncesProcessed: any;
let autorepliesSent: any;
let forwardsSent: any;
let blockedSenders: any;
/**
 * Create all Prometheus metric instances. No-op when prom-client is absent.
 * Called once from the MetricsCollector constructor.
 */
function initMetrics(): void {
  if (!promClient) return;
  const { Counter, Gauge, Histogram } = promClient;
  emailsProcessed = new Counter({ name: 'emails_processed_total', help: 'Total emails processed', labelNames: ['domain', 'status'] });
  emailsInFlight = new Gauge({ name: 'emails_in_flight', help: 'Emails currently being processed' });
  processingTime = new Histogram({ name: 'email_processing_seconds', help: 'Time to process email', labelNames: ['domain'] });
  queueSize = new Gauge({ name: 'queue_messages_available', help: 'Messages in queue', labelNames: ['domain'] });
  bouncesProcessed = new Counter({ name: 'bounces_processed_total', help: 'Bounce notifications processed', labelNames: ['domain', 'type'] });
  autorepliesSent = new Counter({ name: 'autoreplies_sent_total', help: 'Auto-replies sent', labelNames: ['domain'] });
  forwardsSent = new Counter({ name: 'forwards_sent_total', help: 'Forwards sent', labelNames: ['domain'] });
  blockedSenders = new Counter({ name: 'blocked_senders_total', help: 'Emails blocked by blacklist', labelNames: ['domain'] });
}
// ---------------------------------------------------------------------------
// MetricsCollector
// ---------------------------------------------------------------------------
/**
 * Thin typed facade over the module-level Prometheus metrics.
 * When prom-client is not installed every method is a silent no-op
 * (the optional-chained metric instances are undefined).
 */
export class MetricsCollector {
  /** True when prom-client was resolved at module load. */
  public readonly enabled: boolean;

  constructor() {
    this.enabled = promClient !== null;
    if (this.enabled) initMetrics();
  }

  /** Count one processed email with its outcome label. */
  incrementProcessed(domain: string, status: string): void {
    emailsProcessed?.labels(domain, status).inc();
  }

  incrementInFlight(): void {
    emailsInFlight?.inc();
  }

  decrementInFlight(): void {
    emailsInFlight?.dec();
  }

  observeProcessingTime(domain: string, seconds: number): void {
    processingTime?.labels(domain).observe(seconds);
  }

  setQueueSize(domain: string, size: number): void {
    queueSize?.labels(domain).set(size);
  }

  incrementBounce(domain: string, bounceType: string): void {
    bouncesProcessed?.labels(domain, bounceType).inc();
  }

  incrementAutoreply(domain: string): void {
    autorepliesSent?.labels(domain).inc();
  }

  incrementForward(domain: string): void {
    forwardsSent?.labels(domain).inc();
  }

  incrementBlocked(domain: string): void {
    blockedSenders?.labels(domain).inc();
  }
}
// ---------------------------------------------------------------------------
// Start metrics HTTP server
// ---------------------------------------------------------------------------
/**
 * Expose the prom-client registry over HTTP on `port` and return a
 * MetricsCollector, or null when prom-client is missing or the server
 * cannot be started.
 */
export async function startMetricsServer(port: number): Promise<MetricsCollector | null> {
  if (!promClient) {
    log('⚠ Prometheus client not installed, metrics disabled', 'WARNING');
    return null;
  }
  try {
    const http = await import('node:http');
    const { register } = promClient;
    const server = http.createServer(async (_req, res) => {
      try {
        // Any path serves the full registry dump.
        res.setHeader('Content-Type', register.contentType);
        res.end(await register.metrics());
      } catch {
        res.statusCode = 500;
        res.end();
      }
    });
    server.listen(port, () => log(`Prometheus metrics on port ${port}`));
    return new MetricsCollector();
  } catch (err: any) {
    log(`Failed to start metrics server: ${err.message ?? err}`, 'ERROR');
    return null;
  }
}

View File

@@ -0,0 +1,155 @@
/**
* SMTP / email delivery with nodemailer pooled transport
*
* Replaces both Python's SMTPPool and EmailDelivery classes.
* nodemailer handles connection pooling, keepalive, and reconnection natively.
*
* Removed: LMTP delivery path (never used in production).
*/
import { createTransport, type Transporter } from 'nodemailer';
import { log } from '../logger.js';
import { config } from '../config.js';
// ---------------------------------------------------------------------------
// Permanent error detection
// ---------------------------------------------------------------------------
// Substrings of SMTP responses that indicate a permanently-failing recipient
// (matched case-insensitively against the error text).
const PERMANENT_INDICATORS = [
  '550', '551', '553',
  'mailbox not found', 'user unknown', 'no such user',
  'recipient rejected', 'does not exist', 'invalid recipient',
  'unknown user',
];

/** True when the SMTP error text contains any known permanent-failure marker. */
function isPermanentRecipientError(errorMsg: string): boolean {
  const normalized = errorMsg.toLowerCase();
  for (const indicator of PERMANENT_INDICATORS) {
    if (normalized.includes(indicator)) return true;
  }
  return false;
}
// ---------------------------------------------------------------------------
// Delivery class
// ---------------------------------------------------------------------------
export class EmailDelivery {
  // Pooled nodemailer transport; nodemailer handles keepalive and reconnection.
  private transport: Transporter;

  /**
   * Open a pooled SMTP transport to the local mailserver using the
   * host/port/TLS/auth settings from config.
   */
  constructor() {
    this.transport = createTransport({
      host: config.smtpHost,
      port: config.smtpPort,
      secure: config.smtpUseTls,
      pool: true,
      maxConnections: config.smtpPoolSize,
      maxMessages: Infinity, // reuse connections indefinitely
      tls: { rejectUnauthorized: false },
      // Auth is optional — attached only when both user and pass are configured.
      ...(config.smtpUser && config.smtpPass
        ? { auth: { user: config.smtpUser, pass: config.smtpPass } }
        : {}),
    });
    log(
      `📡 SMTP pool initialized → ${config.smtpHost}:${config.smtpPort} ` +
        `(max ${config.smtpPoolSize} connections)`,
    );
  }

  /**
   * Send raw email to ONE recipient via the local DMS.
   *
   * Connection-level failures are retried up to `maxRetries` times with a
   * short pause; permanent failures (response code ≥ 550 or known error
   * text) return immediately without retrying.
   *
   * Returns: [success, errorMessage?, isPermanent]
   */
  async sendToRecipient(
    fromAddr: string,
    recipient: string,
    rawMessage: Buffer,
    workerName: string,
    maxRetries = 2,
  ): Promise<[boolean, string | null, boolean]> {
    let lastError: string | null = null;
    for (let attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        // Explicit envelope: headers inside rawMessage are not consulted.
        await this.transport.sendMail({
          envelope: { from: fromAddr, to: [recipient] },
          raw: rawMessage,
        });
        log(`${recipient}: Delivered (SMTP)`, 'SUCCESS', workerName);
        return [true, null, false];
      } catch (err: any) {
        const errorMsg = err.message ?? String(err);
        const responseCode = err.responseCode ?? 0;
        // Check for permanent errors (5xx)
        if (
          responseCode >= 550 ||
          isPermanentRecipientError(errorMsg)
        ) {
          log(
            `${recipient}: ${errorMsg} (permanent)`,
            'ERROR',
            workerName,
          );
          return [false, errorMsg, true];
        }
        // Connection-level errors → retry
        if (
          err.code === 'ECONNRESET' ||
          err.code === 'ECONNREFUSED' ||
          err.code === 'ETIMEDOUT' ||
          errorMsg.toLowerCase().includes('disconnect') ||
          errorMsg.toLowerCase().includes('closed') ||
          errorMsg.toLowerCase().includes('connection')
        ) {
          log(
            `${recipient}: Connection error, retrying... ` +
              `(attempt ${attempt + 1}/${maxRetries + 1})`,
            'WARNING',
            workerName,
          );
          lastError = errorMsg;
          await sleep(300); // brief pause before the pooled transport retries
          continue;
        }
        // Other SMTP errors — classify via the error text and give up.
        const isPerm = isPermanentRecipientError(errorMsg);
        log(
          `${recipient}: ${errorMsg} (${isPerm ? 'permanent' : 'temporary'})`,
          'ERROR',
          workerName,
        );
        return [false, errorMsg, isPerm];
      }
    }
    // All retries exhausted
    log(
      `${recipient}: All retries failed - ${lastError}`,
      'ERROR',
      workerName,
    );
    return [false, lastError ?? 'Connection failed after retries', false];
  }

  /** Verify the transport is reachable (used during startup). */
  async verify(): Promise<boolean> {
    try {
      await this.transport.verify();
      return true;
    } catch {
      return false;
    }
  }

  /** Close all pooled connections. */
  close(): void {
    this.transport.close();
  }
}
// ---------------------------------------------------------------------------
/** Promise-based delay: resolves after `ms` milliseconds. */
function sleep(ms: number): Promise<void> {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}

View File

@@ -0,0 +1,151 @@
/**
* Domain queue poller
*
* One poller per domain. Runs an async loop that long-polls SQS
* and dispatches messages to the MessageProcessor.
*
* Replaces Python's threading.Thread + threading.Event with
* a simple async loop + AbortController for cancellation.
*/
import type { SQSHandler } from '../aws/sqs.js';
import type { MetricsCollector } from '../metrics.js';
import { log } from '../logger.js';
import { MessageProcessor } from './message-processor.js';
/** Live counters for one DomainPoller (surfaced via the health endpoint). */
export interface DomainPollerStats {
  domain: string;            // domain this poller serves
  processed: number;         // messages handed to the processor without throwing
  errors: number;            // processing + polling errors
  lastActivity: Date | null; // time of the last processed message
  running: boolean;          // whether the poll loop is active
}
/**
 * One poller per domain: an async long-poll loop against the domain's SQS
 * queue that dispatches each message to the MessageProcessor and deletes it
 * on success. Cancellation uses an AbortController instead of thread events.
 */
export class DomainPoller {
  /** Mutable counters surfaced to health/monitoring endpoints. */
  public stats: DomainPollerStats;
  // Cancels the current poll loop. Replaced on every start() — see below.
  private abort: AbortController;
  private loopPromise: Promise<void> | null = null;

  constructor(
    private domain: string,
    private queueUrl: string,
    private sqs: SQSHandler,
    private processor: MessageProcessor,
    private metrics: MetricsCollector | null,
  ) {
    this.abort = new AbortController();
    this.stats = {
      domain,
      processed: 0,
      errors: 0,
      lastActivity: null,
      running: false,
    };
  }

  /** Start the polling loop. Returns immediately. */
  start(): void {
    if (this.stats.running) return;
    // FIX: take a fresh AbortController on every start. The previous one
    // remains aborted after stop(), so a restarted poller would otherwise
    // exit its loop immediately while stats.running stayed true.
    this.abort = new AbortController();
    this.stats.running = true;
    log(`▶ Started poller for ${this.domain}`, 'INFO', `poller-${this.domain}`);
    this.loopPromise = this.pollLoop();
  }

  /** Signal the poller to stop and wait for the in-flight loop to finish. */
  async stop(): Promise<void> {
    if (!this.stats.running) return;
    this.abort.abort();
    if (this.loopPromise) {
      await this.loopPromise;
    }
    this.stats.running = false;
    log(`⏹ Stopped poller for ${this.domain}`, 'INFO', `poller-${this.domain}`);
  }

  // -----------------------------------------------------------------------
  // Poll loop
  // -----------------------------------------------------------------------

  /**
   * Long-poll SQS until aborted. Messages the processor reports as handled
   * are deleted from the queue; polling errors back off 5s to avoid a hot
   * error loop.
   */
  private async pollLoop(): Promise<void> {
    const workerName = `poller-${this.domain}`;
    while (!this.abort.signal.aborted) {
      try {
        // Report queue size
        const qSize = await this.sqs.getQueueSize(this.queueUrl);
        this.metrics?.setQueueSize(this.domain, qSize);
        if (qSize > 0) {
          log(`📊 Queue ${this.domain}: ~${qSize} messages`, 'INFO', workerName);
        }
        // Long-poll
        const messages = await this.sqs.receiveMessages(this.queueUrl);
        if (this.abort.signal.aborted) break;
        if (messages.length === 0) continue;
        log(
          `📬 Received ${messages.length} message(s) for ${this.domain}`,
          'INFO',
          workerName,
        );
        // Process each message
        for (const msg of messages) {
          if (this.abort.signal.aborted) break;
          // SQS increments this on every redelivery; the processor uses it
          // for retry decisions.
          const receiveCount = parseInt(
            msg.Attributes?.ApproximateReceiveCount ?? '1',
            10,
          );
          this.metrics?.incrementInFlight();
          const start = Date.now();
          try {
            const shouldDelete = await this.processor.processMessage(
              this.domain,
              msg,
              receiveCount,
            );
            if (shouldDelete && msg.ReceiptHandle) {
              await this.sqs.deleteMessage(this.queueUrl, msg.ReceiptHandle);
            }
            this.stats.processed++;
            this.stats.lastActivity = new Date();
            const elapsed = ((Date.now() - start) / 1000).toFixed(2);
            this.metrics?.observeProcessingTime(this.domain, parseFloat(elapsed));
          } catch (err: any) {
            this.stats.errors++;
            log(
              `❌ Error processing message: ${err.message ?? err}`,
              'ERROR',
              workerName,
            );
          } finally {
            this.metrics?.decrementInFlight();
          }
        }
      } catch (err: any) {
        if (this.abort.signal.aborted) break;
        this.stats.errors++;
        log(
          `❌ Polling error for ${this.domain}: ${err.message ?? err}`,
          'ERROR',
          workerName,
        );
        // Back off on repeated errors
        await sleep(5000);
      }
    }
  }
}
// ---------------------------------------------------------------------------
/** Promise-based delay used for error backoff; resolves after `ms` ms. */
function sleep(ms: number): Promise<void> {
  return new Promise((resolve) => {
    setTimeout(resolve, ms);
  });
}

View File

@@ -0,0 +1,355 @@
/**
* Email message processing worker
*
* Processes a single SQS message:
* 1. Unpack SNS/SES envelope
* 2. Download raw email from S3
* 3. Loop detection
* 4. Parse & sanitize headers
* 5. Bounce detection & header rewrite
* 6. Blocklist check
* 7. Process recipients (rules, SMTP delivery)
* 8. Mark result in S3 metadata
*/
import type { Message } from '@aws-sdk/client-sqs';
import type { S3Handler } from '../aws/s3.js';
import type { SQSHandler } from '../aws/sqs.js';
import type { SESHandler } from '../aws/ses.js';
import type { DynamoDBHandler } from '../aws/dynamodb.js';
import type { EmailDelivery } from '../smtp/delivery.js';
import type { MetricsCollector } from '../metrics.js';
import type { ParsedMail } from 'mailparser';
import { log } from '../logger.js';
import { BlocklistChecker } from '../email/blocklist.js';
import { BounceHandler } from '../email/bounce-handler.js';
import { parseEmail, isProcessedByWorker } from '../email/parser.js';
import { RulesProcessor } from '../email/rules-processor.js';
// ---------------------------------------------------------------------------
// Processor
// ---------------------------------------------------------------------------
export class MessageProcessor {
  // Sub-processors, all sharing the DynamoDB handler given to the constructor.
  private bounceHandler: BounceHandler;
  private rulesProcessor: RulesProcessor;
  private blocklist: BlocklistChecker;
  // Injected by the owner after construction; null disables metric updates.
  public metrics: MetricsCollector | null = null;

  constructor(
    private s3: S3Handler,
    private sqs: SQSHandler,
    private ses: SESHandler,
    private dynamodb: DynamoDBHandler,
    private delivery: EmailDelivery,
  ) {
    this.bounceHandler = new BounceHandler(dynamodb);
    this.rulesProcessor = new RulesProcessor(dynamodb, ses);
    this.blocklist = new BlocklistChecker(dynamodb);
  }

  /**
   * Process one email message from queue.
   * Returns true → delete from queue.
   * Returns false → leave in queue for retry.
   */
  async processMessage(
    domain: string,
    message: Message,
    receiveCount: number,
  ): Promise<boolean> {
    const workerName = `worker-${domain}`;
    try {
      // 1. UNPACK (SNS → SES)
      const body = JSON.parse(message.Body ?? '{}');
      let sesMsg: any;
      if (body.Message && body.Type) {
        // SNS Notification wrapper — the SES event is JSON inside `Message`.
        const snsContent = body.Message;
        sesMsg = typeof snsContent === 'string' ? JSON.parse(snsContent) : snsContent;
      } else {
        // SES event delivered directly to the queue (no SNS wrapper).
        sesMsg = body;
      }
      // 2. EXTRACT DATA
      const mail = sesMsg.mail ?? {};
      const receipt = sesMsg.receipt ?? {};
      const messageId: string | undefined = mail.messageId;
      // Ignore SES setup notifications
      if (messageId === 'AMAZON_SES_SETUP_NOTIFICATION') {
        log(' Received Amazon SES Setup Notification. Ignoring.', 'INFO', workerName);
        return true;
      }
      const fromAddr: string = mail.source ?? '';
      const recipients: string[] = receipt.recipients ?? [];
      if (!messageId) {
        log('❌ Error: No messageId in event payload', 'ERROR', workerName);
        return true;
      }
      // Domain validation
      if (recipients.length === 0) {
        log('⚠ Warning: No recipients in event', 'WARNING', workerName);
        return true;
      }
      // Defensive check: each worker instance only serves its own domain.
      const recipientDomain = recipients[0].split('@')[1];
      if (recipientDomain.toLowerCase() !== domain.toLowerCase()) {
        log(
          `⚠ Security: Ignored message for ${recipientDomain} ` +
            `(I am worker for ${domain})`,
          'WARNING',
          workerName,
        );
        return true;
      }
      // Compact log
      const recipientsStr =
        recipients.length === 1
          ? recipients[0]
          : `${recipients.length} recipients`;
      log(
        `📧 Processing: ${messageId.slice(0, 20)}... -> ${recipientsStr}`,
        'INFO',
        workerName,
      );
      // 3. DOWNLOAD FROM S3
      const rawBytes = await this.s3.getEmail(domain, messageId, receiveCount);
      if (rawBytes === null) return false; // retry later
      // 4. LOOP DETECTION
      const tempParsed = await parseEmail(rawBytes);
      const skipRules = isProcessedByWorker(tempParsed);
      if (skipRules) {
        log('🔄 Loop prevention: Already processed by worker', 'INFO', workerName);
      }
      // 5. PARSING & BOUNCE LOGIC
      let finalRawBytes = rawBytes;
      let fromAddrFinal = fromAddr;
      let isBounce = false;
      let parsedFinal: ParsedMail | null = null; // declared here so it survives the try/catch below
      try {
        const parsed = await parseEmail(rawBytes);
        const subject = parsed.subject ?? '(no subject)';
        // Bounce header rewriting
        const bounceResult = await this.bounceHandler.applyBounceLogic(
          parsed,
          rawBytes,
          subject,
          workerName,
        );
        isBounce = bounceResult.isBounce;
        finalRawBytes = bounceResult.rawBytes;
        if (bounceResult.modified) {
          log(' ✨ Bounce detected & headers rewritten via DynamoDB', 'INFO', workerName);
          fromAddrFinal = bounceResult.fromAddr;
          this.metrics?.incrementBounce(domain, 'rewritten');
        } else {
          fromAddrFinal = fromAddr;
        }
        // Add processing marker for non-processed emails
        if (!skipRules) {
          finalRawBytes = addProcessedHeader(finalRawBytes);
        }
        // Re-parse after modifications for rules processing
        parsedFinal = await parseEmail(finalRawBytes);
      } catch (err: any) {
        // Parsing failed — still deliver the original raw bytes, but skip
        // all rule processing (OOO / forwards) for this message.
        log(
          `⚠ Parsing/Logic Error: ${err.message ?? err}. Sending original RAW mail without rules.`,
          'WARNING',
          workerName,
        );
        log(`Full error: ${err.stack ?? err}`, 'ERROR', workerName);
        fromAddrFinal = fromAddr;
        isBounce = false;
        parsedFinal = null; // IMPORTANT: no further parse attempt — rules are skipped below
      }
      // 6. BLOCKLIST CHECK
      // Check both the envelope sender and the header From (they can differ).
      const sendersToCheck: string[] = [];
      if (fromAddrFinal) sendersToCheck.push(fromAddrFinal);
      const headerFrom = parsedFinal?.from?.text;
      if (headerFrom && !sendersToCheck.includes(headerFrom)) {
        sendersToCheck.push(headerFrom);
      }
      const blockedByRecipient = await this.blocklist.batchCheckBlockedSenders(
        recipients,
        sendersToCheck, // pass the full candidate array
        workerName,
      );
      // 7. PROCESS RECIPIENTS
      log(`📤 Sending to ${recipients.length} recipient(s)...`, 'INFO', workerName);
      const successful: string[] = [];
      const failedPermanent: string[] = [];
      const failedTemporary: string[] = [];
      const blockedRecipients: string[] = [];
      for (const recipient of recipients) {
        // Blocked?
        if (blockedByRecipient[recipient]) {
          log(
            `🗑 Silently dropping message for ${recipient} (Sender blocked)`,
            'INFO',
            workerName,
          );
          blockedRecipients.push(recipient);
          this.metrics?.incrementBlocked(domain);
          continue;
        }
        // Process rules (OOO, Forwarding) — not for bounces or already forwarded
        if (!isBounce && !skipRules && parsedFinal !== null) {
          const metricsCallback = (action: 'autoreply' | 'forward', dom: string) => {
            if (action === 'autoreply') this.metrics?.incrementAutoreply(dom);
            else if (action === 'forward') this.metrics?.incrementForward(dom);
          };
          await this.rulesProcessor.processRulesForRecipient(
            recipient,
            parsedFinal,
            finalRawBytes,
            domain,
            workerName,
            metricsCallback,
          );
        }
        // SMTP delivery
        const [success, error, isPerm] = await this.delivery.sendToRecipient(
          fromAddrFinal,
          recipient,
          finalRawBytes,
          workerName,
        );
        if (success) {
          successful.push(recipient);
          this.metrics?.incrementProcessed(domain, 'success');
        } else if (isPerm) {
          failedPermanent.push(recipient);
          this.metrics?.incrementProcessed(domain, 'permanent_failure');
        } else {
          failedTemporary.push(recipient);
          this.metrics?.incrementProcessed(domain, 'temporary_failure');
        }
      }
      // 8. RESULT & CLEANUP
      // Successes, permanent failures and blocked recipients count as
      // "handled"; any temporary failure keeps the message queued for retry.
      const totalHandled =
        successful.length + failedPermanent.length + blockedRecipients.length;
      if (totalHandled === recipients.length) {
        if (blockedRecipients.length === recipients.length) {
          // All blocked
          try {
            await this.s3.markAsBlocked(
              domain,
              messageId,
              blockedRecipients,
              fromAddrFinal,
              workerName,
            );
            await this.s3.deleteBlockedEmail(domain, messageId, workerName);
          } catch (err: any) {
            log(`⚠ Failed to handle blocked email: ${err.message ?? err}`, 'ERROR', workerName);
            return false;
          }
        } else if (successful.length > 0) {
          await this.s3.markAsProcessed(
            domain,
            messageId,
            workerName,
            failedPermanent.length > 0 ? failedPermanent : undefined,
          );
        } else if (failedPermanent.length > 0) {
          await this.s3.markAsAllInvalid(
            domain,
            messageId,
            failedPermanent,
            workerName,
          );
        }
        // Summary
        const parts: string[] = [];
        if (successful.length) parts.push(`${successful.length} OK`);
        if (failedPermanent.length) parts.push(`${failedPermanent.length} invalid`);
        if (blockedRecipients.length) parts.push(`${blockedRecipients.length} blocked`);
        log(`✅ Completed (${parts.join(', ')})`, 'SUCCESS', workerName);
        return true;
      } else {
        // Temporary failures remain
        log(
          `🔄 Temp failure (${failedTemporary.length} failed), will retry`,
          'WARNING',
          workerName,
        );
        return false;
      }
    } catch (err: any) {
      log(`❌ CRITICAL WORKER ERROR: ${err.message ?? err}`, 'ERROR', workerName);
      log(err.stack ?? '', 'ERROR', workerName);
      return false;
    }
  }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
* Add X-SES-Worker-Processed header to raw email bytes using Buffer manipulation.
* More robust and memory efficient than toString().
*/
/**
 * Add an `X-SES-Worker-Processed: delivered` header to raw email bytes.
 *
 * Works directly on the Buffer (no toString round-trip). The header is
 * inserted immediately before the blank line that terminates the header
 * section; both CRLF (`\r\n\r\n`) and bare-LF (`\n\n`) terminators are
 * accepted. If no terminator exists (malformed mail), the header is
 * prepended instead.
 */
function addProcessedHeader(raw: Buffer): Buffer {
  // Buffer.indexOf scans the entire buffer, so a terminator in the final
  // two bytes is found too. (The previous manual loop stopped 3 bytes
  // before the end and could miss a trailing "\n\n".)
  const crlfEnd = raw.indexOf('\r\n\r\n');
  const lfEnd = raw.indexOf('\n\n');
  let headerEndIndex: number;
  if (crlfEnd === -1) {
    headerEndIndex = lfEnd;
  } else if (lfEnd === -1) {
    headerEndIndex = crlfEnd;
  } else {
    // Both terminators present: the earlier one ends the headers.
    headerEndIndex = Math.min(crlfEnd, lfEnd);
  }
  // No header/body separator found (broken mail?) — prepend the header.
  if (headerEndIndex === -1) {
    const headerLine = Buffer.from('X-SES-Worker-Processed: delivered\r\n', 'utf-8');
    return Buffer.concat([headerLine, raw]);
  }
  // Insert the header directly BEFORE the empty line.
  const before = raw.subarray(0, headerEndIndex);
  const after = raw.subarray(headerEndIndex);
  const newHeader = Buffer.from('\r\nX-SES-Worker-Processed: delivered', 'utf-8');
  return Buffer.concat([before, newHeader, after]);
}

View File

@@ -0,0 +1,134 @@
/**
* Unified multi-domain worker coordinator
*
* Manages the lifecycle of all DomainPoller instances:
* - Resolves SQS queue URLs for each domain
* - Creates pollers for valid domains
* - Provides aggregate stats
* - Graceful shutdown
*/
import { DynamoDBHandler } from '../aws/dynamodb';
import { S3Handler} from '../aws/s3.js';
import { SQSHandler} from '../aws/sqs.js'
import { SESHandler } from '../aws/ses';
import { EmailDelivery } from '../smtp/delivery.js';
import { MessageProcessor } from './message-processor.js';
import { DomainPoller, type DomainPollerStats } from './domain-poller.js';
import type { MetricsCollector } from '../metrics.js';
import { log } from '../logger.js';
/**
 * Coordinates one DomainPoller per domain over a shared set of AWS handlers
 * and a single MessageProcessor (see file header for the lifecycle summary).
 */
export class UnifiedWorker {
  // One poller per domain with a resolvable SQS queue; filled in start().
  private pollers: DomainPoller[] = [];
  // Shared processor used by every poller.
  private processor: MessageProcessor;
  private sqs: SQSHandler;
  // Handle of the periodic status-report timer; null until start().
  private statusInterval: NodeJS.Timeout | null = null;
  constructor(
    private domains: string[],
    private metrics: MetricsCollector | null,
  ) {
    // All AWS handlers are shared across domains — only the queue URL differs.
    const s3 = new S3Handler();
    this.sqs = new SQSHandler();
    const ses = new SESHandler();
    const dynamodb = new DynamoDBHandler();
    const delivery = new EmailDelivery();
    this.processor = new MessageProcessor(s3, this.sqs, ses, dynamodb, delivery);
    this.processor.metrics = metrics;
    // Fire-and-forget table verification; failures are deliberately ignored here.
    dynamodb.verifyTables().catch(() => {});
  }
  /**
   * Resolve each domain's SQS queue URL, create and start one poller per
   * valid domain, then begin a 5-minute status report interval.
   * Exits the process when no domain has a usable queue.
   */
  async start(): Promise<void> {
    log(`🚀 Starting unified worker for ${this.domains.length} domain(s)...`);
    const resolvedPollers: DomainPoller[] = [];
    for (const domain of this.domains) {
      const queueUrl = await this.sqs.getQueueUrl(domain);
      if (!queueUrl) {
        // A domain without a queue is skipped, not fatal — others may still work.
        log(`⚠ Skipping ${domain}: No SQS queue found`, 'WARNING');
        continue;
      }
      const poller = new DomainPoller(
        domain,
        queueUrl,
        this.sqs,
        this.processor,
        this.metrics,
      );
      resolvedPollers.push(poller);
    }
    if (resolvedPollers.length === 0) {
      log('❌ No valid domains with SQS queues found. Exiting.', 'ERROR');
      process.exit(1);
    }
    this.pollers = resolvedPollers;
    for (const poller of this.pollers) {
      poller.start();
    }
    log(
      `✅ All ${this.pollers.length} domain poller(s) running: ` +
        this.pollers.map((p) => p.stats.domain).join(', '),
      'SUCCESS',
    );
    // Start the 5-minute status report.
    this.statusInterval = setInterval(() => {
      this.printStatus();
    }, 5 * 60 * 1000);
  }
  /** Stop the status timer and all pollers (awaits in-flight polls). */
  async stop(): Promise<void> {
    log('🛑 Stopping all domain pollers...');
    if (this.statusInterval) clearInterval(this.statusInterval);
    await Promise.all(this.pollers.map((p) => p.stop()));
    log('✅ All pollers stopped.');
  }
  /** Aggregate processed/error counters across all pollers (snapshot copies). */
  getStats(): {
    totalProcessed: number;
    totalErrors: number;
    domains: DomainPollerStats[];
  } {
    let totalProcessed = 0;
    let totalErrors = 0;
    const domains: DomainPollerStats[] = [];
    for (const p of this.pollers) {
      totalProcessed += p.stats.processed;
      totalErrors += p.stats.errors;
      domains.push({ ...p.stats });
    }
    return { totalProcessed, totalErrors, domains };
  }
  /** Log a one-line status summary (active pollers + per-domain counters). */
  private printStatus(): void {
    const stats = this.getStats();
    // Count the pollers that are still running.
    const activePollers = this.pollers.filter((p) => p.stats.running).length;
    const totalPollers = this.pollers.length;
    // Format per-domain stats, e.g. "hotshpotshga:1".
    const domainStats = stats.domains
      .map((d) => {
        const shortName = d.domain.split('.')[0].substring(0, 12);
        return `${shortName}:${d.processed}`;
      })
      .join(' | ');
    log(
      `📊 Status: ${activePollers}/${totalPollers} active, total:${stats.totalProcessed} | ${domainStats}`,
      'INFO',
      'unified-worker'
    );
  }
}

View File

@@ -0,0 +1,22 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "Node16",
"moduleResolution": "Node16",
"lib": ["ES2022"],
"outDir": "./dist",
"rootDir": "./src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"noUnusedLocals": false,
"noUnusedParameters": false
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

View File

@@ -1,24 +0,0 @@
import sys
import email
from email.utils import getaddresses, parseaddr


def extract_headers(raw: bytes):
    """Parse raw RFC 822 bytes and return ``(from_addr, recipients_csv)``.

    ``from_addr`` is the bare address of the From header (display name
    stripped); ``recipients_csv`` is a comma-separated list of all bare
    To/Cc addresses (empty string when there are none).
    """
    msg = email.message_from_bytes(raw)
    _, from_addr = parseaddr(msg.get('From', ''))
    # All To and Cc recipients, bare addresses only.
    all_addrs = getaddresses(msg.get_all('To', [])) + getaddresses(msg.get_all('Cc', []))
    recipients = ','.join(addr for _, addr in all_addrs if addr)
    return from_addr, recipients


def main(argv=None):
    """CLI entry point: print FROM:/RECIPIENTS: lines for one email file."""
    argv = sys.argv[1:] if argv is None else argv
    if not argv:
        print("Usage: python3 extract_email_headers.py <email_file>")
        sys.exit(1)
    with open(argv[0], 'rb') as f:
        from_addr, recipients = extract_headers(f.read())
    print(f'FROM:{from_addr}')
    print(f'RECIPIENTS:{recipients}')


if __name__ == '__main__':
    main()

View File

@@ -1,123 +0,0 @@
import json
import os
import boto3
import uuid
import logging
from datetime import datetime
from botocore.exceptions import ClientError
import time
import random
# Configure logging: Lambda installs a root handler, so setting the level
# on the root logger is all that is needed.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Module-level SQS client, reused across warm Lambda invocations.
sqs = boto3.client('sqs')
# Retry configuration for SQS sends.
MAX_RETRIES = 3
BASE_BACKOFF = 1  # seconds


def exponential_backoff(attempt):
    """Return the delay in seconds for *attempt*: exponential growth plus jitter."""
    jitter = random.uniform(0, 1)
    return BASE_BACKOFF * 2 ** attempt + jitter
def get_queue_url(domain):
    """Resolve the SQS queue URL for *domain*.

    Naming convention: ``domain.tld`` -> ``domain-tld-queue``.
    Raises ValueError when no queue exists for the domain; any other
    SQS client error is re-raised unchanged.
    """
    queue_name = f"{domain.replace('.', '-')}-queue"
    try:
        return sqs.get_queue_url(QueueName=queue_name)['QueueUrl']
    except ClientError as exc:
        if exc.response['Error']['Code'] != 'AWS.SimpleQueueService.NonExistentQueue':
            raise
        logger.error(f"Queue nicht gefunden für Domain: {domain}")
        raise ValueError(f"Keine Queue für Domain {domain}")
def lambda_handler(event, context):
    """
    Receive an SES receipt event, extract the domain dynamically, wrap the
    metadata as a 'fake SNS' notification and send it to the domain-specific
    SQS queue. Includes retry logic with exponential backoff for the SQS send.
    """
    try:
        records = event.get('Records', [])
        logger.info(f"Received event with {len(records)} records.")
        for record in records:
            ses_data = record.get('ses', {})
            if not ses_data:
                logger.warning(f"Invalid SES event: Missing 'ses' in record: {record}")
                continue
            mail = ses_data.get('mail', {})
            receipt = ses_data.get('receipt', {})
            # Extract the target domain from the first recipient.
            recipients = receipt.get('recipients', []) or mail.get('destination', [])
            if not recipients:
                logger.warning("No recipients in event - skipping")
                continue
            first_recipient = recipients[0]
            domain = first_recipient.split('@')[-1].lower()
            if not domain:
                logger.error("Could not extract domain from recipient")
                continue
            # Log the key metadata for traceability.
            msg_id = mail.get('messageId', 'unknown')
            source = mail.get('source', 'unknown')
            logger.info(f"Processing Message-ID: {msg_id} for domain: {domain}")
            logger.info(f" From: {source}")
            logger.info(f" To: {recipients}")
            # Serialize the SES JSON as a string for the SNS-style envelope.
            ses_json_string = json.dumps(ses_data)
            # Log and check payload size (safeguard below the SQS 256KB limit).
            payload_size = len(ses_json_string.encode('utf-8'))
            logger.info(f" Metadata Payload Size: {payload_size} bytes")
            if payload_size > 200000:  # arbitrary limit < SQS 256KB
                raise ValueError("Payload too large for SQS")
            # Fake SNS payload: downstream consumers expect an SNS-shaped message.
            fake_sns_payload = {
                "Type": "Notification",
                "MessageId": str(uuid.uuid4()),
                "TopicArn": "arn:aws:sns:ses-shim:global-topic",
                "Subject": "Amazon SES Email Receipt Notification",
                "Message": ses_json_string,
                "Timestamp": datetime.utcnow().isoformat() + "Z"
            }
            # Resolve the queue URL dynamically per domain.
            queue_url = get_queue_url(domain)
            # Send to SQS with retries; sleep grows exponentially between attempts.
            attempt = 0
            while attempt < MAX_RETRIES:
                try:
                    sqs.send_message(
                        QueueUrl=queue_url,
                        MessageBody=json.dumps(fake_sns_payload)
                    )
                    logger.info(f"✅ Successfully forwarded {msg_id} to SQS: {queue_url}")
                    break
                except ClientError as e:
                    attempt += 1
                    error_code = e.response['Error']['Code']
                    logger.warning(f"Retry {attempt}/{MAX_RETRIES} for SQS send: {error_code} - {str(e)}")
                    if attempt == MAX_RETRIES:
                        raise
                    time.sleep(exponential_backoff(attempt))
        return {'status': 'ok'}
    except Exception as e:
        # Re-raise so Lambda reports the invocation as failed (and SES retries).
        logger.error(f"❌ Critical Error in Lambda Shim: {str(e)}", exc_info=True)
        raise e

View File

@@ -1,30 +0,0 @@
#!/bin/bash
# manage-worker.sh
# Run a docker compose action for one domain-specific worker.
# The domain selects both the compose project name and the env file.
DOMAIN=$1
if [ -z "$DOMAIN" ]; then
    echo "Usage: $0 <domain> [action]"
    echo "Example: $0 andreasknuth.de"
    echo " $0 andreasknuth.de down"
    echo " $0 andreasknuth.de logs -f"
    exit 1
fi
# Drop the first parameter ($1 / DOMAIN) from the argument list.
shift
# Take ALL remaining arguments ($@); default to "up -d" when none are given.
ACTION="${@:-up -d}"
# Compose project names cannot contain dots — replace them with dashes.
PROJECT_NAME="${DOMAIN//./-}"
ENV_FILE=".env.${DOMAIN}"
if [ ! -f "$ENV_FILE" ]; then
    echo "Error: $ENV_FILE not found!"
    exit 1
fi
# $ACTION is deliberately left unquoted here so that e.g. "logs -f"
# is split into two separate compose arguments.
docker compose -p "$PROJECT_NAME" --env-file "$ENV_FILE" $ACTION

View File

@@ -0,0 +1,36 @@
services:
prometheus:
image: prom/prometheus:latest
container_name: prometheus
restart: unless-stopped
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
- prometheus_data:/prometheus
ports:
- "9091:9090"
extra_hosts:
- "host.docker.internal:host-gateway" # so Prometheus can reach the worker running on the host
grafana:
image: grafana/grafana:latest
container_name: grafana
restart: unless-stopped
volumes:
- grafana_data:/var/lib/grafana
ports:
- "4000:3000"
depends_on:
- prometheus
blackbox_exporter:
image: prom/blackbox-exporter:latest
container_name: blackbox_exporter
restart: unless-stopped
ports:
- "9115:9115"
extra_hosts: # allow the exporter to probe services on the Docker host
- "host.docker.internal:host-gateway"
volumes:
prometheus_data:
grafana_data:

25
monitoring/prometheus.yml Normal file
View File

@@ -0,0 +1,25 @@
global:
scrape_interval: 15s
scrape_configs:
# 1. Scrape the Node.js email worker's metrics endpoint
- job_name: 'email-worker'
static_configs:
- targets: ['host.docker.internal:9000']
# 2. Port monitoring of the mail server (IMAP 993 & POP3 995)
- job_name: 'mailserver_ports'
metrics_path: /probe
params:
module: [tcp_connect] # only checks whether the TCP port is open
static_configs:
- targets:
- host.docker.internal:993 # IMAPS
- host.docker.internal:995 # POP3S
relabel_configs:
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: instance
- target_label: __address__
replacement: blackbox_exporter:9115 # the exporter performs the actual check

View File

@@ -1,164 +0,0 @@
#!/usr/bin/env python3
"""
Test script für Message-ID Extraktion - VERBESSERTE VERSION
Kann lokal ausgeführt werden ohne AWS-Verbindung
"""
import re
from email.parser import BytesParser
from email.policy import SMTP as SMTPPolicy
def log(message: str, level: str = 'INFO'):
    """Dummy logger for local tests: print '[LEVEL] message' to stdout."""
    line = f"[{level}] {message}"
    print(line)
def extract_original_message_id(parsed):
    """
    Extract the original SES Message-ID from an email (typically a bounce).
    SES format: 010f[hex12]-[hex8]-[hex4]-[hex4]-[hex4]-[hex12]-000000
    Returns the ID string, or None when nothing usable is found.
    """
    import re
    # SES Message-ID pattern (always ends with -000000).
    ses_pattern = re.compile(r'010f[0-9a-f]{12}-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}-000000')
    # The Message-ID of the current email (the bounce itself) — we do NOT want this one.
    current_msg_id = (parsed.get('Message-ID') or '').strip()
    current_match = ses_pattern.search(current_msg_id)
    current_id = current_match.group(0) if current_match else None
    log(f"Current Message-ID: {current_id}", 'DEBUG')
    # 1. Try the standard headers (In-Reply-To, References).
    for header in ['In-Reply-To', 'References']:
        value = (parsed.get(header) or '').strip()
        if value:
            match = ses_pattern.search(value)
            if match:
                found_id = match.group(0)
                # Only accept it if it is NOT the current bounce's own ID.
                if found_id != current_id:
                    log(f" Found Message-ID in {header}: {found_id}")
                    return found_id
    # 2. Search the complete email body (including ALL attachments/parts).
    try:
        body_text = ''
        # Assemble the whole body as one string.
        if parsed.is_multipart():
            for part in parsed.walk():
                content_type = part.get_content_type()
                # SPECIAL CASE: message/rfc822 (embedded messages).
                if content_type == 'message/rfc822':
                    log(f" Processing embedded message/rfc822", 'DEBUG')
                    try:
                        # get_payload() returns a list containing one EmailMessage object!
                        payload = part.get_payload()
                        if isinstance(payload, list) and len(payload) > 0:
                            embedded_msg = payload[0]
                            # Read the Message-ID of the embedded message.
                            embedded_id = (embedded_msg.get('Message-ID') or '').strip()
                            match = ses_pattern.search(embedded_id)
                            if match:
                                found_id = match.group(0)
                                log(f" Found ID in embedded msg: {found_id}", 'DEBUG')
                                # Only accept it if it is NOT the current bounce's own ID.
                                if found_id != current_id:
                                    log(f" ✓ Found Message-ID in embedded message: {found_id}")
                                    return found_id
                            # Fallback: fold the embedded message into the body text.
                            body_text += embedded_msg.as_string()
                    except Exception as e:
                        log(f" Warning: Could not process embedded message: {e}", 'WARNING')
                # Search ALL other parts (except binary data such as images).
                elif content_type.startswith('text/') or content_type.startswith('application/'):
                    try:
                        payload = part.get_payload(decode=True)
                        if payload:
                            # Try UTF-8 first, then fall back to Latin-1.
                            try:
                                body_text += payload.decode('utf-8', errors='ignore')
                            except:
                                try:
                                    body_text += payload.decode('latin-1', errors='ignore')
                                except:
                                    # Last resort: lossy str() conversion.
                                    body_text += str(payload, errors='ignore')
                    except:
                        # If decoding fails entirely, use the raw string payload.
                        payload = part.get_payload()
                        if isinstance(payload, str):
                            body_text += payload
        else:
            # Non-multipart message: decode the single payload.
            payload = parsed.get_payload(decode=True)
            if payload:
                try:
                    body_text = payload.decode('utf-8', errors='ignore')
                except:
                    body_text = payload.decode('latin-1', errors='ignore')
        # Collect every SES Message-ID occurring in the body.
        matches = ses_pattern.findall(body_text)
        if matches:
            log(f" Found {len(matches)} total IDs in body: {matches}", 'DEBUG')
            # Filter out the bounce's own ID.
            candidates = [m for m in matches if m != current_id]
            if candidates:
                # Take the FIRST remaining ID (usually the original one).
                log(f" Found {len(matches)} SES Message-ID(s) in body, using first (not bounce): {candidates[0]}")
                return candidates[0]
            else:
                log(f" Found {len(matches)} SES Message-ID(s) but all match the bounce ID")
    except Exception as e:
        log(f" Warning: Could not search body for Message-ID: {e}", 'WARNING')
    return None
def test_with_file(filepath: str):
    """Run the extraction against a real email file and print a report.

    Returns the extracted Message-ID (or None) so the caller can turn it
    into an exit code.
    """
    print(f"\n{'='*70}")
    print(f"Testing: {filepath}")
    print('='*70)
    with open(filepath, 'rb') as f:
        raw_bytes = f.read()
    # Parse with the SMTP policy so header folding matches wire format.
    parsed = BytesParser(policy=SMTPPolicy).parsebytes(raw_bytes)
    print(f"\nEmail Headers:")
    print(f" From: {parsed.get('From')}")
    print(f" To: {parsed.get('To')}")
    print(f" Subject: {parsed.get('Subject')}")
    print(f" Message-ID: {parsed.get('Message-ID')}")
    print(f" In-Reply-To: {parsed.get('In-Reply-To')}")
    print(f" References: {parsed.get('References')}")
    print(f"\n--- EXTRACTION ---")
    result = extract_original_message_id(parsed)
    print(f"\n{'='*70}")
    print(f"RESULT: {result}")
    print('='*70)
    return result
if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1:
        # Email file supplied as argument.
        result = test_with_file(sys.argv[1])
        # Exit code: 0 = success (ID found), 1 = failure (no ID).
        sys.exit(0 if result else 1)
    else:
        print("Usage: python3 test_extract_v2.py <email_file>")
        sys.exit(1)

View File

@@ -1,19 +0,0 @@
#!/bin/bash
# update-all-workers.sh (smart version)
# Restart every running email worker via manage-worker.sh.
# The domain list is derived from the running containers whose
# names carry the "email-worker-" prefix.
DOMAINS=$(docker ps --filter "name=email-worker-" --format "{{.Names}}" | sed 's/email-worker-//')
if [ -z "$DOMAINS" ]; then
    echo "No workers found"
    exit 1
fi
echo "Found workers: $DOMAINS"
echo ""
for domain in $DOMAINS; do
    echo "═══ $domain ═══"
    ./manage-worker.sh "$domain" restart
done
echo "✓ Done"

885
worker.py
View File

@@ -1,885 +0,0 @@
import os
import sys
import boto3
import smtplib
import json
import time
import traceback
import signal
from email.parser import BytesParser
from email.policy import SMTP as SMTPPolicy
from datetime import datetime
from botocore.exceptions import ClientError # Neu: Korrekter Import für SES-Exceptions
# AWS configuration: all clients are created once at module load.
AWS_REGION = 'us-east-2'
s3 = boto3.client('s3', region_name=AWS_REGION)
sqs = boto3.client('sqs', region_name=AWS_REGION)
ses = boto3.client('ses', region_name=AWS_REGION)  # used for OOO replies / forwards
# Worker configuration (domain-specific): one worker process per domain.
WORKER_DOMAIN = os.environ.get('WORKER_DOMAIN')  # e.g. 'andreasknuth.de'
WORKER_NAME = os.environ.get('WORKER_NAME', f'worker-{WORKER_DOMAIN}')
# SQS polling settings.
POLL_INTERVAL = int(os.environ.get('POLL_INTERVAL', '20'))
MAX_MESSAGES = int(os.environ.get('MAX_MESSAGES', '10'))
VISIBILITY_TIMEOUT = int(os.environ.get('VISIBILITY_TIMEOUT', '300'))
# SMTP configuration (simple, since there is only one domain per worker).
SMTP_HOST = os.environ.get('SMTP_HOST', 'localhost')
SMTP_PORT = int(os.environ.get('SMTP_PORT', '25'))
SMTP_USE_TLS = os.environ.get('SMTP_USE_TLS', 'false').lower() == 'true'
SMTP_USER = os.environ.get('SMTP_USER')
SMTP_PASS = os.environ.get('SMTP_PASS')
# Graceful-shutdown flag, set by the SIGTERM/SIGINT handler defined below.
shutdown_requested = False
# DynamoDB resources for bounce lookup and rules (OOO / forwards).
try:
    dynamo = boto3.resource('dynamodb', region_name=AWS_REGION)
    msg_table = dynamo.Table('ses-outbound-messages')
    rules_table = dynamo.Table('email-rules')  # used for OOO / forwards
except Exception as e:
    # NOTE: log() is defined further down in this file, so it must not be
    # used here — the original call raised a NameError that masked the real
    # connection error. Fall back to a plain print.
    print(f"Warning: Could not connect to DynamoDB: {e}")
    msg_table = None
    rules_table = None
def get_bucket_name(domain):
    """Map a domain to its S3 bucket name (convention: domain.tld -> domain-tld-emails)."""
    return f"{domain.replace('.', '-')}-emails"
def is_ses_bounce_notification(parsed):
    """Return True when the mail comes from the SES MAILER-DAEMON (bounce notification)."""
    sender = (parsed.get('From') or '').lower()
    return 'mailer-daemon@us-east-2.amazonses.com' in sender
def get_bounce_info_from_dynamodb(message_id, max_retries=3, retry_delay=1):
    """
    Look up bounce info in DynamoDB by Message-ID.
    Retries a few times to cover the race where the bounce record is
    written slightly after the bounce notification arrives.
    Returns: dict with bounce info, or None (not found / persistent error).
    """
    import time  # local import kept from the original; shadows nothing
    for attempt in range(max_retries):
        try:
            response = msg_table.get_item(Key={'MessageId': message_id})
            item = response.get('Item')
            if item:
                # Found — normalize with defaults so callers can index safely.
                return {
                    'original_source': item.get('original_source', ''),
                    'bounceType': item.get('bounceType', 'Unknown'),
                    'bounceSubType': item.get('bounceSubType', 'Unknown'),
                    'bouncedRecipients': item.get('bouncedRecipients', []),
                    'timestamp': item.get('timestamp', '')
                }
            # Not found — retry unless this was the last attempt.
            if attempt < max_retries - 1:
                log(f" Bounce record not found yet, retrying in {retry_delay}s (attempt {attempt + 1}/{max_retries})...")
                time.sleep(retry_delay)
            else:
                log(f"⚠ No bounce record found after {max_retries} attempts for Message-ID: {message_id}")
                return None
        except Exception as e:
            # DynamoDB errors are retried the same way as misses.
            log(f"⚠ DynamoDB Error (attempt {attempt + 1}/{max_retries}): {e}", 'ERROR')
            if attempt < max_retries - 1:
                time.sleep(retry_delay)
            else:
                return None
    return None
def apply_bounce_logic(parsed, subject):
    """
    Detect an SES bounce, look it up in DynamoDB and rewrite the headers so
    the bounce appears to come from the bounced recipient (readable in the
    user's mailbox instead of from MAILER-DAEMON).
    Returns: (parsed_email_object, was_modified_bool)
    """
    if not is_ses_bounce_notification(parsed):
        return parsed, False
    log("🔍 Detected SES MAILER-DAEMON bounce notification")
    # Extract the Message-ID (strip angle brackets and the @host suffix).
    message_id = (parsed.get('Message-ID') or '').strip('<>').split('@')[0]
    if not message_id:
        log("⚠ Could not extract Message-ID from bounce notification")
        return parsed, False
    log(f" Looking up Message-ID: {message_id}")
    # Look up the stored outbound-message record.
    bounce_info = get_bounce_info_from_dynamodb(message_id)
    if not bounce_info:
        return parsed, False
    # Report what was found.
    original_source = bounce_info['original_source']
    bounced_recipients = bounce_info['bouncedRecipients']
    bounce_type = bounce_info['bounceType']
    bounce_subtype = bounce_info['bounceSubType']
    log(f"✓ Found bounce info:")
    log(f" Original sender: {original_source}")
    log(f" Bounce type: {bounce_type}/{bounce_subtype}")
    log(f" Bounced recipients: {bounced_recipients}")
    # Use the first bounced recipient as the new From
    # (with multiple recipients there may be several).
    if bounced_recipients:
        new_from = bounced_recipients[0]
        # Rewrite headers, preserving the original SES sender for debugging.
        parsed['X-Original-SES-From'] = parsed.get('From', '')
        parsed['X-Bounce-Type'] = f"{bounce_type}/{bounce_subtype}"
        parsed.replace_header('From', new_from)
        if not parsed.get('Reply-To'):
            parsed['Reply-To'] = new_from
        # Replace generic bounce subjects with something meaningful.
        if 'delivery status notification' in subject.lower() or 'thanks for your submission' in subject.lower():
            parsed.replace_header('Subject', f"Delivery Status: {new_from}")
        log(f"✓ Rewritten FROM: {new_from}")
        return parsed, True
    log("⚠ No bounced recipients found in bounce info")
    return parsed, False
def signal_handler(signum, frame):
    """Request a graceful shutdown; the poll loop checks shutdown_requested."""
    global shutdown_requested
    print(f"\n⚠ Shutdown signal received (signal {signum})")
    shutdown_requested = True


# Register for docker stop (SIGTERM) and Ctrl-C (SIGINT).
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def log(message: str, level: str = 'INFO'):
    """Structured logging: timestamped, worker-tagged line, flushed immediately."""
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    line = f"[{stamp}] [{level}] [{WORKER_NAME}] {message}"
    print(line, flush=True)
def domain_to_queue_name(domain: str) -> str:
    """Map a domain to its SQS queue name (convention: domain.tld -> domain-tld-queue)."""
    return f"{domain.replace('.', '-')}-queue"
def get_queue_url() -> str:
    """Resolve the SQS queue URL for the configured WORKER_DOMAIN.

    Raises Exception with context when the lookup fails for any reason.
    """
    name = domain_to_queue_name(WORKER_DOMAIN)
    try:
        return sqs.get_queue_url(QueueName=name)['QueueUrl']
    except Exception as e:
        raise Exception(f"Failed to get queue URL for {WORKER_DOMAIN}: {e}")
def mark_as_processed(bucket: str, key: str, invalid_inboxes: list = None):
    """
    Mark an email object as successfully delivered.
    Only called when at least one recipient succeeded. S3 has no metadata
    update API, so the object is copied onto itself with MetadataDirective
    REPLACE. Failures here are logged, never raised.
    """
    try:
        head = s3.head_object(Bucket=bucket, Key=key)
        metadata = head.get('Metadata', {}) or {}
        metadata['processed'] = 'true'
        metadata['processed_at'] = str(int(time.time()))
        metadata['processed_by'] = WORKER_NAME
        metadata['status'] = 'delivered'
        # Drop transient state keys from earlier processing stages.
        metadata.pop('processing_started', None)
        metadata.pop('queued_at', None)
        # Record partially-failed (invalid) inboxes, if any.
        if invalid_inboxes:
            metadata['invalid_inboxes'] = ','.join(invalid_inboxes)
            log(f"⚠ Invalid inboxes recorded: {', '.join(invalid_inboxes)}", 'WARNING')
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=metadata,
            MetadataDirective='REPLACE'
        )
        log(f"✓ Marked s3://{bucket}/{key} as processed", 'SUCCESS')
    except Exception as e:
        log(f"Failed to mark as processed: {e}", 'WARNING')
def mark_as_all_invalid(bucket: str, key: str, invalid_inboxes: list):
    """
    Mark an email as failed because EVERY recipient is invalid (no mailbox
    exists). Metadata is updated via S3 self-copy with REPLACE; failures
    are logged, never raised.
    """
    try:
        head = s3.head_object(Bucket=bucket, Key=key)
        metadata = head.get('Metadata', {}) or {}
        metadata['processed'] = 'true'
        metadata['processed_at'] = str(int(time.time()))
        metadata['processed_by'] = WORKER_NAME
        metadata['status'] = 'failed'
        metadata['error'] = 'All recipients are invalid (mailboxes do not exist)'
        metadata['invalid_inboxes'] = ','.join(invalid_inboxes)
        # Drop transient state keys from earlier processing stages.
        metadata.pop('processing_started', None)
        metadata.pop('queued_at', None)
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=metadata,
            MetadataDirective='REPLACE'
        )
        log(f"✓ Marked s3://{bucket}/{key} as failed (all invalid)", 'SUCCESS')
    except Exception as e:
        log(f"Failed to mark as all invalid: {e}", 'WARNING')
def mark_as_failed(bucket: str, key: str, error: str, receive_count: int):
    """
    Mark an email as completely failed (ALL recipients failed).
    Stores the truncated error text and the SQS receive count for later
    inspection. Metadata is updated via S3 self-copy; failures are logged,
    never raised.
    """
    try:
        head = s3.head_object(Bucket=bucket, Key=key)
        metadata = head.get('Metadata', {}) or {}
        metadata['status'] = 'failed'
        metadata['failed_at'] = str(int(time.time()))
        metadata['failed_by'] = WORKER_NAME
        metadata['error'] = error[:500]  # S3 metadata size limit
        metadata['retry_count'] = str(receive_count)
        # Drop the transient processing marker.
        metadata.pop('processing_started', None)
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=metadata,
            MetadataDirective='REPLACE'
        )
        log(f"✗ Marked s3://{bucket}/{key} as failed: {error[:100]}", 'ERROR')
    except Exception as e:
        log(f"Failed to mark as failed: {e}", 'WARNING')
def is_temporary_smtp_error(error_msg: str) -> bool:
    """
    Return True when an SMTP error looks temporary (a retry is worthwhile).

    Temporary failures are 4xx reply codes and transient network problems;
    5xx codes are permanent. The previous implementation also matched the
    bare substring '4' (intended as "generic 4xx"), which mis-classified
    permanent errors such as '550 5.4.1 ...' as temporary — standalone
    three-digit 4xx codes are matched instead.
    """
    import re  # local import: this module does not import re at top level
    temporary_indicators = [
        '421',  # service not available
        '450',  # mailbox unavailable
        '451',  # local error
        '452',  # insufficient storage
        'timeout',
        'connection refused',
        'connection reset',
        'network unreachable',
        'temporarily',
        'try again',
    ]
    error_lower = error_msg.lower()
    if any(indicator in error_lower for indicator in temporary_indicators):
        return True
    # Generic 4xx: a standalone three-digit code starting with 4
    # (word boundaries prevent matching digits inside enhanced codes like 5.4.1).
    return re.search(r'\b4\d\d\b', error_lower) is not None
def is_permanent_recipient_error(error_msg: str) -> bool:
    """
    Return True when the failure is permanent for this recipient
    (the mailbox does not exist or the address is invalid).
    Covers the 550/551/553 reply codes and common textual variants.
    """
    needle = error_msg.lower()
    permanent_indicators = (
        '550',  # mailbox unavailable / not found
        '551',  # user not local
        '553',  # mailbox name not allowed / invalid
        'mailbox not found',
        'user unknown',
        'no such user',
        'recipient rejected',
        'does not exist',
        'invalid recipient',
        'unknown user',
    )
    for indicator in permanent_indicators:
        if indicator in needle:
            return True
    return False
def send_email(from_addr: str, recipient: str, raw_message: bytes) -> tuple:
    """
    Deliver an email via SMTP to ONE recipient.
    Returns: (success: bool, error: str or None, is_permanent: bool)
    """
    try:
        with smtplib.SMTP(SMTP_HOST, SMTP_PORT, timeout=30) as smtp:
            smtp.ehlo()
            # STARTTLS if configured; a failure is logged but not fatal.
            if SMTP_USE_TLS:
                try:
                    smtp.starttls()
                    smtp.ehlo()
                except Exception as e:
                    log(f" STARTTLS failed: {e}", 'WARNING')
            # Authentication if configured; a failure is logged but not fatal.
            if SMTP_USER and SMTP_PASS:
                try:
                    smtp.login(SMTP_USER, SMTP_PASS)
                except Exception as e:
                    log(f" SMTP auth failed: {e}", 'WARNING')
            # Send the email.
            result = smtp.sendmail(from_addr, [recipient], raw_message)
            # sendmail returns a dict of refused recipients (empty on success).
            if isinstance(result, dict) and result:
                # The recipient was refused by the server.
                error = result.get(recipient, 'Unknown refusal')
                is_permanent = is_permanent_recipient_error(str(error))
                log(f"{recipient}: {error} ({'permanent' if is_permanent else 'temporary'})", 'ERROR')
                return False, str(error), is_permanent
            else:
                # Delivered successfully.
                log(f"{recipient}: Delivered", 'SUCCESS')
                return True, None, False
    except smtplib.SMTPException as e:
        error_msg = str(e)
        is_permanent = is_permanent_recipient_error(error_msg)
        log(f"{recipient}: SMTP error - {error_msg}", 'ERROR')
        return False, error_msg, is_permanent
    except Exception as e:
        # Connection-level errors are always treated as temporary.
        log(f"{recipient}: Connection error - {e}", 'ERROR')
        return False, str(e), False
def extract_body_parts(parsed):
    """
    Extract both the text/plain and text/html bodies of a parsed message.

    Returns (text_body, html_body): text_body falls back to
    '(No body content)' when empty; html_body may be None.
    """
    text_body = ''
    html_body = None
    if not parsed.is_multipart():
        # Single-part message: decode the one payload and classify it.
        try:
            payload = parsed.get_payload(decode=True)
            if payload:
                decoded = payload.decode('utf-8', errors='ignore')
                if parsed.get_content_type() == 'text/html':
                    html_body = decoded
                else:
                    text_body = decoded
        except Exception as e:
            log(f"⚠ Error decoding non-multipart body: {e}", 'WARNING')
            text_body = str(parsed.get_payload())
    else:
        # Multipart: concatenate text/plain parts, keep the last text/html part.
        for part in parsed.walk():
            ctype = part.get_content_type()
            if ctype == 'text/plain':
                try:
                    text_body += part.get_payload(decode=True).decode('utf-8', errors='ignore')
                except Exception as e:
                    log(f"⚠ Error decoding text/plain part: {e}", 'WARNING')
            elif ctype == 'text/html':
                try:
                    html_body = part.get_payload(decode=True).decode('utf-8', errors='ignore')
                except Exception as e:
                    log(f"⚠ Error decoding text/html part: {e}", 'WARNING')
    final_text = text_body.strip() if text_body else '(No body content)'
    return final_text, html_body
def create_ooo_reply(original_parsed, recipient, ooo_msg, content_type='text'):
    """
    Build an Out-of-Office reply as a complete MIME message.
    The original body (text + html) is quoted below the OOO text.
    """
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.utils import formatdate, make_msgid
    text_body, html_body = extract_body_parts(original_parsed)
    original_subject = original_parsed.get('Subject', '(no subject)')
    original_from = original_parsed.get('From', 'unknown')
    # Assemble the new reply message.
    msg = MIMEMultipart('mixed')
    msg['From'] = recipient
    msg['To'] = original_from
    msg['Subject'] = f"Out of Office: {original_subject}"
    msg['Date'] = formatdate(localtime=True)
    msg['Message-ID'] = make_msgid(domain=recipient.split('@')[1])
    msg['In-Reply-To'] = original_parsed.get('Message-ID', '')
    msg['References'] = original_parsed.get('Message-ID', '')
    msg['Auto-Submitted'] = 'auto-replied'  # prevents autoresponder loops (RFC 3834)
    # Body container: text + optional html alternative.
    body_part = MIMEMultipart('alternative')
    # Text version: OOO message followed by the quoted original.
    text_content = f"{ooo_msg}\n\n"
    text_content += "--- Original Message ---\n"
    text_content += f"From: {original_from}\n"
    text_content += f"Subject: {original_subject}\n\n"
    text_content += text_body
    body_part.attach(MIMEText(text_content, 'plain', 'utf-8'))
    # HTML version (when requested, or when the original had an HTML body).
    if content_type == 'html' or html_body:
        html_content = f"<div>{ooo_msg}</div><br><hr><br>"
        html_content += "<blockquote style='margin:10px 0;padding:10px;border-left:3px solid #ccc;'>"
        html_content += f"<strong>Original Message</strong><br>"
        html_content += f"<strong>From:</strong> {original_from}<br>"
        html_content += f"<strong>Subject:</strong> {original_subject}<br><br>"
        html_content += (html_body if html_body else text_body.replace('\n', '<br>'))
        html_content += "</blockquote>"
        body_part.attach(MIMEText(html_content, 'html', 'utf-8'))
    msg.attach(body_part)
    return msg
def create_forward_message(original_parsed, recipient, forward_to, original_from):
    """
    Build a complete "FWD:" MIME message, keeping ALL original parts
    including attachments.

    Args:
        original_parsed: parsed original email.message.Message.
        recipient: local address doing the forwarding (becomes From).
        forward_to: destination address (becomes To).
        original_from: original sender (becomes Reply-To so replies go back
            to the real author).

    Returns:
        A MIMEMultipart('mixed') message ready for SES send_raw_email.
    """
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.utils import formatdate, make_msgid
    original_subject = original_parsed.get('Subject', '(no subject)')
    original_date = original_parsed.get('Date', 'unknown')
    # New outer message.
    msg = MIMEMultipart('mixed')
    msg['From'] = recipient
    msg['To'] = forward_to
    msg['Subject'] = f"FWD: {original_subject}"
    msg['Date'] = formatdate(localtime=True)
    msg['Message-ID'] = make_msgid(domain=recipient.split('@')[1])
    msg['Reply-To'] = original_from  # replies should reach the original sender
    text_body, html_body = extract_body_parts(original_parsed)
    # Body container (text + optional html alternative).
    body_part = MIMEMultipart('alternative')
    # Plain-text version with the classic forward header block.
    fwd_text = "---------- Forwarded message ---------\n"
    fwd_text += f"From: {original_from}\n"
    fwd_text += f"Date: {original_date}\n"
    fwd_text += f"Subject: {original_subject}\n"
    fwd_text += f"To: {recipient}\n\n"
    fwd_text += text_body
    body_part.attach(MIMEText(fwd_text, 'plain', 'utf-8'))
    # HTML version (only when the original had an html body).
    if html_body:
        fwd_html = "<div style='border-left:3px solid #ccc;padding-left:10px;margin:10px 0;'>"
        fwd_html += "<strong>---------- Forwarded message ---------</strong><br>"
        fwd_html += f"<strong>From:</strong> {original_from}<br>"
        fwd_html += f"<strong>Date:</strong> {original_date}<br>"
        fwd_html += f"<strong>Subject:</strong> {original_subject}<br>"
        fwd_html += f"<strong>To:</strong> {recipient}<br><br>"
        fwd_html += html_body
        fwd_html += "</div>"
        body_part.attach(MIMEText(fwd_html, 'html', 'utf-8'))
    msg.attach(body_part)
    # IMPORTANT: copy attachments from the original.
    if original_parsed.is_multipart():
        for part in original_parsed.walk():
            # Skip the multipart containers themselves, not their content.
            if part.get_content_maintype() == 'multipart':
                continue
            # FIX: previously every text/plain and text/html part was skipped,
            # which silently dropped attached .txt/.html FILES. Only skip text
            # parts that are NOT explicit attachments (those are the body,
            # already quoted above).
            disposition = str(part.get('Content-Disposition') or '').lower()
            if (part.get_content_type() in ('text/plain', 'text/html')
                    and 'attachment' not in disposition):
                continue
            # Re-attach everything else (attachments, inline images, ...).
            msg.attach(part)
    return msg
# ==========================================
# MAIN FUNCTION: PROCESS MESSAGE
# ==========================================
def process_message(message_body: dict, receive_count: int) -> bool:
    """
    Process one e-mail from the queue (an SNS-wrapped SES notification).

    Args:
        message_body: decoded JSON body of the SQS message.
        receive_count: SQS ApproximateReceiveCount (1-based attempt number).

    Returns:
        True  -> done, delete the SQS message (success OR unrecoverable),
        False -> keep the message in the queue for a retry.
    """
    try:
        # 1. UNPACKING (SNS -> SES)
        # The SQS body is JSON; an SNS envelope carries 'Type': 'Notification'
        # and the SES payload as a JSON string under 'Message'.
        if 'Message' in message_body and 'Type' in message_body:
            # It is an SNS notification; the inner payload may be str or dict.
            sns_content = message_body['Message']
            if isinstance(sns_content, str):
                ses_msg = json.loads(sns_content)
            else:
                ses_msg = sns_content
        else:
            # Fallback: possibly a direct SES payload (legacy support).
            ses_msg = message_body
        # 2. EXTRACT DATA
        mail = ses_msg.get('mail', {})
        receipt = ses_msg.get('receipt', {})
        message_id = mail.get('messageId')  # doubles as the S3 object key
        # Ignore the test notification SES emits when a receipt rule is set up.
        if message_id == "AMAZON_SES_SETUP_NOTIFICATION":
            log(" Received Amazon SES Setup Notification. Ignoring.", 'INFO')
            return True  # delete: not an error, just noise
        from_addr = mail.get('source')
        recipients = receipt.get('recipients', [])
        # S3 key validation: without a messageId the raw mail cannot be fetched.
        if not message_id:
            log("❌ Error: No messageId in event payload", 'ERROR')
            return True  # delete, unusable
        # Domain validation: the first recipient decides whether this message
        # belongs to this worker's domain at all.
        if recipients:
            first_recipient = recipients[0]
            domain = first_recipient.split('@')[1]
            if domain.lower() != WORKER_DOMAIN.lower():
                log(f"⚠ Security: Ignored message for {domain} (I am worker for {WORKER_DOMAIN})", 'WARNING')
                return True  # delete, wrong queue/worker
        else:
            log("⚠ Warning: No recipients in event", 'WARNING')
            return True
        # Derive the bucket name from the worker domain.
        bucket = get_bucket_name(WORKER_DOMAIN)
        key = message_id
        log(f"\n{'='*70}")
        log(f"Processing Email (SNS/SES):")
        log(f" ID: {key}")
        log(f" Recipients: {len(recipients)} -> {recipients}")
        log(f" Bucket: {bucket}")
        # 3. LOAD FROM S3
        try:
            response = s3.get_object(Bucket=bucket, Key=key)
            raw_bytes = response['Body'].read()
            log(f"✓ Loaded {len(raw_bytes)} bytes from S3")
        except s3.exceptions.NoSuchKey:
            # Race condition: SNS was faster than S3.
            # Returning False lets SQS redeliver after the visibility timeout.
            if receive_count < 5:
                log(f"⏳ S3 Object not found yet (Attempt {receive_count}). Retrying...", 'WARNING')
                return False
            else:
                log(f"❌ S3 Object missing permanently after retries.", 'ERROR')
                return True  # delete
        except Exception as e:
            log(f"❌ S3 Download Error: {e}", 'ERROR')
            return False  # retry
        # 4. PARSING & BOUNCE LOGIC
        try:
            parsed = BytesParser(policy=SMTPPolicy).parsebytes(raw_bytes)
            subject = parsed.get('Subject', '(no subject)')
            # Rewrite bounce headers (lookup via DynamoDB) if applicable.
            parsed, modified = apply_bounce_logic(parsed, subject)
            if modified:
                log(" ✨ Bounce detected & headers rewritten via DynamoDB")
                # Continue with the modified bytes.
                raw_bytes = parsed.as_bytes()
                from_addr_final = parsed.get('From')  # new SMTP envelope sender
            else:
                from_addr_final = from_addr  # original envelope sender
        except Exception as e:
            # NOTE(review): if parsebytes() itself raised, 'parsed' is unbound
            # and step 5 below would hit a NameError — confirm and guard.
            log(f"⚠ Parsing/Logic Error: {e}. Sending original.", 'WARNING')
            from_addr_final = from_addr
        # 5. OOO & FORWARD LOGIC (runs before SMTP delivery)
        if rules_table and not is_ses_bounce_notification(parsed):
            for recipient in recipients:
                try:
                    # Per-address rule record from DynamoDB ({} when absent).
                    rule = rules_table.get_item(Key={'email_address': recipient}).get('Item', {})
                    # --- Out-of-office handling ---
                    if rule.get('ooo_active', False):
                        ooo_msg = rule.get('ooo_message', 'Default OOO message.')
                        content_type = rule.get('ooo_content_type', 'text')
                        sender = parsed.get('From')
                        try:
                            # Build the full MIME reply...
                            ooo_reply = create_ooo_reply(parsed, recipient, ooo_msg, content_type)
                            # ...and send via send_raw_email (supports complex MIME).
                            ses.send_raw_email(
                                Source=recipient,
                                Destinations=[sender],
                                RawMessage={'Data': ooo_reply.as_bytes()}
                            )
                            log(f"✓ Sent OOO reply to {sender} from {recipient}")
                        except ClientError as e:
                            error_code = e.response['Error']['Code']
                            log(f"⚠ SES OOO send failed ({error_code}): {e}", 'ERROR')
                    # --- Forward handling ---
                    forwards = rule.get('forwards', [])
                    if forwards:
                        original_from = parsed.get('From')
                        for forward_to in forwards:
                            try:
                                # Full forward message including attachments.
                                fwd_msg = create_forward_message(parsed, recipient, forward_to, original_from)
                                # Send via send_raw_email.
                                ses.send_raw_email(
                                    Source=recipient,
                                    Destinations=[forward_to],
                                    RawMessage={'Data': fwd_msg.as_bytes()}
                                )
                                log(f"✓ Forwarded to {forward_to} from {recipient} (original: {original_from})")
                            except ClientError as e:
                                error_code = e.response['Error']['Code']
                                log(f"⚠ SES forward failed to {forward_to} ({error_code}): {e}", 'ERROR')
                except ClientError as e:
                    error_code = e.response['Error']['Code']
                    if error_code == 'MessageRejected':
                        log(f"⚠ SES rejected send for {recipient}: Check verification/quotas.", 'ERROR')
                    elif error_code == 'AccessDenied':
                        log(f"⚠ SES AccessDenied for {recipient}: Check IAM policy.", 'ERROR')
                    else:
                        log(f"⚠ SES error for {recipient}: {e}", 'ERROR')
                except Exception as e:
                    # Rule failures never block the actual SMTP delivery below.
                    log(f"⚠ Rule processing error for {recipient}: {e}", 'WARNING')
                    traceback.print_exc()
        # 6. SMTP DELIVERY (loop over recipients)
        log(f"📤 Sending to {len(recipients)} recipient(s)...")
        successful = []
        failed_permanent = []
        failed_temporary = []
        for recipient in recipients:
            # Use raw_bytes (possibly rewritten) with 'from_addr_final' as
            # envelope sender (for bounces that is the original recipient,
            # otherwise the SES source address).
            success, error, is_perm = send_email(from_addr_final, recipient, raw_bytes)
            if success:
                successful.append(recipient)
            elif is_perm:
                failed_permanent.append(recipient)
            else:
                failed_temporary.append(recipient)
        # 7. RESULT & CLEANUP
        log(f"📊 Results: {len(successful)} OK, {len(failed_temporary)} TempFail, {len(failed_permanent)} PermFail")
        if len(successful) > 0:
            # At least one delivery went through -> overall success.
            mark_as_processed(bucket, key, failed_permanent if failed_permanent else None)
            log(f"✅ Success. Deleted from queue.")
            return True
        elif len(failed_permanent) == len(recipients):
            # Every recipient failed permanently (user unknown) -> delete.
            mark_as_all_invalid(bucket, key, failed_permanent)
            log(f"🛑 All recipients invalid. Deleted from queue.")
            return True
        else:
            # Temporary failures only -> keep in queue for retry.
            log(f"🔄 Temporary failures. Keeping in queue.")
            return False
    except Exception as e:
        log(f"❌ CRITICAL WORKER ERROR: {e}", 'ERROR')
        traceback.print_exc()
        return False  # retry (unless it keeps crashing)
def main_loop():
    """Main loop: polls the SQS queue and processes messages until shutdown."""
    # Resolve the queue URL once at startup; without it the worker cannot run.
    try:
        queue_url = get_queue_url()
    except Exception as e:
        log(f"FATAL: {e}", 'ERROR')
        sys.exit(1)
    # Startup banner with the effective configuration.
    log(f"\n{'='*70}")
    log(f"🚀 Email Worker started")
    log(f"{'='*70}")
    log(f" Worker Name: {WORKER_NAME}")
    log(f" Domain: {WORKER_DOMAIN}")
    log(f" Queue: {queue_url}")
    log(f" Region: {AWS_REGION}")
    log(f" SMTP: {SMTP_HOST}:{SMTP_PORT} (TLS: {SMTP_USE_TLS})")
    log(f" Poll interval: {POLL_INTERVAL}s")
    log(f" Max messages per poll: {MAX_MESSAGES}")
    log(f" Visibility timeout: {VISIBILITY_TIMEOUT}s")
    log(f"{'='*70}\n")
    consecutive_errors = 0
    max_consecutive_errors = 10
    messages_processed = 0
    last_activity = time.time()
    while not shutdown_requested:
        try:
            # Fetch messages from the queue (long polling).
            response = sqs.receive_message(
                QueueUrl=queue_url,
                MaxNumberOfMessages=MAX_MESSAGES,
                WaitTimeSeconds=POLL_INTERVAL,
                VisibilityTimeout=VISIBILITY_TIMEOUT,
                AttributeNames=['ApproximateReceiveCount', 'SentTimestamp'],
                MessageAttributeNames=['All']
            )
            # A successful poll resets the error streak.
            consecutive_errors = 0
            if 'Messages' not in response:
                # No messages: emit a heartbeat at most once a minute.
                if time.time() - last_activity > 60:
                    log(f"Waiting for messages... (processed: {messages_processed})")
                    last_activity = time.time()
                continue
            message_count = len(response['Messages'])
            log(f"\n✉ Received {message_count} message(s) from queue")
            last_activity = time.time()
            # Process each message of this batch.
            for msg in response['Messages']:
                if shutdown_requested:
                    log("Shutdown requested, stopping processing")
                    break
                receipt_handle = msg['ReceiptHandle']
                # How many times SQS has delivered this message so far.
                receive_count = int(msg.get('Attributes', {}).get('ApproximateReceiveCount', 1))
                # SentTimestamp is in milliseconds -> time spent in the queue.
                sent_timestamp = int(msg.get('Attributes', {}).get('SentTimestamp', 0)) / 1000
                queue_time = int(time.time() - sent_timestamp) if sent_timestamp else 0
                if queue_time > 0:
                    log(f"Message was in queue for {queue_time}s")
                try:
                    message_body = json.loads(msg['Body'])
                    # Deliver the e-mail.
                    success = process_message(message_body, receive_count)
                    if success:
                        # Done (delivered or unrecoverable): remove from queue.
                        sqs.delete_message(
                            QueueUrl=queue_url,
                            ReceiptHandle=receipt_handle
                        )
                        log("✓ Message deleted from queue")
                        messages_processed += 1
                    else:
                        # On failure the message stays queued; the visibility
                        # timeout triggers redelivery.
                        log(f"⚠ Message kept in queue for retry (attempt {receive_count}/3)")
                except json.JSONDecodeError as e:
                    log(f"✗ Invalid message format: {e}", 'ERROR')
                    # Malformed messages can never succeed: delete them.
                    sqs.delete_message(
                        QueueUrl=queue_url,
                        ReceiptHandle=receipt_handle
                    )
                except Exception as e:
                    log(f"✗ Error processing message: {e}", 'ERROR')
                    traceback.print_exc()
                    # Message stays in the queue for a retry.
        except KeyboardInterrupt:
            log("\n⚠ Keyboard interrupt received")
            break
        except Exception as e:
            consecutive_errors += 1
            log(f"✗ Error in main loop ({consecutive_errors}/{max_consecutive_errors}): {e}", 'ERROR')
            traceback.print_exc()
            if consecutive_errors >= max_consecutive_errors:
                log("Too many consecutive errors, shutting down", 'ERROR')
                break
            # Short pause before retrying after an error.
            time.sleep(5)
    # Shutdown banner.
    log(f"\n{'='*70}")
    log(f"👋 Worker shutting down")
    log(f" Messages processed: {messages_processed}")
    log(f"{'='*70}\n")
if __name__ == '__main__':
    # Validation: the worker cannot run without a target domain.
    if not WORKER_DOMAIN:
        log("ERROR: WORKER_DOMAIN not set!", 'ERROR')
        sys.exit(1)
    try:
        main_loop()
    except Exception as e:
        log(f"Fatal error: {e}", 'ERROR')
        traceback.print_exc()
        sys.exit(1)

View File

@@ -1,520 +0,0 @@
import json
import os
import re
import signal
import smtplib
import sys
import time
import traceback
from datetime import datetime
from email.parser import BytesParser
from email.policy import SMTP as SMTPPolicy

import boto3
# AWS Configuration
AWS_REGION = 'us-east-2'
s3 = boto3.client('s3', region_name=AWS_REGION)
sqs = boto3.client('sqs', region_name=AWS_REGION)
# ✨ Worker configuration (domain-specific: one worker per mail domain)
WORKER_DOMAIN = os.environ.get('WORKER_DOMAIN')  # e.g. 'andreasknuth.de'
WORKER_NAME = os.environ.get('WORKER_NAME', f'worker-{WORKER_DOMAIN}')
# Worker settings (all overridable via environment variables)
POLL_INTERVAL = int(os.environ.get('POLL_INTERVAL', '20'))  # SQS long-poll wait in seconds
MAX_MESSAGES = int(os.environ.get('MAX_MESSAGES', '10'))  # max messages per receive call
VISIBILITY_TIMEOUT = int(os.environ.get('VISIBILITY_TIMEOUT', '300'))  # seconds before an unacked message reappears
# SMTP configuration (simple, since each worker serves exactly one domain)
SMTP_HOST = os.environ.get('SMTP_HOST', 'localhost')
SMTP_PORT = int(os.environ.get('SMTP_PORT', '25'))
SMTP_USE_TLS = os.environ.get('SMTP_USE_TLS', 'false').lower() == 'true'
SMTP_USER = os.environ.get('SMTP_USER')
SMTP_PASS = os.environ.get('SMTP_PASS')
# Graceful shutdown flag, flipped by the signal handler below and checked
# by the main loop between messages.
shutdown_requested = False
def signal_handler(signum, frame):
    """Request a graceful shutdown on SIGTERM/SIGINT."""
    global shutdown_requested
    print(f"\n⚠ Shutdown signal received (signal {signum})")
    shutdown_requested = True
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def log(message: str, level: str = 'INFO'):
    """Print a timestamped, level-tagged log line for this worker."""
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    line = f"[{now}] [{level}] [{WORKER_NAME}] {message}"
    print(line, flush=True)
def domain_to_queue_name(domain: str) -> str:
    """Map a mail domain to its SQS queue name (dots become dashes)."""
    return f"{domain.replace('.', '-')}-queue"
def get_queue_url() -> str:
    """Look up the SQS queue URL for the configured worker domain."""
    name = domain_to_queue_name(WORKER_DOMAIN)
    try:
        return sqs.get_queue_url(QueueName=name)['QueueUrl']
    except Exception as e:
        # Wrap so the caller sees which domain the lookup was for.
        raise Exception(f"Failed to get queue URL for {WORKER_DOMAIN}: {e}")
def mark_as_processed(bucket: str, key: str, invalid_inboxes: list = None):
    """
    Mark the stored e-mail in S3 as successfully delivered.

    Only called when at least one recipient succeeded. S3 metadata cannot be
    updated in place, so the object is copied onto itself with
    MetadataDirective='REPLACE'.

    Args:
        bucket: S3 bucket holding the raw e-mail.
        key: object key of the e-mail.
        invalid_inboxes: optional list of recipients that failed permanently.
    """
    try:
        head = s3.head_object(Bucket=bucket, Key=key)
        metadata = head.get('Metadata', {}) or {}
        metadata['processed'] = 'true'
        metadata['processed_at'] = str(int(time.time()))
        metadata['processed_by'] = WORKER_NAME
        metadata['status'] = 'delivered'
        # Drop transient bookkeeping keys.
        metadata.pop('processing_started', None)
        metadata.pop('queued_at', None)
        # Record invalid inboxes if any were reported.
        if invalid_inboxes:
            metadata['invalid_inboxes'] = ','.join(invalid_inboxes)
            log(f"⚠ Invalid inboxes recorded: {', '.join(invalid_inboxes)}", 'WARNING')
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=metadata,
            MetadataDirective='REPLACE'
        )
        log(f"✓ Marked s3://{bucket}/{key} as processed", 'SUCCESS')
    except Exception as e:
        # Best effort: a metadata failure must not fail the delivery.
        log(f"Failed to mark as processed: {e}", 'WARNING')
def mark_as_all_invalid(bucket: str, key: str, invalid_inboxes: list):
    """
    Tag the stored e-mail as failed because every recipient mailbox was
    invalid. S3 metadata is replaced via a self-copy.
    """
    try:
        current = s3.head_object(Bucket=bucket, Key=key).get('Metadata', {}) or {}
        current.update({
            'processed': 'true',
            'processed_at': str(int(time.time())),
            'processed_by': WORKER_NAME,
            'status': 'failed',
            'error': 'All recipients are invalid (mailboxes do not exist)',
            'invalid_inboxes': ','.join(invalid_inboxes),
        })
        # Remove transient bookkeeping keys.
        for stale in ('processing_started', 'queued_at'):
            current.pop(stale, None)
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=current,
            MetadataDirective='REPLACE'
        )
        log(f"✓ Marked s3://{bucket}/{key} as failed (all invalid)", 'SUCCESS')
    except Exception as e:
        # Best effort only; never raise out of the bookkeeping path.
        log(f"Failed to mark as all invalid: {e}", 'WARNING')
def mark_as_failed(bucket: str, key: str, error: str, receive_count: int):
    """
    Mark the stored e-mail as completely failed.

    Only called when ALL recipients failed and retries are exhausted.

    Args:
        bucket: S3 bucket holding the raw e-mail.
        key: object key of the e-mail.
        error: human-readable failure summary (truncated to 500 chars).
        receive_count: how many delivery attempts were made.
    """
    try:
        head = s3.head_object(Bucket=bucket, Key=key)
        metadata = head.get('Metadata', {}) or {}
        metadata['status'] = 'failed'
        metadata['failed_at'] = str(int(time.time()))
        metadata['failed_by'] = WORKER_NAME
        metadata['error'] = error[:500]  # S3 metadata size limit
        metadata['retry_count'] = str(receive_count)
        metadata.pop('processing_started', None)
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=metadata,
            MetadataDirective='REPLACE'
        )
        log(f"✗ Marked s3://{bucket}/{key} as failed: {error[:100]}", 'ERROR')
    except Exception as e:
        # Best effort: a metadata failure must not crash the worker.
        log(f"Failed to mark as failed: {e}", 'WARNING')
def is_temporary_smtp_error(error_msg: str) -> bool:
    """
    Return True when an SMTP error message looks temporary (retry sensible).

    Per RFC 5321, 4yz reply codes are transient and 5yz are permanent.
    Network-level problems (timeouts, refused/reset connections) are also
    treated as temporary.

    Args:
        error_msg: textual SMTP/connection error.

    Returns:
        True if a retry is worthwhile, False otherwise.
    """
    # Match a standalone 4xx reply code anywhere in the message.
    # FIX: the previous implementation included the bare substring '4' as a
    # "generic 4xx" indicator, which wrongly classified ANY message containing
    # the digit 4 — including permanent codes like '554' — as temporary.
    if re.search(r'(?<!\d)4\d\d(?!\d)', error_msg):
        return True
    temporary_indicators = [
        'timeout',
        'connection refused',
        'connection reset',
        'network unreachable',
        'temporarily',
        'try again'
    ]
    error_lower = error_msg.lower()
    return any(indicator in error_lower for indicator in temporary_indicators)
def is_permanent_recipient_error(error_msg: str) -> bool:
    """
    Decide whether an error means this recipient's mailbox permanently does
    not exist (550 = mailbox not found, 551 = user not local, 553 = mailbox
    name invalid, plus the usual textual variants).
    """
    permanent_indicators = (
        '550',                    # Mailbox unavailable / not found
        '551',                    # User not local
        '553',                    # Mailbox name not allowed / invalid
        'mailbox not found',
        'user unknown',
        'no such user',
        'recipient rejected',
        'does not exist',
        'invalid recipient',
        'unknown user',
    )
    lowered = error_msg.lower()
    for indicator in permanent_indicators:
        if indicator in lowered:
            return True
    return False
def send_email(from_addr: str, recipient: str, raw_message: bytes) -> tuple:
    """
    Send an e-mail via SMTP to ONE recipient.

    Args:
        from_addr: SMTP envelope sender (MAIL FROM).
        recipient: single destination address (RCPT TO).
        raw_message: complete raw message bytes.

    Returns:
        (success: bool, error: str or None, is_permanent: bool)
    """
    try:
        with smtplib.SMTP(SMTP_HOST, SMTP_PORT, timeout=30) as smtp:
            smtp.ehlo()
            # STARTTLS if configured (best effort: continues in plain on error).
            if SMTP_USE_TLS:
                try:
                    smtp.starttls()
                    smtp.ehlo()
                except Exception as e:
                    log(f" STARTTLS failed: {e}", 'WARNING')
            # Authentication if configured (also best effort).
            if SMTP_USER and SMTP_PASS:
                try:
                    smtp.login(SMTP_USER, SMTP_PASS)
                except Exception as e:
                    log(f" SMTP auth failed: {e}", 'WARNING')
            # Hand the message over.
            result = smtp.sendmail(from_addr, [recipient], raw_message)
            # sendmail returns a dict of refused recipients (empty on success).
            if isinstance(result, dict) and result:
                # The recipient was refused by the server.
                error = result.get(recipient, 'Unknown refusal')
                is_permanent = is_permanent_recipient_error(str(error))
                log(f"{recipient}: {error} ({'permanent' if is_permanent else 'temporary'})", 'ERROR')
                return False, str(error), is_permanent
            else:
                # Delivered.
                log(f"{recipient}: Delivered", 'SUCCESS')
                return True, None, False
    except smtplib.SMTPException as e:
        # Protocol-level failure; classify via the server's wording.
        error_msg = str(e)
        is_permanent = is_permanent_recipient_error(error_msg)
        log(f"{recipient}: SMTP error - {error_msg}", 'ERROR')
        return False, error_msg, is_permanent
    except Exception as e:
        # Connection-level errors are always treated as temporary.
        log(f"{recipient}: Connection error - {e}", 'ERROR')
        return False, str(e), False
def process_message(message_body: dict, receive_count: int) -> bool:
    """
    Process one e-mail from the queue.

    A message may carry several recipients; delivery is attempted for all.

    Args:
        message_body: decoded SQS payload with bucket/key/from/recipients/...
        receive_count: SQS delivery attempt number (1-based).

    Returns:
        True to delete the message (success or unrecoverable),
        False to keep it queued for a retry.
    """
    bucket = message_body['bucket']
    key = message_body['key']
    from_addr = message_body['from']
    recipients = message_body['recipients']  # list of recipient addresses
    domain = message_body['domain']
    subject = message_body.get('subject', '(unknown)')
    message_id = message_body.get('message_id', '(unknown)')
    log(f"\n{'='*70}")
    log(f"Processing email (Attempt #{receive_count}):")
    log(f" MessageId: {message_id}")
    log(f" S3 Key: {key}")
    log(f" Domain: {domain}")
    log(f" From: {from_addr}")
    log(f" Recipients: {len(recipients)}")
    for recipient in recipients:
        log(f" - {recipient}")
    log(f" Subject: {subject}")
    log(f" S3: s3://{bucket}/{key}")
    log(f"{'='*70}")
    # ✨ VALIDATION: the message's domain must match this worker's domain.
    if domain.lower() != WORKER_DOMAIN.lower():
        log(f"ERROR: Wrong domain! Expected {WORKER_DOMAIN}, got {domain}", 'ERROR')
        log("This message should not be in this queue! Deleting...", 'ERROR')
        return True  # delete: does not belong here
    # Load the raw e-mail from S3.
    try:
        response = s3.get_object(Bucket=bucket, Key=key)
        raw_bytes = response['Body'].read()
        log(f"✓ Loaded {len(raw_bytes):,} bytes ({len(raw_bytes)/1024:.1f} KB)")
    except s3.exceptions.NoSuchKey:
        log(f"✗ S3 object not found (may have been deleted)", 'ERROR')
        return True  # not retryable - delete the message
    except Exception as e:
        log(f"✗ Failed to load from S3: {e}", 'ERROR')
        return False  # could be transient - retry
    # Deliver to all recipients.
    log(f"\n📤 Sending to {len(recipients)} recipient(s)...")
    log(f"Connecting to {SMTP_HOST}:{SMTP_PORT} (TLS: {SMTP_USE_TLS})")
    successful = []
    failed_temporary = []
    failed_permanent = []
    for recipient in recipients:
        success, error, is_permanent = send_email(from_addr, recipient, raw_bytes)
        if success:
            successful.append(recipient)
        elif is_permanent:
            failed_permanent.append(recipient)
        else:
            failed_temporary.append(recipient)
    # Summary of the delivery results.
    log(f"\n📊 Delivery Results:")
    log(f" ✓ Successful: {len(successful)}/{len(recipients)}")
    log(f" ✗ Failed (temporary): {len(failed_temporary)}")
    log(f" ✗ Failed (permanent): {len(failed_permanent)}")
    # Decision logic.
    if len(successful) > 0:
        # ✅ Case 1: at least one recipient succeeded
        # -> status=delivered, record any invalid inboxes
        invalid_inboxes = failed_permanent if failed_permanent else None
        mark_as_processed(bucket, key, invalid_inboxes)
        log(f"{'='*70}")
        log(f"✅ Email delivered to {len(successful)} recipient(s)", 'SUCCESS')
        if failed_permanent:
            log(f"{len(failed_permanent)} invalid inbox(es): {', '.join(failed_permanent)}", 'WARNING')
        if failed_temporary:
            log(f"{len(failed_temporary)} temporary failure(s) - NOT retrying (at least 1 success)", 'WARNING')
        log(f"{'='*70}\n")
        return True  # delete the message
    elif len(failed_permanent) == len(recipients):
        # ❌ Case 2: ALL recipients failed permanently (every inbox invalid)
        # -> status=failed, invalid_inboxes = all of them
        mark_as_all_invalid(bucket, key, failed_permanent)
        log(f"{'='*70}")
        log(f"✗ All recipients are invalid inboxes - NO delivery", 'ERROR')
        log(f" Invalid: {', '.join(failed_permanent)}", 'ERROR')
        log(f"{'='*70}\n")
        return True  # delete (not retryable)
    else:
        # ⏳ Case 3: only temporary failures, no successful deliveries
        # -> retry while attempts remain
        if receive_count < 3:
            log(f"⚠ All failures are temporary, will retry", 'WARNING')
            log(f"{'='*70}\n")
            return False  # do NOT delete -> retry
        else:
            # Max retries reached -> mark as failed.
            error_summary = f"Failed after {receive_count} attempts. Temporary errors for all recipients."
            mark_as_failed(bucket, key, error_summary, receive_count)
            log(f"{'='*70}")
            log(f"✗ Email delivery failed permanently after {receive_count} attempts", 'ERROR')
            log(f"{'='*70}\n")
            return False  # after 3 attempts -> moves to DLQ automatically
def main_loop():
    """Main loop: polls the SQS queue and processes messages until shutdown."""
    # Resolve the queue URL once at startup; without it the worker cannot run.
    try:
        queue_url = get_queue_url()
    except Exception as e:
        log(f"FATAL: {e}", 'ERROR')
        sys.exit(1)
    # Startup banner with the effective configuration.
    log(f"\n{'='*70}")
    log(f"🚀 Email Worker started")
    log(f"{'='*70}")
    log(f" Worker Name: {WORKER_NAME}")
    log(f" Domain: {WORKER_DOMAIN}")
    log(f" Queue: {queue_url}")
    log(f" Region: {AWS_REGION}")
    log(f" SMTP: {SMTP_HOST}:{SMTP_PORT} (TLS: {SMTP_USE_TLS})")
    log(f" Poll interval: {POLL_INTERVAL}s")
    log(f" Max messages per poll: {MAX_MESSAGES}")
    log(f" Visibility timeout: {VISIBILITY_TIMEOUT}s")
    log(f"{'='*70}\n")
    consecutive_errors = 0
    max_consecutive_errors = 10
    messages_processed = 0
    last_activity = time.time()
    while not shutdown_requested:
        try:
            # Fetch messages from the queue (long polling).
            response = sqs.receive_message(
                QueueUrl=queue_url,
                MaxNumberOfMessages=MAX_MESSAGES,
                WaitTimeSeconds=POLL_INTERVAL,
                VisibilityTimeout=VISIBILITY_TIMEOUT,
                AttributeNames=['ApproximateReceiveCount', 'SentTimestamp'],
                MessageAttributeNames=['All']
            )
            # A successful poll resets the error streak.
            consecutive_errors = 0
            if 'Messages' not in response:
                # No messages: emit a heartbeat at most once a minute.
                if time.time() - last_activity > 60:
                    log(f"Waiting for messages... (processed: {messages_processed})")
                    last_activity = time.time()
                continue
            message_count = len(response['Messages'])
            log(f"\n✉ Received {message_count} message(s) from queue")
            last_activity = time.time()
            # Process each message of this batch.
            for msg in response['Messages']:
                if shutdown_requested:
                    log("Shutdown requested, stopping processing")
                    break
                receipt_handle = msg['ReceiptHandle']
                # How many times SQS has delivered this message so far.
                receive_count = int(msg.get('Attributes', {}).get('ApproximateReceiveCount', 1))
                # SentTimestamp is in milliseconds -> time spent in the queue.
                sent_timestamp = int(msg.get('Attributes', {}).get('SentTimestamp', 0)) / 1000
                queue_time = int(time.time() - sent_timestamp) if sent_timestamp else 0
                if queue_time > 0:
                    log(f"Message was in queue for {queue_time}s")
                try:
                    message_body = json.loads(msg['Body'])
                    # Deliver the e-mail.
                    success = process_message(message_body, receive_count)
                    if success:
                        # Done (delivered or unrecoverable): remove from queue.
                        sqs.delete_message(
                            QueueUrl=queue_url,
                            ReceiptHandle=receipt_handle
                        )
                        log("✓ Message deleted from queue")
                        messages_processed += 1
                    else:
                        # On failure the message stays queued; the visibility
                        # timeout triggers redelivery.
                        log(f"⚠ Message kept in queue for retry (attempt {receive_count}/3)")
                except json.JSONDecodeError as e:
                    log(f"✗ Invalid message format: {e}", 'ERROR')
                    # Malformed messages can never succeed: delete them.
                    sqs.delete_message(
                        QueueUrl=queue_url,
                        ReceiptHandle=receipt_handle
                    )
                except Exception as e:
                    log(f"✗ Error processing message: {e}", 'ERROR')
                    traceback.print_exc()
                    # Message stays in the queue for a retry.
        except KeyboardInterrupt:
            log("\n⚠ Keyboard interrupt received")
            break
        except Exception as e:
            consecutive_errors += 1
            log(f"✗ Error in main loop ({consecutive_errors}/{max_consecutive_errors}): {e}", 'ERROR')
            traceback.print_exc()
            if consecutive_errors >= max_consecutive_errors:
                log("Too many consecutive errors, shutting down", 'ERROR')
                break
            # Short pause before retrying after an error.
            time.sleep(5)
    # Shutdown banner.
    log(f"\n{'='*70}")
    log(f"👋 Worker shutting down")
    log(f" Messages processed: {messages_processed}")
    log(f"{'='*70}\n")
if __name__ == '__main__':
    # Validation: the worker cannot run without a target domain.
    if not WORKER_DOMAIN:
        log("ERROR: WORKER_DOMAIN not set!", 'ERROR')
        sys.exit(1)
    try:
        main_loop()
    except Exception as e:
        log(f"Fatal error: {e}", 'ERROR')
        traceback.print_exc()
        sys.exit(1)