Compare commits

..

251 Commits

Author SHA1 Message Date
947740232c logo 2026-04-15 23:12:23 -05:00
081a0fad4b fix 2026-04-15 14:28:28 -05:00
1e1265ef1b batch imapsync 2026-04-15 13:49:31 -05:00
9862689c0c no markAsBlocked, 2026-04-12 20:43:37 -05:00
bed6c2a398 fix 2026-04-03 17:03:14 -05:00
27c2be664a standby mode, sns or sqs 2026-04-03 16:54:51 -05:00
7aed24bfff Merge branch 'contabo' 2026-04-03 16:16:01 -05:00
2ebe0484a4 create topic, subscription and queues per Domain 2026-04-03 16:15:20 -05:00
61fce745af moving certs 2026-03-24 20:23:34 -05:00
b732cebd94 updated spam corrections 2026-03-20 10:33:33 -05:00
36c122bf53 new spam config 2026-03-19 18:18:42 -05:00
6e2a061cf3 add ip 2026-03-15 15:07:01 -05:00
688d49e218 remove python worker 2026-03-13 20:12:52 -05:00
6016fbe13d remove version 2026-03-13 20:11:35 -05:00
369be75066 fix 2026-03-13 17:52:54 -05:00
2192f146ea remove SRV Records 2026-03-13 17:03:17 -05:00
a11ed8c526 fix 2026-03-12 19:27:07 -05:00
4caa51991f update for mobile 2026-03-12 19:11:36 -05:00
386be31671 update autodiscover 2026-03-12 18:56:18 -05:00
bd8efc867a whitelist feature 2026-03-11 19:47:37 -05:00
d331bd13b5 no buffer 2026-03-11 19:38:02 -05:00
610b01eee7 whitelist helper 2026-03-11 19:26:32 -05:00
c2d4903bc9 ENABLE_FAIL2BAN 0 2026-03-11 09:38:00 -05:00
5e4859a5c4 changes from main branch 2026-03-08 16:32:16 -05:00
613aa30493 logs 2026-03-08 16:15:41 -05:00
29f360ece8 logger console + file 2026-03-08 16:09:30 -05:00
62221e8121 fix 2026-03-08 14:54:33 -05:00
74c4f5801e Prometheus, Grafana, blackbox_exporter 2026-03-08 14:50:43 -05:00
285ffffb3a add missing import 2026-03-07 17:08:50 -06:00
90b120957d add missing import 2026-03-07 17:07:50 -06:00
99ab2a07d8 send mail even if if parsing fails 2026-03-07 17:06:03 -06:00
cd44449067 send mail even in case of parsing error ... 2026-03-07 17:04:50 -06:00
757855866c printstats 2026-03-07 16:44:53 -06:00
d9a91c13ed printstats 2026-03-07 16:41:51 -06:00
12af8577f3 changes 2026-03-07 15:47:14 -06:00
1d53f2d357 pino 2026-03-07 15:34:15 -06:00
9586869c0c neue Ports 2026-03-07 15:26:56 -06:00
d1426afec5 new structure 2026-03-07 15:16:14 -06:00
3ab46f163a ipadresses 2026-03-07 15:05:44 -06:00
56c7b51e35 changed blocked sender list 2026-03-07 15:01:56 -06:00
c826d4c299 move and imports changed 2026-03-07 14:59:41 -06:00
908bb76c3a ip address change 2026-03-07 12:05:06 -06:00
41514a7f51 Merge branch 'contabo' of git.bizmatch.net:aknuth/email-amazon into contabo 2026-03-07 12:02:34 -06:00
4324a5785f Merge branch 'main' into contabo 2026-03-07 12:02:22 -06:00
a70ae78a93 Patch for blocklist 2026-03-07 11:56:54 -06:00
6db032bd4c fail2ban, ignoreip 2026-03-06 16:55:13 -06:00
206ef9b20c executable 2026-03-05 15:34:56 -06:00
f1b2c33996 ENABLE_FAIL2BAN=1 2026-03-05 14:28:39 -06:00
726df19a76 ignoreip for fail2ban 2026-03-05 11:03:32 -06:00
Andreas Knuth
f6601501c0 disabled fail2ban 2026-03-05 17:01:48 +00:00
22d937ddfd imapsync 2026-03-02 18:07:16 -06:00
c56cae16d6 sdfsdf 2026-03-02 16:58:26 -06:00
a090e940f1 sdfsdf 2026-03-02 16:55:47 -06:00
282298c361 change 2026-03-02 16:55:02 -06:00
d91152c035 autodiscover entfernt 2026-03-02 16:49:31 -06:00
80596ab347 autodiscover 2026-03-02 16:48:55 -06:00
7173da31d4 fix 2026-03-02 15:43:54 -06:00
8995cede7d flags SKIP_CLIENT_DNS and SKIP_DMARC 2026-03-02 15:40:55 -06:00
a077b38998 outlook adoptions 2026-02-25 16:43:12 -06:00
73dd442596 sdfsdf 2026-02-23 10:30:16 -06:00
7920ab07b8 gfhgfh 2026-02-23 10:00:36 -06:00
98c78d8dce dfgdfg 2026-02-23 09:51:46 -06:00
3381fd68c2 sdfsdf 2026-02-22 22:30:33 -06:00
3f91936098 dfgdfg 2026-02-22 22:26:49 -06:00
ee02d505c6 sdfsdf 2026-02-22 22:22:28 -06:00
eea0fcc35d sdfsdf 2026-02-22 22:16:22 -06:00
7bc8cbb9f7 sdfsdf 2026-02-22 21:58:27 -06:00
69fbb670f1 move 2026-02-22 21:51:09 -06:00
39e862cdd5 dfgdfg 2026-02-22 21:47:03 -06:00
b2d41e2baa sdfsd 2026-02-22 21:44:13 -06:00
552dd73f0a sdfsd 2026-02-22 21:28:07 -06:00
51405a3ec5 sdfsdf 2026-02-22 21:18:06 -06:00
bd3b2db235 sdfsdf 2026-02-22 20:59:52 -06:00
bbc24cbb63 sdfsd 2026-02-22 20:57:58 -06:00
06e25b33e0 asdasd 2026-02-22 18:33:37 -06:00
a5a7096cc7 sdfsdf 2026-02-22 18:30:04 -06:00
c20d471036 removed 2026-02-22 18:20:35 -06:00
0b0b7ddb82 dfgdfg 2026-02-22 18:06:23 -06:00
42d16063a1 sdfdsf 2026-02-22 18:03:29 -06:00
bf96810d09 sdfsdf 2026-02-22 18:00:42 -06:00
4452dae34c dfgdfg 2026-02-22 17:58:08 -06:00
b1a295df85 sdfsdf 2026-02-22 16:06:54 -06:00
7956d2d6f5 dgdfg 2026-02-22 16:06:30 -06:00
915b0e59be sdfsdf 2026-02-22 16:00:09 -06:00
b90c8aec9e dfgdfg 2026-02-22 15:51:33 -06:00
dd41497f0b asdasd 2026-02-22 15:45:28 -06:00
8f0a899b66 sdsdf 2026-02-22 15:41:21 -06:00
4ac32f43d0 xvcxv 2026-02-22 15:39:02 -06:00
a1c7fecc27 sdf 2026-02-22 15:31:33 -06:00
173b3f382f dfgdf 2026-02-22 15:24:42 -06:00
a84bb23af0 update 2026-02-22 15:10:20 -06:00
3e656dacfa update 2026-02-22 14:55:00 -06:00
ce26d864b5 wildcard instead of * 2026-02-22 14:28:52 -06:00
f9723b2b68 mail-certs 2026-02-22 13:36:01 -06:00
956214f8c9 mail_network 2026-02-22 13:30:38 -06:00
aee2335c48 import mail_certs 2026-02-22 13:19:58 -06:00
8808d81113 update 2026-02-22 13:00:24 -06:00
ee19b5b659 changes 2026-02-22 12:58:24 -06:00
b072083318 caddy 2026-02-22 12:19:34 -06:00
b321e6d2ec BugFixes 2026-02-12 17:48:06 -06:00
16469de068 new node.js impl., removed old stuff 2026-02-12 17:03:00 -06:00
4343aefb76 lifecycle-configuration 14 days 2026-02-12 15:42:30 -06:00
68f00e3873 log output 2026-02-12 10:11:14 -06:00
e0555181a1 backup mails 2026-02-12 10:03:32 -06:00
b3fd560ee1 backup emails 2026-02-12 10:00:36 -06:00
9bb327eada cleanup 2026-02-11 18:33:44 -06:00
67c2440f4a change home folder for dovecot 2026-02-11 17:47:44 -06:00
94ec589a32 fix2 2026-02-10 18:50:19 -06:00
ec6bb989f2 Fix 2026-02-10 18:41:55 -06:00
63d12f8c7c requeue 2026-02-10 18:35:45 -06:00
663faa6a08 better logging 2026-02-10 18:22:31 -06:00
494bfd6a10 lowercase 2026-02-10 17:51:41 -06:00
665c1e611a runnable 2026-02-10 17:23:16 -06:00
9b8217cbd8 changes 2026-02-10 17:17:43 -06:00
07e2449d04 cloudflare next 2026-02-10 14:26:02 -06:00
3bd1ed14cf Forward-Rule mit smtp_override → Mail geht nur zum alten Provider, keine DMS-Delivery
Forward-Rule ohne smtp_override → normaler Forward + DMS-Delivery (bestehendes Verhalten)
Keine Rule → nur DMS-Delivery (bestehendes Verhalten)
2026-02-10 11:57:10 -06:00
994cf9055c FROM_ADDR changed 2026-02-09 18:10:33 -06:00
3849e3fc2d fix for logger 2026-02-09 15:59:29 -06:00
88d526aa00 log rotate 2026-02-09 15:47:24 -06:00
d9b6399dc7 forward raw email 2026-02-09 14:04:00 -06:00
a593db160b DMS_CONTAINER="mailserver" 2026-02-09 13:33:02 -06:00
c8bb77886a message in english 2026-02-09 13:25:35 -06:00
b3f84e91a8 executable 2026-02-09 13:14:51 -06:00
6bfe33d3af Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-02-09 13:13:39 -06:00
8c3db9db95 new scripts & new mail from 2026-02-09 13:13:30 -06:00
6fccc0b2f9 Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-02-09 10:58:31 -06:00
1d66485068 new method _send_via_legacy_smtp 2026-02-09 10:58:03 -06:00
38fcf8c4d8 script 2026-01-30 17:12:26 -06:00
2026e6afcd automated whitelist 2026-01-30 16:52:26 -06:00
fd3c9bedda whitelist based on Domains 2026-01-30 16:39:57 -06:00
d74eb93763 ip instead of radix 2026-01-30 16:22:09 -06:00
c3d992a479 moved 2026-01-30 16:14:29 -06:00
aaec33365e type = "radix"; 2026-01-30 15:43:29 -06:00
8d8b227f6b next fix 2026-01-30 15:33:28 -06:00
7fb7f33e72 try next fix 2026-01-30 14:18:01 -06:00
4d22969238 IP Whitelisting 2026-01-30 09:56:02 -06:00
81c62446f5 whitelisting 2026-01-29 21:23:59 -06:00
1cb469b49d move location 2026-01-29 21:10:56 -06:00
51c5cf673c rspamd pw 2026-01-29 20:59:57 -06:00
4687b8e582 RSPAMD Port only localhost 2026-01-29 17:16:07 -06:00
8dfb5d2aa6 Spam first version 2026-01-29 17:15:05 -06:00
f5c479feb4 Erweiterung fuer iPhones 2026-01-28 14:20:56 -06:00
041b58d4ae add region 2026-01-28 13:43:18 -06:00
98b9306290 autodiscover 2026-01-28 13:31:39 -06:00
96a1815b6a wait for dovecot 2026-01-27 21:21:43 -06:00
1d5e24f541 neuer cron 2026-01-27 21:05:16 -06:00
90e294de82 lang=en_US 2026-01-27 17:28:37 -06:00
e183d2ea2c Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-01-27 17:26:50 -06:00
b7b7142914 add certificate mounting from caddy volume 2026-01-27 17:26:39 -06:00
af4def0874 LANGUAGE=en_US 2026-01-25 21:18:22 -06:00
5acce1a75b wrong tab 2026-01-25 17:54:27 -06:00
406cce6270 _create_forward_message fixed for multipart messages 2026-01-25 17:23:08 -06:00
046111e267 set always header X-SES-Worker-Processed 2026-01-25 16:45:28 -06:00
5108ad5a7d catch exception 2026-01-25 15:36:56 -06:00
cf9bd4c9fb DISABLE_MANAGESIEVE 2026-01-25 14:43:08 -06:00
247d66ef8f removed version 2026-01-25 14:00:36 -06:00
d574f85efb container rename 2026-01-25 13:59:43 -06:00
db5d9e618f just python support 2026-01-25 13:58:41 -06:00
472ac97c56 add cron job 2026-01-25 13:43:26 -06:00
6e83c9e5ad INTERNAL_SMTP_PORT=25 2026-01-25 13:22:54 -06:00
2d9aba7e04 moved 2026-01-25 13:20:58 -06:00
3884abc695 sdfsdf 2026-01-24 16:56:37 -06:00
f9e866d948 dfgdfg 2026-01-24 16:54:33 -06:00
404168143a ASas 2026-01-24 16:49:54 -06:00
30e928c6e6 sdfdsf 2026-01-24 16:46:49 -06:00
adad46ce7d html content 2026-01-24 16:37:46 -06:00
424175fe72 dfgdfg 2026-01-24 16:32:12 -06:00
d42f59533e sdfsdf 2026-01-24 16:27:15 -06:00
92d3a1a858 asdasd 2026-01-24 16:24:11 -06:00
56c83ec182 sdfsdf 2026-01-24 16:20:00 -06:00
d516979b12 symlink 2026-01-24 16:15:58 -06:00
e7eb1eedaa sdfsd 2026-01-24 15:12:34 -06:00
c522c4fa73 activate sieve path 2026-01-24 15:09:07 -06:00
8e187985ea check for inbox folder 2026-01-24 14:56:00 -06:00
d1677a656c sieve generation 2026-01-24 14:51:52 -06:00
182598c402 enable managesieve tmp. 2026-01-24 14:03:26 -06:00
7e5ceae907 updates 2026-01-23 16:41:44 -06:00
8a04151bd2 changed dir 2026-01-23 13:13:31 -06:00
df37f59ff9 modular worker 2026-01-23 13:03:31 -06:00
e5188a064c moved 2026-01-18 18:12:09 -06:00
a616848787 no whitespace 2026-01-17 16:54:07 -06:00
054f894e7d too restrictive 2026-01-17 16:32:16 -06:00
4b08d0d40e roundcude Port 8888 2026-01-17 16:21:42 -06:00
44b792f8d3 port 8080 fuer roundcube 2026-01-17 16:15:26 -06:00
a8fb73c00c --break-system-packages 2026-01-17 15:48:00 -06:00
8ad8cdf1d4 Certificates 2026-01-17 15:39:02 -06:00
2c8963f638 AWS_ACCESS & custom image 2026-01-17 14:59:48 -06:00
049ed91d3d rename 2026-01-17 14:47:03 -06:00
826d4eab7b dummy content removed 2026-01-17 14:36:01 -06:00
23506403e6 new roundcube plugin 2026-01-17 14:35:26 -06:00
f7fe285200 cleanup 2026-01-16 22:16:09 -06:00
5122082914 updates 2026-01-16 21:53:34 -06:00
deed33c0cf forward/reply solution for internal mails 2026-01-16 17:55:54 -06:00
7f9ac1c9e6 avoid loops 2026-01-13 21:47:54 -06:00
397a2f7d98 Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-01-12 17:19:54 -06:00
be9c5b4ceb cleanwork 2026-01-12 17:19:44 -06:00
85d5eface6 new logging ... 2026-01-11 21:14:49 -06:00
87e00ae867 ConnectionResetError Spam, Waiting for messages all 300sec 2026-01-11 18:12:31 -06:00
7dfad647e9 sdfsdf 2026-01-10 18:42:31 -06:00
afe33ef381 logging fix 2026-01-10 18:28:10 -06:00
c27e4dff80 guard 2026-01-10 18:23:55 -06:00
f95461ad75 hot reload & better pooling 2026-01-10 18:03:08 -06:00
06195b9a60 reworked 2026-01-10 17:17:00 -06:00
f372082512 Merge branch 'main' of git.bizmatch.net:aknuth/email-amazon 2026-01-10 16:48:36 -06:00
990218ba95 cleanup + unified worker 2026-01-10 16:48:05 -06:00
1b33990d86 update 2025-12-29 13:34:13 -06:00
585bb285bf ooo/forward plugin 2025-12-29 10:17:24 +01:00
929adcdbc9 content 2025-12-23 17:25:47 -06:00
6d92339225 update all workers 2025-12-23 17:24:29 -06:00
cd91418def Retry-Logik 2025-12-23 17:05:30 -06:00
05f4a38eb6 fix 2025-12-20 16:14:46 -06:00
b8f66a8e8b fix 2025-12-20 15:56:17 -06:00
1d1a384d1b update acc. to bounces 2025-12-19 17:12:40 -06:00
6df8674b72 removed 2025-12-17 16:31:09 -06:00
baaafe711c fix 2025-12-17 16:08:55 -06:00
91e85fa422 chmod 2025-12-17 16:01:25 -06:00
2de49417c4 neues Script 2025-12-17 16:00:28 -06:00
c12e6263f3 fix 2025-12-17 15:49:24 -06:00
e8e8739d07 claudi-fix 2025-12-17 15:45:37 -06:00
3c59075bd1 extract_body 2025-12-17 15:21:28 -06:00
1babc68655 fix 2025-12-17 14:38:57 -06:00
2e3df96ca3 fix 2025-12-17 14:18:39 -06:00
d2dbceb53e delete rule 2025-12-17 14:15:37 -06:00
e756c5f91d BugFixes 2025-12-17 13:44:37 -06:00
c2288d22dc BugFix 2025-12-17 13:33:18 -06:00
e60faffbde new scripts 2025-12-17 12:38:10 -06:00
93f2c0c3bd worker with ooo & forward logic 2025-12-17 12:25:56 -06:00
19b4bb1471 rename 2025-12-15 12:29:04 -06:00
ad8f032285 fix health check 2025-12-15 12:20:51 -06:00
cca66b7833 extract_original_message_id corrected 2025-12-11 17:27:11 -06:00
76debb9f7c dfgdfgdfg 2025-12-10 15:46:27 -06:00
ee7b556211 sdf 2025-12-10 15:37:32 -06:00
335a26f7dc sdfsdf 2025-12-10 15:32:57 -06:00
cfdd840527 update 2025-12-10 15:21:26 -06:00
a5fe94df66 zxcxz 2025-12-10 15:12:43 -06:00
bfebbcd53e update 2025-12-09 09:48:22 -06:00
004d606c26 "CNAME" "imap.${DOMAIN_NAME}" entfernt 2025-12-05 18:11:41 -06:00
add0ca2e75 DMS update 2025-12-03 18:21:22 -06:00
a89d53ce26 update 2025-12-03 17:54:32 -06:00
1a85ed3a89 fix fuer AMAZON_SES_SETUP_NOTIFICATION 2025-11-30 17:20:44 -06:00
8011cd5e1d try/catch 2025-11-30 17:13:35 -06:00
9d92a17e76 -f option 2025-11-30 17:02:21 -06:00
eba3e0462b neuer Worker 2025-11-30 16:41:21 -06:00
442326ac24 "Expiration": "Days": 30 2025-11-29 14:10:01 -06:00
0411f457d5 moved 2025-11-26 17:18:54 -06:00
b1c5d6bfe9 fix 2025-11-26 17:11:03 -06:00
850291c284 error fixes 2025-11-26 17:04:11 -06:00
0bda8a3cf6 update and move 2025-11-26 16:51:32 -06:00
160ea900e1 mail inboxes 2025-11-26 16:21:42 -06:00
11f0cf031a fixes 2025-11-26 12:16:53 -06:00
ac697f9590 bug fixes 2025-11-26 11:13:01 -06:00
94 changed files with 10499 additions and 1885 deletions

3
.gitignore vendored
View File

@@ -1 +1,2 @@
.env .env
node_modules

3
DMS/.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
mail-data
mail-logs
mail-state

31
DMS/Dockerfile Normal file
View File

@@ -0,0 +1,31 @@
FROM docker.io/mailserver/docker-mailserver:latest
LABEL maintainer="andreas.knuth@bayarea-cc.com"
LABEL description="Custom DMS with Python3 support and Sieve Sync"
# 1. Install Python, pip and dependencies (boto3 for AWS access, croniter for schedule parsing)
RUN apt-get update && \
apt-get install -y --no-install-recommends \
python3 \
python3-pip \
&& pip3 install --break-system-packages --no-cache-dir boto3 croniter \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# 2. Create the working directory for all custom scripts
WORKDIR /scripts
# 3. Copy the DynamoDB -> Sieve sync script
COPY sync_dynamodb_to_sieve.py /scripts/sync.py
RUN chmod +x /scripts/sync.py
# 4. Copy the schedule configuration
COPY sieve-schedule /etc/sieve-schedule
# 5. Copy the supervisor configuration for the sync job
COPY sieve-supervisor.conf /etc/supervisor/conf.d/sieve-sync.conf
# 6. Copy the dynamic whitelist script and its supervisor config
COPY dynamic_whitelist.py /scripts/dynamic_whitelist.py
RUN chmod +x /scripts/dynamic_whitelist.py
COPY whitelist-supervisor.conf /etc/supervisor/conf.d/dynamic-whitelist.conf

58
DMS/batch_imapsync.sh Normal file
View File

@@ -0,0 +1,58 @@
#!/bin/bash
# batch_imapsync.sh - Runs an IMAP sync for every user, detached in the background.
#
# Usage: batch_imapsync.sh <source-host> <target-host> <users.csv>
# CSV format: email@domain.com,SecretPassword123
HOST1=$1
HOST2=$2
CSV_FILE=$3

if [ -z "$HOST1" ] || [ -z "$HOST2" ] || [ -z "$CSV_FILE" ]; then
    echo "Usage: $0 <source-host> <target-host> <users.csv>"
    echo "Beispiel: $0 secure.emailsrvr.com 147.93.132.244 stxmaterials.csv"
    exit 1
fi

# ======================================================================
# The actual sync function (sent to the background below)
# ======================================================================
run_sync_jobs() {
    TIMESTAMP=$(date +%Y%m%d_%H%M%S)
    LOG_DIR="sync_logs_$TIMESTAMP"
    mkdir -p "$LOG_DIR"
    echo "Beginne Sync-Lauf am $(date)" > "batch_master_${TIMESTAMP}.log"

    # FIX: "|| [ -n "$email" ]" keeps the last CSV row even when the file
    # has no trailing newline (a plain "while read" silently drops it).
    while IFS=, read -r email password || [ -n "$email" ]; do
        email=$(echo "$email" | tr -d '\r' | xargs)
        password=$(echo "$password" | tr -d '\r' | xargs)
        [ -z "$email" ] && continue

        LOGFILE="$LOG_DIR/imapsync_${email}.log"
        echo "[$(date)] Syncing $email -> $LOGFILE" >> "batch_master_${TIMESTAMP}.log"

        # Run the dockerized imapsync for the current user.
        # stdin is redirected from /dev/null so imapsync cannot consume the CSV stream.
        docker run --rm gilleslamiral/imapsync imapsync \
            --host1 "$HOST1" --user1 "$email" --password1 "$password" --ssl1 \
            --host2 "$HOST2" --user2 "$email" --password2 "$password" --ssl2 \
            --automap > "$LOGFILE" 2>&1 < /dev/null
    done < "$CSV_FILE"

    echo "Alle Sync-Jobs beendet am $(date)" >> "batch_master_${TIMESTAMP}.log"
}

# ======================================================================
# Script start: detach from the terminal
# ======================================================================
echo "🚀 Starte Batch-IMAP-Sync im Hintergrund..."
# Call the function, discard any remaining output, and background it (&)
run_sync_jobs </dev/null >/dev/null 2>&1 &

echo "✅ Der Job läuft jetzt autark im Hintergrund (sequenziell)."
echo "Du kannst das SSH-Terminal jetzt bedenkenlos schließen!"
echo "Überwache den Gesamtfortschritt mit:"
echo " tail -f batch_master_*.log"
echo "Oder die Details eines einzelnen Postfachs mit:"
echo " tail -f sync_logs_*/imapsync_<email>.log"

View File

@@ -1,67 +1,117 @@
services: services:
mailserver: mailserver:
image: docker.io/mailserver/docker-mailserver:latest build:
container_name: mailserver-new context: .
hostname: mail.email-srvr.com dockerfile: Dockerfile
domainname: email-srvr.com image: dms-custom:latest
container_name: mailserver
# Node-spezifischer Hostname - A-Record zeigt auf DIESEN Server.
# email-srvr.com selbst zeigt auf einen anderen Server und wird hier NICHT verwendet.
hostname: node1.email-srvr.com
ports: ports:
- "25:25" # SMTP (parallel zu MailCow auf Port 25) - "25:25"
- "587:587" # SMTP Submission - "587:587"
- "465:465" # SMTP SSL - "465:465"
- "143:143" # IMAP - "143:143"
- "993:993" # IMAP SSL - "993:993"
- "110:110" # POP3 - "110:110"
- "995:995" # POP3 SSL - "995:995"
- "127.0.0.1:11334:11334"
volumes: volumes:
- ./docker-data/dms/mail-data/:/var/mail/ - ./docker-data/dms/mail-data/:/var/mail/
- ./docker-data/dms/mail-state/:/var/mail-state/ - ./docker-data/dms/mail-state/:/var/mail-state/
- ./docker-data/dms/mail-logs/:/var/log/mail/ - ./docker-data/dms/mail-logs/:/var/log/mail/
- ./docker-data/dms/config/:/tmp/docker-mailserver/ - ./docker-data/dms/config/:/tmp/docker-mailserver/
# - ./docker-data/dms/config/dovecot/10-master.conf:/etc/dovecot/conf.d/10-master.conf - ./docker-data/dms/config/dovecot/conf.d/95-sieve-redirect.conf:/etc/dovecot/conf.d/95-sieve-redirect.conf:ro
- /etc/localtime:/etc/localtime:ro - /etc/localtime:/etc/localtime:ro
- ./sync_dynamodb_to_sieve.py:/scripts/sync.py:ro
- ./sieve-cron:/etc/cron.d/sieve-sync:ro
# -------------------------------------------------------
# Caddy Zertifikate: gesamtes Cert-Verzeichnis mounten.
#
# Caddy legt Wildcard-Certs so ab:
# *.andreasknuth.de/
# *.andreasknuth.de.crt
# *.andreasknuth.de.key
# node1.email-srvr.com/
# node1.email-srvr.com.crt
# node1.email-srvr.com.key
#
# setup-dms-tls.sh referenziert per:
# /etc/mail/certs/*.domain/*.domain.crt|.key
# -------------------------------------------------------
# - /var/lib/docker/volumes/caddy_data/_data/caddy/certificates/acme-v02.api.letsencrypt.org-directory:/etc/mail/certs:ro
- /home/aknuth/git/email-amazon/caddy/caddy-data/caddy/certificates/acme-v02.api.letsencrypt.org-directory:/etc/mail/certs:ro
# -------------------------------------------------------
# Dovecot SNI Konfiguration (generiert von setup-dms-tls.sh)
# DMS lädt /tmp/docker-mailserver/dovecot-sni.cf automatisch.
# -------------------------------------------------------
- ./docker-data/dms/config/dovecot-sni.cf:/etc/dovecot/conf.d/99-sni.conf:ro
environment: environment:
# Wichtig: Rspamd und andere Services deaktivieren für ersten Test # -------------------------------------------------------
- ENABLE_RSPAMD=0 # SSL Default-Cert: node1.email-srvr.com
# Das ist das Fallback-Cert wenn kein SNI-Match gefunden wird
# (z.B. bei direktem IP-Connect ohne Hostname).
# Kundendomain-SNI wird über postfix-main.cf + dovecot-sni.cf gesteuert.
# -------------------------------------------------------
- SSL_TYPE=manual
- SSL_CERT_PATH=/etc/mail/certs/node1.email-srvr.com/node1.email-srvr.com.crt
- SSL_KEY_PATH=/etc/mail/certs/node1.email-srvr.com/node1.email-srvr.com.key
# SPAM / Rspamd
- ENABLE_OPENDKIM=1 - ENABLE_OPENDKIM=1
- ENABLE_OPENDMARC=0 - ENABLE_OPENDMARC=0
- ENABLE_POLICYD_SPF=0 - ENABLE_POLICYD_SPF=0
- ENABLE_RSPAMD=1
- RSPAMD_GREYLISTING=0
- RSPAMD_CHECK_AUTHENTICATED=0
- RSPAMD_HFILTER=1
- MOVE_SPAM_TO_JUNK=1
- ENABLE_AMAVIS=0 - ENABLE_AMAVIS=0
- ENABLE_SPAMASSASSIN=0 - ENABLE_SPAMASSASSIN=0
- ENABLE_POSTGREY=0 - ENABLE_POSTGREY=0
- RSPAMD_GREYLISTING=0
- ENABLE_CLAMAV=0 - ENABLE_CLAMAV=0
#- ENABLE_FAIL2BAN=1
- ENABLE_FAIL2BAN=0 # Sicherheit
- ENABLE_MANAGESIEVE=1 - ENABLE_FAIL2BAN=1
- ENABLE_UNBOUND=1
# Sonstige
- ENABLE_MANAGESIEVE=0
- ENABLE_POP3=1 - ENABLE_POP3=1
- RSPAMD_LEARN=1 - RSPAMD_LEARN=1
- MOVE_SPAM_TO_JUNK=1
- RSPAMD_CHECK_AUTHENTICATED=0
- RSPAMD_HFILTER=0
- ONE_DIR=1 - ONE_DIR=1
- ENABLE_UPDATE_CHECK=0 - ENABLE_UPDATE_CHECK=0
- PERMIT_DOCKER=network - PERMIT_DOCKER=network
# - PERMIT_DOCKER=empty - SPOOF_PROTECTION=0
- SSL_TYPE=manual - ENABLE_SRS=0
- SSL_CERT_PATH=/tmp/docker-mailserver/ssl/cert.pem - LOG_LEVEL=info
- SSL_KEY_PATH=/tmp/docker-mailserver/ssl/key.pem
# Amazon SES SMTP Relay # Amazon SES Relay
# - RELAY_HOST=email-smtp.us-east-2.amazonaws.com - RELAY_HOST=email-smtp.us-east-2.amazonaws.com
# - RELAY_PORT=587 - RELAY_PORT=587
# - RELAY_USER=${SES_SMTP_USER} - RELAY_USER=${SES_SMTP_USER}
# - RELAY_PASSWORD=${SES_SMTP_PASSWORD} - RELAY_PASSWORD=${SES_SMTP_PASSWORD}
# Weitere Einstellungen
- POSTFIX_OVERRIDE_HOSTNAME=email-srvr.com # AWS Credentials
- AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
- AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
- AWS_REGION=us-east-2
# Postfix
# POSTFIX_OVERRIDE_HOSTNAME: Was Postfix im EHLO/HELO Banner sendet.
# node1.email-srvr.com passt zum TLS-Cert und ist der echte Hostname.
- POSTFIX_OVERRIDE_HOSTNAME=node1.email-srvr.com
- POSTFIX_MYNETWORKS=172.16.0.0/12 172.17.0.0/12 172.18.0.0/12 [::1]/128 [fe80::]/64 - POSTFIX_MYNETWORKS=172.16.0.0/12 172.17.0.0/12 172.18.0.0/12 [::1]/128 [fe80::]/64
- POSTFIX_MAILBOX_SIZE_LIMIT=0 - POSTFIX_MAILBOX_SIZE_LIMIT=0
- POSTFIX_MESSAGE_SIZE_LIMIT=0 - POSTFIX_MESSAGE_SIZE_LIMIT=0
- SPOOF_PROTECTION=0
- ENABLE_SRS=1
- SRS_SENDER_CLASSES=envelope_sender,header_sender
- SRS_SECRET=EBk/ndWRA2s8ZMQFIXq0mJnS6SRbgoj77wv00PZNpNw=
# Debug-Einstellungen
- LOG_LEVEL=debug
cap_add: cap_add:
- NET_ADMIN - NET_ADMIN
- SYS_PTRACE - SYS_PTRACE
@@ -69,38 +119,48 @@ services:
networks: networks:
mail_network: mail_network:
aliases: aliases:
- mail.email-srvr.com
- mailserver - mailserver
- node1.email-srvr.com
roundcube: roundcube:
image: roundcube/roundcubemail:latest image: roundcube/roundcubemail:latest
container_name: roundcube-new container_name: roundcube
depends_on: depends_on:
- roundcube-db - roundcube-db
- mailserver - mailserver
environment: environment:
- ROUNDCUBEMAIL_DEFAULT_LANGUAGE=en_US
- ROUNDCUBEMAIL_DB_TYPE=pgsql - ROUNDCUBEMAIL_DB_TYPE=pgsql
- ROUNDCUBEMAIL_DB_HOST=roundcube-db - ROUNDCUBEMAIL_DB_HOST=roundcube-db
- ROUNDCUBEMAIL_DB_NAME=roundcube - ROUNDCUBEMAIL_DB_NAME=roundcube
- ROUNDCUBEMAIL_DB_USER=roundcube - ROUNDCUBEMAIL_DB_USER=roundcube
- ROUNDCUBEMAIL_DB_PASSWORD=${ROUNDCUBE_DB_PASSWORD} - ROUNDCUBEMAIL_DB_PASSWORD=${ROUNDCUBE_DB_PASSWORD}
# Einfache Konfiguration ohne SSL-Probleme (für ersten Test) # Roundcube verbindet intern über den Docker-Alias
- ROUNDCUBEMAIL_DEFAULT_HOST=ssl://mail.email-srvr.com - ROUNDCUBEMAIL_DEFAULT_HOST=ssl://node1.email-srvr.com
- ROUNDCUBEMAIL_DEFAULT_PORT=993 - ROUNDCUBEMAIL_DEFAULT_PORT=993
- ROUNDCUBEMAIL_SMTP_SERVER=tls://mail.email-srvr.com # Interner Traffic ohne TLS
- ROUNDCUBEMAIL_SMTP_PORT=587 - ROUNDCUBEMAIL_SMTP_SERVER=ssl://node1.email-srvr.com
- ROUNDCUBEMAIL_PLUGINS=password,managesieve - ROUNDCUBEMAIL_SMTP_PORT=465
# WICHTIG: Variablen LEER lassen, damit Roundcube keine Authentifizierung versucht!
- ROUNDCUBEMAIL_SMTP_USER=
- ROUNDCUBEMAIL_SMTP_PASSWORD=
- ROUNDCUBEMAIL_PLUGINS=password,email_config
# NEU: Schaltet die strikte PHP-Zertifikatsprüfung für interne Verbindungen ab
- ROUNDCUBEMAIL_IMAP_CONN_OPTIONS={"ssl":{"verify_peer":false,"verify_peer_name":false}}
- ROUNDCUBEMAIL_SMTP_CONN_OPTIONS={"ssl":{"verify_peer":false,"verify_peer_name":false}}
ports:
- "8888:80"
volumes: volumes:
- ./docker-data/roundcube/config:/var/roundcube/config # - ./docker-data/roundcube/config:/var/www/html/config
# ENTFERNEN Sie diese Zeile: - ./docker-data/roundcube/plugins/email_config:/var/www/html/plugins/email_config:ro
# - ./roundcube-config.php:/var/www/html/config/config.inc.php:ro
networks: networks:
- mail_network - mail_network
restart: unless-stopped restart: unless-stopped
roundcube-db: roundcube-db:
image: postgres:15 image: postgres:15
container_name: roundcube-db-new container_name: roundcube-db
environment: environment:
- POSTGRES_DB=roundcube - POSTGRES_DB=roundcube
- POSTGRES_USER=roundcube - POSTGRES_USER=roundcube
@@ -115,4 +175,4 @@ services:
networks: networks:
mail_network: mail_network:
external: true external: true

View File

@@ -0,0 +1,3 @@
# Custom Dovecot tweaks for Outlook clients: allow more parallel
# connections per user/IP and enable common client workarounds.
mail_max_userip_connections = 50
imap_client_workarounds = delay-newmail tb-extra-mailbox-sep tb-lsub-flags

View File

@@ -0,0 +1,11 @@
[DEFAULT]
# Whitelist: localhost, private Docker networks and the Budd Electric office IPs.
# (24.155.193.233 was listed twice in the original line; duplicate removed.)
ignoreip = 127.0.0.1/8 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 24.155.193.233 69.223.70.143
[dovecot]
# Raise the number of allowed failed attempts to 20
maxretry = 20
[postfix]
# Raise the number of allowed failed attempts to 20
maxretry = 20

View File

@@ -0,0 +1,8 @@
# Rspamd multimap rule: match the From-address domain against the map file
# generated by user-patches.sh. The large negative score effectively
# exempts our own domains from spam filtering.
DOCKER_WL {
type = "from";
filter = "email:domain";
map = "/etc/rspamd/override.d/docker_whitelist.map";
symbol = "DOCKER_WHITELIST";
description = "Whitelist fuer eigene Domains";
score = -50.0;
}

View File

@@ -0,0 +1,4 @@
# Override the default Rspamd actions
reject = 500; # Only reject at 500 points (never happens)
add_header = 6; # From 6 points on: set the X-Spam header
greylist = 500; # Effectively disable greylisting (threshold unreachably high)

View File

@@ -0,0 +1,6 @@
# Force "no action" whenever the DOCKER_WHITELIST symbol fired,
# regardless of the message's total spam score.
rules {
DOCKER_WHITELIST_FORCE {
action = "no action";
expression = "DOCKER_WHITELIST";
}
}

View File

@@ -0,0 +1 @@
enabled = false;

View File

@@ -0,0 +1 @@
password = "$2$eitni68mkfaaq957jx3bcx57sg1mmd9c$d9xyhjmmih7sjm3fpfu7r7tshhfm4ud93km65w5dkh151f5phiiy";

View File

@@ -0,0 +1,47 @@
#!/bin/bash
# user-patches.sh - executed automatically on every DMS container start.
#
# 1) Regenerates the Rspamd domain whitelist from a static domain list plus
#    every domain found in the postfix accounts file.
# 2) Copies custom rspamd local.d configs into place (DMS does not copy
#    local.d automatically).

ACCOUNTS_FILE="/tmp/docker-mailserver/postfix-accounts.cf"
WHITELIST_FILE="/etc/rspamd/override.d/docker_whitelist.map"

# --- Generate the Rspamd whitelist ---
STATIC_DOMAINS=(
    "bayarea-cc.com"
    "ruehrgedoens.de"
    "annavillesda.org"
    "bizmatch.net"
    "biz-match.com"
    "qrmaster.net"
    "nqsltd.com"
    "iitwelders.com"
)

echo "Patching: Generiere Rspamd Whitelist aus Accounts + statischen Domains..."
{
    printf '%s\n' "${STATIC_DOMAINS[@]}"
    if [ -f "$ACCOUNTS_FILE" ]; then
        # account lines look like "user@domain|{HASH}..." -> keep the domain part
        awk -F'|' '{print $1}' "$ACCOUNTS_FILE" | cut -d'@' -f2
    fi
} | sort -u > "$WHITELIST_FILE"

chmod 644 "$WHITELIST_FILE"
chown _rspamd:_rspamd "$WHITELIST_FILE" 2>/dev/null || true

echo "Whitelist erstellt:"
cat "$WHITELIST_FILE"

# --- Copy local.d configs manually (DMS does not copy local.d automatically) ---
echo "Patching: Kopiere custom rspamd local.d configs..."
SRC="/tmp/docker-mailserver/rspamd/local.d"
DST="/etc/rspamd/local.d"

if [ -d "$SRC" ]; then
    for f in "$SRC"/*; do
        [ -f "$f" ] || continue
        base=$(basename "$f")
        cp "$f" "$DST/$base"
        chown root:root "$DST/$base"
        chmod 644 "$DST/$base"
        echo " Kopiert: $base -> $DST/"
    done
fi

View File

@@ -0,0 +1,16 @@
<?php
// IMPORTANT: the config generated by the Docker container is included first!
// Our overrides below replace everything we need.
// NOTE(review): the SMTP connection deliberately skips certificate
// verification — the mailserver is reached via an internal Docker alias
// whose name does not match the TLS certificate.
$config['smtp_server'] = 'ssl://mailserver';
$config['smtp_port'] = 465;
$config['smtp_conn_options'] = array(
'ssl' => array(
'verify_peer' => false,
'verify_peer_name' => false,
'allow_self_signed' => true,
),
);
$config['language'] = 'en_US';

View File

@@ -0,0 +1,11 @@
{
"name": "local/email_config",
"type": "roundcube-plugin",
"description": "Email Configuration - Manage OOO and Forwarding",
"license": "MIT",
"version": "1.0.0",
"require": {
"php": ">=7.0.0",
"roundcube/plugin-installer": ">=0.1.3"
}
}

View File

@@ -0,0 +1,20 @@
/**
 * Email Configuration Plugin - Client Side
 *
 * Registers the "email_config_open" command: it POSTs to the server-side
 * plugin action and opens the returned (signed) configuration URL in a
 * new browser tab.
 */
if (window.rcmail) {
rcmail.addEventListener('init', function(evt) {
// Third argument "true" enables the command immediately.
rcmail.register_command('email_config_open', function() {
rcmail.http_post('plugin.email_config_generate_url', {},
rcmail.set_busy(true, 'loading'));
}, true);
});
// Roundcube fires "responseafter" + action name once the server replies.
rcmail.addEventListener('responseafterplugin.email_config_generate_url', function(response) {
rcmail.set_busy(false);
if (response && response.url) {
window.open(response.url, '_blank');
} else {
rcmail.display_message('Failed to generate configuration URL', 'error');
}
});
}

View File

@@ -0,0 +1,75 @@
<?php
/**
 * Roundcube settings plugin "email_config".
 *
 * Adds an "Email Configuration" page to the Settings task that links the
 * logged-in user to an external rules UI (OOO / forwarding) via an
 * HMAC-signed, time-limited URL.
 */
class email_config extends rcube_plugin
{
// Restrict the plugin to the "settings" task.
public $task = 'settings';
/**
 * Register localization texts, the settings-menu hook and the plugin action.
 */
function init()
{
$this->add_texts('localization/', false);
$this->add_hook('settings_actions', array($this, 'settings_actions'));
$this->register_action('plugin.email_config', array($this, 'email_config_init'));
}
/**
 * settings_actions hook: append our entry to the Settings sidebar.
 *
 * @param array $args hook arguments; 'actions' list is extended in place
 * @return array modified hook arguments
 */
function settings_actions($args)
{
$args['actions'][] = array(
'action' => 'plugin.email_config',
'class' => 'email-config',
'label' => 'email_config',
'domain' => 'email_config',
);
return $args;
}
/**
 * Action handler: render the plugin page using the form below as body.
 */
function email_config_init()
{
$rcmail = rcube::get_instance();
$this->register_handler('plugin.body', array($this, 'email_config_form'));
$rcmail->output->set_pagetitle('Email Configuration');
$rcmail->output->send('plugin');
}
/**
 * Build the page body: a card with a signed link to the external config UI.
 *
 * The link is signed with HMAC-SHA256 over "email|expires" so the external
 * service can verify the user without a separate login.
 *
 * @return string HTML fragment
 */
function email_config_form()
{
$rcmail = rcube::get_instance();
$email = $rcmail->user->get_username();
// SECURITY NOTE(review): shared secret is hard-coded in source and
// checked into the repo — move it to server-side configuration.
$secret_key = 'SHARED_SECRET_KEY_987654321';
$config_url = 'https://config.email-bayarea.com';
// Link is valid for one hour.
$expires = time() + 3600;
$data = $email . '|' . $expires;
$signature = hash_hmac('sha256', $data, $secret_key);
$url = $config_url . '/?email=' . urlencode($email) . '&expires=' . $expires . '&signature=' . $signature;
$out = '
<div class="box" style="max-width: 600px; margin: 40px auto; padding: 30px; background: #fff; border-radius: 8px; box-shadow: 0 2px 8px rgba(0,0,0,0.1);">
<div style="text-align: center; margin-bottom: 30px;">
<svg width="64" height="64" viewBox="0 0 24 24" fill="none" style="margin: 0 auto 20px;">
<path d="M20 4H4C2.9 4 2.01 4.9 2.01 6L2 18C2 19.1 2.9 20 4 20H20C21.1 20 22 19.1 22 18V6C22 4.9 21.1 4 20 4ZM20 8L12 13L4 8V6L12 11L20 6V8Z" fill="#4A90E2"/>
</svg>
<h2 style="margin: 0; color: #333; font-size: 24px; font-weight: 600;">Email Rules Configuration</h2>
</div>
<div style="background: #f8f9fa; padding: 20px; border-radius: 6px; margin-bottom: 25px;">
<p style="margin: 0 0 8px 0; color: #666; font-size: 14px;">Signed in as:</p>
<p style="margin: 0; color: #333; font-size: 16px; font-weight: 500;">' . htmlspecialchars($email) . '</p>
</div>
<p style="color: #666; line-height: 1.6; margin-bottom: 25px; text-align: center;">
Configure out-of-office auto-replies and email forwarding rules for your account.
</p>
<div style="text-align: center;">
<a href="' . htmlspecialchars($url) . '" target="_blank"
style="display: inline-block; background: #4A90E2; color: white; padding: 12px 32px;
border-radius: 6px; text-decoration: none; font-weight: 500; font-size: 16px;
transition: background 0.2s; box-shadow: 0 2px 4px rgba(74,144,226,0.3);"
onmouseover="this.style.background=\'#357ABD\'"
onmouseout="this.style.background=\'#4A90E2\'">
Open Email Configuration →
</a>
</div>
</div>';
return $out;
}
}

View File

@@ -0,0 +1,3 @@
<?php
$labels['email_config'] = 'Email Configuration';
$messages = array();

View File

@@ -0,0 +1,3 @@
<?php
// Localization strings for the email_config Roundcube plugin.
$labels['email_config'] = 'Email Configuration';
// No translatable runtime messages yet; the array is required by Roundcube's loader.
$messages = array();

87
DMS/dynamic_whitelist.py Normal file
View File

@@ -0,0 +1,87 @@
#!/usr/bin/env python3
import os
import re
import time
import subprocess
import threading
from datetime import datetime
try:
from croniter import croniter
except ImportError:
print("Bitte 'croniter' via pip installieren!")
exit(1)
LOG_FILE = '/var/log/mail/mail.log'
WHITELIST_DURATION_SEC = 24 * 60 * 60  # 24 hours
CRON_SCHEDULE = "0 * * * *"  # every hour
# Maps whitelisted IP -> unix timestamp of the last successful login.
active_ips = {}
# Regex for successful Dovecot IMAP/POP3 logins; captures the remote IP (rip=).
LOGIN_REGEX = re.compile(r"dovecot: (?:imap|pop3)-login: Login: user=<[^>]+>.*rip=([0-9]{1,3}(?:\.[0-9]{1,3}){3}),")
# Ignore private networks (Docker/local).
IGNORE_REGEX = re.compile(r"^(172\.|10\.|192\.168\.|127\.)")
def run_command(cmd):
    """Execute *cmd* through the shell, discarding all output.

    Failures (non-zero exit, missing binary, ...) are reported on stdout
    instead of raising, so callers can treat this as best-effort.
    """
    devnull = subprocess.DEVNULL
    try:
        subprocess.run(cmd, shell=True, check=True, stdout=devnull, stderr=devnull)
    except Exception as e:
        print(f"Fehler bei: {cmd} - {e}")
def cleanup_job():
    """Cron-scheduled background thread that expires whitelist entries.

    Runs forever: sleeps until the next CRON_SCHEDULE tick, then removes
    every IP whose last successful login is older than
    WHITELIST_DURATION_SEC from the fail2ban ignore lists and from
    active_ips.
    """
    # Renamed from 'iter', which shadowed the builtin of the same name.
    cron = croniter(CRON_SCHEDULE, datetime.now())
    while True:
        next_run = cron.get_next(datetime)
        sleep_seconds = (next_run - datetime.now()).total_seconds()
        if sleep_seconds > 0:
            time.sleep(sleep_seconds)
        print(f"[{datetime.now()}] Starte stündlichen Whitelist-Cleanup...")
        now = time.time()
        # Snapshot expired entries first so the dict is never mutated while
        # being iterated. NOTE(review): active_ips is shared with the
        # tail-follow thread without a lock; single dict ops are atomic
        # under CPython's GIL, but confirm this assumption holds here.
        expired_ips = [ip for ip, timestamp in active_ips.items() if now - timestamp > WHITELIST_DURATION_SEC]
        for ip in expired_ips:
            print(f"[{datetime.now()}] Whitelist für {ip} abgelaufen. Entferne...")
            run_command(f"fail2ban-client set dovecot delignoreip {ip}")
            run_command(f"fail2ban-client set postfix delignoreip {ip}")
            del active_ips[ip]
def follow_log():
    """Tail the mail log forever and whitelist IPs with successful logins.

    Uses the system 'tail -F', since that handles log rotation automatically.
    For each successful Dovecot IMAP/POP3 login the client IP is added to the
    fail2ban ignore lists (unless it is a private address) and its last-seen
    timestamp in active_ips is refreshed.
    """
    print(f"[{datetime.now()}] Dynamic Whitelist Monitor gestartet...")
    # Block until the log file exists (e.g. right after a container start).
    while not os.path.exists(LOG_FILE):
        time.sleep(2)
    process = subprocess.Popen(['tail', '-F', LOG_FILE], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, text=True)
    for line in process.stdout:
        match = LOGIN_REGEX.search(line)
        if match:
            ip = match.group(1)
            # Skip private (Docker/local) networks.
            if IGNORE_REGEX.match(ip):
                continue
            now = time.time()
            # First sighting: add the new IP to the fail2ban whitelists.
            if ip not in active_ips:
                print(f"[{datetime.now()}] Neuer erfolgreicher Login von {ip}. Setze auf Whitelist...")
                run_command(f"fail2ban-client set dovecot addignoreip {ip}")
                run_command(f"fail2ban-client set postfix addignoreip {ip}")
            # Refresh the last-seen timestamp either way.
            active_ips[ip] = now
if __name__ == '__main__':
    # Wait briefly until fail2ban has come up after a container start.
    time.sleep(15)
    # Start the cron cleanup in the background (daemon: dies with the process).
    threading.Thread(target=cleanup_job, daemon=True).start()
    # Start the log monitoring loop (runs forever in the foreground).
    follow_log()

41
DMS/run_sync.sh Executable file
View File

@@ -0,0 +1,41 @@
#!/bin/bash
# Interactive one-shot imapsync wrapper: copies a single mailbox from
# $HOST1 to $HOST2 using the dockerized imapsync image, logging to a file.
# 1. Check that the HOST1 and HOST2 environment variables are set.
if [ -z "$HOST1" ] || [ -z "$HOST2" ]; then
    echo "Fehler: Die Umgebungsvariablen HOST1 und/oder HOST2 sind nicht gesetzt."
    echo "Bitte setze diese vor dem Ausführen des Skripts, zum Beispiel mit:"
    echo 'export HOST1="65.254.254.50"'
    echo 'export HOST2="147.93.132.244"'
    exit 1
fi
# 2. Prompt for the e-mail address interactively.
read -p "Bitte E-Mail-Adresse eingeben: " EMAIL
# 3. Prompt for the password interactively and hidden (-s).
read -s -p "Bitte Passwort eingeben: " PASSWORD
echo "" # newline for clean output after the hidden password prompt
# 4. Define a log file name containing a timestamp and the e-mail address.
LOGFILE="imapsync_${EMAIL}_$(date +%Y%m%d_%H%M%S).log"
echo "Starte imapsync für $EMAIL..."
echo "Quell-Host (HOST1): $HOST1"
echo "Ziel-Host (HOST2): $HOST2"
echo "Logs werden gespeichert in: $LOGFILE"
echo "---------------------------------------------------"
# 5. Run the docker container and log output via 'tee'.
# NOTE(review): the password is passed as a command-line argument and is
# visible in `ps` on the host while the sync runs — consider imapsync's
# --passfile1/--passfile2 options; confirm before changing.
docker run --rm -i gilleslamiral/imapsync imapsync \
--host1 "$HOST1" \
--user1 "$EMAIL" \
--password1 "$PASSWORD" \
--ssl1 \
--host2 "$HOST2" \
--user2 "$EMAIL" \
--password2 "$PASSWORD" \
--ssl2 \
--automap 2>&1 | tee "$LOGFILE"
echo "---------------------------------------------------"
echo "Sync abgeschlossen. Das vollständige Log findest du in: $LOGFILE"

207
DMS/setup-dms-tls.sh Executable file
View File

@@ -0,0 +1,207 @@
#!/bin/bash
# setup-dms-tls.sh
# Belongs in the DMS root directory (next to docker-compose.yml).
#
# Generates Dovecot and Postfix SNI configurations for multi-domain TLS.
# Reads domains from the running DMS and creates:
#   - docker-data/dms/config/dovecot-sni.cf
#   - docker-data/dms/config/postfix-main.cf
#   - docker-data/dms/config/postfix-sni.map  (NEW, for Postfix SNI)
#
# Cert convention (Caddy wildcard):
#   Caddy stores *.domain.tld under: wildcard_.domain.tld/wildcard_.domain.tld.crt
#   Inside the container (mounted at /etc/mail/certs):
#     /etc/mail/certs/wildcard_.domain.tld/wildcard_.domain.tld.crt
#     /etc/mail/certs/wildcard_.domain.tld/wildcard_.domain.tld.key
#
# Usage:
#   ./setup-dms-tls.sh
#   DMS_CONTAINER=mailserver NODE_HOSTNAME=node1.email-srvr.com ./setup-dms-tls.sh
set -e
DMS_CONTAINER=${DMS_CONTAINER:-"mailserver"}
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
CONFIG_DIR="$SCRIPT_DIR/docker-data/dms/config"
CERTS_BASE_PATH=${CERTS_BASE_PATH:-"/etc/mail/certs"}
# Node hostname: fallback cert for DMS (not a wildcard, a direct cert).
# Must match the 'hostname' value in docker-compose.yml.
NODE_HOSTNAME=${NODE_HOSTNAME:-"node1.email-srvr.com"}
echo "============================================================"
echo " 🔐 DMS TLS SNI Setup (Multi-Domain)"
echo " DMS Container: $DMS_CONTAINER"
echo " Config Dir: $CONFIG_DIR"
echo " Certs Base: $CERTS_BASE_PATH"
echo " Node Hostname: $NODE_HOSTNAME"
echo "============================================================"
# --- Read the list of mail domains from the running DMS container ---
echo ""
echo "📋 Lese Domains aus DMS..."
DOMAINS=$(docker exec "$DMS_CONTAINER" setup email list 2>/dev/null \
    | grep -oP '(?<=@)[^\s]+' \
    | sort -u)
if [ -z "$DOMAINS" ]; then
    echo "❌ Keine Accounts im DMS gefunden!"
    echo " Bitte zuerst anlegen: ./manage_mail_user.sh add user@domain.com PW"
    exit 1
fi
echo " Gefundene Domains:"
for d in $DOMAINS; do echo " - $d"; done
wildcard_cert_path() {
    # Map a domain to its Caddy wildcard certificate path inside the container.
    local domain="$1"
    printf '%s\n' "$CERTS_BASE_PATH/wildcard_.${domain}/wildcard_.${domain}.crt"
}
wildcard_key_path() {
    # Map a domain to its Caddy wildcard private-key path inside the container.
    local domain="$1"
    printf '%s\n' "$CERTS_BASE_PATH/wildcard_.${domain}/wildcard_.${domain}.key"
}
# --- Check certificate availability inside the container ---
echo ""
echo "🔍 Prüfe Zertifikat-Verfügbarkeit..."
DOMAINS_OK=""
DOMAINS_MISSING=""
for domain in $DOMAINS; do
    CERT_PATH=$(wildcard_cert_path "$domain")
    KEY_PATH=$(wildcard_key_path "$domain")
    if docker exec "$DMS_CONTAINER" test -f "$CERT_PATH" 2>/dev/null; then
        # NOTE(review): this prints domain and cert path with no separator;
        # formatting may have been lost here — confirm intended output.
        echo "$domain$CERT_PATH"
        DOMAINS_OK="$DOMAINS_OK $domain"
    else
        echo " ⚠️ $domain → KEIN Cert unter $CERT_PATH"
        echo " → update-caddy-certs.sh ausführen + caddy reload!"
        DOMAINS_MISSING="$DOMAINS_MISSING $domain"
    fi
done
# Check the node hostname cert (a direct cert, not a wildcard).
NODE_CERT_PATH="$CERTS_BASE_PATH/$NODE_HOSTNAME/$NODE_HOSTNAME.crt"
NODE_KEY_PATH="$CERTS_BASE_PATH/$NODE_HOSTNAME/$NODE_HOSTNAME.key"
if docker exec "$DMS_CONTAINER" test -f "$NODE_CERT_PATH" 2>/dev/null; then
    echo "$NODE_HOSTNAME → Cert vorhanden (Node Default)"
else
    echo " ⚠️ $NODE_HOSTNAME → KEIN Cert! Caddy-Block im Caddyfile prüfen."
fi
if [ -n "$DOMAINS_MISSING" ]; then
    echo ""
    echo " ⚠️ Fehlende Certs:$DOMAINS_MISSING"
    echo " Diese Domains werden NICHT in SNI-Config eingetragen."
fi
if [ -z "$DOMAINS_OK" ]; then
    echo "❌ Kein einziges Kundendomain-Cert gefunden!"
    echo " Bitte zuerst update-caddy-certs.sh ausführen + caddy reload abwarten."
    exit 1
fi
# ================================================================
# DOVECOT SNI configuration
# ================================================================
DOVECOT_CFG="$CONFIG_DIR/dovecot-sni.cf"
echo ""
echo "📝 Generiere: $DOVECOT_CFG"
cat > "$DOVECOT_CFG" << 'HEADER'
# dovecot-sni.cf - Automatisch generiert von setup-dms-tls.sh
# SNI-basierte Zertifikat-Auswahl für Dovecot (IMAP/POP3).
# Dovecot liest dieses File über den Volume-Mount in /tmp/docker-mailserver/
# und wendet es automatisch an.
HEADER
# One local_name block per served hostname (mail./imap./smtp./pop.) so
# Dovecot picks the matching wildcard cert via SNI.
for domain in $DOMAINS_OK; do
    CERT_PATH=$(wildcard_cert_path "$domain")
    KEY_PATH=$(wildcard_key_path "$domain")
    cat >> "$DOVECOT_CFG" << EOF
# $domain
local_name mail.$domain {
ssl_cert = <$CERT_PATH
ssl_key = <$KEY_PATH
}
local_name imap.$domain {
ssl_cert = <$CERT_PATH
ssl_key = <$KEY_PATH
}
local_name smtp.$domain {
ssl_cert = <$CERT_PATH
ssl_key = <$KEY_PATH
}
local_name pop.$domain {
ssl_cert = <$CERT_PATH
ssl_key = <$KEY_PATH
}
EOF
done
echo " ✅ Dovecot SNI: $(echo $DOMAINS_OK | wc -w) Domain(s)"
# ================================================================
# POSTFIX SNI configuration (rewritten for real SNI maps)
# ================================================================
POSTFIX_CFG="$CONFIG_DIR/postfix-main.cf"
POSTFIX_MAP="$CONFIG_DIR/postfix-sni.map"
echo ""
echo "📝 Generiere: $POSTFIX_CFG und $POSTFIX_MAP"
# Keep a timestamped backup of any previously generated override.
if [ -f "$POSTFIX_CFG" ]; then
    cp "$POSTFIX_CFG" "${POSTFIX_CFG}.bak.$(date +%Y%m%d%H%M%S)"
fi
# 1. Create postfix-main.cf (fallback cert + SNI map activation).
cat > "$POSTFIX_CFG" << POSTFIX_EOF
# postfix-main.cf - Automatisch generiert von setup-dms-tls.sh
#
# 1. Fallback-Zertifikat (Wird genutzt, wenn kein SNI-Match gefunden wird)
smtpd_tls_chain_files = ${NODE_KEY_PATH}, ${NODE_CERT_PATH}
# 2. SNI-Mapping aktivieren
# Wir nutzen 'texthash', damit Postfix die Map direkt lesen kann,
# ohne dass 'postmap' ausgeführt werden muss!
tls_server_sni_maps = texthash:/tmp/docker-mailserver/postfix-sni.map
POSTFIX_EOF
# 2. Create postfix-sni.map (one line per served hostname).
echo "# postfix-sni.map - Automatisch generiert (Format: host key_pfad cert_pfad)" > "$POSTFIX_MAP"
for domain in $DOMAINS_OK; do
    KEY_PATH=$(wildcard_key_path "$domain")
    CERT_PATH=$(wildcard_cert_path "$domain")
    cat >> "$POSTFIX_MAP" << EOF
mail.${domain} ${KEY_PATH} ${CERT_PATH}
smtp.${domain} ${KEY_PATH} ${CERT_PATH}
imap.${domain} ${KEY_PATH} ${CERT_PATH}
pop.${domain} ${KEY_PATH} ${CERT_PATH}
${domain} ${KEY_PATH} ${CERT_PATH}
EOF
done
echo " ✅ Postfix SNI: $(echo $DOMAINS_OK | wc -w) Domain(s) konfiguriert"
# ================================================================
# Summary: hot-reload both services, then print verification commands
# ================================================================
echo ""
echo "============================================================"
echo "✅ Konfigurationen generiert."
echo ""
echo "🔄 Lade Postfix und Dovecot neu (ohne Downtime)..."
docker exec "$DMS_CONTAINER" postfix reload || echo "⚠️ Postfix Reload fehlgeschlagen"
docker exec "$DMS_CONTAINER" dovecot reload || echo "⚠️ Dovecot Reload fehlgeschlagen"
echo ""
echo "📋 Nächste Schritte:"
echo ""
echo "1. TLS testen (SNI):"
for domain in $DOMAINS_OK; do
    echo " openssl s_client -connect mail.$domain:993 -servername mail.$domain 2>/dev/null | grep 'subject\|issuer'"
done
echo "============================================================"

1
DMS/sieve-schedule Normal file
View File

@@ -0,0 +1 @@
*/5 * * * *

10
DMS/sieve-supervisor.conf Normal file
View File

@@ -0,0 +1,10 @@
[program:sieve-sync]
# The "-u" flag is essential: it keeps Python output unbuffered so log
# lines appear immediately.
command=/usr/bin/python3 -u /scripts/sync.py
# Route all output to the container's stdout/stderr, no size-based rotation.
stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
stderr_logfile=/dev/stderr
stderr_logfile_maxbytes=0
autostart=true
autorestart=true
startsecs=5

View File

@@ -0,0 +1,225 @@
#!/usr/bin/env python3
import boto3
import os
import shutil
from pathlib import Path
import json
import time
from datetime import datetime
try:
from croniter import croniter
except ImportError:
print("Bitte 'croniter' via pip installieren!")
exit(1)
# Config
REGION = 'us-east-2'  # AWS region of the DynamoDB table
TABLE = 'email-rules'  # DynamoDB table holding per-mailbox rules
VMAIL_BASE = '/var/mail'  # root of the Dovecot maildir tree
# Module-level handles, created once at import time.
dynamodb = boto3.resource('dynamodb', region_name=REGION)
table = dynamodb.Table(TABLE)
def generate_sieve(email, rules):
    """Build a Dovecot Sieve script from a DynamoDB rules item.

    Args:
        email: Mailbox address; used as the vacation ``:from`` sender.
        rules: Mapping with optional keys ``forwards`` (list of addresses),
            ``ooo_active`` (truthy enables the auto-reply), ``ooo_message``
            and ``ooo_content_type`` ('text' or 'html').

    Returns:
        The complete Sieve script text, terminated by a newline.
    """
    lines = ['require ["copy","vacation","variables"];', '']
    # Skip if already processed by worker (prevents double-processing when
    # the SES worker has already applied these rules upstream).
    lines.extend([
        '# Skip if already processed by worker',
        'if header :contains "X-SES-Worker-Processed" "" {',
        ' keep;',
        ' stop;',
        '}',
        ''
    ])
    # Forwards
    forwards = rules.get('forwards', [])
    if forwards:
        lines.append('# rule:[forward]')
        for fwd in forwards:
            # Escape backslash and double quote per RFC 5228 quoted-string
            # rules so an address can never break out of the Sieve string
            # (the original interpolated the raw value).
            safe_fwd = fwd.replace('\\', '\\\\').replace('"', '\\"')
            lines.append(f'redirect :copy "{safe_fwd}";')
        lines.append('')
    # Out-of-office auto-reply
    if rules.get('ooo_active'):
        msg = rules.get('ooo_message', 'I am away')
        content_type = rules.get('ooo_content_type', 'text')
        lines.append('# rule:[reply]')
        if content_type == 'html':
            # MIME vacation body sent as text/html.
            # NOTE(review): a message line consisting of a lone '.' would
            # terminate the text: block early — confirm upstream sanitizes
            # the stored OOO messages.
            lines.extend([
                f'vacation :days 1 :from "{email}" :mime text:',
                'Content-Type: text/html; charset=utf-8',
                '',
                msg,
                '.',
                ';'
            ])
        else:
            # json.dumps yields a correctly quoted and escaped Sieve string.
            safe_msg = json.dumps(msg, ensure_ascii=False)
            lines.append(f'vacation :days 1 :from "{email}" {safe_msg};')
    return '\n'.join(lines) + '\n'
def deactivate_sieve(email, mailbox_home):
    """Deactivate a user's Sieve rules without deleting any files.

    Safety variant: overwrites the active script with a bare ``keep;``
    stub instead of removing files, so mail delivery continues untouched.

    Args:
        email: Mailbox address, used only for log output.
        mailbox_home: ``Path`` of the user's ``home`` directory that holds
            the active ``.dovecot.sieve`` script.
    """
    # Path of the active script.
    sieve_path = mailbox_home / '.dovecot.sieve'
    # Content: just "keep;" -> keep the mail, do nothing else.
    safe_content = (
        '# Script deactivated by DynamoDB Sync (User not in DB)\n'
        'keep;\n'
    )
    # Skip the write when the script is already deactivated, to avoid
    # pointless disk writes and recompiles.
    if sieve_path.exists() and not sieve_path.is_symlink():
        try:
            current_content = sieve_path.read_text()
            if "Script deactivated" in current_content:
                return  # already deactivated
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # still propagate. Unreadable content just means we rewrite it.
            pass
    # write_text() would follow a symlink and clobber its target, so drop
    # any symlink first to be sure we never overwrite a system file.
    if sieve_path.is_symlink():
        try:
            os.unlink(sieve_path)  # remove the link itself
        except OSError:
            pass
    try:
        sieve_path.write_text(safe_content)
        # Compile so Dovecot picks the change up immediately.
        os.system(f'sievec {sieve_path}')
        # Ensure ownership stays with the mail user.
        os.system(f'chown docker:docker {sieve_path}')
        print(f'{email} (Regeln deaktiviert/geleert)')
    except Exception as e:
        print(f"Fehler beim Deaktivieren von {email}: {e}")
def sync():
    """One reconciliation pass: DynamoDB rules -> on-disk Sieve scripts.

    For every mailbox directory found under VMAIL_BASE:
      - if the address exists in the DynamoDB table, (re)generate and
        compile its Sieve script;
      - otherwise overwrite the script with a harmless 'keep;' stub
        (never deletes mail or directories).
    """
    # 1. Fetch the DB state.
    try:
        # NOTE(review): scan() returns at most one page (~1 MB); items on
        # later pages would look "missing" and get deactivated — confirm the
        # table stays small or add pagination.
        response = table.scan()
        db_users = {item['email_address']: item for item in response.get('Items', [])}
    except Exception as e:
        print(f"FATAL: Konnte DynamoDB nicht lesen ({e}). Breche ab, um keine Regeln zu löschen.")
        return
    # 2. Scan the filesystem.
    base_path = Path(VMAIL_BASE)
    if not base_path.exists():
        print("Warnung: /var/mail existiert nicht.")
        return
    # Iterate over domains
    for domain_dir in base_path.iterdir():
        if not domain_dir.is_dir(): continue
        # Iterate over users
        for user_dir in domain_dir.iterdir():
            if not user_dir.is_dir(): continue
            user = user_dir.name
            domain = domain_dir.name
            email = f"{user}@{domain}"
            # IMPORTANT: we only ever work inside the 'home' subfolder.
            # The mail itself lives in user_dir/cur etc. — never touched.
            mailbox_home = user_dir / 'home'
            # --- Case A: user exists in the DB (update) ---
            if email in db_users:
                item = db_users[email]
                if not mailbox_home.exists():
                    mailbox_home.mkdir(exist_ok=True)
                    os.system(f'chown docker:docker {mailbox_home}')
                sieve_path = mailbox_home / '.dovecot.sieve'
                script = generate_sieve(email, item)
                sieve_path.write_text(script)
                os.system(f'sievec {sieve_path}')
                # Ownership
                os.system(f'chown docker:docker {sieve_path}')
                # (Optional) Mirror into the sieve/ folder for Roundcube compatibility
                sieve_dir = mailbox_home / 'sieve'
                if sieve_dir.exists():
                    managed_script = sieve_dir / 'default.sieve'
                    managed_script.write_text(script)
                    os.system(f'sievec {managed_script}')
                    os.system(f'chown -R docker:docker {sieve_dir}')
                print(f'{email}')
            # --- Case B: user NOT in the DB (deactivate) ---
            else:
                # Only if the home folder exists (we create nothing for
                # users that never had one).
                if mailbox_home.exists():
                    deactivate_sieve(email, mailbox_home)
def wait_for_dovecot():
    """Block until Dovecot's auth-userdb socket appears on disk."""
    auth_socket = '/var/run/dovecot/auth-userdb'
    print("⏳ Warte auf Dovecot Start...")
    while True:
        if os.path.exists(auth_socket):
            break
        time.sleep(5)
    print("✅ Dovecot ist bereit!")
if __name__ == '__main__':
    wait_for_dovecot()
    CRON_FILE = '/etc/sieve-schedule'
    cron_string = "*/5 * * * *"  # default: every 5 minutes
    # An override file may supply a custom cron expression; comment lines
    # and empty files fall back to the default.
    if os.path.exists(CRON_FILE):
        with open(CRON_FILE, 'r') as f:
            content = f.read().strip()
            if content and not content.startswith('#'):
                cron_string = content
    print(f"DynamoDB Sieve Sync (Safe Mode) gestartet. Zeitplan: {cron_string}")
    # Run one sync immediately, then follow the cron schedule forever.
    sync()
    base_time = datetime.now()
    # Renamed from 'iter', which shadowed the builtin of the same name.
    cron_iter = croniter(cron_string, base_time)
    while True:
        next_run = cron_iter.get_next(datetime)
        now = datetime.now()
        sleep_seconds = (next_run - now).total_seconds()
        if sleep_seconds > 0:
            time.sleep(sleep_seconds)
        try:
            print(f"[{datetime.now()}] Starte Sync...")
            sync()
        except Exception as e:
            # Log and keep the loop alive; the next tick retries.
            print(f"Fehler beim Sync: {e}")

View File

@@ -0,0 +1,6 @@
[program:dynamic-whitelist]
# Run the whitelist monitor unbuffered (-u) so log lines appear immediately.
command=/usr/bin/python3 -u /scripts/dynamic_whitelist.py
autostart=true
autorestart=true
stderr_logfile=/var/log/supervisor/dynamic-whitelist.err.log
stdout_logfile=/var/log/supervisor/dynamic-whitelist.out.log

View File

@@ -1,20 +0,0 @@
# Worker bauen
docker-compose build
# Worker starten
docker-compose up -d
# Logs ansehen
docker-compose logs -f
# Logs nur für eine Domain
docker-compose logs -f worker-andreasknuth
# Status prüfen
docker-compose ps
# Worker neu starten
docker-compose restart
# Worker stoppen
docker-compose down

View File

@@ -1,26 +0,0 @@
FROM python:3.11-slim
# Metadata
LABEL maintainer="your-email@example.com"
LABEL description="Domain-specific email worker for SMTP delivery"
# Non-root user für Security
RUN useradd -m -u 1000 worker && \
mkdir -p /app && \
chown -R worker:worker /app
# Boto3 installieren
RUN pip install --no-cache-dir boto3
# Worker Code
COPY --chown=worker:worker worker.py /app/worker.py
WORKDIR /app
USER worker
# Healthcheck
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
CMD pgrep -f worker.py || exit 1
# Start worker mit unbuffered output
CMD ["python", "-u", "worker.py"]

View File

@@ -1,35 +0,0 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:HeadObject",
"s3:ListBucket",
"s3:CopyObject"
],
"Resource": [
"arn:aws:s3:::*-emails",
"arn:aws:s3:::*-emails/*"
]
},
{
"Effect": "Allow",
"Action": [
"sqs:SendMessage",
"sqs:GetQueueUrl"
],
"Resource": "arn:aws:sqs:eu-central-1:123456789:*-queue"
},
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "arn:aws:logs:::*"
}
]
}

13
TESTS
View File

@@ -1,13 +0,0 @@
# Via AWS SES CLI
aws ses send-email \
--from "sender@example.com" \
--destination "ToAddresses=test@andreasknuth.de" \
--message "Subject={Data='Test',Charset=utf-8},Body={Text={Data='Test message',Charset=utf-8}}" \
--region us-east-2
# Mail an mehrere Domains
aws ses send-email \
--from "sender@example.com" \
--destination "ToAddresses=test@andreasknuth.de,test@bizmatch.net" \
--message "Subject={Data='Multi-Domain Test',Charset=utf-8},Body={Text={Data='Testing multiple domains',Charset=utf-8}}" \
--region us-east-2

92
backup_mail.sh Executable file
View File

@@ -0,0 +1,92 @@
#!/bin/bash
# ==============================================================================
# CONFIGURATION
# ==============================================================================
BASE_MAIL_PATH="/home/aknuth/git/email-amazon/DMS/docker-data/dms/mail-data"
RCLONE_CONFIG="/home/aknuth/.config/rclone/rclone.conf"
LOGFILE="/var/log/mail_backup.log"
# ==============================================================================
# HELPER: LOGGING (screen + file)
# ==============================================================================
log() {
    # Writes timestamp + text to stdout (screen) AND to the log file.
    echo "$(date '+%Y-%m-%d %H:%M:%S') | $1" | tee -a "$LOGFILE"
}
# ==============================================================================
# PARAMETERS & VALIDATION
# ==============================================================================
DOMAIN=$1
REMOTE_NAME=$2
BUCKET_NAME=$3
if [[ -z "$DOMAIN" || -z "$REMOTE_NAME" || -z "$BUCKET_NAME" ]]; then
    echo "FEHLER: Fehlende Parameter."
    echo "Benutzung: sudo $0 <DOMAIN> <RCLONE_REMOTE> <BUCKET>"
    echo "Beispiel: sudo $0 buddelectric.net buddelectric buddelectric-mails"
    exit 1
fi
SOURCE_PATH="${BASE_MAIL_PATH}/${DOMAIN}"
if [ ! -d "$SOURCE_PATH" ]; then
    log "FEHLER: Quell-Ordner existiert nicht: $SOURCE_PATH"
    exit 1
fi
if [[ $EUID -ne 0 ]]; then
    echo "FEHLER: Dieses Skript muss als root ausgeführt werden (sudo)."
    exit 1
fi
# ==============================================================================
# LOCKING
# ==============================================================================
LOCKFILE_PATH="/var/run/rclone_mail_${DOMAIN}.lock"
# Abort when another backup for this domain is still running; a stale lock
# (dead PID) is simply overwritten below. Expansions quoted and backticks
# replaced with $(...) for robustness.
if [ -e "${LOCKFILE_PATH}" ] && kill -0 "$(cat "${LOCKFILE_PATH}")" 2>/dev/null; then
    log "ABBRUCH: Backup für $DOMAIN läuft bereits."
    exit 1
fi
# Remove the lock on every exit. Signals get dedicated traps so the final
# 'exit $EXIT_CODE' is not masked: the original trapped INT TERM EXIT with
# a bare 'exit', which could swallow the real rclone exit status.
trap 'rm -f "${LOCKFILE_PATH}"' EXIT
trap 'exit 130' INT
trap 'exit 143' TERM
echo $$ > "${LOCKFILE_PATH}"
# ==============================================================================
# BACKUP START
# ==============================================================================
log "----------------------------------------------------------------"
log "START Backup"
log "Domain: $DOMAIN"
log "Quelle: $SOURCE_PATH"
log "Ziel: $REMOTE_NAME:$BUCKET_NAME"
# Run rclone.
# --progress shows the bar on the terminal (not written to the log file).
# --log-file writes technical details ONLY to the log file, so the screen
#   is not flooded.
/usr/bin/rclone sync "$SOURCE_PATH" "$REMOTE_NAME:$BUCKET_NAME" \
    --config "$RCLONE_CONFIG" \
    --exclude "**/tmp/**" \
    --exclude "*.lock" \
    --exclude "dovecot-uidlist.lock" \
    --exclude ".dovecot.lda-dupes" \
    --transfers 32 \
    --checkers 32 \
    --fast-list \
    --min-age 15m \
    --log-file "$LOGFILE" \
    --log-level INFO \
    --progress
EXIT_CODE=$?
if [ $EXIT_CODE -eq 0 ]; then
    log "STATUS: ERFOLG - $DOMAIN erfolgreich gesichert."
else
    log "STATUS: FEHLER - Exit Code $EXIT_CODE. Details siehe $LOGFILE"
fi
log "----------------------------------------------------------------"
exit $EXIT_CODE

View File

@@ -43,7 +43,7 @@ aws s3api put-bucket-lifecycle-configuration \
"ID": "DeleteOldEmails", "ID": "DeleteOldEmails",
"Status": "Enabled", "Status": "Enabled",
"Expiration": { "Expiration": {
"Days": 90 "Days": 14
}, },
"Filter": { "Filter": {
"Prefix": "" "Prefix": ""

View File

@@ -1,155 +1,208 @@
#!/bin/bash #!/bin/bash
# awsses.sh - Konfiguriert Amazon SES für eine Domain und erstellt eine Receipt Rule # awsses.sh - SES Setup mit S3 + Global Lambda Shim -> SQS
# Dieses Skript ist idempotent: Es kann sicher mehrfach ausgeführt werden.
# Globale Lambda für alle Domains.
#
# MAIL FROM Subdomain:
# Standard: mail.${DOMAIN_NAME}
# Override: export MAIL_FROM_SUBDOMAIN="mailfrom" (nur der Prefix, ohne Domain)
#
# Beispiel:
# export DOMAIN_NAME="buddelectric.net"
# export MAIL_FROM_SUBDOMAIN="mailfrom" # → mailfrom.buddelectric.net
# ./awsses.sh
# Überprüfen, ob die Domain-Variable gesetzt ist set -e
if [ -z "$DOMAIN_NAME" ]; then
echo "Fehler: DOMAIN_NAME ist nicht gesetzt." # --- CHECKS ---
echo "Bitte setzen Sie die Variable mit: export DOMAIN_NAME='IhreDomain.de'" if ! command -v jq &> /dev/null; then echo "Fehler: 'jq' fehlt."; exit 1; fi
exit 1 if [ -z "$DOMAIN_NAME" ]; then echo "Fehler: DOMAIN_NAME ist nicht gesetzt."; exit 1; fi
# Prüfen ob Python Code da ist
PYTHON_FILE="ses_sns_shim_global.py"
if [ ! -f "$PYTHON_FILE" ]; then
echo "Fehler: $PYTHON_FILE nicht gefunden!"
exit 1
fi fi
# Überprüfen, ob S3_BUCKET_NAME gesetzt ist # --- VARIABLEN ---
if [ -z "$S3_BUCKET_NAME" ]; then
echo "Warnung: S3_BUCKET_NAME ist nicht gesetzt."
echo "Wird automatisch aus DOMAIN_NAME generiert, verwenden Sie idealerweise zuerst awss3.sh."
S3_BUCKET_NAME=$(echo "$DOMAIN_NAME" | tr '.' '-' | awk '{print $0 "-emails"}')
echo "Generierter Bucket-Name: $S3_BUCKET_NAME"
fi
# Konfiguration
AWS_REGION=${AWS_REGION:-"us-east-2"} AWS_REGION=${AWS_REGION:-"us-east-2"}
EMAIL_PREFIX=${EMAIL_PREFIX:-""} EMAIL_PREFIX=${EMAIL_PREFIX:-""}
RULE_NAME="store-$(echo "$DOMAIN_NAME" | tr '.' '-')-to-s3" CONFIGURATION_SET_NAME="relay-outbound"
echo "=== SES Konfiguration für $DOMAIN_NAME ===" # MAIL FROM Subdomain (konfigurierbar)
echo "Region: $AWS_REGION" MAIL_FROM_SUBDOMAIN=${MAIL_FROM_SUBDOMAIN:-"mail"}
echo "S3 Bucket: $S3_BUCKET_NAME" MAIL_FROM_DOMAIN="${MAIL_FROM_SUBDOMAIN}.${DOMAIN_NAME}"
echo "Receipt Rule Name: $RULE_NAME"
# ------------------------ # Bucket Name generieren falls leer
# SES Domain-Identität erstellen if [ -z "$S3_BUCKET_NAME" ]; then
# ------------------------ S3_BUCKET_NAME=$(echo "$DOMAIN_NAME" | tr '.' '-' | awk '{print $0 "-emails"}')
echo "SES Domain-Identität erstellen..." fi
IDENTITY_RESULT=$(aws sesv2 create-email-identity \
--email-identity ${DOMAIN_NAME} \
--region ${AWS_REGION})
echo "Identity erstellt. Überprüfen Sie die DNS-Einträge für die Domain-Verifizierung." # Namen (Global Lambda!)
echo "$IDENTITY_RESULT" | jq . RULE_SET_NAME="bizmatch-ruleset"
RULE_NAME="store-${DOMAIN_NAME//./-}-to-s3"
QUEUE_NAME="${DOMAIN_NAME//./-}-queue"
LAMBDA_NAME="ses-shim-global"
LAMBDA_ROLE_NAME="SesShimGlobalRole"
# DKIM-Signierung aktivieren echo "=========================================================="
echo "DKIM-Signierung aktivieren..." echo " SES Setup (S3 -> Global Lambda Shim -> SQS) für $DOMAIN_NAME"
aws sesv2 put-email-identity-dkim-attributes \ echo " MAIL FROM: $MAIL_FROM_DOMAIN"
--email-identity ${DOMAIN_NAME} \ echo "=========================================================="
--signing-enabled \
--region ${AWS_REGION}
# Mail-From-Domain konfigurieren # ---------------------------------------------------------
echo "Mail-From-Domain konfigurieren..." # 1. SES Identity & Config Set
aws sesv2 put-email-identity-mail-from-attributes \ # ---------------------------------------------------------
--email-identity ${DOMAIN_NAME} \ echo "[1/6] SES Identity Setup..."
--mail-from-domain "mail.${DOMAIN_NAME}" \ if ! aws sesv2 get-email-identity --email-identity ${DOMAIN_NAME} --region ${AWS_REGION} >/dev/null 2>&1; then
--behavior-on-mx-failure USE_DEFAULT_VALUE \ aws sesv2 create-email-identity --email-identity ${DOMAIN_NAME} --region ${AWS_REGION} >/dev/null
--region ${AWS_REGION} fi
# Update Attributes (Idempotent)
aws sesv2 put-email-identity-dkim-attributes --email-identity ${DOMAIN_NAME} --signing-enabled --region ${AWS_REGION}
aws sesv2 put-email-identity-mail-from-attributes --email-identity ${DOMAIN_NAME} --mail-from-domain "${MAIL_FROM_DOMAIN}" --behavior-on-mx-failure USE_DEFAULT_VALUE --region ${AWS_REGION}
aws sesv2 put-email-identity-configuration-set-attributes --email-identity ${DOMAIN_NAME} --configuration-set-name "$CONFIGURATION_SET_NAME" --region ${AWS_REGION}
# Überprüfen, ob der Rule Set existiert, sonst erstellen echo " -> MAIL FROM Domain: ${MAIL_FROM_DOMAIN}"
echo "Überprüfe oder erstelle Receipt Rule Set..."
RULESET_EXISTS=$(aws ses describe-receipt-rule-sets --region ${AWS_REGION} | jq -r '.RuleSets[] | select(.Name == "bizmatch-ruleset") | .Name')
if [ -z "$RULESET_EXISTS" ]; then # ---------------------------------------------------------
echo "Receipt Rule Set 'bizmatch-ruleset' existiert nicht, wird erstellt..." # 2. SQS Queue holen (nur zur Validierung, Lambda holt dynamisch)
# ---------------------------------------------------------
echo "[2/6] Queue URL ermitteln (zur Validierung)..."
QUEUE_URL=$(aws sqs get-queue-url --queue-name "$QUEUE_NAME" --region "$AWS_REGION" --output text --query 'QueueUrl' 2>/dev/null)
if [ -z "$QUEUE_URL" ]; then echo "FEHLER: Queue $QUEUE_NAME nicht gefunden! ./create-queue.sh zuerst ausführen."; exit 1; fi
QUEUE_ARN=$(aws sqs get-queue-attributes --queue-url "$QUEUE_URL" --attribute-names QueueArn --region "$AWS_REGION" --output text --query 'Attributes.QueueArn')
# ---------------------------------------------------------
# 3. IAM Role für Global Lambda erstellen
# ---------------------------------------------------------
echo "[3/6] IAM Role für Lambda prüfen/erstellen..."
TRUST_POLICY='{"Version": "2012-10-17","Statement": [{"Effect": "Allow","Principal": {"Service": "lambda.amazonaws.com"},"Action": "sts:AssumeRole"}]}'
if ! aws iam get-role --role-name "$LAMBDA_ROLE_NAME" >/dev/null 2>&1; then
aws iam create-role --role-name "$LAMBDA_ROLE_NAME" --assume-role-policy-document "$TRUST_POLICY" >/dev/null
echo " -> Rolle erstellt."
else
echo " -> Rolle existiert bereits."
fi
# Permissions Policy (Lambda darf Logs schreiben und in ALLE Queues mit *-queue senden)
LAMBDA_POLICY=$(jq -n '{
Version: "2012-10-17",
Statement: [
{
Effect: "Allow",
Action: ["logs:CreateLogGroup","logs:CreateLogStream","logs:PutLogEvents"],
Resource: "arn:aws:logs:*:*:*"
},
{
Effect: "Allow",
Action: "sqs:SendMessage",
Resource: "arn:aws:sqs:*:*:*-queue"
},
{
Effect: "Allow",
Action: "sqs:GetQueueUrl",
Resource: "*"
}
]
}' | jq -c .)
aws iam put-role-policy --role-name "$LAMBDA_ROLE_NAME" --policy-name "SesShimGlobalPermissions" --policy-document "$LAMBDA_POLICY"
echo " -> Permissions aktualisiert."
# Kurze Pause für IAM Propagation, falls Rolle neu war
sleep 5
# ---------------------------------------------------------
# 4. Lambda Funktion erstellen/updaten (Global!)
# ---------------------------------------------------------
echo "[4/6] Global Lambda Shim deployen..."
# Zip erstellen
cp "$PYTHON_FILE" lambda_function.py
zip -q lambda.zip lambda_function.py
# Keine Env-Vars nötig, da dynamisch
ROLE_ARN=$(aws iam get-role --role-name "$LAMBDA_ROLE_NAME" --query 'Role.Arn' --output text)
if ! aws lambda get-function --function-name "$LAMBDA_NAME" --region "$AWS_REGION" >/dev/null 2>&1; then
echo " -> Erstelle neue Lambda-Funktion..."
aws lambda create-function --function-name "$LAMBDA_NAME" \
--runtime python3.11 --handler lambda_function.lambda_handler \
--role "$ROLE_ARN" --zip-file fileb://lambda.zip \
--region "$AWS_REGION" >/dev/null
else
echo " -> Aktualisiere existierende Lambda-Funktion..."
aws lambda update-function-code --function-name "$LAMBDA_NAME" --zip-file fileb://lambda.zip --region "$AWS_REGION" >/dev/null
# Warte kurz
sleep 2
aws lambda update-function-configuration --function-name "$LAMBDA_NAME" --region "$AWS_REGION" >/dev/null
fi
# Aufräumen
rm lambda.zip lambda_function.py
# ---------------------------------------------------------
# 5. Permission: SES darf Lambda aufrufen (Global, einmalig)
# ---------------------------------------------------------
echo "[5/6] SES Permission für Lambda..."
aws lambda add-permission --function-name "$LAMBDA_NAME" \
--statement-id "AllowSESInvoke-Global" \
--action "lambda:InvokeFunction" \
--principal "ses.amazonaws.com" \
--region "$AWS_REGION" 2>/dev/null || true
# ---------------------------------------------------------
# 6. SES Rule (S3 + Global Lambda)
# ---------------------------------------------------------
echo "[6/6] SES Receipt Rule (S3 + Lambda) konfigurieren..."
LAMBDA_ARN=$(aws lambda get-function --function-name "$LAMBDA_NAME" --region "$AWS_REGION" --query 'Configuration.FunctionArn' --output text)
# Rule Set prüfen
if ! aws ses list-receipt-rule-sets --region ${AWS_REGION} | grep -q "bizmatch-ruleset"; then
aws ses create-receipt-rule-set --rule-set-name "bizmatch-ruleset" --region ${AWS_REGION} aws ses create-receipt-rule-set --rule-set-name "bizmatch-ruleset" --region ${AWS_REGION}
else
echo "Receipt Rule Set 'bizmatch-ruleset' existiert bereits."
fi fi
# Regel-Definition
# Receipt Rule erstellen RULE_JSON=$(jq -n \
echo "Receipt Rule für E-Mail-Empfang erstellen..." --arg bucket "$S3_BUCKET_NAME" \
aws ses create-receipt-rule --rule-set-name "bizmatch-ruleset" --rule '{ --arg prefix "$EMAIL_PREFIX" \
"Name": "'"${RULE_NAME}"'", --arg larn "$LAMBDA_ARN" \
"Enabled": true, --arg rule "$RULE_NAME" \
"ScanEnabled": true, --arg domain "$DOMAIN_NAME" \
"Actions": [{ --arg subdomain "${MAIL_FROM_DOMAIN}" \
"S3Action": { '{
"BucketName": "'"${S3_BUCKET_NAME}"'", Name: $rule,
"ObjectKeyPrefix": "'"${EMAIL_PREFIX}"'" Enabled: true,
} ScanEnabled: true,
}], TlsPolicy: "Require",
"TlsPolicy": "Require", Recipients: [$domain, $subdomain],
"Recipients": ["'"${DOMAIN_NAME}"'"] Actions: [
}' --region ${AWS_REGION} {
S3Action: {
# Prüfen, ob der Rule Set aktiv ist BucketName: $bucket,
ACTIVE_RULESET=$(aws ses describe-active-receipt-rule-set --region ${AWS_REGION} | jq -r '.Metadata.Name') ObjectKeyPrefix: $prefix
}
if [ "$ACTIVE_RULESET" != "bizmatch-ruleset" ]; then },
echo "Aktiviere Rule Set 'bizmatch-ruleset'..." {
aws ses set-active-receipt-rule-set --rule-set-name "bizmatch-ruleset" --region ${AWS_REGION} LambdaAction: {
else FunctionArn: $larn,
echo "Rule Set 'bizmatch-ruleset' ist bereits aktiv." InvocationType: "Event"
fi }
# ------------------------
# Lambda-Funktion mit SES verknüpfen
# ------------------------
echo "Verknüpfe Lambda-Funktion 'ses-to-sqs' mit SES..."
# Lambda ARN ermitteln
LAMBDA_ARN=$(aws lambda get-function \
--function-name ses-to-sqs \
--region ${AWS_REGION} \
--query 'Configuration.FunctionArn' \
--output text)
if [ -z "$LAMBDA_ARN" ]; then
echo "FEHLER: Lambda-Funktion 'ses-to-sqs' nicht gefunden!"
echo "Bitte zuerst Lambda-Funktion deployen."
exit 1
fi
echo "Lambda ARN: $LAMBDA_ARN"
# SES Permission für Lambda hinzufügen (falls noch nicht vorhanden)
echo "Füge SES-Berechtigung zur Lambda-Funktion hinzu..."
aws lambda add-permission \
--function-name ses-to-sqs \
--statement-id "AllowSESInvoke-${DOMAIN_NAME//./}" \
--action "lambda:InvokeFunction" \
--principal ses.amazonaws.com \
--source-account $(aws sts get-caller-identity --query Account --output text) \
--region ${AWS_REGION} 2>/dev/null || echo "Permission bereits vorhanden"
# Receipt Rule UPDATE: Lambda Action hinzufügen
echo "Aktualisiere Receipt Rule mit Lambda Action..."
aws ses update-receipt-rule --rule-set-name "bizmatch-ruleset" --rule '{
"Name": "'"${RULE_NAME}"'",
"Enabled": true,
"ScanEnabled": true,
"Actions": [
{
"S3Action": {
"BucketName": "'"${S3_BUCKET_NAME}"'",
"ObjectKeyPrefix": "'"${EMAIL_PREFIX}"'"
} }
}, ]
{ }')
"LambdaAction": { # Check ob Regel existiert -> Update, sonst Create
"FunctionArn": "'"${LAMBDA_ARN}"'", if aws ses describe-receipt-rule --rule-set-name "bizmatch-ruleset" --rule-name "$RULE_NAME" --region "$AWS_REGION" >/dev/null 2>&1; then
"InvocationType": "Event" echo " -> Aktualisiere existierende Regel..."
} aws ses update-receipt-rule --rule-set-name "bizmatch-ruleset" --rule "$RULE_JSON" --region "$AWS_REGION"
} else
], echo " -> Erstelle neue Regel..."
"TlsPolicy": "Require", aws ses create-receipt-rule --rule-set-name "bizmatch-ruleset" --rule "$RULE_JSON" --region "$AWS_REGION"
"Recipients": ["'"${DOMAIN_NAME}"'"] fi
}' --region ${AWS_REGION} # Aktivieren
aws ses set-active-receipt-rule-set --rule-set-name "bizmatch-ruleset" --region ${AWS_REGION}
echo "✅ Lambda-Funktion erfolgreich mit SES verknüpft!" echo "========================================================"
echo "✅ Setup erfolgreich."
echo "SES-Konfiguration für $DOMAIN_NAME abgeschlossen." echo " Domain: $DOMAIN_NAME"
echo echo " MAIL FROM: $MAIL_FROM_DOMAIN"
echo "WICHTIG: Überprüfen Sie die Ausgabe oben für DNS-Einträge, die Sie bei Ihrem DNS-Provider setzen müssen:" echo " S3 Bucket: $S3_BUCKET_NAME"
echo "1. DKIM-Einträge (3 CNAME-Einträge)" echo " SQS Queue: $QUEUE_NAME"
echo "2. MAIL FROM MX und TXT-Einträge" echo " Lambda: $LAMBDA_NAME (global)"
echo "3. SPF-Eintrag (TXT): v=spf1 include:amazonses.com ~all" echo "========================================================"
echo
echo "Nach dem Setzen der DNS-Einträge kann es bis zu 72 Stunden dauern, bis die Verifizierung abgeschlossen ist."

View File

@@ -1,160 +1,171 @@
#!/bin/bash #!/bin/bash
# Cloudflare API-Konfiguration # ==========================================
# Setze deine API-Schlüssel und Zone-ID als Umgebungsvariablen oder ersetze sie direkt # KONFIGURATION
# ==========================================
AWS_REGION="us-east-2"
# CADDY_SERVER_IP="DEINE_CADDY_IP_HIER" # WICHTIG: IP deines Caddy Servers eintragen
# MAIL_SERVER_HOSTNAME="mail.email-srvr.com" # Der Server, mit dem sich Outlook/iPhone verbinden
# CF_ZONE_ID="1b7756cee93ed8ba8c05bdc3cb0a5da8" # Die Zone-ID deiner Domain bei Cloudflare
AWS_REGION="us-east-2" # AWS-Region
if [ -z "$DOMAIN_NAME" ]; then if [ -z "$DOMAIN_NAME" ]; then
echo "Fehler: DOMAIN_NAME ist nicht gesetzt." echo "Fehler: DOMAIN_NAME ist nicht gesetzt (z.B. export DOMAIN_NAME='bayarea-cc.com')."
echo "Bitte setzen Sie die Variable mit: export DOMAIN_NAME='IhreDomain.de'" exit 1
exit 1 # Skript mit Fehlercode beenden
fi fi
# Überprüfen, ob der erforderliche API-Token gesetzt ist
if [ -z "$CF_API_TOKEN" ]; then if [ -z "$CF_API_TOKEN" ]; then
echo "Fehler: Bitte setze CF_API_TOKEN als Umgebungsvariable oder im Skript." echo "Fehler: CF_API_TOKEN fehlt."
exit 1
fi
if [ -z "$CADDY_SERVER_IP" ]; then
echo "Fehler: CADDY_SERVER_IP fehlt. Bitte im Skript eintragen."
exit 1 exit 1
fi fi
# Zone ID basierend auf Domain-Namen abrufen # Fallback für Mailserver Variable
if [ -z "$MAIL_SERVER_HOSTNAME" ]; then
MAIL_SERVER_HOSTNAME="mail.email-srvr.com"
fi
# ==========================================
# ZONE ID ERMITTELN
# ==========================================
echo "Zone ID für $DOMAIN_NAME abrufen..." echo "Zone ID für $DOMAIN_NAME abrufen..."
ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN_NAME" \ ZONE_RESPONSE=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN_NAME" \
-H "Authorization: Bearer $CF_API_TOKEN" \ -H "Authorization: Bearer $CF_API_TOKEN" \
-H "Content-Type: application/json") -H "Content-Type: application/json")
# Überprüfen, ob die Antwort erfolgreich war
if [ "$(echo $ZONE_RESPONSE | jq -r '.success')" != "true" ]; then if [ "$(echo $ZONE_RESPONSE | jq -r '.success')" != "true" ]; then
echo "Fehler beim Abrufen der Zone ID:" echo "Fehler beim Abrufen der Zone ID:"
echo $ZONE_RESPONSE | jq . echo $ZONE_RESPONSE | jq .
exit 1 exit 1
fi fi
# Zone ID extrahieren
CF_ZONE_ID=$(echo $ZONE_RESPONSE | jq -r '.result[0].id') CF_ZONE_ID=$(echo $ZONE_RESPONSE | jq -r '.result[0].id')
echo "Zone ID: $CF_ZONE_ID"
# Überprüfen, ob eine Zone ID gefunden wurde # ==========================================
if [ -z "$CF_ZONE_ID" ] || [ "$CF_ZONE_ID" = "null" ]; then # FUNKTIONEN
echo "Keine Zone ID für $DOMAIN_NAME gefunden. Bitte stelle sicher, dass die Domain bei Cloudflare registriert ist." # ==========================================
exit 1
fi
echo "Zone ID für $DOMAIN_NAME: $CF_ZONE_ID"
# Hilfsfunktion für DNS-Einträge anlegen
create_dns_record() { create_dns_record() {
local TYPE=$1 local TYPE=$1
local NAME=$2 local NAME=$2
local CONTENT=$3 local CONTENT=$3
local PROXIED=$4 local PROXIED=$4
local TTL=$5 local TTL=$5
local PRIORITY=$6 # Neu: MX-Priority local PRIORITY=$6
# Standardwerte für Proxied und TTL setzen, falls nicht angegeben if [ -z "$PROXIED" ]; then PROXIED="false"; fi
if [ -z "$PROXIED" ]; then if [ -z "$TTL" ]; then TTL=3600; fi
PROXIED="false"
fi
if [ -z "$TTL" ]; then
TTL=3600 # 1 Stunde
fi
echo "Erstelle $TYPE-Eintrag für $NAME mit Inhalt $CONTENT..." echo "Erstelle $TYPE-Eintrag für $NAME..."
# Json Payload vorbereiten abhängig vom Record-Typ
local JSON_DATA="" local JSON_DATA=""
if [ "$TYPE" = "MX" ]; then if [ "$TYPE" = "MX" ]; then
# Bei MX-Einträgen müssen wir die Priority separat angeben if [ -z "$PRIORITY" ]; then PRIORITY=10; fi
if [ -z "$PRIORITY" ]; then
PRIORITY=10 # Standard-Priority, falls nicht angegeben
fi
JSON_DATA="{ JSON_DATA="{
\"type\": \"$TYPE\", \"type\": \"$TYPE\", \"name\": \"$NAME\", \"content\": \"$CONTENT\",
\"name\": \"$NAME\", \"ttl\": $TTL, \"priority\": $PRIORITY, \"proxied\": $PROXIED
\"content\": \"$CONTENT\",
\"ttl\": $TTL,
\"priority\": $PRIORITY,
\"proxied\": $PROXIED
}" }"
elif [ "$TYPE" = "TXT" ]; then elif [ "$TYPE" = "TXT" ]; then
# Bei TXT-Einträgen müssen wir sicherstellen, dass der Inhalt in Anführungszeichen steht
# Aber Anführungszeichen innerhalb von JSON müssen escaped werden
# Wir entfernen zuerst alle vorhandenen Anführungszeichen und fügen sie dann korrekt hinzu
CONTENT=$(echo "$CONTENT" | sed 's/"//g') CONTENT=$(echo "$CONTENT" | sed 's/"//g')
JSON_DATA="{ JSON_DATA="{
\"type\": \"$TYPE\", \"type\": \"$TYPE\", \"name\": \"$NAME\", \"content\": \"\\\"$CONTENT\\\"\",
\"name\": \"$NAME\", \"ttl\": $TTL, \"proxied\": $PROXIED
\"content\": \"\\\"$CONTENT\\\"\",
\"ttl\": $TTL,
\"proxied\": $PROXIED
}" }"
else else
# Für alle anderen Record-Typen (z.B. CNAME)
JSON_DATA="{ JSON_DATA="{
\"type\": \"$TYPE\", \"type\": \"$TYPE\", \"name\": \"$NAME\", \"content\": \"$CONTENT\",
\"name\": \"$NAME\", \"ttl\": $TTL, \"proxied\": $PROXIED
\"content\": \"$CONTENT\",
\"ttl\": $TTL,
\"proxied\": $PROXIED
}" }"
fi fi
# API-Aufruf an Cloudflare
curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records" \ curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records" \
-H "Authorization: Bearer $CF_API_TOKEN" \ -H "Authorization: Bearer $CF_API_TOKEN" \
-H "Content-Type: application/json" \ -H "Content-Type: application/json" \
--data "$JSON_DATA" | jq . --data "$JSON_DATA" | jq -r '.success'
} }
# DKIM-Einträge abrufen und bei Cloudflare eintragen create_srv_record() {
echo "DKIM-Tokens abrufen von AWS SES..." local SERVICE=$1 # z.B. _imap
DKIM_TOKENS=$(aws ses get-identity-dkim-attributes \ local PROTO=$2 # z.B. _tcp
--identities ${DOMAIN_NAME} \ local PORT=$3 # z.B. 993
--region ${AWS_REGION} \ local TARGET=$4 # z.B. mail.email-srvr.com
--query "DkimAttributes.\"${DOMAIN_NAME}\".DkimTokens" \ local NAME="${SERVICE}.${PROTO}.${DOMAIN_NAME}"
--output text)
# Überprüfen, ob DKIM-Tokens abgerufen wurden echo "Erstelle SRV-Eintrag für $NAME -> $TARGET:$PORT..."
if [ -z "$DKIM_TOKENS" ]; then
echo "Fehler: Konnte DKIM-Tokens nicht abrufen. Ist die Domain bei AWS SES verifiziert?" local JSON_DATA="{
exit 1 \"type\": \"SRV\",
\"name\": \"$NAME\",
\"data\": {
\"service\": \"$SERVICE\",
\"proto\": \"$PROTO\",
\"name\": \"$DOMAIN_NAME\",
\"priority\": 0,
\"weight\": 1,
\"port\": $PORT,
\"target\": \"$TARGET\"
},
\"ttl\": 3600
}"
curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$CF_ZONE_ID/dns_records" \
-H "Authorization: Bearer $CF_API_TOKEN" \
-H "Content-Type: application/json" \
--data "$JSON_DATA" | jq -r '.success'
}
# ==========================================
# 1. AWS SES Setup (DKIM & Verifizierung)
# ==========================================
echo "--- AWS SES Konfiguration ---"
DKIM_TOKENS=$(aws ses get-identity-dkim-attributes \
--identities ${DOMAIN_NAME} --region ${AWS_REGION} \
--query "DkimAttributes.\"${DOMAIN_NAME}\".DkimTokens" --output text)
VERIFICATION_TOKEN=$(aws ses get-identity-verification-attributes \
--identities ${DOMAIN_NAME} --region ${AWS_REGION} \
--query "VerificationAttributes.\"${DOMAIN_NAME}\".VerificationToken" --output text)
if [ -n "$DKIM_TOKENS" ]; then
for TOKEN in ${DKIM_TOKENS}; do
create_dns_record "CNAME" "${TOKEN}._domainkey.${DOMAIN_NAME}" "${TOKEN}.dkim.amazonses.com" "false"
done
fi fi
# Domain-Verifizierungstoken abrufen if [ -n "$VERIFICATION_TOKEN" ]; then
VERIFICATION_TOKEN=$(aws ses get-identity-verification-attributes \ create_dns_record "TXT" "_amazonses.${DOMAIN_NAME}" "${VERIFICATION_TOKEN}" "false"
--identities ${DOMAIN_NAME} \ fi
--region ${AWS_REGION} \
--query "VerificationAttributes.\"${DOMAIN_NAME}\".VerificationToken" \
--output text)
# DKIM-Einträge anlegen # ==========================================
echo "DKIM-Einträge anlegen bei Cloudflare..." # 2. MX Records (AWS SES Ingest)
for TOKEN in ${DKIM_TOKENS}; do # ==========================================
create_dns_record "CNAME" "${TOKEN}._domainkey.${DOMAIN_NAME}" "${TOKEN}.dkim.amazonses.com" "false" 3600 echo "--- MX Records (AWS SES) ---"
done # Hier leiten wir eingehende Mails an Amazon S3/SQS Pipeline
# Domain-Verifizierungs-TXT-Eintrag anlegen
echo "Domain-Verifizierungs-TXT-Eintrag anlegen bei Cloudflare..."
create_dns_record "TXT" "_amazonses.${DOMAIN_NAME}" "${VERIFICATION_TOKEN}" "false" 3600
# MX-Einträge anlegen
echo "MX-Einträge anlegen bei Cloudflare..."
create_dns_record "MX" "${DOMAIN_NAME}" "inbound-smtp.${AWS_REGION}.amazonaws.com" "false" 3600 10 create_dns_record "MX" "${DOMAIN_NAME}" "inbound-smtp.${AWS_REGION}.amazonaws.com" "false" 3600 10
create_dns_record "MX" "mail.${DOMAIN_NAME}" "feedback-smtp.${AWS_REGION}.amazonses.com" "false" 3600 10
# CNAME für mail.{Domain} anlegen
echo "CNAME für mail.${DOMAIN_NAME} anlegen bei Cloudflare..."
create_dns_record "CNAME" "imap.${DOMAIN_NAME}" "${DOMAIN_NAME}" "false" 3600
# SPF-Eintrag anlegen # ==========================================
echo "SPF-Eintrag anlegen bei Cloudflare..." # 3. Autodiscover & Caddy (Client Access)
create_dns_record "TXT" "mail.${DOMAIN_NAME}" "v=spf1 include:amazonses.com ~all" "false" 3600 # ==========================================
echo "--- Autodiscover & Caddy Konfiguration ---"
# DMARC-Eintrag anlegen # A-Records: Autodiscover Domains zeigen auf deinen Caddy
echo "DMARC-Eintrag anlegen bei Cloudflare..." create_dns_record "A" "autodiscover.${DOMAIN_NAME}" "$CADDY_SERVER_IP" "false"
create_dns_record "TXT" "_dmarc.${DOMAIN_NAME}" "v=DMARC1; p=quarantine; pct=100; rua=mailto:postmaster@${DOMAIN_NAME}" "false" 3600 create_dns_record "A" "autoconfig.${DOMAIN_NAME}" "$CADDY_SERVER_IP" "false"
echo "DNS-Einrichtung abgeschlossen." # ==========================================
echo "Es kann bis zu 72 Stunden dauern, bis AWS SES die Domain verifiziert hat." # 4. SPF & DMARC
# ==========================================
echo "--- E-Mail Sicherheit (SPF & DMARC) ---"
# SPF: Nur Amazon SES erlaubt (Versand läuft darüber)
create_dns_record "TXT" "${DOMAIN_NAME}" "v=spf1 include:amazonses.com ~all" "false"
create_dns_record "TXT" "mail.${DOMAIN_NAME}" "v=spf1 include:amazonses.com ~all" "false"
# DMARC: Standard Policy
create_dns_record "TXT" "_dmarc.${DOMAIN_NAME}" "v=DMARC1; p=none; pct=100; rua=mailto:postmaster@${DOMAIN_NAME}" "false"
echo "Fertig. Konfiguration für $DOMAIN_NAME abgeschlossen."

View File

@@ -0,0 +1,329 @@
#!/bin/bash
# cloudflareMigrationDns.sh
# Sets DNS records for an Amazon SES migration via the Cloudflare API.
# Supports: DKIM, SPF (merge with existing record), DMARC, MX, autodiscover.
# Also sets mail/imap/smtp/pop subdomains for domain-specific mail-server access.
#
# MIGRATION FLAGS:
#   SKIP_CLIENT_DNS=true  → skip section 8 (imap/smtp/pop/webmail) + 10 (SRV)
#                           Use case: client subdomains stay with the old provider
#   SKIP_DMARC=true       → skip section 7 (DMARC)
#                           Use case: leave an existing DMARC record untouched
#
# Typical migration flow:
#   Phase 0 (preparation):   SKIP_CLIENT_DNS=true SKIP_DMARC=true → SES + SPF only
#   Phase 1 (MX cutover):    switch MX manually
#   Phase 2 (client switch): without SKIP flags → set all records
set -e
# --- CONFIGURATION (all overridable via environment) ---
AWS_REGION=${AWS_REGION:-"us-east-2"}
DRY_RUN=${DRY_RUN:-"false"}
# Migration flags (NEW)
SKIP_CLIENT_DNS=${SKIP_CLIENT_DNS:-"false"}
SKIP_DMARC=${SKIP_DMARC:-"false"}
# IP of the mail server - REQUIRED unless a CNAME chain is acceptable
MAIL_SERVER_IP=${MAIL_SERVER_IP:-""}
# Target server for mail clients. Default: mail.<customer-domain>
TARGET_MAIL_SERVER=${TARGET_MAIL_SERVER:-"mail.${DOMAIN_NAME}"}
# --- PRECONDITION CHECKS ---
if [ -z "$DOMAIN_NAME" ]; then echo "❌ Fehler: DOMAIN_NAME fehlt."; exit 1; fi
if [ -z "$CF_API_TOKEN" ]; then echo "❌ Fehler: CF_API_TOKEN fehlt."; exit 1; fi
if ! command -v jq &> /dev/null; then echo "❌ Fehler: 'jq' fehlt."; exit 1; fi
if ! command -v aws &> /dev/null; then echo "❌ Fehler: 'aws' CLI fehlt."; exit 1; fi
if [ -z "$MAIL_SERVER_IP" ] && [ "$TARGET_MAIL_SERVER" == "mail.$DOMAIN_NAME" ]; then
    echo "⚠️ WARNUNG: MAIL_SERVER_IP ist nicht gesetzt!"
    echo " mail.$DOMAIN_NAME braucht einen A-Record."
    echo " Setze: export MAIL_SERVER_IP=<deine-server-ip>"
    # No exit here - section 8 is simply skipped later if the IP is missing
fi
echo "============================================================"
echo " 🛡️ DNS Migration Setup für: $DOMAIN_NAME"
echo " 🌍 Region: $AWS_REGION"
echo " 📬 Mail-Server Target: $TARGET_MAIL_SERVER"
[ -n "$MAIL_SERVER_IP" ] && echo " 🖥️ Server IP: $MAIL_SERVER_IP"
[ "$DRY_RUN" = "true" ] && echo " ⚠️ DRY RUN MODE - Keine Änderungen!"
[ "$SKIP_CLIENT_DNS" = "true" ] && echo " ⏭️ SKIP: Client-Subdomains (imap/smtp/pop/webmail/SRV)"
[ "$SKIP_DMARC" = "true" ] && echo " ⏭️ SKIP: DMARC Record"
echo "============================================================"
# 1. FETCH ZONE ID for the domain from Cloudflare
echo "🔍 Suche Cloudflare Zone ID..."
ZONE_ID=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones?name=$DOMAIN_NAME" \
    -H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" | jq -r '.result[0].id')
if [ "$ZONE_ID" == "null" ] || [ -z "$ZONE_ID" ]; then
    echo "❌ Zone nicht gefunden."
    exit 1
fi
echo " ✅ Zone ID: $ZONE_ID"
# ------------------------------------------------------------------
# FUNCTION: ensure_record
# Idempotent upsert of one Cloudflare DNS record:
# look it up, then create, update, or skip (identical content).
# Args: $1 type, $2 name, $3 content, $4 proxied (default false),
#       $5 priority (MX only). Honors DRY_RUN; never changes a
#       differing root-domain MX (migration safety).
# ------------------------------------------------------------------
ensure_record() {
    local type=$1
    local name=$2
    local content=$3
    local proxied=${4:-false}
    local priority=$5 # Optional, only used for MX
    echo " ⚙️ Prüfe $type $name..."
    local search_res=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?type=$type&name=$name" \
        -H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json")
    local rec_id
    local rec_content
    if [ "$type" == "TXT" ] && [ "$name" == "$DOMAIN_NAME" ] && [[ "$content" == v=spf1* ]]; then
        # Special case root-domain SPF: filter specifically for the SPF record
        # so that e.g. a Google site-verification TXT is not overwritten.
        rec_id=$(echo "$search_res" | jq -r '.result[] | select(.content | contains("v=spf1")) | .id' | head -n 1)
        rec_content=$(echo "$search_res" | jq -r '.result[] | select(.content | contains("v=spf1")) | .content' | head -n 1)
    else
        # Default behavior for all other records (A, CNAME, MX, etc.)
        rec_id=$(echo "$search_res" | jq -r '.result[0].id')
        rec_content=$(echo "$search_res" | jq -r '.result[0].content')
    fi
    # Normalize empty jq output to the literal "null" the code below relies on
    [ -z "$rec_id" ] && rec_id="null"
    [ -z "$rec_content" ] && rec_content="null"
    # Build the request payload; MX additionally carries a priority field
    if [ "$type" == "MX" ]; then
        json_data=$(jq -n --arg t "$type" --arg n "$name" --arg c "$content" --argjson p "$proxied" --argjson prio "$priority" \
            '{type: $t, name: $n, content: $c, ttl: 3600, proxied: $p, priority: $prio}')
    else
        json_data=$(jq -n --arg t "$type" --arg n "$name" --arg c "$content" --argjson p "$proxied" \
            '{type: $t, name: $n, content: $c, ttl: 3600, proxied: $p}')
    fi
    if [ "$rec_id" == "null" ]; then
        # Record does not exist yet → POST (create)
        if [ "$DRY_RUN" = "true" ]; then
            echo " [DRY] Würde ERSTELLEN: $content"
        else
            res=$(curl -s -X POST "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records" \
                -H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" --data "$json_data")
            if [ "$(echo $res | jq -r .success)" == "true" ]; then
                echo " ✅ Erstellt."
            else
                echo " ❌ Fehler beim Erstellen: $(echo $res | jq -r '.errors[0].message')"
            fi
        fi
    else
        if [ "$rec_content" == "$content" ]; then
            echo " 🆗 Identisch. Überspringe."
        else
            # Never touch an already-existing, differing root-domain MX
            if [ "$type" == "MX" ] && [ "$name" == "$DOMAIN_NAME" ]; then
                echo " ⛔ Root-MX existiert aber ist anders: $rec_content"
                echo " → Wird NICHT automatisch geändert (Migrations-Schutz)"
                return
            fi
            if [ "$DRY_RUN" = "true" ]; then
                echo " [DRY] Würde UPDATEN: '$rec_content' → '$content'"
            else
                res=$(curl -s -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$rec_id" \
                    -H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" --data "$json_data")
                if [ "$(echo $res | jq -r .success)" == "true" ]; then
                    echo " ✅ Aktualisiert."
                else
                    echo " ❌ Fehler beim Updaten: $(echo $res | jq -r '.errors[0].message')"
                fi
            fi
        fi
    fi
}
# ------------------------------------------------------------------
# STEP 1: MAIL FROM domain (read from SES; empty if not configured)
# ------------------------------------------------------------------
echo ""
echo "--- 1. MAIL FROM Domain ---"
MAIL_FROM_DOMAIN=$(aws sesv2 get-email-identity \
    --email-identity "$DOMAIN_NAME" \
    --region "$AWS_REGION" \
    --query 'MailFromAttributes.MailFromDomain' \
    --output text 2>/dev/null || echo "NONE")
if [ "$MAIL_FROM_DOMAIN" == "NONE" ] || [ "$MAIL_FROM_DOMAIN" == "None" ] || [ -z "$MAIL_FROM_DOMAIN" ]; then
    echo " Keine MAIL FROM Domain in SES konfiguriert."
    echo " → Überspringe MAIL FROM DNS Setup."
    MAIL_FROM_DOMAIN=""
fi
# ------------------------------------------------------------------
# STEP 2: DKIM records (one CNAME per SES DKIM token)
# ------------------------------------------------------------------
echo ""
echo "--- 2. DKIM Records ---"
DKIM_TOKENS=$(aws sesv2 get-email-identity \
    --email-identity "$DOMAIN_NAME" \
    --region "$AWS_REGION" \
    --query 'DkimAttributes.Tokens' \
    --output text 2>/dev/null || echo "")
if [ -n "$DKIM_TOKENS" ] && [ "$DKIM_TOKENS" != "None" ]; then
    for TOKEN in $DKIM_TOKENS; do
        ensure_record "CNAME" "${TOKEN}._domainkey.${DOMAIN_NAME}" "${TOKEN}.dkim.amazonses.com" false
    done
else
    echo " ⚠️ Keine DKIM Tokens gefunden. SES Identity angelegt?"
fi
# ------------------------------------------------------------------
# STEP 3: SES domain-verification TXT record
# ------------------------------------------------------------------
echo ""
echo "--- 3. SES Verification TXT ---"
VERIFICATION_TOKEN=$(aws ses get-identity-verification-attributes \
    --identities "$DOMAIN_NAME" \
    --region "$AWS_REGION" \
    --query "VerificationAttributes.\"${DOMAIN_NAME}\".VerificationToken" \
    --output text 2>/dev/null || echo "")
if [ -n "$VERIFICATION_TOKEN" ] && [ "$VERIFICATION_TOKEN" != "None" ]; then
    ensure_record "TXT" "_amazonses.${DOMAIN_NAME}" "$VERIFICATION_TOKEN" false
else
    echo " ⚠️ Kein Verification Token. SES Identity angelegt?"
fi
# ------------------------------------------------------------------
# STEP 4: MAIL FROM subdomain (MX + SPF)
# ------------------------------------------------------------------
echo ""
echo "--- 4. MAIL FROM Subdomain (${MAIL_FROM_DOMAIN:-'nicht konfiguriert'}) ---"
if [ -n "$MAIL_FROM_DOMAIN" ]; then
    # Check for a CNAME conflict on the MAIL FROM subdomain:
    # MX + TXT cannot coexist with a CNAME on the same name.
    CNAME_CHECK=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?type=CNAME&name=$MAIL_FROM_DOMAIN" \
        -H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" | jq -r '.result[0].content')
    if [ "$CNAME_CHECK" != "null" ] && [ -n "$CNAME_CHECK" ]; then
        echo " ⛔ CNAME-Konflikt! $MAIL_FROM_DOMAIN hat CNAME → $CNAME_CHECK"
        echo " MX + TXT können nicht neben CNAME existieren."
        echo " → awsses.sh mit anderem MAIL_FROM_SUBDOMAIN erneut ausführen"
        exit 1
    fi
    ensure_record "MX" "$MAIL_FROM_DOMAIN" "feedback-smtp.${AWS_REGION}.amazonses.com" false 10
    ensure_record "TXT" "$MAIL_FROM_DOMAIN" "v=spf1 include:amazonses.com ~all" false
else
    echo " Übersprungen (keine MAIL FROM Domain konfiguriert)."
fi
# ------------------------------------------------------------------
# STEP 5: root-domain SPF (merged with the old provider's record)
# ------------------------------------------------------------------
echo ""
echo "--- 5. Root Domain SPF ---"
# Read the current SPF record.
# Cloudflare sometimes returns TXT content wrapped in quotes,
# so fetch all TXT records first and then filter.
CURRENT_SPF=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?type=TXT&name=$DOMAIN_NAME" \
    -H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" \
    | jq -r '[.result[] | select(.content | gsub("^\"|\"$";"") | startswith("v=spf1"))][0].content // ""')
# Strip the quotes right away
CURRENT_SPF=$(echo "$CURRENT_SPF" | tr -d '"')
if [ -n "$CURRENT_SPF" ]; then
    echo " 📋 Aktueller SPF: $CURRENT_SPF"
    # Check whether amazonses.com is already included
    if echo "$CURRENT_SPF" | grep -q "include:amazonses.com"; then
        echo " 🆗 SPF enthält bereits include:amazonses.com"
    else
        # Insert include:amazonses.com directly after "v=spf1 ".
        # NOTE(review): this relies on a space after "v=spf1" — a bare "v=spf1"
        # record with no mechanisms would not be rewritten; confirm acceptable.
        NEW_SPF=$(echo "$CURRENT_SPF" | sed 's/v=spf1 /v=spf1 include:amazonses.com /')
        # Upgrade ?all → ~all
        NEW_SPF=$(echo "$NEW_SPF" | sed 's/?all/~all/')
        echo " 📝 Neuer SPF: $NEW_SPF"
        ensure_record "TXT" "$DOMAIN_NAME" "$NEW_SPF" false
    fi
else
    echo " Kein SPF Record vorhanden. Erstelle neuen."
    ensure_record "TXT" "$DOMAIN_NAME" "v=spf1 include:amazonses.com ~all" false
fi
# ------------------------------------------------------------------
# STEP 6: root-domain MX (informational only, never changed here)
# ------------------------------------------------------------------
echo ""
echo "--- 6. Root Domain MX (nur Info, wird nicht geändert) ---"
CURRENT_MX=$(curl -s -X GET "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records?type=MX&name=$DOMAIN_NAME" \
    -H "Authorization: Bearer $CF_API_TOKEN" -H "Content-Type: application/json" \
    | jq -r '.result[0].content // "keiner"')
echo " MX vorhanden: $CURRENT_MX (wird nicht geändert)"
# ------------------------------------------------------------------
# STEP 7: DMARC (skippable via SKIP_DMARC to keep an existing policy)
# ------------------------------------------------------------------
echo ""
echo "--- 7. DMARC ---"
if [ "$SKIP_DMARC" = "true" ]; then
    echo " ⏭️ Übersprungen (SKIP_DMARC=true)"
    echo " Bestehender DMARC-Record bleibt unverändert."
else
    ensure_record "TXT" "_dmarc.$DOMAIN_NAME" "v=DMARC1; p=none; rua=mailto:postmaster@$DOMAIN_NAME" false
fi
# ------------------------------------------------------------------
# STEP 8: mail-client subdomains (A + CNAME), skippable pre-cutover
# ------------------------------------------------------------------
echo ""
echo "--- 8. Mailclient Subdomains (A + CNAME) ---"
if [ "$SKIP_CLIENT_DNS" = "true" ]; then
    echo " ⏭️ Übersprungen (SKIP_CLIENT_DNS=true)"
    echo " imap/smtp/pop/webmail bleiben beim alten Provider."
    echo " Setze SKIP_CLIENT_DNS=false nach MX-Cutover + Client-Umstellung."
else
    if [ -n "$MAIL_SERVER_IP" ]; then
        # A record for mail.<domain> pointing straight at the server IP
        ensure_record "A" "mail.$DOMAIN_NAME" "$MAIL_SERVER_IP" false
    else
        # CNAME to an external target host (only when it differs)
        if [ "$TARGET_MAIL_SERVER" != "mail.$DOMAIN_NAME" ]; then
            ensure_record "CNAME" "mail.$DOMAIN_NAME" "$TARGET_MAIL_SERVER" false
        fi
    fi
    # imap, smtp, pop, webmail → CNAME to mail.<domain>
    ensure_record "CNAME" "imap.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
    ensure_record "CNAME" "smtp.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
    ensure_record "CNAME" "pop.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
    ensure_record "CNAME" "webmail.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
fi
# ------------------------------------------------------------------
# STEP 9: autodiscover / autoconfig (always set, point at mail.<domain>)
# ------------------------------------------------------------------
echo ""
echo "--- 9. Autodiscover / Autoconfig ---"
ensure_record "CNAME" "autodiscover.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
ensure_record "CNAME" "autoconfig.$DOMAIN_NAME" "mail.$DOMAIN_NAME" false
# Final summary for the operator
echo ""
echo "============================================================"
echo "✅ Fertig für Domain: $DOMAIN_NAME"
if [ "$SKIP_CLIENT_DNS" = "true" ]; then
    echo ""
    echo " ⚠️ Client-Subdomains wurden NICHT geändert."
    echo " Nach MX-Cutover + Worker-Validierung erneut ausführen mit:"
    echo " SKIP_CLIENT_DNS=false SKIP_DMARC=false ./cloudflareMigrationDns.sh"
fi
echo ""
echo " Mailclient-Konfiguration für Kunden:"
echo " IMAP: imap.$DOMAIN_NAME Port 993 (SSL)"
echo " SMTP: smtp.$DOMAIN_NAME Port 587 (STARTTLS) oder 465 (SSL)"
echo " POP3: pop.$DOMAIN_NAME Port 995 (SSL)"
echo " Webmail: webmail.$DOMAIN_NAME"
echo "============================================================"

207
basic_setup/create-queue.sh Executable file
View File

@@ -0,0 +1,207 @@
#!/bin/bash
# create-queue.sh (v2 — with SNS fan-out + standby queue)
# Usage: DOMAIN=andreasknuth.de ./create-queue.sh
#
# Creates per domain:
#   - Primary queue + DLQ (as before, for Contabo)
#   - Standby queue + DLQ (NEW, for the office VM)
#   - SNS topic (NEW, fan-out)
#   - 2 SNS subscriptions (NEW, topic → primary + standby)
set -e
AWS_REGION="us-east-2"
if [ -z "$DOMAIN" ]; then
    echo "Error: DOMAIN environment variable not set"
    echo "Usage: DOMAIN=andreasknuth.de $0"
    exit 1
fi
# Derive resource names: dots in the domain become dashes (SQS/SNS name-safe)
DOMAIN_SLUG="${DOMAIN//./-}"
QUEUE_NAME="${DOMAIN_SLUG}-queue"
DLQ_NAME="${QUEUE_NAME}-dlq"
STANDBY_QUEUE_NAME="${DOMAIN_SLUG}-standby-queue"
STANDBY_DLQ_NAME="${STANDBY_QUEUE_NAME}-dlq"
TOPIC_NAME="${DOMAIN_SLUG}-topic"
ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
echo "========================================"
echo "Creating SQS + SNS for Email Delivery"
echo "========================================"
echo ""
echo "📧 Domain: $DOMAIN"
echo " Region: $AWS_REGION"
echo " Account: $ACCOUNT_ID"
echo ""
# ============================================================
# 1. Primary DLQ + queue (unchanged behavior, consumed by Contabo)
# ============================================================
echo "━━━ Primary Queue (Contabo) ━━━"
echo "Creating DLQ: $DLQ_NAME"
# create-queue || get-queue-url makes this re-runnable: if the queue
# already exists, fall back to looking up its URL.
DLQ_URL=$(aws sqs create-queue \
    --queue-name "${DLQ_NAME}" \
    --region "${AWS_REGION}" \
    --attributes '{"MessageRetentionPeriod": "1209600"}' \
    --query 'QueueUrl' --output text 2>/dev/null \
    || aws sqs get-queue-url --queue-name "${DLQ_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
DLQ_ARN=$(aws sqs get-queue-attributes --queue-url "${DLQ_URL}" --region "${AWS_REGION}" \
    --attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ DLQ: ${DLQ_ARN}"
echo "Creating Queue: $QUEUE_NAME"
# Main queue: 5 min visibility timeout, 1 day retention, long polling,
# redrive to the DLQ after 3 failed receives.
QUEUE_URL=$(aws sqs create-queue \
    --queue-name "${QUEUE_NAME}" \
    --region "${AWS_REGION}" \
    --attributes "{
        \"VisibilityTimeout\": \"300\",
        \"MessageRetentionPeriod\": \"86400\",
        \"ReceiveMessageWaitTimeSeconds\": \"20\",
        \"RedrivePolicy\": \"{\\\"deadLetterTargetArn\\\":\\\"${DLQ_ARN}\\\",\\\"maxReceiveCount\\\":\\\"3\\\"}\"
    }" \
    --query 'QueueUrl' --output text 2>/dev/null \
    || aws sqs get-queue-url --queue-name "${QUEUE_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
QUEUE_ARN=$(aws sqs get-queue-attributes --queue-url "${QUEUE_URL}" --region "${AWS_REGION}" \
    --attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ Queue: ${QUEUE_ARN}"
echo ""
# ============================================================
# 2. Standby DLQ + queue (NEW, consumed by the office VM)
# ============================================================
echo "━━━ Standby Queue (Office-VM) ━━━"
echo "Creating Standby DLQ: $STANDBY_DLQ_NAME"
STANDBY_DLQ_URL=$(aws sqs create-queue \
    --queue-name "${STANDBY_DLQ_NAME}" \
    --region "${AWS_REGION}" \
    --attributes '{"MessageRetentionPeriod": "1209600"}' \
    --query 'QueueUrl' --output text 2>/dev/null \
    || aws sqs get-queue-url --queue-name "${STANDBY_DLQ_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
STANDBY_DLQ_ARN=$(aws sqs get-queue-attributes --queue-url "${STANDBY_DLQ_URL}" --region "${AWS_REGION}" \
    --attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ Standby DLQ: ${STANDBY_DLQ_ARN}"
echo "Creating Standby Queue: $STANDBY_QUEUE_NAME"
# Same attribute set as the primary queue, redriving to the standby DLQ
STANDBY_QUEUE_URL=$(aws sqs create-queue \
    --queue-name "${STANDBY_QUEUE_NAME}" \
    --region "${AWS_REGION}" \
    --attributes "{
        \"VisibilityTimeout\": \"300\",
        \"MessageRetentionPeriod\": \"86400\",
        \"ReceiveMessageWaitTimeSeconds\": \"20\",
        \"RedrivePolicy\": \"{\\\"deadLetterTargetArn\\\":\\\"${STANDBY_DLQ_ARN}\\\",\\\"maxReceiveCount\\\":\\\"3\\\"}\"
    }" \
    --query 'QueueUrl' --output text 2>/dev/null \
    || aws sqs get-queue-url --queue-name "${STANDBY_QUEUE_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
STANDBY_QUEUE_ARN=$(aws sqs get-queue-attributes --queue-url "${STANDBY_QUEUE_URL}" --region "${AWS_REGION}" \
    --attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ Standby Queue: ${STANDBY_QUEUE_ARN}"
echo ""
# ============================================================
# 3. SNS topic (NEW)
# ============================================================
echo "━━━ SNS Topic (Fan-Out) ━━━"
echo "Creating Topic: $TOPIC_NAME"
# sns create-topic is idempotent: it returns the existing ARN on reruns
TOPIC_ARN=$(aws sns create-topic \
    --name "${TOPIC_NAME}" \
    --region "${AWS_REGION}" \
    --query 'TopicArn' --output text)
echo " ✓ Topic: ${TOPIC_ARN}"
echo ""
# ============================================================
# 4. SNS → SQS subscriptions (NEW)
# ============================================================
echo "━━━ Subscriptions ━━━"
# SNS needs permission to write into the SQS queues.
# Queue policy for the primary queue: only this topic may SendMessage.
POLICY_PRIMARY="{
    \"Version\": \"2012-10-17\",
    \"Statement\": [{
        \"Effect\": \"Allow\",
        \"Principal\": {\"Service\": \"sns.amazonaws.com\"},
        \"Action\": \"sqs:SendMessage\",
        \"Resource\": \"${QUEUE_ARN}\",
        \"Condition\": {\"ArnEquals\": {\"aws:SourceArn\": \"${TOPIC_ARN}\"}}
    }]
}"
# jq -c compacts the policy; jq -Rs re-encodes it as a JSON string value,
# which is the form the Policy attribute expects.
aws sqs set-queue-attributes \
    --queue-url "${QUEUE_URL}" \
    --region "${AWS_REGION}" \
    --attributes "{\"Policy\": $(echo "$POLICY_PRIMARY" | jq -c '.' | jq -Rs '.')}" \
    > /dev/null
echo " ✓ Primary Queue Policy gesetzt"
# Queue policy for the standby queue (same shape)
POLICY_STANDBY="{
    \"Version\": \"2012-10-17\",
    \"Statement\": [{
        \"Effect\": \"Allow\",
        \"Principal\": {\"Service\": \"sns.amazonaws.com\"},
        \"Action\": \"sqs:SendMessage\",
        \"Resource\": \"${STANDBY_QUEUE_ARN}\",
        \"Condition\": {\"ArnEquals\": {\"aws:SourceArn\": \"${TOPIC_ARN}\"}}
    }]
}"
aws sqs set-queue-attributes \
    --queue-url "${STANDBY_QUEUE_URL}" \
    --region "${AWS_REGION}" \
    --attributes "{\"Policy\": $(echo "$POLICY_STANDBY" | jq -c '.' | jq -Rs '.')}" \
    > /dev/null
echo " ✓ Standby Queue Policy gesetzt"
# Subscription: topic → primary queue.
# RawMessageDelivery=true delivers the original payload without the SNS envelope.
SUB_PRIMARY=$(aws sns subscribe \
    --topic-arn "${TOPIC_ARN}" \
    --protocol sqs \
    --notification-endpoint "${QUEUE_ARN}" \
    --region "${AWS_REGION}" \
    --attributes '{"RawMessageDelivery": "true"}' \
    --query 'SubscriptionArn' --output text)
echo " ✓ Subscription Primary: ${SUB_PRIMARY}"
# Subscription: topic → standby queue
SUB_STANDBY=$(aws sns subscribe \
    --topic-arn "${TOPIC_ARN}" \
    --protocol sqs \
    --notification-endpoint "${STANDBY_QUEUE_ARN}" \
    --region "${AWS_REGION}" \
    --attributes '{"RawMessageDelivery": "true"}' \
    --query 'SubscriptionArn' --output text)
echo " ✓ Subscription Standby: ${SUB_STANDBY}"
echo ""
# ============================================================
# Summary for the operator
# ============================================================
echo "========================================"
echo "✅ Setup complete for $DOMAIN"
echo "========================================"
echo ""
echo "Primary (Contabo):"
echo " Queue: $QUEUE_URL"
echo " DLQ: $DLQ_URL"
echo ""
echo "Standby (Office-VM):"
echo " Queue: $STANDBY_QUEUE_URL"
echo " DLQ: $STANDBY_DLQ_URL"
echo ""
echo "SNS Fan-Out:"
echo " Topic: $TOPIC_ARN"
echo " → Primary: $SUB_PRIMARY"
echo " → Standby: $SUB_STANDBY"
echo ""
echo "⚠️ Nächste Schritte:"
echo " 1. Lambda-Funktion updaten: sns.publish() statt sqs.send_message()"
echo " 2. Lambda IAM Role: sns:Publish Berechtigung hinzufügen"
echo " 3. Worker auf Office-VM: QUEUE_SUFFIX=-standby-queue konfigurieren"
echo " 4. Worker auf Office-VM: STANDBY_MODE=true setzen"

482
basic_setup/email-rule.sh Executable file
View File

@@ -0,0 +1,482 @@
#!/bin/bash
# email-rule.sh — manage per-address email rules (out-of-office auto-reply
# and forward addresses) stored in a DynamoDB table, via the AWS CLI and jq.
# AWS region and the DynamoDB table holding the rules
REGION="us-east-2"
TABLE_NAME="email-rules"
# ANSI colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# Logging helpers; error() also aborts the script.
# NOTE(review): "${GREEN}${NC}" emits a color code followed immediately by a
# reset with nothing in between — looks like a missing glyph (✓/!/✗); confirm.
info() { echo -e "${GREEN}${NC} $1"; }
warn() { echo -e "${YELLOW}${NC} $1"; }
error() { echo -e "${RED}${NC} $1"; exit 1; }
# Print the usage/help text (the heredoc is user-facing output; left as-is)
show_help() {
cat << EOF
Usage: $0 <command> [options]
Commands:
set-ooo <email> <message> [--html]
Setzt Out-of-Office Nachricht
--html: Nachricht ist HTML-formatiert (default: text)
set-forward <email> <address1>[,address2,...]
Setzt Forward-Adressen (überschreibt bestehende)
add-forward <email> <address1>[,address2,...]
Fügt Forward-Adressen hinzu (behält bestehende)
remove-ooo <email>
Entfernt Out-of-Office Regel
remove-forward <email>
Entfernt alle Forward-Adressen
remove-all <email>
Löscht komplette Regel (OOO + Forwards)
show <email>
Zeigt aktuelle Regel
list
Listet alle Regeln auf
Examples:
$0 set-ooo john@example.com "I'm on vacation until Monday"
$0 set-ooo john@example.com "<p>Out of office</p>" --html
$0 set-forward john@example.com alice@example.com,bob@example.com
$0 add-forward john@example.com charlie@example.com
$0 show john@example.com
$0 remove-all john@example.com
EOF
}
# Exit 0 when a rule item exists for the given address, 1 otherwise.
rule_exists() {
    local email=$1
    local item_json
    item_json=$(aws dynamodb get-item \
        --table-name "$TABLE_NAME" \
        --key '{"email_address": {"S": "'"$email"'"}}' \
        --region "$REGION" \
        --output json 2>/dev/null)
    # jq -e exits 0 only when .Item is present (neither null nor missing)
    if echo "$item_json" | jq -e '.Item' > /dev/null 2>&1; then
        return 0 # exists
    else
        return 1 # does not exist
    fi
}
# Print the raw DynamoDB item (JSON) for an address; prints "null" when absent.
get_rule() {
    local email=$1
    local raw
    raw=$(aws dynamodb get-item \
        --table-name "$TABLE_NAME" \
        --key '{"email_address": {"S": "'"$email"'"}}' \
        --region "$REGION" \
        --output json 2>/dev/null)
    printf '%s\n' "$raw" | jq -r '.Item'
}
# Basic syntactic check of an email address; aborts via error() on failure.
validate_email() {
    local email=$1
    # Pattern kept in a variable — the robust bash idiom for =~ matching.
    local pattern='^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
    if [[ ! "$email" =~ $pattern ]]; then
        error "Ungültige E-Mail-Adresse: $email"
    fi
}
# ==============================================
# COMMAND: set-ooo
# Enable the out-of-office auto-reply for an address while preserving any
# forward addresses already stored on the rule (put-item replaces the item).
# ==============================================
cmd_set_ooo() {
local email=$1
local message=$2
local content_type="text"
if [ "$3" = "--html" ]; then
content_type="html"
fi
if [ -z "$email" ] || [ -z "$message" ]; then
error "Usage: $0 set-ooo <email> <message> [--html]"
fi
validate_email "$email"
# Fetch existing forwards (if any) so the full-item write keeps them
local forwards_json='[]'
if rule_exists "$email"; then
local existing_rule=$(get_rule "$email")
# Guard against a null/empty item
if [ "$existing_rule" != "null" ] && [ -n "$existing_rule" ]; then
# Extract forwards, falling back to an empty array
local existing_forwards=$(echo "$existing_rule" | jq -r '.forwards.L // []')
if [ "$existing_forwards" != "[]" ] && [ "$existing_forwards" != "null" ]; then
forwards_json="$existing_forwards"
warn "Regel existiert bereits, behalte bestehende Forwards bei"
fi
fi
fi
# JSON-escape the message via jq.
# NOTE(review): echo appends a newline that jq -Rs keeps, so the stored
# message ends with "\n" — printf '%s' would avoid that; confirm intent.
local escaped_message=$(echo "$message" | jq -Rs .)
# Write the rule (full item replace)
aws dynamodb put-item \
--table-name "$TABLE_NAME" \
--item '{
"email_address": {"S": "'"$email"'"},
"ooo_active": {"BOOL": true},
"ooo_message": {"S": '"$escaped_message"'},
"ooo_content_type": {"S": "'"$content_type"'"},
"forwards": {"L": '"$forwards_json"'},
"last_updated": {"S": "'$(date -u +"%Y-%m-%dT%H:%M:%SZ")'"}
}' \
--region "$REGION" 2>&1
if [ $? -eq 0 ]; then
info "Out-of-Office aktiviert für $email ($content_type)"
else
error "Fehler beim Setzen der OOO-Regel"
fi
}
# ==============================================
# COMMAND: set-forward
# Replace the forward address list for an address, preserving any stored
# out-of-office settings (put-item replaces the whole item).
# ==============================================
cmd_set_forward() {
local email=$1
local forward_addresses=$2
if [ -z "$email" ] || [ -z "$forward_addresses" ]; then
error "Usage: $0 set-forward <email> <address1>[,address2,...]"
fi
validate_email "$email"
# Validate every forward address in the comma list
IFS=',' read -ra ADDRS <<< "$forward_addresses"
for addr in "${ADDRS[@]}"; do
addr=$(echo "$addr" | xargs) # xargs trims whitespace
if [ -n "$addr" ]; then
validate_email "$addr"
fi
done
# Convert the comma list to a DynamoDB list of {"S": ...} entries
local forwards_list=$(echo "$forward_addresses" | jq -R 'split(",") | map(gsub("^\\s+|\\s+$";"")) | map(select(length > 0)) | map({"S": .})')
# Fetch existing OOO settings (if any) so they survive the full-item write
local ooo_active="false"
local ooo_message=""
local ooo_content_type="text"
if rule_exists "$email"; then
local existing_rule=$(get_rule "$email")
if [ "$existing_rule" != "null" ] && [ -n "$existing_rule" ]; then
ooo_active=$(echo "$existing_rule" | jq -r '.ooo_active.BOOL // false')
ooo_message=$(echo "$existing_rule" | jq -r '.ooo_message.S // ""')
ooo_content_type=$(echo "$existing_rule" | jq -r '.ooo_content_type.S // "text"')
warn "Regel existiert bereits, behalte bestehende OOO-Einstellungen bei"
fi
fi
# JSON-escape the message. NOTE(review): echo appends a newline that
# jq -Rs keeps, so the re-stored message gains a trailing "\n"; confirm.
local escaped_ooo_message=$(echo "$ooo_message" | jq -Rs .)
# Write the rule (full item replace)
aws dynamodb put-item \
--table-name "$TABLE_NAME" \
--item '{
"email_address": {"S": "'"$email"'"},
"ooo_active": {"BOOL": '"$ooo_active"'},
"ooo_message": {"S": '"$escaped_ooo_message"'},
"ooo_content_type": {"S": "'"$ooo_content_type"'"},
"forwards": {"L": '"$forwards_list"'},
"last_updated": {"S": "'$(date -u +"%Y-%m-%dT%H:%M:%SZ")'"}
}' \
--region "$REGION" 2>&1
if [ $? -eq 0 ]; then
info "Forwards gesetzt für $email: ${ADDRS[*]}"
else
error "Fehler beim Setzen der Forward-Regel"
fi
}
# ==============================================
# COMMAND: add-forward
# Append forward addresses to an existing rule (order-preserving,
# deduplicated merge); delegates the actual write to cmd_set_forward.
# ==============================================
cmd_add_forward() {
local email=$1
local new_addresses=$2
if [ -z "$email" ] || [ -z "$new_addresses" ]; then
error "Usage: $0 add-forward <email> <address1>[,address2,...]"
fi
validate_email "$email"
if ! rule_exists "$email"; then
error "Keine Regel für $email gefunden. Nutze 'set-forward' stattdessen."
fi
# Fetch existing forwards as a comma-separated list
local existing_rule=$(get_rule "$email")
local existing_forwards=""
if [ "$existing_rule" != "null" ] && [ -n "$existing_rule" ]; then
existing_forwards=$(echo "$existing_rule" | jq -r '.forwards.L // [] | map(.S) | join(",")')
fi
# Combine old and new lists
local combined="$existing_forwards,$new_addresses"
IFS=',' read -ra ALL_ADDRS <<< "$combined"
# Deduplicate (first occurrence wins) and validate each address
declare -A seen
local unique_addrs=()
for addr in "${ALL_ADDRS[@]}"; do
addr=$(echo "$addr" | xargs) # Trim whitespace
if [ -n "$addr" ] && [ -z "${seen[$addr]}" ]; then
validate_email "$addr"
seen[$addr]=1
unique_addrs+=("$addr")
fi
done
# Re-join into a comma-separated string
local final_list=$(IFS=','; echo "${unique_addrs[*]}")
# Delegate to set-forward with the merged list
cmd_set_forward "$email" "$final_list"
info "Forwards hinzugefügt (insgesamt ${#unique_addrs[@]} Adressen)"
}
# ==============================================
# COMMAND: remove-ooo
# Disable the out-of-office reply; forwards are left untouched.
# ==============================================
cmd_remove_ooo() {
local email=$1
if [ -z "$email" ]; then
error "Usage: $0 remove-ooo <email>"
fi
validate_email "$email"
if ! rule_exists "$email"; then
warn "Keine Regel für $email gefunden"
return 0
fi
# Update: set ooo_active=false and drop the message fields.
# NOTE(review): "2>&1 >/dev/null" silences stdout only (stderr still goes
# to the terminal); ">/dev/null 2>&1" would silence both — confirm intent.
aws dynamodb update-item \
--table-name "$TABLE_NAME" \
--key '{"email_address": {"S": "'"$email"'"}}' \
--update-expression "SET ooo_active = :false, last_updated = :timestamp REMOVE ooo_message, ooo_content_type" \
--expression-attribute-values '{
":false": {"BOOL": false},
":timestamp": {"S": "'$(date -u +"%Y-%m-%dT%H:%M:%SZ")'"}
}' \
--region "$REGION" 2>&1 >/dev/null
if [ $? -eq 0 ]; then
info "Out-of-Office entfernt für $email"
else
error "Fehler beim Entfernen der OOO-Regel"
fi
}
# ==============================================
# COMMAND: remove-forward
# Clear the forward list; out-of-office settings are left untouched.
# ==============================================
cmd_remove_forward() {
local email=$1
if [ -z "$email" ]; then
error "Usage: $0 remove-forward <email>"
fi
validate_email "$email"
if ! rule_exists "$email"; then
warn "Keine Regel für $email gefunden"
return 0
fi
# Update: replace forwards with an empty list (same stderr/stdout
# redirection ordering as remove-ooo; see note there)
aws dynamodb update-item \
--table-name "$TABLE_NAME" \
--key '{"email_address": {"S": "'"$email"'"}}' \
--update-expression "SET forwards = :empty, last_updated = :timestamp" \
--expression-attribute-values '{
":empty": {"L": []},
":timestamp": {"S": "'$(date -u +"%Y-%m-%dT%H:%M:%SZ")'"}
}' \
--region "$REGION" 2>&1 >/dev/null
if [ $? -eq 0 ]; then
info "Forwards entfernt für $email"
else
error "Fehler beim Entfernen der Forwards"
fi
}
# ==============================================
# COMMAND: remove-all
# Delete the whole DynamoDB item (OOO settings and forwards) for an address.
# ==============================================
cmd_remove_all() {
    local email=$1
    if [ -z "$email" ]; then
        error "Usage: $0 remove-all <email>"
    fi
    validate_email "$email"
    if ! rule_exists "$email"; then
        warn "Keine Regel für $email gefunden"
        return 0
    fi
    if aws dynamodb delete-item \
        --table-name "$TABLE_NAME" \
        --key '{"email_address": {"S": "'"$email"'"}}' \
        --region "$REGION" 2>&1 >/dev/null; then
        info "Regel komplett gelöscht für $email"
    else
        error "Fehler beim Löschen der Regel"
    fi
}
# ==============================================
# COMMAND: show
# Pretty-print the stored rule: OOO status/message, forwards, last update.
# ==============================================
cmd_show() {
local email=$1
if [ -z "$email" ]; then
error "Usage: $0 show <email>"
fi
validate_email "$email"
if ! rule_exists "$email"; then
warn "Keine Regel für $email gefunden"
return 0
fi
local rule=$(get_rule "$email")
echo ""
echo "Regel für: $email"
echo "─────────────────────────────────────────"
# OOO status
local ooo_active=$(echo "$rule" | jq -r '.ooo_active.BOOL // false')
if [ "$ooo_active" = "true" ]; then
local ooo_msg=$(echo "$rule" | jq -r '.ooo_message.S // ""')
local ooo_type=$(echo "$rule" | jq -r '.ooo_content_type.S // "text"')
echo -e "Out-of-Office: ${GREEN}AKTIV${NC} ($ooo_type)"
echo "Nachricht: $ooo_msg"
else
echo -e "Out-of-Office: ${RED}INAKTIV${NC}"
fi
# Forwards
local forwards=$(echo "$rule" | jq -r '.forwards.L // [] | map(.S) | join(", ")')
if [ -n "$forwards" ]; then
echo "Forwards: $forwards"
else
echo -e "Forwards: ${RED}KEINE${NC}"
fi
# Last update timestamp
local updated=$(echo "$rule" | jq -r '.last_updated.S // "unbekannt"')
echo "Letzte Änderung: $updated"
echo ""
}
# ==============================================
# COMMAND: list
# Scan the whole table and print every rule.
# NOTE(review): reads a single Scan page only — results past the 1 MB page
# limit would be silently missing (no pagination); fine for small tables.
# ==============================================
cmd_list() {
echo ""
echo "Alle E-Mail-Regeln:"
echo "═════════════════════════════════════════════════════════════════"
local result=$(aws dynamodb scan \
--table-name "$TABLE_NAME" \
--region "$REGION" \
--output json 2>/dev/null)
local items=$(echo "$result" | jq -r '.Items // []')
local count=$(echo "$items" | jq 'length')
if [ "$count" -eq 0 ]; then
warn "Keine Regeln gefunden"
echo ""
return 0
fi
echo "$items" | jq -r '.[] |
"\(.email_address.S)\n" +
" OOO: \(if .ooo_active.BOOL then "✓ AKTIV (\(.ooo_content_type.S // "text"))" else "✗ INAKTIV" end)\n" +
" Forwards: \(if (.forwards.L | length) > 0 then (.forwards.L | map(.S) | join(", ")) else "KEINE" end)\n" +
" Updated: \(.last_updated.S // "unbekannt")\n"'
echo "─────────────────────────────────────────────────────────────────"
echo "Gesamt: $count Regel(n)"
echo ""
}
# ==============================================
# MAIN — dispatch on the first CLI argument.
# With no arguments COMMAND is empty; shift then fails harmlessly (the
# script does not use set -e) and the "" case prints the help text.
# ==============================================
COMMAND=$1
shift
case "$COMMAND" in
set-ooo)
cmd_set_ooo "$@"
;;
set-forward)
cmd_set_forward "$@"
;;
add-forward)
cmd_add_forward "$@"
;;
remove-ooo)
cmd_remove_ooo "$@"
;;
remove-forward)
cmd_remove_forward "$@"
;;
remove-all)
cmd_remove_all "$@"
;;
show)
cmd_show "$@"
;;
list)
cmd_list
;;
help|--help|-h|"")
show_help
;;
*)
error "Unbekannter Befehl: $COMMAND\n\nNutze '$0 help' für Hilfe"
;;
esac

123
basic_setup/manage_mail_user.sh Executable file
View File

@@ -0,0 +1,123 @@
#!/bin/bash
# manage_mail_user.sh
# Manages email users in DMS (docker-mailserver) and syncs the recipient
# list of the matching AWS SES receipt rule.
#
# Usage:
# ./manage_mail_user.sh add user@domain.com "Password123"
# ./manage_mail_user.sh del user@domain.com
# ./manage_mail_user.sh sync domain.com
set -e
COMMAND=$1
EMAIL=$2
PASSWORD=$3
# Name of the DMS container
DMS_CONTAINER="mailserver"
AWS_REGION="us-east-2"
# Helper: print usage and exit non-zero
usage() {
echo "Verwendung:"
echo " Account anlegen: $0 add <email> <password>"
echo " Account löschen: $0 del <email>"
echo " Nur Sync zu AWS: $0 sync <domain>"
exit 1
}
# Validate parameters
if [ -z "$COMMAND" ]; then usage; fi
# Derive the domain (when an email address was given)
if [ "$COMMAND" != "sync" ]; then
if [ -z "$EMAIL" ]; then usage; fi
DOMAIN=$(echo "$EMAIL" | cut -d'@' -f2)
else
# For sync, the 2nd parameter is the domain itself
DOMAIN=$2
if [ -z "$DOMAIN" ]; then usage; fi
fi
# Build the rule name (naming scheme: store-domain-com-to-s3)
RULE_SET_NAME="bizmatch-ruleset" # global ruleset name
RULE_NAME="store-${DOMAIN//./-}-to-s3"
echo "============================================================"
echo "Managing User for Domain: $DOMAIN"
echo "Action: $COMMAND"
echo "SES Rule: $RULE_NAME"
echo "============================================================"
# 1. Perform the requested action inside the Docker mailserver
if [ "$COMMAND" == "add" ]; then
if [ -z "$PASSWORD" ]; then
echo "Error: Passwort fehlt für 'add'."
exit 1
fi
echo "-> [DMS] Erstelle Postfach $EMAIL..."
docker exec "$DMS_CONTAINER" setup email add "$EMAIL" "$PASSWORD"
elif [ "$COMMAND" == "del" ]; then
echo "-> [DMS] Lösche Postfach $EMAIL..."
docker exec "$DMS_CONTAINER" setup email del "$EMAIL"
elif [ "$COMMAND" == "sync" ]; then
echo "-> [Sync] Starte manuelle Synchronisation..."
else
usage
fi
# 2. Read all users of this domain from DMS ("source of truth")
echo "-> Lese aktuelle User-Liste aus DMS..."
# 'setup email list' format: "* user@domain.com (0/0)" -> awk grabs the email.
# NOTE(review): grep "@$DOMAIN" treats dots as regex wildcards and also
# matches subdomains — grep -F or an anchored pattern would be stricter.
RECIPIENTS_LIST=$(docker exec "$DMS_CONTAINER" setup email list | grep "@$DOMAIN" | awk '{print $2}' | sort)
# Abort when no users remain so an empty recipient list is never pushed
if [ -z "$RECIPIENTS_LIST" ]; then
echo "WARNUNG: Keine User für $DOMAIN im DMS gefunden!"
echo "SES Rule wird NICHT aktualisiert, um Stillstand zu vermeiden."
exit 1
fi
# Count (SES per-rule recipient limit check)
COUNT=$(echo "$RECIPIENTS_LIST" | wc -l)
echo "-> Gefundene User: $COUNT"
if [ "$COUNT" -gt 100 ]; then
echo "CRITICAL ERROR: AWS SES erlaubt maximal 100 Empfänger pro Regel!"
echo "Gefunden: $COUNT. Abbruch."
exit 1
fi
# Build a JSON array for the AWS CLI:
# "user1@domain.com", "user2@domain.com", ...
JSON_RECIPIENTS=$(echo "$RECIPIENTS_LIST" | jq -R . | jq -s .)
echo "-> Neue Empfänger-Liste für SES:"
echo "$JSON_RECIPIENTS"
# 3. Update the AWS SES receipt rule
echo "-> [AWS] Hole aktuelle Regel-Definition..."
CURRENT_RULE=$(aws ses describe-receipt-rule \
--rule-set-name "$RULE_SET_NAME" \
--rule-name "$RULE_NAME" \
--region "$AWS_REGION" \
--output json)
# Patch the JSON: replace the "Recipients" block.
# jq trick: .Rule | .Recipients = $recipients
NEW_RULE_JSON=$(echo "$CURRENT_RULE" | jq --argjson recipients "$JSON_RECIPIENTS" '.Rule | .Recipients = $recipients')
# IMPORTANT: TlsPolicy casing in describe output can be tricky for update;
# the update command is therefore built explicitly.
echo "-> [AWS] Sende Update an SES..."
# FIX: --rule-name removed — the name is already inside $NEW_RULE_JSON
aws ses update-receipt-rule \
--rule-set-name "$RULE_SET_NAME" \
--rule "$NEW_RULE_JSON" \
--region "$AWS_REGION"
echo "============================================================"
echo "✅ Success! User angelegt/gelöscht und SES Rule aktualisiert."
echo "============================================================"

111
basic_setup/requeue_email.sh Executable file
View File

@@ -0,0 +1,111 @@
#!/bin/bash
# requeue_email.sh
# Takes an existing email from S3 and puts an event into the SQS queue
# to trigger reprocessing by the worker.
set -e
# --- Parameters ---
DOMAIN="$1"
RECIPIENT="$2"
MESSAGE_ID="$3" # This is the S3 key (the long string from the log)
AWS_REGION=${AWS_REGION:-"us-east-2"}
if [ -z "$DOMAIN" ] || [ -z "$RECIPIENT" ] || [ -z "$MESSAGE_ID" ]; then
echo "Usage: $0 <domain> <recipient> <s3-message-id>"
echo "Example: $0 buddelectric.net Tyler@buddelectric.net cn8j6j970atkh7n3fstdhgqr9imgrivegnm70jg1"
exit 1
fi
# --- Derived names (convention: dots -> dashes, fixed suffixes) ---
BUCKET_NAME=$(echo "$DOMAIN" | tr '.' '-')"-emails"
QUEUE_NAME=$(echo "$DOMAIN" | tr '.' '-')"-queue"
TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
echo "============================================================"
echo " Requeue S3 Email"
echo "============================================================"
echo " Domain: $DOMAIN"
echo " Recipient: $RECIPIENT"
echo " Bucket: $BUCKET_NAME"
echo " Key (ID): $MESSAGE_ID"
echo ""
# --- Step 1: Verify the S3 object exists ---
echo "[1/2] Checking S3 object: s3://${BUCKET_NAME}/${MESSAGE_ID} ..."
if aws s3 ls "s3://${BUCKET_NAME}/${MESSAGE_ID}" --region "$AWS_REGION" > /dev/null 2>&1; then
echo " ✓ Object found."
else
echo " ❌ ERROR: Object s3://${BUCKET_NAME}/${MESSAGE_ID} does not exist!"
exit 1
fi
# --- Step 2: Put the message into SQS ---
# NOTE(review): under set -e a failing get-queue-url aborts before the
# -z check below ever runs; the explicit check only catches empty output.
echo "[2/2] Placing message in SQS queue..."
QUEUE_URL=$(aws sqs get-queue-url \
--queue-name "$QUEUE_NAME" \
--region "$AWS_REGION" \
--output text \
--query 'QueueUrl')
if [ -z "$QUEUE_URL" ]; then
echo " ❌ ERROR: Queue $QUEUE_NAME not found!"
exit 1
fi
# SES event payload (simulates the Lambda output).
# "requeue-admin@domain" is used as source because the worker parses the
# real sender from the email headers in the S3 file anyway.
SES_DATA=$(jq -n \
--arg msgId "$MESSAGE_ID" \
--arg source "requeue-admin@${DOMAIN}" \
--arg recipient "$RECIPIENT" \
--arg ts "$TIMESTAMP" \
--arg bucket "$BUCKET_NAME" \
'{
mail: {
messageId: $msgId,
source: $source,
timestamp: $ts,
destination: [$recipient]
},
receipt: {
recipients: [$recipient],
timestamp: $ts,
action: {
type: "S3",
bucketName: $bucket,
objectKey: $msgId
}
}
}')
# Fake SNS wrapper (same format as the Lambda shim produces)
SQS_BODY=$(jq -n \
--arg sesData "$SES_DATA" \
--arg ts "$TIMESTAMP" \
'{
Type: "Notification",
MessageId: "requeue-\(now | tostring)",
TopicArn: "arn:aws:sns:ses-shim:global-topic",
Subject: "Amazon SES Email Receipt Notification",
Message: $sesData,
Timestamp: $ts
}')
# Send the event
SQS_MSG_ID=$(aws sqs send-message \
--queue-url "$QUEUE_URL" \
--region "$AWS_REGION" \
--message-body "$SQS_BODY" \
--output text \
--query 'MessageId')
echo " ✓ Done (SQS MessageId: ${SQS_MSG_ID})"
echo ""
echo "============================================================"
echo " Email successfully requeued!"
echo " Worker should pick it up immediately."
echo "============================================================"

View File

@@ -0,0 +1,164 @@
import json
import os
import boto3
import uuid
import logging
from datetime import datetime
from botocore.exceptions import ClientError
import time
import random
# Root logger: the Lambda runtime attaches its own handler; only the level is set here.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Module-level AWS clients are reused across warm Lambda invocations.
sqs = boto3.client('sqs')
sns = boto3.client('sns')
# Lazily-filled cache for the STS account id (see get_account_id).
sts_account_id = None
# Retry tuning for the SNS/SQS send paths.
MAX_RETRIES = 3
BASE_BACKOFF = 1


def exponential_backoff(attempt):
    """Return the delay in seconds before retry *attempt*.

    Grows as BASE_BACKOFF * 2**attempt, plus up to one second of random
    jitter so concurrent retries do not synchronize.
    """
    jitter = random.uniform(0, 1)
    return (2 ** attempt) * BASE_BACKOFF + jitter
def get_account_id():
    """Return the AWS account id, cached in the ``sts_account_id`` global.

    The first call queries STS; subsequent calls in the same (warm) Lambda
    container reuse the cached value and avoid the STS round-trip.
    """
    global sts_account_id
    if sts_account_id is None:
        sts_account_id = boto3.client('sts').get_caller_identity()['Account']
    return sts_account_id
def get_topic_arn(domain):
    """Derive the SNS topic ARN for *domain*.

    Naming convention: ``domain.tld`` maps to topic ``domain-tld-topic``.
    The region defaults to us-east-2 when AWS_REGION is unset.
    """
    name = "{}-topic".format(domain.replace('.', '-'))
    return "arn:aws:sns:{region}:{account}:{topic}".format(
        region=os.environ.get('AWS_REGION', 'us-east-2'),
        account=get_account_id(),
        topic=name,
    )
def get_queue_url(domain):
    """Resolve the SQS queue URL for *domain* (fallback path without SNS).

    Naming convention: ``domain.tld`` maps to queue ``domain-tld-queue``.
    Raises ValueError when the queue does not exist; any other AWS error
    propagates unchanged.
    """
    queue_name = domain.replace('.', '-') + '-queue'
    try:
        response = sqs.get_queue_url(QueueName=queue_name)
        return response['QueueUrl']
    except ClientError as e:
        if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue':
            logger.error(f"Queue nicht gefunden für Domain: {domain}")
            raise ValueError(f"Keine Queue für Domain {domain}")
        else:
            raise
def publish_to_sns(topic_arn, message_body, msg_id):
    """Publish *message_body* to SNS with bounded retries.

    Returns True on success.  Returns False when the topic is missing or we
    lack permission — the caller then falls back to a direct SQS send.
    Re-raises the last ClientError after MAX_RETRIES failed attempts; the
    sleep uses the already-incremented attempt counter, so the first retry
    waits roughly 2 seconds plus jitter.
    """
    attempt = 0
    while attempt < MAX_RETRIES:
        try:
            sns.publish(
                TopicArn=topic_arn,
                Message=message_body
            )
            logger.info(f"✅ Published {msg_id} to SNS: {topic_arn}")
            return True
        except ClientError as e:
            error_code = e.response['Error']['Code']
            # Fall back to SQS when the topic is missing OR not authorized
            if error_code in ('NotFound', 'NotFoundException', 'AuthorizationError'):
                logger.info(f" SNS unavailable for {topic_arn} ({error_code}) — falling back to SQS")
                return False
            attempt += 1
            logger.warning(f"Retry {attempt}/{MAX_RETRIES} SNS: {error_code}")
            if attempt == MAX_RETRIES:
                raise
            time.sleep(exponential_backoff(attempt))
    # Defensive: the loop only exits via return/raise above.
    return False
def send_to_sqs(queue_url, message_body, msg_id):
    """Fallback path: send *message_body* straight to SQS with retries.

    Retries up to MAX_RETRIES times with exponential backoff and re-raises
    the last ClientError once the budget is exhausted.
    """
    for attempt in range(1, MAX_RETRIES + 1):
        try:
            sqs.send_message(
                QueueUrl=queue_url,
                MessageBody=message_body
            )
            logger.info(f"✅ Sent {msg_id} to SQS: {queue_url}")
            return
        except ClientError as e:
            error_code = e.response['Error']['Code']
            logger.warning(f"Retry {attempt}/{MAX_RETRIES} SQS: {error_code}")
            if attempt == MAX_RETRIES:
                raise
            time.sleep(exponential_backoff(attempt))
def lambda_handler(event, context):
    """Entry point: receive an SES receipt event and fan the metadata out.

    For each record the recipient domain is extracted dynamically.
    Strategy: publish to the domain's SNS topic (fan-out to primary and
    standby queues); fall back to a direct SQS send when no topic exists.
    """
    try:
        records = event.get('Records', [])
        logger.info(f"Received event with {len(records)} records.")
        for record in records:
            ses_data = record.get('ses', {})
            if not ses_data:
                logger.warning(f"Invalid SES event: Missing 'ses' in record")
                continue
            mail = ses_data.get('mail', {})
            receipt = ses_data.get('receipt', {})
            # Prefer the receipt's recipient list; fall back to the envelope destination
            recipients = receipt.get('recipients', []) or mail.get('destination', [])
            if not recipients:
                logger.warning("No recipients in event - skipping")
                continue
            # Routing is decided by the first recipient's domain only
            first_recipient = recipients[0]
            domain = first_recipient.split('@')[-1].lower()
            if not domain:
                logger.error("Could not extract domain from recipient")
                continue
            msg_id = mail.get('messageId', 'unknown')
            source = mail.get('source', 'unknown')
            logger.info(f"Processing Message-ID: {msg_id} for domain: {domain}")
            logger.info(f" From: {source}")
            logger.info(f" To: {recipients}")
            ses_json_string = json.dumps(ses_data)
            payload_size = len(ses_json_string.encode('utf-8'))
            logger.info(f" Metadata Payload Size: {payload_size} bytes")
            # Size guard below the SNS/SQS message limit.
            # NOTE(review): raising here aborts the whole batch, including
            # records already forwarded in this invocation — confirm intended.
            if payload_size > 200000:
                raise ValueError("Payload too large")
            # Wrap the SES payload in an SNS-notification-shaped envelope so
            # downstream workers consume real SNS and shim messages uniformly.
            # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
            # timezone-aware datetime.now(timezone.utc) is the modern form.
            fake_sns_payload = {
                "Type": "Notification",
                "MessageId": str(uuid.uuid4()),
                "TopicArn": "arn:aws:sns:ses-shim:global-topic",
                "Subject": "Amazon SES Email Receipt Notification",
                "Message": ses_json_string,
                "Timestamp": datetime.utcnow().isoformat() + "Z"
            }
            message_body = json.dumps(fake_sns_payload)
            # Strategy: SNS first, SQS as fallback
            topic_arn = get_topic_arn(domain)
            sns_success = publish_to_sns(topic_arn, message_body, msg_id)
            if not sns_success:
                # No SNS topic for this domain -> send straight to SQS (legacy path)
                queue_url = get_queue_url(domain)
                send_to_sqs(queue_url, message_body, msg_id)
        return {'status': 'ok'}
    except Exception as e:
        logger.error(f"❌ Critical Error in Lambda Shim: {str(e)}", exc_info=True)
        raise e

View File

@@ -1,38 +0,0 @@
#!/bin/bash
# setup_email_domain.sh - Wrapper script that runs the three setup scripts in order
# Check that a domain argument was supplied
if [ -z "$1" ]; then
echo "Fehler: Keine Domain angegeben."
echo "Verwendung: ./setup_email_domain.sh domain.de [region]"
exit 1
fi
DOMAIN_NAME=$1
AWS_REGION=${2:-"us-east-2"}
# Export variables for the child scripts
export DOMAIN_NAME
export AWS_REGION
echo "=== AWS E-Mail-Infrastruktur für $DOMAIN_NAME einrichten ==="
echo "AWS-Region: $AWS_REGION"
echo
# Run the scripts one after another.
# NOTE(review): no set -e — a failing step does not stop the sequence.
echo "1. S3-Bucket erstellen..."
./awss3.sh
echo
echo "2. SES-Konfiguration einrichten..."
# Bucket naming convention: dots -> dashes, "-emails" suffix
export S3_BUCKET_NAME=$(echo "$DOMAIN_NAME" | tr '.' '-' | awk '{print $0 "-emails"}')
./awsses.sh
echo
echo "3. IAM-Benutzer und SMTP-Zugangsdaten erstellen..."
./awsiam.sh
echo
echo "=== Setup abgeschlossen ==="
echo "Alle Schritte wurden abgeschlossen. Bitte überprüfen Sie die Ausgaben der einzelnen Skripte."
echo "Vergessen Sie nicht, die benötigten DNS-Einträge für Ihre Domain zu setzen, um die SES-Verifizierung abzuschließen."

View File

@@ -0,0 +1,168 @@
#!/bin/bash
# test_migration_email.sh - Places a test email into S3 + SQS
#
# Simulates the complete SES inbound flow: Mail goes to S3, metadata to SQS.
# The worker picks it up and processes it (Delivery or Forward).
#
# Usage:
# ./test_migration_email.sh cielectrical.com carlosr@cielectrical.com
# ./test_migration_email.sh buddelectric.net service@buddelectric.net
#
# Optional sender address:
# ./test_migration_email.sh cielectrical.com carlosr@cielectrical.com sender@example.com
set -e
# --- Parameters ---
DOMAIN="$1"
RECIPIENT="$2"
FROM_ADDR="${3:-support@bayarea-cc.com}"
AWS_REGION=${AWS_REGION:-"us-east-2"}
if [ -z "$DOMAIN" ] || [ -z "$RECIPIENT" ]; then
echo "Usage: $0 <domain> <recipient> [from-address]"
echo "Example: $0 cielectrical.com carlosr@cielectrical.com"
exit 1
fi
# --- Derived variables (naming convention: dots -> dashes) ---
# MESSAGE_ID is made unique per run via epoch seconds + the shell PID.
BUCKET_NAME=$(echo "$DOMAIN" | tr '.' '-')"-emails"
QUEUE_NAME=$(echo "$DOMAIN" | tr '.' '-')"-queue"
MESSAGE_ID="test-migration-$(date +%s)-$$"
TIMESTAMP=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
DATE_RFC2822=$(date -R)
echo "============================================================"
echo " Migration Test Email"
echo "============================================================"
echo " Domain: $DOMAIN"
echo " Recipient: $RECIPIENT"
echo " Sender: $FROM_ADDR"
echo " Bucket: $BUCKET_NAME"
echo " Queue: $QUEUE_NAME"
echo " Key: $MESSAGE_ID"
echo ""
# --- Step 1: Create RFC822 email ---
# The heredoc delimiter is unquoted, so $(date), ${DOMAIN} etc. are
# expanded at creation time.
# NOTE(review): with set -e, a failure before the cleanup at the bottom
# leaks $TMP_FILE — a trap 'rm -f "$TMP_FILE"' EXIT would be safer.
echo "[1/3] Creating test email..."
TMP_FILE=$(mktemp /tmp/test-mail-XXXXXX.eml)
cat > "$TMP_FILE" << EOF
From: Migration Test <${FROM_ADDR}>
To: ${RECIPIENT}
Subject: Migration Test $(date '+%Y-%m-%d %H:%M:%S')
Date: ${DATE_RFC2822}
Message-ID: <${MESSAGE_ID}@test.email-srvr.com>
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 7bit
Hello!
This is a test email to validate the email migration pipeline.
Sent: $(date)
Domain: ${DOMAIN}
Recipient: ${RECIPIENT}
Message-ID: ${MESSAGE_ID}
If you see this email in your inbox, the complete path is working:
S3 -> SQS -> Worker -> Forward/Delivery
--
Bay Area Affiliates - Migration Test
EOF
echo " Done ($(wc -c < "$TMP_FILE") bytes)"
# --- Step 2: Upload to S3 ---
echo "[2/3] Uploading to S3: s3://${BUCKET_NAME}/${MESSAGE_ID} ..."
aws s3 cp "$TMP_FILE" "s3://${BUCKET_NAME}/${MESSAGE_ID}" \
--region "$AWS_REGION" \
--quiet
echo " Done"
# --- Step 3: Place SQS message in fake-SNS format ---
echo "[3/3] Placing message in SQS queue..."
QUEUE_URL=$(aws sqs get-queue-url \
--queue-name "$QUEUE_NAME" \
--region "$AWS_REGION" \
--output text \
--query 'QueueUrl')
if [ -z "$QUEUE_URL" ]; then
echo " ERROR: Queue $QUEUE_NAME not found!"
rm -f "$TMP_FILE"
exit 1
fi
# SES event payload (what the Lambda normally produces)
SES_DATA=$(jq -n \
--arg msgId "$MESSAGE_ID" \
--arg source "$FROM_ADDR" \
--arg recipient "$RECIPIENT" \
--arg ts "$TIMESTAMP" \
'{
mail: {
messageId: $msgId,
source: $source,
timestamp: $ts,
destination: [$recipient]
},
receipt: {
recipients: [$recipient],
timestamp: $ts,
action: {
type: "S3",
bucketName: "test",
objectKey: $msgId
}
}
}')
# Fake SNS wrapper (same format as ses_sns_shim_global.py)
SQS_BODY=$(jq -n \
--arg sesData "$SES_DATA" \
--arg ts "$TIMESTAMP" \
'{
Type: "Notification",
MessageId: "test-\(now | tostring)",
TopicArn: "arn:aws:sns:ses-shim:global-topic",
Subject: "Amazon SES Email Receipt Notification",
Message: $sesData,
Timestamp: $ts
}')
SQS_MSG_ID=$(aws sqs send-message \
--queue-url "$QUEUE_URL" \
--region "$AWS_REGION" \
--message-body "$SQS_BODY" \
--output text \
--query 'MessageId')
echo " Done (SQS MessageId: ${SQS_MSG_ID})"
# --- Cleanup ---
rm -f "$TMP_FILE"
echo ""
echo "============================================================"
echo " Test email placed successfully!"
echo "============================================================"
echo ""
echo " Watch worker logs:"
echo " docker logs -f email-worker --tail 50"
echo ""
echo " Expected output:"
echo " Processing: ${MESSAGE_ID:0:20}... -> ${RECIPIENT}"
echo " Forwarded via legacy SMTP ... (if forward rule exists)"
echo " OR"
echo " Delivered to ${RECIPIENT} (if DMS mailbox exists)"
echo ""
echo " Check S3 object:"
echo " aws s3 ls s3://${BUCKET_NAME}/${MESSAGE_ID} --region ${AWS_REGION}"
echo "============================================================"

137
bounces/5.4.1.json Normal file
View File

@@ -0,0 +1,137 @@
{
"version": "0",
"id": "68eb43ad-3ad6-25ef-2b49-2389fc4460cc",
"detail-type": "Email Bounced",
"source": "aws.ses",
"account": "339712845857",
"time": "2025-12-19T02:24:37Z",
"region": "us-east-2",
"resources": [
"arn:aws:ses:us-east-2:339712845857:configuration-set/relay-outbound"
],
"detail": {
"eventType": "Bounce",
"bounce": {
"feedbackId": "010f019b346c64dc-ebd1959f-ac85-4d28-b2c2-e2db414889d2-000000",
"bounceType": "Permanent",
"bounceSubType": "General",
"bouncedRecipients": [
{
"emailAddress": "pishing@paypal.com",
"action": "failed",
"status": "5.0.0",
"diagnosticCode": "smtp; 5.1.0 - Unknown address error 550-'5.4.1 Recipient address rejected: Access denied. For more information see https: //aka.ms/EXOSmtpErrors [DS2PEPF00003441.namprd04.prod.outlook.com 2025-12-19T02:24:36.588Z 08DE3C04B3813774] (delivery attempts: 0)"
}
],
"timestamp": "2025-12-19T02:24:37.521Z",
"reportingMTA": "dns; mx2.paypalcorp.com"
},
"mail": {
"timestamp": "2025-12-19T02:24:34.082Z",
"source": "andreas.knuth@bayarea-cc.com",
"sourceArn": "arn:aws:ses:us-east-2:339712845857:identity/bayarea-cc.com",
"sendingAccountId": "339712845857",
"messageId": "010f019b346c5722-7f94b168-0d66-444c-8333-99f80801ee6e-000000",
"destination": [
"pishing@paypal.com"
],
"headersTruncated": false,
"headers": [
{
"name": "Received",
"value": "from mail.email-srvr.com (mail.email-srvr.com [2.56.188.138]) by email-smtp.amazonaws.com with SMTP (SimpleEmailService-d-4T8YRF3HF) id JWwKWtbMKwPcuMJWmawg for pishing@paypal.com; Fri, 19 Dec 2025 02:24:34 +0000 (UTC)"
},
{
"name": "DKIM-Signature",
"value": "v=1; a=rsa-sha256; c=relaxed/simple; d=bayarea-cc.com; s=mail; t=1766111073; bh=489KasDOSypdn6kagJw8c/vBfll20acGANR7WEnsNq8=; h=From:To:Subject:Reply-To:In-Reply-To:References; b=axFSO5cJaEy+bSCreaVfYY8ThHUvEAJmiVV26Qpw2sZG4YFoYglcNry2Gv2B+99ctJwcTAlxa/XzB0mJzzSpyU7WU0D03Kw/4k+8Mdl0mu+Li8icoINPJ0v5Kap2hVMRVp+ge6w7wAZR+rS46oAvL++piRZYr+85FGiHpFtJIK8e4a06sXtkHB4kDDNTDzKiTM7tTH6/oD4LV3LxeL29notQih5atTUOSo5LHN1QNp5Hq05A4sih7rM6J7CNKIouvqm1ku8I2+xUsgNu0neWnddBDV8njD24Gc70Flab22q5GDqVQ0caql7odpMlrCQjdmAgyEmeVP+JWjB3EnZ3DQ=="
},
{
"name": "Received",
"value": "from app.email-bayarea.com (roundcube-new.mail_network [172.18.0.5]) (Authenticated sender: andreas.knuth@bayarea-cc.com) by mail.email-srvr.com (Postfix) with ESMTPSA id 6CD2F2E60092 for <pishing@paypal.com>; Thu, 18 Dec 2025 20:24:33 -0600 (CST)"
},
{
"name": "MIME-Version",
"value": "1.0"
},
{
"name": "Date",
"value": "Thu, 18 Dec 2025 20:24:33 -0600"
},
{
"name": "From",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "To",
"value": "pishing@paypal.com"
},
{
"name": "Subject",
"value": "Fwd: A one-time merchant setup fee of $249.99 has been applied and will appear on your bank statement wit"
},
{
"name": "Reply-To",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Mail-Reply-To",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "In-Reply-To",
"value": "<6061d865685c1bb406c127f32451d22d@bayarea-cc.com>"
},
{
"name": "References",
"value": "<boLgON9OSkmzVWOPwCp8qQ@geopod-ismtpd-45> <6061d865685c1bb406c127f32451d22d@bayarea-cc.com>"
},
{
"name": "Message-ID",
"value": "<bf937f16310bd1be5350425b2dfc3d65@bayarea-cc.com>"
},
{
"name": "X-Sender",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Content-Type",
"value": "multipart/alternative; boundary='=_d6bdf41daf974c2c1b77e9250e4348a7'"
}
],
"commonHeaders": {
"from": [
"andreas.knuth@bayarea-cc.com"
],
"replyTo": [
"andreas.knuth@bayarea-cc.com"
],
"date": "Thu, 18 Dec 2025 20:24:33 -0600",
"to": [
"pishing@paypal.com"
],
"messageId": "010f019b346c5722-7f94b168-0d66-444c-8333-99f80801ee6e-000000",
"subject": "Fwd: A one-time merchant setup fee of $249.99 has been applied and will appear on your bank statement wit"
},
"tags": {
"ses:source-tls-version": [
"TLSv1.3"
],
"ses:operation": [
"SendSmtpEmail"
],
"ses:configuration-set": [
"relay-outbound"
],
"ses:source-ip": [
"2.56.188.138"
],
"ses:from-domain": [
"bayarea-cc.com"
],
"ses:caller-identity": [
"bizmatch.net"
]
}
}
}
}

125
bounces/ooo1.json Normal file
View File

@@ -0,0 +1,125 @@
{
"version": "0",
"id": "b1198c79-d4df-6d77-a472-12c05eb99a39",
"detail-type": "Email Bounced",
"source": "aws.ses",
"account": "339712845857",
"time": "2025-12-19T01:59:01Z",
"region": "us-east-2",
"resources": [
"arn:aws:ses:us-east-2:339712845857:configuration-set/relay-outbound"
],
"detail": {
"eventType": "Bounce",
"bounce": {
"feedbackId": "010f019b3454f3b9-6b92ce4e-e1f2-420b-8dd3-e48e062f0f88-000000",
"bounceType": "Transient",
"bounceSubType": "General",
"bouncedRecipients": [
{
"emailAddress": "frankie@iitwelders.com"
}
],
"timestamp": "2025-12-19T01:59:01.245Z"
},
"mail": {
"timestamp": "2025-12-19T01:58:58.255Z",
"source": "andreas.knuth@bayarea-cc.com",
"sourceArn": "arn:aws:ses:us-east-2:339712845857:identity/bayarea-cc.com",
"sendingAccountId": "339712845857",
"messageId": "010f019b3454e7cf-36b8560d-7880-4913-9e5d-dd87f336b0dd-000000",
"destination": [
"frankie@iitwelders.com"
],
"headersTruncated": False,
"headers": [
{
"name": "Received",
"value": "from mail.email-srvr.com (mail.email-srvr.com [2.56.188.138]) by email-smtp.amazonaws.com with SMTP (SimpleEmailService-d-Z6YSX0FGF) id d7Quc01fG0CsS9eS7yfX for frankie@iitwelders.com; Fri, 19 Dec 2025 01:58:58 +0000 (UTC)"
},
{
"name": "DKIM-Signature",
"value": "v=1; a=rsa-sha256; c=relaxed/simple; d=bayarea-cc.com; s=mail; t=1766109537; bh=S/AVMjQHFbdT0GdJ56RlBKNMvace1V8iv+n0iBHTPYQ=; h=From:To:Subject:Reply-To; b=CX4lHSxen4aqQ5+3mlfl51hmyoK3mkP3gVu9mfILqPaxafH8aXNYfUYBxpRct9sQHNuN2OhgUfdjrTM/75WnKrV50wo13HeKw3D2b3d/N3zj447KG2eAGycm/guNibrcjhduLDERGVwMFaeWAAKHbbWfWnAw68yEFKkcnTCNB1imyAn9diDew5zO9q2ZuA0fOm3YXZ7qFmVtmmX4z6la0Rfa39gEM6wBiOhpZTtODyTqkmABFolVTEqc1VqYH27jB8ZVHi1bO4M42VGoRcDzvjOfkxq5ad/UQeho7HOsLuWnVG7H3BarTom/TdZYMrt2ZllH5N+nf2ec90/lH20CxA=="
},
{
"name": "Received",
"value": "from app.email-bayarea.com (roundcube-new.mail_network [172.18.0.5]) (Authenticated sender: andreas.knuth@bayarea-cc.com) by mail.email-srvr.com (Postfix) with ESMTPSA id EC1B02E5FD51 for <frankie@iitwelders.com>; Thu, 18 Dec 2025 19:58:56 -0600 (CST)"
},
{
"name": "MIME-Version",
"value": "1.0"
},
{
"name": "Date",
"value": "Thu, 18 Dec 2025 19:58:56 -0600"
},
{
"name": "From",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "To",
"value": "Frankie <frankie@iitwelders.com>"
},
{
"name": "Subject",
"value": "12/18/25 7:58"
},
{
"name": "Reply-To",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Mail-Reply-To",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Message-ID",
"value": "<17a781e80ecae12285697c536cc46033@bayarea-cc.com>"
},
{
"name": "X-Sender",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Content-Type",
"value": "multipart/alternative; boundary='=_46eb06b0a62a2efa142c40c5eadbbc54'"
}
],
"commonHeaders": {
"from": [
"andreas.knuth@bayarea-cc.com"
],
"replyTo": [
"andreas.knuth@bayarea-cc.com"
],
"date": "Thu, 18 Dec 2025 19:58:56 -0600",
"to": [
"Frankie <frankie@iitwelders.com>"
],
"messageId": "010f019b3454e7cf-36b8560d-7880-4913-9e5d-dd87f336b0dd-000000",
"subject": "12/18/25 7:58"
},
"tags": {
"ses:source-tls-version": [
"TLSv1.3"
],
"ses:operation": [
"SendSmtpEmail"
],
"ses:configuration-set": [
"relay-outbound"
],
"ses:source-ip": [
"2.56.188.138"
],
"ses:from-domain": [
"bayarea-cc.com"
],
"ses:caller-identity": [
"bizmatch.net"
]
}
}
}
}

125
bounces/ooo2.json Normal file
View File

@@ -0,0 +1,125 @@
{
"version": "0",
"id": "4d37ae3d-e411-2b83-8a83-6489a5fa1a00",
"detail-type": "Email Bounced",
"source": "aws.ses",
"account": "339712845857",
"time": "2025-12-19T02:10:33Z",
"region": "us-east-2",
"resources": [
"arn:aws:ses:us-east-2:339712845857:configuration-set/relay-outbound"
],
"detail": {
"eventType": "Bounce",
"bounce": {
"feedbackId": "010f019b345f8461-3382d3a0-42bb-4861-977f-e62606a24cb7-000000",
"bounceType": "Transient",
"bounceSubType": "General",
"bouncedRecipients": [
{
"emailAddress": "remote@gregknoppcpa.com"
}
],
"timestamp": "2025-12-19T02:10:33.636Z"
},
"mail": {
"timestamp": "2025-12-19T02:10:32.560Z",
"source": "andreas.knuth@bayarea-cc.com",
"sourceArn": "arn:aws:ses:us-east-2:339712845857:identity/bayarea-cc.com",
"sendingAccountId": "339712845857",
"messageId": "010f019b345f7ff0-e22c2d38-c499-48ed-8992-abbf1c44b6a1-000000",
"destination": [
"remote@gregknoppcpa.com"
],
"headersTruncated": False,
"headers": [
{
"name": "Received",
"value": "from mail.email-srvr.com (mail.email-srvr.com [2.56.188.138]) by email-smtp.amazonaws.com with SMTP (SimpleEmailService-d-V0JPVCFGF) id 6KbS70pRiY9lOcyjIONV for remote@gregknoppcpa.com; Fri, 19 Dec 2025 02:10:32 +0000 (UTC)"
},
{
"name": "DKIM-Signature",
"value": "v=1; a=rsa-sha256; c=relaxed/simple; d=bayarea-cc.com; s=mail; t=1766110231; bh=sU5OepBQM0PVwu+hgNjl2gP+fBXM9lfNeDiFo9j+0BQ=; h=From:To:Subject:Reply-To; b=lK1PWF722nu9AuCE0SRq7VBVHBrznhyiozlM2kxSSVFVUNHtV4abBKHMPdzE0c6oYN4blSogNMi9/qJA4EKSpoegMHertvETZpHHTM51M083wtzodojc5ZPKoOZjLpjWOVf3oqomccwUxTwqNXmyEdQcUH/lYz52o+b6GFFb7X7MkxQfA0VXgIYL5v0rIKszOoLAour3lfx99uoJSwIIVLZi4f5LFWa+FB48bGH67FaojHRqQzeioMQyLwa9fSKMG/bifT1/jPSmCauRPMSxzsdDBvk0nuVitr8/RgAno8FqfBH+UWJIw8Wt3gVQDLNL82hi5qWUgsXKwY3LFo2LkA=="
},
{
"name": "Received",
"value": "from app.email-bayarea.com (roundcube-new.mail_network [172.18.0.5]) (Authenticated sender: andreas.knuth@bayarea-cc.com) by mail.email-srvr.com (Postfix) with ESMTPSA id D9D3F2E5FD51 for <remote@gregknoppcpa.com>; Thu, 18 Dec 2025 20:10:31 -0600 (CST)"
},
{
"name": "MIME-Version",
"value": "1.0"
},
{
"name": "Date",
"value": "Thu, 18 Dec 2025 20:10:31 -0600"
},
{
"name": "From",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "To",
"value": "remote@gregknoppcpa.com"
},
{
"name": "Subject",
"value": "testing out-of-office messages"
},
{
"name": "Reply-To",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Mail-Reply-To",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Message-ID",
"value": "<95264ff6f55b9cc3ffcd451d6b27f7f0@bayarea-cc.com>"
},
{
"name": "X-Sender",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Content-Type",
"value": "multipart/alternative; boundary='=_7ffce281e198378b2420ed61fd6b9156'"
}
],
"commonHeaders": {
"from": [
"andreas.knuth@bayarea-cc.com"
],
"replyTo": [
"andreas.knuth@bayarea-cc.com"
],
"date": "Thu, 18 Dec 2025 20:10:31 -0600",
"to": [
"remote@gregknoppcpa.com"
],
"messageId": "010f019b345f7ff0-e22c2d38-c499-48ed-8992-abbf1c44b6a1-000000",
"subject": "testing out-of-office messages"
},
"tags": {
"ses:source-tls-version": [
"TLSv1.3"
],
"ses:operation": [
"SendSmtpEmail"
],
"ses:configuration-set": [
"relay-outbound"
],
"ses:source-ip": [
"2.56.188.138"
],
"ses:from-domain": [
"bayarea-cc.com"
],
"ses:caller-identity": [
"bizmatch.net"
]
}
}
}
}

133
bounces/paypal.json Normal file
View File

@@ -0,0 +1,133 @@
{
"version": "0",
"id": "ddfd563e-49f6-1f59-6d1e-c67158ab5eec",
"detail-type": "Email Bounced",
"source": "aws.ses",
"account": "339712845857",
"time": "2025-12-19T02:33:55Z",
"region": "us-east-2",
"resources": [
"arn:aws:ses:us-east-2:339712845857:configuration-set/relay-outbound"
],
"detail": {
"eventType": "Bounce",
"bounce": {
"feedbackId": "010f019b3474e821-12fa60c3-e47e-4289-a4b6-47ac55d996a2-000000",
"bounceType": "Undetermined",
"bounceSubType": "Undetermined",
"bouncedRecipients": [
{
"emailAddress": "phishing@paypal.com"
}
],
"timestamp": "2025-12-19T02:33:55.434Z"
},
"mail": {
"timestamp": "2025-12-19T02:33:53.244Z",
"source": "andreas.knuth@bayarea-cc.com",
"sourceArn": "arn:aws:ses:us-east-2:339712845857:identity/bayarea-cc.com",
"sendingAccountId": "339712845857",
"messageId": "010f019b3474df5c-c634e6cc-8ebb-4b13-957e-0e9b84e39917-000000",
"destination": [
"phishing@paypal.com"
],
"headersTruncated": False,
"headers": [
{
"name": "Received",
"value": "from mail.email-srvr.com (mail.email-srvr.com [2.56.188.138]) by email-smtp.amazonaws.com with SMTP (SimpleEmailService-d-V0JPVCFGF) id XSfVNEIjPhLtO2NEYG88 for phishing@paypal.com; Fri, 19 Dec 2025 02:33:53 +0000 (UTC)"
},
{
"name": "DKIM-Signature",
"value": "v=1; a=rsa-sha256; c=relaxed/simple; d=bayarea-cc.com; s=mail; t=1766111632; bh=ycI1TnY3sqcJF4JmY2LCeBTlZ8Zv+aR+7YbjD2Y1n0Y=; h=From:To:Subject:Reply-To:In-Reply-To:References; b=YQ/EtiYxQIi4Ykwx4ELKXP6gd5u+sev5/GnN97t2rkfxFjrGAZHFdUS9IHipOi/KG5VCAbW89ocW6vPZrdC9SpSxrxr+NMncceSBfvun7SgMQM7ja12clsMfOPebbLsp+TEoSwo43QW4IYsNJep8B7OTInTpadABgeiKd+yWe0BLfsa56tGr6OdIcCBKmxXm/qEZoEjkXooYWu0A5yWCrfpfpdvgZTKKaArturPAtiPUcQiUuDRx7jMkDQkofmBNTtrDbmaLzfEbPqfI2usavV7DCDpa70N6/fbVY2RgnFpcDYP3zd1gf4qDGdnsy9+8B848D1QV/HrEVDsh/Opoxw=="
},
{
"name": "Received",
"value": "from app.email-bayarea.com (roundcube-new.mail_network [172.18.0.5]) (Authenticated sender: andreas.knuth@bayarea-cc.com) by mail.email-srvr.com (Postfix) with ESMTPSA id 9685E2E60092 for <phishing@paypal.com>; Thu, 18 Dec 2025 20:33:52 -0600 (CST)"
},
{
"name": "MIME-Version",
"value": "1.0"
},
{
"name": "Date",
"value": "Thu, 18 Dec 2025 20:33:52 -0600"
},
{
"name": "From",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "To",
"value": "phishing@paypal.com"
},
{
"name": "Subject",
"value": "Fwd: A one-time merchant setup fee of $249.99 has been applied and will appear on your bank statement wit"
},
{
"name": "Reply-To",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Mail-Reply-To",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "In-Reply-To",
"value": "<6061d865685c1bb406c127f32451d22d@bayarea-cc.com>"
},
{
"name": "References",
"value": "<boLgON9OSkmzVWOPwCp8qQ@geopod-ismtpd-45> <6061d865685c1bb406c127f32451d22d@bayarea-cc.com>"
},
{
"name": "Message-ID",
"value": "<e7ec8070400b953e735b6fbe5439fa1e@bayarea-cc.com>"
},
{
"name": "X-Sender",
"value": "andreas.knuth@bayarea-cc.com"
},
{
"name": "Content-Type",
"value": "multipart/alternative; boundary='=_eb88e98e1904b7ce5ebf2be21b8909fd'"
}
],
"commonHeaders": {
"from": [
"andreas.knuth@bayarea-cc.com"
],
"replyTo": [
"andreas.knuth@bayarea-cc.com"
],
"date": "Thu, 18 Dec 2025 20:33:52 -0600",
"to": [
"phishing@paypal.com"
],
"messageId": "010f019b3474df5c-c634e6cc-8ebb-4b13-957e-0e9b84e39917-000000",
"subject": "Fwd: A one-time merchant setup fee of $249.99 has been applied and will appear on your bank statement wit"
},
"tags": {
"ses:source-tls-version": [
"TLSv1.3"
],
"ses:operation": [
"SendSmtpEmail"
],
"ses:configuration-set": [
"relay-outbound"
],
"ses:source-ip": [
"2.56.188.138"
],
"ses:from-domain": [
"bayarea-cc.com"
],
"ses:caller-identity": [
"bizmatch.net"
]
}
}
}
}

2
caddy/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
caddy-data/
caddy-config/

7
caddy/Caddyfile Normal file
View File

@@ -0,0 +1,7 @@
# Global options block.
{
	# ACME account email, taken from the environment.
	email {env.CLOUDFLARE_EMAIL}
	# DNS-01 challenge via Cloudflare — needed for the wildcard certificates
	# that update-caddy-certs.sh generates per domain.
	acme_dns cloudflare {env.CLOUDFLARE_API_TOKEN}
	# Pin to the Let's Encrypt production directory.
	acme_ca https://acme-v02.api.letsencrypt.org/directory
	# Verbose logging.
	debug
}
# Per-domain site blocks generated by update-caddy-certs.sh.
import mail_certs

13
caddy/Dockerfile.caddy Normal file
View File

@@ -0,0 +1,13 @@
# Dockerfile.caddy
# Builder stage: compile Caddy with the required plugins via xcaddy.
ARG CADDY_VERSION=2.9.1
FROM caddy:${CADDY_VERSION}-builder AS builder
# Build Caddy at exactly this version + plugins
RUN xcaddy build ${CADDY_VERSION} \
    --with github.com/caddy-dns/cloudflare \
    --with github.com/caddyserver/replace-response
# Runtime stage: stock Caddy image with the custom binary swapped in.
FROM caddy:${CADDY_VERSION}
COPY --from=builder /usr/bin/caddy /usr/bin/caddy
# Ensure the log directory exists (bind-mounted in docker-compose).
RUN mkdir -p /var/log/caddy

31
caddy/docker-compose.yml Normal file
View File

@@ -0,0 +1,31 @@
# Runs the custom Caddy image (Cloudflare DNS + replace-response plugins)
# as the TLS/reverse-proxy front for the mail stack.
services:
  caddy:
    image: custom-caddy:2.9.1-rr1
    container_name: caddy
    build:
      context: .
      dockerfile: Dockerfile.caddy
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    extra_hosts:
      # Lets the container reach services running on the Docker host.
      - 'host.docker.internal:host-gateway'
    networks:
      - mail_network
    volumes:
      - $PWD/Caddyfile:/etc/caddy/Caddyfile
      # Generated per-domain site blocks (see update-caddy-certs.sh).
      - $PWD/mail_certs:/etc/caddy/mail_certs
      # email_autodiscover removed - snippet is now embedded in mail_certs
      # email.mobileconfig.html removed - content is now inline in mail_certs
      - $PWD/email-setup:/var/www/email-setup
      - ./caddy-data:/data
      - ./caddy-config:/config
      - /home/aknuth/log/caddy:/var/log/caddy
    environment:
      - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
      - CLOUDFLARE_EMAIL=${CLOUDFLARE_EMAIL}

# Pre-existing Docker network shared with the mail containers.
networks:
  mail_network:
    external: true

BIN
caddy/email-setup/logo.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.9 KiB

View File

@@ -0,0 +1,122 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Email Setup</title>
<script src="https://cdnjs.cloudflare.com/ajax/libs/qrcodejs/1.0.0/qrcode.min.js"></script>
<style>
body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif; background: #f2f2f7; display: flex; justify-content: center; align-items: center; min-height: 100vh; margin: 0; padding: 20px; box-sizing: border-box; }
.card { background: white; padding: 2.5rem; border-radius: 24px; box-shadow: 0 12px 30px rgba(0,0,0,0.1); width: 100%; max-width: 420px; text-align: center; transition: all 0.3s ease; }
.logo { width: 80px; height: 80px; margin-bottom: 1.5rem; }
h1 { margin: 0 0 1rem 0; color: #1a1a1a; font-size: 1.8rem; }
p { color: #666; line-height: 1.5; margin-bottom: 2rem; }
/* Input Section */
#input-section { transition: opacity 0.3s ease; }
input { width: 100%; padding: 16px; margin-bottom: 16px; border: 2px solid #eee; border-radius: 14px; font-size: 16px; box-sizing: border-box; transition: border-color 0.2s; outline: none; }
input:focus { border-color: #007AFF; }
button { width: 100%; padding: 16px; background: #007AFF; color: white; border: none; border-radius: 14px; font-size: 18px; font-weight: 600; cursor: pointer; transition: background 0.2s, transform 0.1s; }
button:hover { background: #0062cc; }
button:active { transform: scale(0.98); }
/* QR Section (initially hidden) */
#qr-section { display: none; opacity: 0; transition: opacity 0.5s ease; }
#qrcode { margin: 2rem auto; padding: 15px; background: white; border-radius: 16px; box-shadow: 0 4px 12px rgba(0,0,0,0.08); display: inline-block; }
#qrcode img { margin: auto; } /* Centers the generated QR code */
.hint { font-size: 0.9rem; color: #888; margin-top: 1.5rem; }
.hint strong { color: #333; }
.error { color: #d32f2f; background: #fde8e8; padding: 10px; border-radius: 8px; font-size: 0.9rem; display: none; margin-bottom: 16px; }
.back-btn { background: transparent; color: #007AFF; margin-top: 1rem; font-size: 16px; }
.back-btn:hover { background: #f0f8ff; }
</style>
</head>
<body>
<div class="card">
<img src="/email-setup/logo.png" alt="Logo" class="logo">
<div id="input-section">
<h1>Email Setup</h1>
<p>Enter your email address to automatically configure your iPhone or iPad.</p>
<div id="error-msg" class="error">Please enter a valid email address.</div>
<input type="email" id="email" placeholder="name@company.com" required autocomplete="email">
<button onclick="generateQR()">Generate QR Code</button>
</div>
<div id="qr-section">
<h1>Scan me!</h1>
<p>Open the <strong>Camera app</strong> on your iPhone and point it at this code.</p>
<div id="qrcode"></div>
<p class="hint">
Tap the banner that appears at the top.<br>
Click <strong>"Allow"</strong> and then go to <strong>Settings</strong> to install the profile.
</p>
<button class="back-btn" onclick="resetForm()">Back</button>
</div>
</div>
<script>
// Cached DOM references for the two panels, the email field and the
// inline error message; `qrcode` holds the lazily-created QRCode widget.
const inputSection = document.getElementById('input-section');
const qrSection = document.getElementById('qr-section');
const emailInput = document.getElementById('email');
const errorMsg = document.getElementById('error-msg');
let qrcode = null;
/**
 * Validate the entered email address, build the autodiscover deep link for
 * its domain, and show it as a QR code (creating or updating the widget).
 */
function generateQR() {
    const email = emailInput.value.trim();
    // Minimal sanity check: must contain "@" and a domain of at least 3 chars.
    if (!email || !email.includes('@') || email.split('@')[1].length < 3) {
        errorMsg.style.display = 'block';
        emailInput.focus();
        return;
    }
    errorMsg.style.display = 'none';
    const domain = email.split('@')[1];
    // The magic link. Encode the email for the query string — without this,
    // characters such as "+" in the local part would be misparsed server-side.
    const targetUrl = `https://autodiscover.${domain}/apple?email=${encodeURIComponent(email)}`;
    // Hide input, show QR
    inputSection.style.display = 'none';
    qrSection.style.display = 'block';
    setTimeout(() => qrSection.style.opacity = '1', 50);
    // Generate (or update) QR Code
    if (qrcode === null) {
        qrcode = new QRCode(document.getElementById("qrcode"), {
            text: targetUrl,
            width: 200,
            height: 200,
            colorDark : "#000000",
            colorLight : "#ffffff",
            correctLevel : QRCode.CorrectLevel.H
        });
    } else {
        qrcode.clear();
        qrcode.makeCode(targetUrl);
    }
}
/**
 * Return from the QR panel to the input panel: fade the QR section out,
 * then (after the 300ms transition) swap panel visibility, clear the
 * email field and refocus it.
 */
function resetForm() {
    qrSection.style.opacity = '0';
    setTimeout(function () {
        qrSection.style.display = 'none';
        inputSection.style.display = 'block';
        emailInput.value = '';
        emailInput.focus();
    }, 300);
}
// Convenience: pressing Enter in the email field triggers QR generation.
emailInput.addEventListener("keypress", function(event) {
    if (event.key === "Enter") generateQR();
});
</script>
</body>
</html>

378
caddy/update-caddy-certs.sh Executable file
View File

@@ -0,0 +1,378 @@
#!/bin/bash
# update-caddy-certs.sh
# Lives in the Caddy directory (next to the Caddyfile).
#
# Reads all domains from the DMS container and generates the wildcard-cert
# blocks for Caddy into the file "mail_certs" (included via
# "import mail_certs" in the Caddyfile).
#
# Generates per domain:
#   - wildcard-cert block (*.domain + domain)
#   - webmail block (reverse_proxy to Roundcube)
#   - autodiscover/autoconfig block (imports the email_settings snippet)
#   - email-setup block (QR-code page for iPhone)
#
# For new domains: run the script again + caddy reload.
#
# Usage:
#   ./update-caddy-certs.sh
#   DRY_RUN=true ./update-caddy-certs.sh
#   DMS_CONTAINER=mailserver CADDY_CONTAINER=caddy ./update-caddy-certs.sh
set -e
DMS_CONTAINER=${DMS_CONTAINER:-"mailserver"}
CADDY_CONTAINER=${CADDY_CONTAINER:-"caddy"}
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
OUTPUT_FILE="$SCRIPT_DIR/mail_certs"
DRY_RUN=${DRY_RUN:-"false"}
# Node hostname of the mail server (for the default-cert block)
NODE_HOSTNAME=${NODE_HOSTNAME:-"node1.email-srvr.com"}
echo "============================================================"
echo " 📜 Caddy Wildcard-Cert Konfig Generator"
echo " DMS Container: $DMS_CONTAINER"
echo " Caddy Container: $CADDY_CONTAINER"
echo " Output: $OUTPUT_FILE"
echo " Node Hostname: $NODE_HOSTNAME"
[ "$DRY_RUN" = "true" ] && echo " ⚠️ DRY RUN - Keine Dateien werden geschrieben"
echo "============================================================"
# --- Read domains from DMS ---
echo ""
echo "📋 Lese Domains aus DMS..."
# Extract the part after "@" from each DMS account, de-duplicated.
# NOTE(review): grep -oP requires GNU grep with PCRE support — confirm it is
# available wherever this script runs.
DOMAINS=$(docker exec "$DMS_CONTAINER" setup email list 2>/dev/null \
    | grep -oP '(?<=@)[^\s]+' \
    | sort -u)
if [ -z "$DOMAINS" ]; then
    echo "⚠️ Keine DMS-Accounts gefunden. Nur Node-Hostname wird eingetragen."
fi
if [ -n "$DOMAINS" ]; then
    echo " Gefundene Domains:"
    for d in $DOMAINS; do echo " - $d"; done
fi
# --- Generate config ---
# The whole mail_certs file is assembled in $OUTPUT (with literal \n escapes,
# expanded later via printf '%b').
echo ""
echo "📝 Generiere Caddy-Konfiguration..."
OUTPUT=""
OUTPUT="${OUTPUT}# mail_certs - Automatisch generiert von update-caddy-certs.sh\n"
OUTPUT="${OUTPUT}# Wildcard-Zertifikate + Webmail + Autodiscover für DMS-Domains.\n"
OUTPUT="${OUTPUT}# Einbinden im Caddyfile: import mail_certs\n"
OUTPUT="${OUTPUT}# Generiert: $(date)\n"
OUTPUT="${OUTPUT}\n"
# =====================================================================
# Autodiscover/Autoconfig snippet (dynamic)
# {labels.1}.{labels.0} = base domain derived from the request hostname
# =====================================================================
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
OUTPUT="${OUTPUT}# Autodiscover/Autoconfig Snippet (dynamisch)\n"
OUTPUT="${OUTPUT}# {labels.1}.{labels.0} = Basisdomain aus Hostname\n"
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
OUTPUT="${OUTPUT}(email_settings) {\n"
# --- 1. Outlook Classic Autodiscover (POST + GET XML) ---
OUTPUT="${OUTPUT} # Outlook Autodiscover (XML) - POST und GET\n"
OUTPUT="${OUTPUT} route /autodiscover/autodiscover.xml {\n"
OUTPUT="${OUTPUT} header Content-Type \"application/xml\"\n"
OUTPUT="${OUTPUT} respond \`<?xml version=\"1.0\" encoding=\"utf-8\"?>\n"
OUTPUT="${OUTPUT}<Autodiscover xmlns=\"http://schemas.microsoft.com/exchange/autodiscover/responseschema/2006\">\n"
OUTPUT="${OUTPUT} <Response xmlns=\"http://schemas.microsoft.com/exchange/autodiscover/outlook/responseschema/2006a\">\n"
OUTPUT="${OUTPUT} <Account>\n"
OUTPUT="${OUTPUT} <AccountType>email</AccountType>\n"
OUTPUT="${OUTPUT} <Action>settings</Action>\n"
OUTPUT="${OUTPUT} <Protocol>\n"
OUTPUT="${OUTPUT} <Type>IMAP</Type>\n"
OUTPUT="${OUTPUT} <Server>imap.{labels.1}.{labels.0}</Server>\n"
OUTPUT="${OUTPUT} <Port>993</Port>\n"
OUTPUT="${OUTPUT} <DomainRequired>off</DomainRequired>\n"
OUTPUT="${OUTPUT} <LoginName></LoginName>\n"
OUTPUT="${OUTPUT} <SPA>off</SPA>\n"
OUTPUT="${OUTPUT} <SSL>on</SSL>\n"
OUTPUT="${OUTPUT} <AuthRequired>on</AuthRequired>\n"
OUTPUT="${OUTPUT} </Protocol>\n"
OUTPUT="${OUTPUT} <Protocol>\n"
OUTPUT="${OUTPUT} <Type>SMTP</Type>\n"
OUTPUT="${OUTPUT} <Server>smtp.{labels.1}.{labels.0}</Server>\n"
OUTPUT="${OUTPUT} <Port>465</Port>\n"
OUTPUT="${OUTPUT} <DomainRequired>off</DomainRequired>\n"
OUTPUT="${OUTPUT} <LoginName></LoginName>\n"
OUTPUT="${OUTPUT} <SPA>off</SPA>\n"
OUTPUT="${OUTPUT} <SSL>on</SSL>\n"
OUTPUT="${OUTPUT} <AuthRequired>on</AuthRequired>\n"
OUTPUT="${OUTPUT} </Protocol>\n"
OUTPUT="${OUTPUT} </Account>\n"
OUTPUT="${OUTPUT} </Response>\n"
OUTPUT="${OUTPUT}</Autodiscover>\` 200\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT}\n"
# --- 2. Outlook New / Microsoft 365 (JSON v2) ---
# Outlook New sends GET to /autodiscover/autodiscover.json?Protocol=AutodiscoverV1&...
# The response must point back to the XML endpoint.
OUTPUT="${OUTPUT} # Outlook New/365 (JSON → Redirect zu XML)\n"
OUTPUT="${OUTPUT} route /autodiscover/autodiscover.json {\n"
OUTPUT="${OUTPUT} header Content-Type \"application/json\"\n"
OUTPUT="${OUTPUT} respond \`{\"Protocol\":\"AutodiscoverV1\",\"Url\":\"https://autodiscover.{labels.1}.{labels.0}/autodiscover/autodiscover.xml\"}\` 200\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT}\n"
# --- 3. Thunderbird Autoconfig ---
OUTPUT="${OUTPUT} # Thunderbird Autoconfig\n"
OUTPUT="${OUTPUT} route /mail/config-v1.1.xml {\n"
OUTPUT="${OUTPUT} header Content-Type \"application/xml\"\n"
OUTPUT="${OUTPUT} respond \`<?xml version=\"1.0\"?>\n"
OUTPUT="${OUTPUT}<clientConfig version=\"1.1\">\n"
OUTPUT="${OUTPUT} <emailProvider id=\"{labels.1}.{labels.0}\">\n"
OUTPUT="${OUTPUT} <displayName>{labels.1}.{labels.0} Mail</displayName>\n"
OUTPUT="${OUTPUT} <domain>{labels.1}.{labels.0}</domain>\n"
OUTPUT="${OUTPUT} <incomingServer type=\"imap\">\n"
OUTPUT="${OUTPUT} <hostname>imap.{labels.1}.{labels.0}</hostname>\n"
OUTPUT="${OUTPUT} <port>993</port>\n"
OUTPUT="${OUTPUT} <socketType>SSL</socketType>\n"
OUTPUT="${OUTPUT} <authentication>password-cleartext</authentication>\n"
OUTPUT="${OUTPUT} <username>%%EMAILADDRESS%%</username>\n"
OUTPUT="${OUTPUT} </incomingServer>\n"
OUTPUT="${OUTPUT} <outgoingServer type=\"smtp\">\n"
OUTPUT="${OUTPUT} <hostname>smtp.{labels.1}.{labels.0}</hostname>\n"
OUTPUT="${OUTPUT} <port>465</port>\n"
OUTPUT="${OUTPUT} <socketType>SSL</socketType>\n"
OUTPUT="${OUTPUT} <authentication>password-cleartext</authentication>\n"
OUTPUT="${OUTPUT} <username>%%EMAILADDRESS%%</username>\n"
OUTPUT="${OUTPUT} </outgoingServer>\n"
OUTPUT="${OUTPUT} </emailProvider>\n"
OUTPUT="${OUTPUT}</clientConfig>\` 200\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT}\n"
# --- 4. Apple MobileConfig (inline, like autodiscover/autoconfig) ---
OUTPUT="${OUTPUT} # Apple MobileConfig (inline respond)\n"
OUTPUT="${OUTPUT} route /apple {\n"
OUTPUT="${OUTPUT} header Content-Type \"application/x-apple-aspen-config; charset=utf-8\"\n"
OUTPUT="${OUTPUT} header Content-Disposition \"attachment; filename=email.mobileconfig\"\n"
OUTPUT="${OUTPUT} respond \`<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
OUTPUT="${OUTPUT}<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
OUTPUT="${OUTPUT}<plist version=\"1.0\">\n"
OUTPUT="${OUTPUT}<dict>\n"
OUTPUT="${OUTPUT} <key>PayloadContent</key>\n"
OUTPUT="${OUTPUT} <array>\n"
OUTPUT="${OUTPUT} <dict>\n"
OUTPUT="${OUTPUT} <key>EmailAccountDescription</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>EmailAccountName</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>EmailAccountType</key>\n"
OUTPUT="${OUTPUT} <string>EmailTypeIMAP</string>\n"
OUTPUT="${OUTPUT} <key>EmailAddress</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerAuthentication</key>\n"
OUTPUT="${OUTPUT} <string>EmailAuthPassword</string>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerHostName</key>\n"
OUTPUT="${OUTPUT} <string>imap.{labels.1}.{labels.0}</string>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerPortNumber</key>\n"
OUTPUT="${OUTPUT} <integer>993</integer>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerUseSSL</key>\n"
OUTPUT="${OUTPUT} <true/>\n"
OUTPUT="${OUTPUT} <key>IncomingMailServerUsername</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerAuthentication</key>\n"
OUTPUT="${OUTPUT} <string>EmailAuthPassword</string>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerHostName</key>\n"
OUTPUT="${OUTPUT} <string>smtp.{labels.1}.{labels.0}</string>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerPortNumber</key>\n"
OUTPUT="${OUTPUT} <integer>465</integer>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerUseSSL</key>\n"
OUTPUT="${OUTPUT} <true/>\n"
OUTPUT="${OUTPUT} <key>OutgoingMailServerUsername</key>\n"
OUTPUT="${OUTPUT} <string>{query.email}</string>\n"
OUTPUT="${OUTPUT} <key>OutgoingPasswordRequired</key>\n"
OUTPUT="${OUTPUT} <true/>\n"
OUTPUT="${OUTPUT} <key>PayloadDescription</key>\n"
OUTPUT="${OUTPUT} <string>E-Mail Konfiguration</string>\n"
OUTPUT="${OUTPUT} <key>PayloadDisplayName</key>\n"
OUTPUT="${OUTPUT} <string>{labels.1}.{labels.0} E-Mail</string>\n"
OUTPUT="${OUTPUT} <key>PayloadIdentifier</key>\n"
OUTPUT="${OUTPUT} <string>com.{labels.1}.{labels.0}.email.account</string>\n"
OUTPUT="${OUTPUT} <key>PayloadType</key>\n"
OUTPUT="${OUTPUT} <string>com.apple.mail.managed</string>\n"
OUTPUT="${OUTPUT} <key>PayloadUUID</key>\n"
OUTPUT="${OUTPUT} <string>A1B2C3D4-E5F6-7890-ABCD-EF1234567890</string>\n"
OUTPUT="${OUTPUT} <key>PayloadVersion</key>\n"
OUTPUT="${OUTPUT} <integer>1</integer>\n"
OUTPUT="${OUTPUT} </dict>\n"
OUTPUT="${OUTPUT} </array>\n"
OUTPUT="${OUTPUT} <key>PayloadDescription</key>\n"
OUTPUT="${OUTPUT} <string>E-Mail Einrichtung</string>\n"
OUTPUT="${OUTPUT} <key>PayloadDisplayName</key>\n"
OUTPUT="${OUTPUT} <string>{labels.1}.{labels.0} E-Mail</string>\n"
OUTPUT="${OUTPUT} <key>PayloadIdentifier</key>\n"
OUTPUT="${OUTPUT} <string>com.{labels.1}.{labels.0}.email.profile</string>\n"
OUTPUT="${OUTPUT} <key>PayloadOrganization</key>\n"
OUTPUT="${OUTPUT} <string>Bay Area Affiliates, Inc.</string>\n"
OUTPUT="${OUTPUT} <key>PayloadRemovalDisallowed</key>\n"
OUTPUT="${OUTPUT} <false/>\n"
OUTPUT="${OUTPUT} <key>PayloadType</key>\n"
OUTPUT="${OUTPUT} <string>Configuration</string>\n"
OUTPUT="${OUTPUT} <key>PayloadUUID</key>\n"
OUTPUT="${OUTPUT} <string>F0E1D2C3-B4A5-6789-0FED-CBA987654321</string>\n"
OUTPUT="${OUTPUT} <key>PayloadVersion</key>\n"
OUTPUT="${OUTPUT} <integer>1</integer>\n"
OUTPUT="${OUTPUT}</dict>\n"
OUTPUT="${OUTPUT}</plist>\` 200\n"
OUTPUT="${OUTPUT} }\n"
# --- 5. Samsung Email (also uses autoconfig, no extra block needed) ---
# The Samsung Email app tries:
#   1. https://autoconfig.<domain>/mail/config-v1.1.xml (= Thunderbird format, already covered)
#   2. Alternatively: the Outlook autodiscover XML
# → No separate block required.
OUTPUT="${OUTPUT}}\n\n"
# =====================================================================
# Email-setup snippet (QR-code page for iPhone)
# =====================================================================
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
OUTPUT="${OUTPUT}# Email-Setup Snippet (QR-Code Seite)\n"
OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
OUTPUT="${OUTPUT}(email_setup_page) {\n"
OUTPUT="${OUTPUT} route /email-setup* {\n"
OUTPUT="${OUTPUT} uri strip_prefix /email-setup\n"
OUTPUT="${OUTPUT} root * /var/www/email-setup\n"
OUTPUT="${OUTPUT} try_files {path} /setup.html\n"
OUTPUT="${OUTPUT} file_server\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT}}\n\n"
# Node hostname always comes first (default cert of the DMS)
echo " → Node-Hostname Block: $NODE_HOSTNAME"
OUTPUT="${OUTPUT}# Node-Hostname (Default-Cert für DMS Fallback)\n"
OUTPUT="${OUTPUT}${NODE_HOSTNAME} {\n"
OUTPUT="${OUTPUT} tls {\n"
OUTPUT="${OUTPUT} dns cloudflare {env.CLOUDFLARE_API_TOKEN}\n"
OUTPUT="${OUTPUT} }\n"
OUTPUT="${OUTPUT} respond \"OK\" 200\n"
OUTPUT="${OUTPUT}}\n\n"
# Wildcard blocks + webmail + autodiscover per customer domain
for domain in $DOMAINS; do
    echo " → Wildcard Block: *.${domain}"
    echo " → Webmail Block: webmail.${domain}"
    echo " → Autodiscover Block: autodiscover.${domain}, autoconfig.${domain}"
    echo " → Email-Setup Block: webmail.${domain}/email-setup"
    # Wildcard-cert block (for cert generation + fallback)
    OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n"
    OUTPUT="${OUTPUT}# ${domain}\n"
    OUTPUT="${OUTPUT}# ═══════════════════════════════════════════════\n\n"
    OUTPUT="${OUTPUT}# Wildcard-Cert für $domain\n"
    OUTPUT="${OUTPUT}*.${domain}, ${domain} {\n"
    OUTPUT="${OUTPUT} tls {\n"
    OUTPUT="${OUTPUT} dns cloudflare {env.CLOUDFLARE_API_TOKEN}\n"
    OUTPUT="${OUTPUT} }\n"
    OUTPUT="${OUTPUT} respond \"OK\" 200\n"
    OUTPUT="${OUTPUT}}\n\n"
    # Webmail block (Roundcube + email setup)
    OUTPUT="${OUTPUT}# Roundcube Webmail + Email-Setup für $domain\n"
    OUTPUT="${OUTPUT}webmail.${domain} {\n"
    OUTPUT="${OUTPUT} import email_setup_page\n"
    OUTPUT="${OUTPUT} reverse_proxy roundcube:80\n"
    OUTPUT="${OUTPUT} encode gzip\n"
    OUTPUT="${OUTPUT} log {\n"
    OUTPUT="${OUTPUT} output stderr\n"
    OUTPUT="${OUTPUT} format console\n"
    OUTPUT="${OUTPUT} }\n"
    OUTPUT="${OUTPUT}}\n\n"
    # Autodiscover / autoconfig block
    OUTPUT="${OUTPUT}# Autodiscover/Autoconfig für $domain\n"
    OUTPUT="${OUTPUT}autodiscover.${domain}, autoconfig.${domain} {\n"
    OUTPUT="${OUTPUT} import email_settings\n"
    OUTPUT="${OUTPUT} respond \"Autodiscover Service Online\" 200\n"
    OUTPUT="${OUTPUT}}\n\n"
done
# --- Output (printf '%b' expands the \n escapes collected above) ---
if [ "$DRY_RUN" = "true" ]; then
    echo ""
    echo "--- VORSCHAU ---"
    printf '%b' "$OUTPUT"
    echo "--- ENDE ---"
else
    printf '%b' "$OUTPUT" > "$OUTPUT_FILE"
    echo " ✅ Geschrieben: $OUTPUT_FILE"
fi
# --- Check the import in the Caddyfile ---
CADDYFILE="$SCRIPT_DIR/Caddyfile"
if [ -f "$CADDYFILE" ]; then
    if grep -q "import mail_certs" "$CADDYFILE"; then
        echo " ✅ 'import mail_certs' bereits im Caddyfile vorhanden."
    else
        echo ""
        echo "⚠️ AKTION: 'import mail_certs' fehlt noch im Caddyfile!"
        echo " Bitte nach dem globalen {} Block eintragen:"
        echo ""
        echo " { ← globaler Block"
        echo " email {env.CLOUDFLARE_EMAIL}"
        echo " ..."
        echo " }"
        echo " import mail_certs ← hier einfügen"
    fi
    # Check whether the old email_autodiscover reference can be removed
    if grep -q "import email_autodiscover" "$CADDYFILE"; then
        echo ""
        echo "⚠️ AUFRÄUMEN: 'import email_autodiscover' im Caddyfile gefunden!"
        echo " Das Snippet (email_settings) ist jetzt in mail_certs eingebettet."
        echo " Bitte 'import email_autodiscover' aus dem Caddyfile entfernen."
    fi
fi
# --- Check whether obsolete files still exist ---
if [ -f "$SCRIPT_DIR/email_autodiscover" ]; then
    echo ""
    echo "⚠️ AUFRÄUMEN: Datei 'email_autodiscover' kann entfernt werden!"
    echo " Das Snippet ist jetzt in mail_certs eingebettet."
fi
if [ -f "$SCRIPT_DIR/email-setup/autodiscover.xml" ]; then
    echo ""
    echo "⚠️ AUFRÄUMEN: 'email-setup/autodiscover.xml' kann entfernt werden!"
    echo " Statische XML wird nicht mehr benötigt (dynamisch über Caddy)."
fi
echo ""
echo "============================================================"
echo "🔄 Nächste Schritte:"
echo ""
echo "1. Caddy Konfiguration validieren:"
echo " docker exec $CADDY_CONTAINER caddy validate --config /etc/caddy/Caddyfile"
echo ""
echo "2. Caddy neu laden (kein Downtime):"
echo " docker exec $CADDY_CONTAINER caddy reload --config /etc/caddy/Caddyfile"
echo ""
echo "3. Cert-Generierung verfolgen (~30s pro Domain):"
echo " docker logs -f $CADDY_CONTAINER 2>&1 | grep -i 'certificate\|acme\|tls\|error'"
echo ""
echo "4. Autodiscover testen:"
for domain in $DOMAINS; do
    echo " # Thunderbird:"
    echo " curl -s https://autoconfig.${domain}/mail/config-v1.1.xml | head -10"
    echo " # Outlook:"
    echo " curl -s https://autodiscover.${domain}/autodiscover/autodiscover.xml | head -10"
    echo " # Apple (sollte .mobileconfig liefern):"
    echo " curl -sI \"https://autodiscover.${domain}/apple?email=test@${domain}\""
    echo ""
done
echo "5. iPhone Email-Setup QR-Code Seite:"
for domain in $DOMAINS; do
    echo " https://webmail.${domain}/email-setup"
done
echo "============================================================"

View File

@@ -1,76 +0,0 @@
#!/bin/bash
# create-queue.sh
# Creates the SQS main queue plus its dead-letter queue (DLQ) for one
# domain's email delivery. Idempotent: when a queue already exists the
# create call fails (stderr suppressed) and we fall back to get-queue-url.
# Usage: DOMAIN=andreasknuth.de ./create-queue.sh
set -e
AWS_REGION="us-east-2"
# Domain comes from the environment; abort with a usage hint when missing.
if [ -z "$DOMAIN" ]; then
echo "Error: DOMAIN environment variable not set"
echo "Usage: DOMAIN=andreasknuth.de $0"
exit 1
fi
# bizmatch.net -> bizmatch-net-queue (dots are not allowed in SQS queue names)
QUEUE_NAME="${DOMAIN//./-}-queue"
DLQ_NAME="${QUEUE_NAME}-dlq"
echo "========================================"
echo "Creating SQS Queue for Email Delivery"
echo "========================================"
echo ""
echo "📧 Domain: $DOMAIN"
echo " Region: $AWS_REGION"
echo ""
# Create the dead-letter queue first (retention 1209600 s = 14 days);
# its ARN is needed for the main queue's RedrivePolicy below.
echo "Creating DLQ: $DLQ_NAME"
DLQ_URL=$(aws sqs create-queue \
--queue-name "${DLQ_NAME}" \
--region "${AWS_REGION}" \
--attributes '{
"MessageRetentionPeriod": "1209600"
}' \
--query 'QueueUrl' \
--output text 2>/dev/null || aws sqs get-queue-url --queue-name "${DLQ_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
echo " ✓ DLQ URL: ${DLQ_URL}"
# Look up the DLQ ARN (RedrivePolicy references the ARN, not the URL)
DLQ_ARN=$(aws sqs get-queue-attributes \
--queue-url "${DLQ_URL}" \
--region "${AWS_REGION}" \
--attribute-names QueueArn \
--query 'Attributes.QueueArn' \
--output text)
echo " ✓ DLQ ARN: ${DLQ_ARN}"
echo ""
# Create the main queue with a redrive policy: after 3 failed receives a
# message is moved to the DLQ. Long polling (20 s) and a 5-minute
# visibility timeout match the worker's processing settings.
echo "Creating Main Queue: $QUEUE_NAME"
QUEUE_URL=$(aws sqs create-queue \
--queue-name "${QUEUE_NAME}" \
--region "${AWS_REGION}" \
--attributes "{
\"VisibilityTimeout\": \"300\",
\"MessageRetentionPeriod\": \"86400\",
\"ReceiveMessageWaitTimeSeconds\": \"20\",
\"RedrivePolicy\": \"{\\\"deadLetterTargetArn\\\":\\\"${DLQ_ARN}\\\",\\\"maxReceiveCount\\\":\\\"3\\\"}\"
}" \
--query 'QueueUrl' \
--output text 2>/dev/null || aws sqs get-queue-url --queue-name "${QUEUE_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
echo " ✓ Queue URL: ${QUEUE_URL}"
echo ""
echo "========================================"
echo "✅ Queue created successfully!"
echo "========================================"
echo ""
echo "Configuration:"
echo " Domain: $DOMAIN"
echo " Queue: $QUEUE_NAME"
echo " Queue URL: $QUEUE_URL"
echo " DLQ: $DLQ_NAME"
echo " Region: $AWS_REGION"

View File

@@ -1,50 +0,0 @@
# Compose file for the (legacy) Python per-domain email worker.
# One container per domain; the domain is injected via WORKER_DOMAIN.
services:
  worker:
    image: python:3.11-slim
    container_name: email-worker-${WORKER_DOMAIN}
    restart: unless-stopped
    network_mode: host # host networking so the worker can reach local Postfix
    # Mount the worker code read-only
    volumes:
      - ./worker.py:/app/worker.py:ro
    working_dir: /app
    # Install Python dependencies at start-up, then run the worker unbuffered
    command: >
      sh -c "pip install --no-cache-dir boto3 &&
      python -u worker.py"
    environment:
      # ⚠️ IMPORTANT: WORKER_DOMAIN must be provided by the caller!
      - WORKER_DOMAIN=${WORKER_DOMAIN}
      # AWS credentials
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY}
      # Worker settings (defaults match the SQS long-poll configuration)
      - POLL_INTERVAL=${POLL_INTERVAL:-20}
      - MAX_MESSAGES=${MAX_MESSAGES:-10}
      - VISIBILITY_TIMEOUT=${VISIBILITY_TIMEOUT:-300}
      # SMTP configuration for local delivery
      - SMTP_HOST=${SMTP_HOST:-localhost}
      - SMTP_PORT=${SMTP_PORT:-25}
      - SMTP_USE_TLS=${SMTP_USE_TLS:-false}
      - SMTP_USER=${SMTP_USER:-}
      - SMTP_PASS=${SMTP_PASS:-}
    # Cap container log size (10 MB x 5 files)
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "5"
    # Healthy as long as the worker process is alive
    healthcheck:
      test: ["CMD", "pgrep", "-f", "worker.py"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 10s

View File

@@ -0,0 +1,38 @@
# AWS credentials (or use IAM role / instance profile)
AWS_REGION=us-east-2
# AWS_ACCESS_KEY_ID=
# AWS_SECRET_ACCESS_KEY=
# Domains: comma-separated list OR file path (one domain per line, # = comment)
# DOMAINS=andreasknuth.de,bizmatch.net
DOMAINS_FILE=/etc/email-worker/domains.txt
# SMTP (Docker Mail Server) — used for local delivery of incoming mail
SMTP_HOST=localhost
SMTP_PORT=25
SMTP_USE_TLS=false
SMTP_USER=
SMTP_PASS=
SMTP_POOL_SIZE=5
# Internal SMTP port (bypass transport_maps)
INTERNAL_SMTP_PORT=25
# Worker settings (SQS long-poll: wait time / batch size / visibility)
WORKER_THREADS=10
POLL_INTERVAL=20
MAX_MESSAGES=10
VISIBILITY_TIMEOUT=300
# DynamoDB tables (rules = OOO/forwards, messages = bounce lookup, blocked = sender blocklist)
DYNAMODB_RULES_TABLE=email-rules
DYNAMODB_MESSAGES_TABLE=ses-outbound-messages
DYNAMODB_BLOCKED_TABLE=email-blocked-senders
# Bounce handling (retries while waiting for the bounce record to appear)
BOUNCE_LOOKUP_RETRIES=3
BOUNCE_LOOKUP_DELAY=1.0
# Monitoring (Prometheus metrics / HTTP health endpoint)
METRICS_PORT=8000
HEALTH_PORT=8080

View File

@@ -0,0 +1,34 @@
# ── Build stage ──────────────────────────────────────────────────
# Compile TypeScript to dist/ using the full (dev) dependency set.
FROM node:20-slim AS builder
WORKDIR /app
COPY package.json package-lock.json* ./
RUN npm ci
COPY tsconfig.json ./
COPY src/ ./src/
RUN npx tsc
# ── Run stage ────────────────────────────────────────────────────
FROM node:20-slim
WORKDIR /app
# Only production deps
COPY package.json package-lock.json* ./
RUN npm ci --omit=dev && npm cache clean --force
# Compiled JS from build stage
COPY --from=builder /app/dist ./dist
# Config directory (mount domains.txt here) and log directory
RUN mkdir -p /etc/email-worker /var/log/email-worker
# 8000 = Prometheus metrics, 8080 = health endpoint
EXPOSE 8000 8080
# Probe the health endpoint with Node's built-in fetch
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
CMD node -e "fetch('http://localhost:8080').then(r => r.ok ? process.exit(0) : process.exit(1)).catch(() => process.exit(1))"
CMD ["node", "dist/main.js"]

View File

@@ -0,0 +1,21 @@
# Compose file for the unified TypeScript email worker.
services:
  email-worker:
    build: .
    container_name: email-worker-ts
    restart: unless-stopped
    env_file: .env
    volumes:
      - ./domains.txt:/etc/email-worker/domains.txt:ro
      - ./logs:/var/log/email-worker
    ports:
      - "9000:8000" # Prometheus metrics (Host:Container)
      - "9090:8080" # Health check (Host:Container)
    # Connect to DMS on the host or Docker network
    extra_hosts:
      - "host.docker.internal:host-gateway"
    environment:
      - SMTP_HOST=host.docker.internal
      - SMTP_PORT=25
# NOTE(review): the 'worker-logs' named volume is declared but the service
# mounts the ./logs bind mount instead — confirm whether it is still needed.
volumes:
  worker-logs:

3190
email-worker-nodejs/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,37 @@
{
"name": "unified-email-worker",
"version": "2.0.0",
"description": "Unified multi-domain email worker (TypeScript)",
"main": "dist/main.js",
"scripts": {
"build": "tsc",
"start": "node dist/main.js",
"dev": "tsx src/main.ts",
"lint": "eslint src/",
"typecheck": "tsc --noEmit"
},
"dependencies": {
"@aws-sdk/client-dynamodb": "^3.700.0",
"@aws-sdk/client-s3": "^3.700.0",
"@aws-sdk/client-ses": "^3.700.0",
"@aws-sdk/client-sqs": "^3.700.0",
"@aws-sdk/lib-dynamodb": "^3.700.0",
"mailparser": "^3.7.1",
"nodemailer": "^6.9.16",
"picomatch": "^4.0.2",
"pino": "^9.5.0",
"pino-pretty": "^13.0.0",
"prom-client": "^15.1.3"
},
"devDependencies": {
"@types/mailparser": "^3.4.5",
"@types/nodemailer": "^6.4.17",
"@types/picomatch": "^3.0.1",
"@types/node": "^22.10.0",
"tsx": "^4.19.0",
"typescript": "^5.7.0"
},
"engines": {
"node": ">=20.0.0"
}
}

View File

@@ -0,0 +1,230 @@
/**
* DynamoDB operations handler
*
* Tables:
* - email-rules → OOO / Forward rules per address
* - ses-outbound-messages → Bounce info (MessageId → original sender)
* - email-blocked-senders → Blocked patterns per address
*/
import { DynamoDBClient } from '@aws-sdk/client-dynamodb';
import {
DynamoDBDocumentClient,
GetCommand,
BatchGetCommand,
} from '@aws-sdk/lib-dynamodb';
import { config } from '../config.js';
import { log } from '../logger.js';
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
/** Item shape of the email-rules table (keyed by email_address). */
export interface EmailRule {
  email_address: string;
  ooo_active?: boolean;       // out-of-office autoresponder enabled
  ooo_message?: string;       // autoresponder body
  ooo_content_type?: string;  // MIME type of ooo_message
  forwards?: string[];        // forwarding targets for this address
  [key: string]: unknown;     // tolerate extra attributes stored in the table
}
/** Bounce record from the ses-outbound-messages table (keyed by MessageId). */
export interface BounceInfo {
  original_source: string;      // original sender of the bounced message
  bounceType: string;
  bounceSubType: string;
  bouncedRecipients: string[];  // addresses that bounced
  timestamp: string;
}
// ---------------------------------------------------------------------------
// Handler
// ---------------------------------------------------------------------------
/**
 * Thin wrapper around DynamoDBDocumentClient for the three worker tables.
 * All read methods degrade gracefully: when `available` is false (or a call
 * fails) they return null / empty results instead of throwing.
 */
export class DynamoDBHandler {
  private docClient: DynamoDBDocumentClient;
  // Toggled by initialize()/verifyTables(); read methods short-circuit on false.
  public available = false;
  constructor() {
    const raw = new DynamoDBClient({ region: config.awsRegion });
    this.docClient = DynamoDBDocumentClient.from(raw, {
      marshallOptions: { removeUndefinedValues: true },
    });
    this.initialize();
  }
  // -----------------------------------------------------------------------
  // Init
  // -----------------------------------------------------------------------
  private initialize(): void {
    // Optimistically mark as available; real connectivity is exercised by
    // verifyTables() (or simply by the first GetItem). A DescribeTable probe
    // here would be a comparatively heavy call for no extra information.
    this.available = true;
    log('✓ DynamoDB client initialized');
  }
  /**
   * Verify tables exist by doing a cheap GetItem on each.
   * Called once during startup. Updates `available` accordingly.
   */
  async verifyTables(): Promise<boolean> {
    try {
      // Probe all three tables in parallel with a key that never exists;
      // a missing table raises, a missing item does not.
      await Promise.all([
        this.docClient.send(
          new GetCommand({ TableName: config.rulesTable, Key: { email_address: '__probe__' } }),
        ),
        this.docClient.send(
          new GetCommand({ TableName: config.messagesTable, Key: { MessageId: '__probe__' } }),
        ),
        this.docClient.send(
          new GetCommand({ TableName: config.blockedTable, Key: { email_address: '__probe__' } }),
        ),
      ]);
      this.available = true;
      log('✓ DynamoDB tables connected successfully');
      return true;
    } catch (err: any) {
      log(`⚠ DynamoDB not fully available: ${err.message ?? err}`, 'WARNING');
      this.available = false;
      return false;
    }
  }
  // -----------------------------------------------------------------------
  // Email rules
  // -----------------------------------------------------------------------
  /** Fetch the OOO/forward rule for one address; null when absent or on error. */
  async getEmailRules(emailAddress: string): Promise<EmailRule | null> {
    if (!this.available) return null;
    try {
      const resp = await this.docClient.send(
        new GetCommand({
          TableName: config.rulesTable,
          Key: { email_address: emailAddress },
        }),
      );
      return (resp.Item as EmailRule) ?? null;
    } catch (err: any) {
      // A missing table is expected in partial deployments — don't log it.
      if (err.name !== 'ResourceNotFoundException') {
        log(`⚠ DynamoDB error for ${emailAddress}: ${err.message ?? err}`, 'ERROR');
      }
      return null;
    }
  }
  // -----------------------------------------------------------------------
  // Bounce info
  // -----------------------------------------------------------------------
  /**
   * Look up the bounce record for a Message-ID, retrying because the record
   * may be written asynchronously after the bounce notification arrives.
   * Returns null when no record appears within the configured retries.
   */
  async getBounceInfo(
    messageId: string,
    workerName = 'unified',
  ): Promise<BounceInfo | null> {
    if (!this.available) return null;
    for (let attempt = 0; attempt < config.bounceLookupRetries; attempt++) {
      try {
        const resp = await this.docClient.send(
          new GetCommand({
            TableName: config.messagesTable,
            Key: { MessageId: messageId },
          }),
        );
        if (resp.Item) {
          // Normalize missing attributes to safe defaults.
          return {
            original_source: (resp.Item.original_source as string) ?? '',
            bounceType: (resp.Item.bounceType as string) ?? 'Unknown',
            bounceSubType: (resp.Item.bounceSubType as string) ?? 'Unknown',
            bouncedRecipients: (resp.Item.bouncedRecipients as string[]) ?? [],
            timestamp: (resp.Item.timestamp as string) ?? '',
          };
        }
        if (attempt < config.bounceLookupRetries - 1) {
          log(
            ` Bounce record not found yet, retrying in ${config.bounceLookupDelay}s ` +
            `(attempt ${attempt + 1}/${config.bounceLookupRetries})...`,
            'INFO',
            workerName,
          );
          await sleep(config.bounceLookupDelay * 1000);
        } else {
          log(
            `⚠ No bounce record found after ${config.bounceLookupRetries} attempts ` +
            `for Message-ID: ${messageId}`,
            'WARNING',
            workerName,
          );
          return null;
        }
      } catch (err: any) {
        log(
          `⚠ DynamoDB Error (attempt ${attempt + 1}/${config.bounceLookupRetries}): ` +
          `${err.message ?? err}`,
          'ERROR',
          workerName,
        );
        if (attempt < config.bounceLookupRetries - 1) {
          await sleep(config.bounceLookupDelay * 1000);
        } else {
          return null;
        }
      }
    }
    return null;
  }
  // -----------------------------------------------------------------------
  // Blocked senders
  // -----------------------------------------------------------------------
  /** Blocked sender patterns for one recipient; empty list on miss or error. */
  async getBlockedPatterns(emailAddress: string): Promise<string[]> {
    if (!this.available) return [];
    try {
      const resp = await this.docClient.send(
        new GetCommand({
          TableName: config.blockedTable,
          Key: { email_address: emailAddress },
        }),
      );
      return (resp.Item?.blocked_patterns as string[]) ?? [];
    } catch (err: any) {
      log(`⚠ Error getting block list for ${emailAddress}: ${err.message ?? err}`, 'ERROR');
      return [];
    }
  }
  /**
   * Blocked patterns for many recipients in one BatchGetItem call.
   * Every requested address is present in the result (empty list by default).
   */
  async batchGetBlockedPatterns(
    emailAddresses: string[],
  ): Promise<Record<string, string[]>> {
    const empty: Record<string, string[]> = {};
    for (const a of emailAddresses) empty[a] = [];
    if (!this.available || emailAddresses.length === 0) return empty;
    try {
      const keys = emailAddresses.map((a) => ({ email_address: a }));
      const resp = await this.docClient.send(
        new BatchGetCommand({
          RequestItems: {
            [config.blockedTable]: { Keys: keys },
          },
        }),
      );
      const items = resp.Responses?.[config.blockedTable] ?? [];
      const result: Record<string, string[]> = { ...empty };
      for (const item of items) {
        const addr = item.email_address as string;
        result[addr] = (item.blocked_patterns as string[]) ?? [];
      }
      return result;
    } catch (err: any) {
      log(`⚠ Batch blocklist check error: ${err.message ?? err}`, 'ERROR');
      return empty;
    }
  }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/** Resolve after the given number of milliseconds. */
function sleep(ms: number): Promise<void> {
  return new Promise<void>((done) => {
    setTimeout(done, ms);
  });
}

View File

@@ -0,0 +1,202 @@
/**
* S3 operations handler
*
* Responsibilities:
* - Download raw email from domain-specific bucket
* - Mark email metadata (processed / all-invalid / blocked)
* - Delete blocked emails
*/
import {
S3Client,
GetObjectCommand,
HeadObjectCommand,
CopyObjectCommand,
DeleteObjectCommand,
type S3ClientConfig,
} from '@aws-sdk/client-s3';
import { config, domainToBucketName } from '../config.js';
import { log } from '../logger.js';
/**
 * S3 access for raw inbound emails. Each domain has its own bucket
 * (see domainToBucketName). Processing status is tracked in S3 object
 * metadata via copy-in-place updates.
 */
export class S3Handler {
  private client: S3Client;
  constructor() {
    const opts: S3ClientConfig = { region: config.awsRegion };
    this.client = new S3Client(opts);
  }
  // -------------------------------------------------------------------------
  // Download
  // -------------------------------------------------------------------------
  /**
   * Download raw email bytes from S3.
   * Returns `null` when the object does not exist yet (caller should retry);
   * after 5 delivery attempts a missing object is treated as permanent.
   * Throws on permanent errors.
   */
  async getEmail(
    domain: string,
    messageId: string,
    receiveCount: number,
  ): Promise<Buffer | null> {
    const bucket = domainToBucketName(domain);
    try {
      const resp = await this.client.send(
        new GetObjectCommand({ Bucket: bucket, Key: messageId }),
      );
      const bytes = await resp.Body?.transformToByteArray();
      return bytes ? Buffer.from(bytes) : null;
    } catch (err: any) {
      // S3 is eventually consistent with the SQS notification: the object
      // may not be visible yet on early delivery attempts.
      if (err.name === 'NoSuchKey' || err.Code === 'NoSuchKey') {
        if (receiveCount < 5) {
          log(`⏳ S3 Object not found yet (Attempt ${receiveCount}). Retrying...`, 'WARNING');
          return null;
        }
        log('❌ S3 Object missing permanently after retries.', 'ERROR');
        throw err;
      }
      log(`❌ S3 Download Error: ${err.message ?? err}`, 'ERROR');
      throw err;
    }
  }
  // -------------------------------------------------------------------------
  // Metadata helpers (copy-in-place with updated metadata)
  // -------------------------------------------------------------------------
  /**
   * Read the object's current metadata, apply `patch`, drop `removeKeys`,
   * and write it back by copying the object onto itself with REPLACE.
   * (S3 metadata is immutable; copy-in-place is the only way to change it.)
   */
  private async updateMetadata(
    bucket: string,
    key: string,
    patch: Record<string, string>,
    removeKeys: string[] = [],
  ): Promise<void> {
    const head = await this.client.send(
      new HeadObjectCommand({ Bucket: bucket, Key: key }),
    );
    const metadata = { ...(head.Metadata ?? {}) };
    // Apply patch
    for (const [k, v] of Object.entries(patch)) {
      metadata[k] = v;
    }
    // Remove keys
    for (const k of removeKeys) {
      delete metadata[k];
    }
    await this.client.send(
      new CopyObjectCommand({
        Bucket: bucket,
        Key: key,
        CopySource: `${bucket}/${key}`,
        Metadata: metadata,
        MetadataDirective: 'REPLACE',
      }),
    );
  }
  // -------------------------------------------------------------------------
  // Mark helpers
  // -------------------------------------------------------------------------
  /**
   * Mark an email as delivered; optionally records recipients whose
   * mailboxes were invalid. Metadata failures are logged, not raised.
   */
  async markAsProcessed(
    domain: string,
    messageId: string,
    workerName: string,
    invalidInboxes?: string[],
  ): Promise<void> {
    const bucket = domainToBucketName(domain);
    try {
      const patch: Record<string, string> = {
        processed: 'true',
        processed_at: String(Math.floor(Date.now() / 1000)),
        processed_by: workerName,
        status: 'delivered',
      };
      if (invalidInboxes?.length) {
        patch['invalid_inboxes'] = invalidInboxes.join(',');
        log(`⚠ Invalid inboxes recorded: ${invalidInboxes.join(', ')}`, 'WARNING', workerName);
      }
      await this.updateMetadata(bucket, messageId, patch, [
        'processing_started',
        'queued_at',
      ]);
    } catch (err: any) {
      log(`Failed to mark as processed: ${err.message ?? err}`, 'WARNING', workerName);
    }
  }
  /**
   * Mark an email as failed because every recipient mailbox is invalid.
   * Metadata failures are logged, not raised.
   */
  async markAsAllInvalid(
    domain: string,
    messageId: string,
    invalidInboxes: string[],
    workerName: string,
  ): Promise<void> {
    const bucket = domainToBucketName(domain);
    try {
      await this.updateMetadata(
        bucket,
        messageId,
        {
          processed: 'true',
          processed_at: String(Math.floor(Date.now() / 1000)),
          processed_by: workerName,
          status: 'failed',
          error: 'All recipients are invalid (mailboxes do not exist)',
          invalid_inboxes: invalidInboxes.join(','),
        },
        ['processing_started', 'queued_at'],
      );
    } catch (err: any) {
      log(`Failed to mark as all invalid: ${err.message ?? err}`, 'WARNING', workerName);
    }
  }
  /**
   * Mark an email as blocked (sender matched a blocklist pattern).
   * Unlike the other mark helpers, failures here are re-thrown so the
   * caller does not proceed as if the block had been recorded.
   */
  async markAsBlocked(
    domain: string,
    messageId: string,
    blockedRecipients: string[],
    sender: string,
    workerName: string,
  ): Promise<void> {
    const bucket = domainToBucketName(domain);
    try {
      await this.updateMetadata(
        bucket,
        messageId,
        {
          processed: 'true',
          processed_at: String(Math.floor(Date.now() / 1000)),
          processed_by: workerName,
          status: 'blocked',
          blocked_recipients: blockedRecipients.join(','),
          blocked_sender: sender,
        },
        ['processing_started', 'queued_at'],
      );
      log('✓ Marked as blocked in S3 metadata', 'INFO', workerName);
    } catch (err: any) {
      log(`⚠ Failed to mark as blocked: ${err.message ?? err}`, 'ERROR', workerName);
      throw err;
    }
  }
  /** Delete a blocked email's object outright; re-throws on failure. */
  async deleteBlockedEmail(
    domain: string,
    messageId: string,
    workerName: string,
  ): Promise<void> {
    const bucket = domainToBucketName(domain);
    try {
      await this.client.send(
        new DeleteObjectCommand({ Bucket: bucket, Key: messageId }),
      );
      log('🗑 Deleted blocked email from S3', 'SUCCESS', workerName);
    } catch (err: any) {
      log(`⚠ Failed to delete blocked email: ${err.message ?? err}`, 'ERROR', workerName);
      throw err;
    }
  }
}

View File

@@ -0,0 +1,52 @@
/**
* SES operations handler
*
* Only used for:
* - Sending OOO replies to external addresses
* - Forwarding to external addresses
*/
import {
SESClient,
SendRawEmailCommand,
} from '@aws-sdk/client-ses';
import { config } from '../config.js';
import { log } from '../logger.js';
/**
 * Minimal SES wrapper used for outbound OOO replies and external forwards.
 */
export class SESHandler {
  private client: SESClient;

  constructor() {
    this.client = new SESClient({ region: config.awsRegion });
  }

  /**
   * Send a raw MIME message via SES to a single destination.
   * Never throws: returns true on success, false on any failure
   * (the failure is logged with the SES error code).
   */
  async sendRawEmail(
    source: string,
    destination: string,
    rawMessage: Buffer,
    workerName: string,
  ): Promise<boolean> {
    const command = new SendRawEmailCommand({
      Source: source,
      Destinations: [destination],
      RawMessage: { Data: rawMessage },
    });
    try {
      await this.client.send(command);
    } catch (err: any) {
      const code = err.name ?? err.Code ?? 'Unknown';
      log(
        `⚠ SES send failed to ${destination} (${code}): ${err.message ?? err}`,
        'ERROR',
        workerName,
      );
      return false;
    }
    return true;
  }
}

View File

@@ -0,0 +1,99 @@
/**
* SQS operations handler
*
* Responsibilities:
* - Resolve queue URL for a domain
* - Long-poll for messages
* - Delete processed messages
* - Report approximate queue size
*/
import {
SQSClient,
GetQueueUrlCommand,
ReceiveMessageCommand,
DeleteMessageCommand,
GetQueueAttributesCommand,
type Message,
} from '@aws-sdk/client-sqs';
import { config, domainToQueueName } from '../config.js';
import { log } from '../logger.js';
/**
 * SQS wrapper: queue-URL resolution, long polling, deletion and size probes.
 * All methods are per-domain; queue names are derived via domainToQueueName.
 */
export class SQSHandler {
  private client: SQSClient;

  constructor() {
    this.client = new SQSClient({ region: config.awsRegion });
  }

  /** Resolve queue URL for a domain. Returns null if queue does not exist. */
  async getQueueUrl(domain: string): Promise<string | null> {
    const name = domainToQueueName(domain);
    try {
      const { QueueUrl } = await this.client.send(
        new GetQueueUrlCommand({ QueueName: name }),
      );
      return QueueUrl ?? null;
    } catch (err: any) {
      const missing =
        err.name === 'QueueDoesNotExist' ||
        err.Code === 'AWS.SimpleQueueService.NonExistentQueue';
      if (missing) {
        log(`Queue not found for domain: ${domain}`, 'WARNING');
      } else {
        log(`Error getting queue URL for ${domain}: ${err.message ?? err}`, 'ERROR');
      }
      return null;
    }
  }

  /** Long-poll for messages (uses configured poll interval as wait time). */
  async receiveMessages(queueUrl: string): Promise<Message[]> {
    const request = new ReceiveMessageCommand({
      QueueUrl: queueUrl,
      MaxNumberOfMessages: config.maxMessages,
      WaitTimeSeconds: config.pollInterval,
      VisibilityTimeout: config.visibilityTimeout,
      // Needed downstream for retry counting and latency metrics.
      MessageSystemAttributeNames: ['ApproximateReceiveCount', 'SentTimestamp'],
    });
    try {
      const { Messages } = await this.client.send(request);
      return Messages ?? [];
    } catch (err: any) {
      log(`Error receiving messages: ${err.message ?? err}`, 'ERROR');
      return [];
    }
  }

  /** Delete a message from the queue after successful processing. */
  async deleteMessage(queueUrl: string, receiptHandle: string): Promise<void> {
    try {
      const request = new DeleteMessageCommand({
        QueueUrl: queueUrl,
        ReceiptHandle: receiptHandle,
      });
      await this.client.send(request);
    } catch (err: any) {
      // Failure to delete means the message will be redelivered — surface it.
      log(`Error deleting message: ${err.message ?? err}`, 'ERROR');
      throw err;
    }
  }

  /** Approximate number of messages in the queue. Returns 0 on error. */
  async getQueueSize(queueUrl: string): Promise<number> {
    try {
      const { Attributes } = await this.client.send(
        new GetQueueAttributesCommand({
          QueueUrl: queueUrl,
          AttributeNames: ['ApproximateNumberOfMessages'],
        }),
      );
      return parseInt(Attributes?.ApproximateNumberOfMessages ?? '0', 10);
    } catch {
      return 0;
    }
  }
}

View File

@@ -0,0 +1,118 @@
/**
* Configuration management for unified email worker
*
* All settings are read from environment variables with sensible defaults.
* Domain helpers (bucket name, queue name, internal check) are co-located here
* so every module can import { config, domainToBucket, ... } from './config'.
*/
import { readFileSync, existsSync } from 'node:fs';
// ---------------------------------------------------------------------------
// Config object
// ---------------------------------------------------------------------------
/** All worker settings, read once from the environment with defaults. */
export const config = {
  // AWS
  awsRegion: process.env.AWS_REGION ?? 'us-east-2',
  // Domains: inline comma-separated list and/or newline-separated file
  domainsList: process.env.DOMAINS ?? '',
  domainsFile: process.env.DOMAINS_FILE ?? '/etc/email-worker/domains.txt',
  // Worker
  workerThreads: parseInt(process.env.WORKER_THREADS ?? '10', 10),
  pollInterval: parseInt(process.env.POLL_INTERVAL ?? '20', 10),
  maxMessages: parseInt(process.env.MAX_MESSAGES ?? '10', 10),
  visibilityTimeout: parseInt(process.env.VISIBILITY_TIMEOUT ?? '300', 10),
  // SMTP delivery (local DMS)
  smtpHost: process.env.SMTP_HOST ?? 'localhost',
  smtpPort: parseInt(process.env.SMTP_PORT ?? '25', 10),
  smtpUseTls: (process.env.SMTP_USE_TLS ?? 'false').toLowerCase() === 'true',
  smtpUser: process.env.SMTP_USER ?? '',
  smtpPass: process.env.SMTP_PASS ?? '',
  smtpPoolSize: parseInt(process.env.SMTP_POOL_SIZE ?? '5', 10),
  // Internal SMTP port (for OOO / forwards to managed domains)
  internalSmtpPort: parseInt(process.env.INTERNAL_SMTP_PORT ?? '25', 10),
  // DynamoDB tables
  rulesTable: process.env.DYNAMODB_RULES_TABLE ?? 'email-rules',
  messagesTable: process.env.DYNAMODB_MESSAGES_TABLE ?? 'ses-outbound-messages',
  blockedTable: process.env.DYNAMODB_BLOCKED_TABLE ?? 'email-blocked-senders',
  // Bounce handling (lookup retries; delay in seconds, may be fractional)
  bounceLookupRetries: parseInt(process.env.BOUNCE_LOOKUP_RETRIES ?? '3', 10),
  bounceLookupDelay: parseFloat(process.env.BOUNCE_LOOKUP_DELAY ?? '1.0'),
  // Monitoring
  metricsPort: parseInt(process.env.METRICS_PORT ?? '8000', 10),
  healthPort: parseInt(process.env.HEALTH_PORT ?? '8080', 10),
  // Suffix appended to the dotted-to-dashed domain to form the queue name
  queueSuffix: process.env.QUEUE_SUFFIX ?? '-queue',
  standbyMode: (process.env.STANDBY_MODE ?? 'false').toLowerCase() === 'true',
} as const;
export type Config = typeof config;
// ---------------------------------------------------------------------------
// Managed domains (populated by loadDomains())
// ---------------------------------------------------------------------------
// Lower-cased domains we manage; backs the O(1) isInternalAddress() lookup.
const managedDomains = new Set<string>();
/**
 * Load the managed-domain list from the DOMAINS env var and/or the domains
 * file, refresh the internal lookup set, and return the de-duplicated list
 * (first-seen order preserved).
 */
export function loadDomains(): string[] {
  const collected: string[] = [];
  // Inline comma-separated list from the environment
  if (config.domainsList) {
    collected.push(
      ...config.domainsList
        .split(',')
        .map((entry) => entry.trim())
        .filter(Boolean),
    );
  }
  // One domain per line from the file; blank lines and #-comments skipped
  if (existsSync(config.domainsFile)) {
    for (const rawLine of readFileSync(config.domainsFile, 'utf-8').split('\n')) {
      const entry = rawLine.trim();
      if (entry && !entry.startsWith('#')) collected.push(entry);
    }
  }
  const unique = [...new Set(collected)];
  // Rebuild the lower-cased lookup set used by isInternalAddress()
  managedDomains.clear();
  unique.forEach((d) => managedDomains.add(d.toLowerCase()));
  return unique;
}
// ---------------------------------------------------------------------------
// Domain helpers
// ---------------------------------------------------------------------------
/** Check whether an email address belongs to one of our managed domains */
export function isInternalAddress(email: string): boolean {
  const at = email.indexOf('@');
  // Everything after the first '@' is treated as the domain part.
  return at >= 0 && managedDomains.has(email.slice(at + 1).toLowerCase());
}
/** Convert domain to SQS queue name: bizmatch.net → bizmatch-net-queue */
export function domainToQueueName(domain: string): string {
  // SQS queue names may not contain dots.
  return `${domain.split('.').join('-')}${config.queueSuffix}`;
}
/** Convert domain to S3 bucket name: bizmatch.net → bizmatch-net-emails */
export function domainToBucketName(domain: string): string {
  return `${domain.split('.').join('-')}-emails`;
}

View File

@@ -0,0 +1,62 @@
/**
* Sender blocklist checking with wildcard / glob support
*
* Uses picomatch for pattern matching (equivalent to Python's fnmatch).
* Patterns are stored per-recipient in DynamoDB.
*/
import picomatch from 'picomatch';
import type { DynamoDBHandler } from '../aws/dynamodb.js';
import { log } from '../logger.js';
/**
 * Reduce a From-header value to the bare, lower-cased address.
 * "John Doe <John@Example.com>" → "john@example.com"
 */
function extractAddress(sender: string): string {
  const angled = /<([^>]+)>/.exec(sender);
  const candidate = angled ? angled[1] : sender;
  return candidate.trim().toLowerCase();
}
/**
 * Checks inbound senders against per-recipient glob blocklists stored in
 * DynamoDB (email-blocked-senders table).
 */
export class BlocklistChecker {
  constructor(private dynamodb: DynamoDBHandler) {}
  /**
   * Batch-check whether a sender is blocked for each recipient.
   * Uses a single batch DynamoDB call for efficiency.
   * Returns a map recipient → true when ANY sender identity matches ANY of
   * that recipient's patterns.
   */
  async batchCheckBlockedSenders(
    recipients: string[],
    senders: string[], // all sender identities to test (e.g. From, Return-Path)
    workerName: string,
  ): Promise<Record<string, boolean>> {
    const patternsByRecipient = await this.dynamodb.batchGetBlockedPatterns(recipients);
    // Normalize every supplied address (strip display name, lower-case)
    const sendersClean = senders.map(s => extractAddress(s)).filter(Boolean);
    const result: Record<string, boolean> = {};
    for (const recipient of recipients) {
      const patterns = patternsByRecipient[recipient] ?? [];
      let isBlocked = false;
      for (const pattern of patterns) {
        for (const senderClean of sendersClean) {
          // Glob match (picomatch ≈ Python fnmatch); patterns are lower-cased.
          if (picomatch.isMatch(senderClean, pattern.toLowerCase())) {
            log(
              `⛔ BLOCKED: Sender ${senderClean} matches pattern '${pattern}' for inbox ${recipient}`,
              'WARNING',
              workerName,
            );
            isBlocked = true;
            break;
          }
        }
        if (isBlocked) break;
      }
      result[recipient] = isBlocked;
    }
    return result;
  }
}

View File

@@ -0,0 +1,190 @@
/**
* Bounce detection and header rewriting
*
* When Amazon SES returns a bounce, the From header is
* mailer-daemon@amazonses.com. We look up the original sender
* in DynamoDB and rewrite the headers so the bounce appears
* to come from the actual bounced recipient.
*/
import type { ParsedMail } from 'mailparser';
import type { DynamoDBHandler } from '../aws/dynamodb.js';
import { isSesBounceNotification, getHeader } from './parser.js';
import { log } from '../logger.js';
/** Outcome of BounceHandler.applyBounceLogic for one message. */
export interface BounceResult {
  /** Updated raw bytes (headers rewritten if bounce was detected) */
  rawBytes: Buffer;
  /** Whether bounce was detected AND headers were actually modified */
  modified: boolean;
  /** Whether this email is a bounce notification at all */
  isBounce: boolean;
  /** The effective From address (rewritten bounced recipient, or original) */
  fromAddr: string;
}
/**
 * Rewrites SES bounce notifications so they appear to come from the
 * bounced recipient rather than mailer-daemon@amazonses.com.
 */
export class BounceHandler {
  constructor(private dynamodb: DynamoDBHandler) {}
  /**
   * Detect SES bounce, look up original sender in DynamoDB,
   * and rewrite headers in the raw buffer.
   *
   * We operate on the raw Buffer because we need to preserve
   * the original MIME structure exactly, only swapping specific
   * header lines. mailparser's ParsedMail is read-only.
   *
   * Non-bounce messages (and bounces we cannot resolve) are returned
   * unchanged with `modified: false`.
   */
  async applyBounceLogic(
    parsed: ParsedMail,
    rawBytes: Buffer,
    subject: string,
    workerName = 'unified',
  ): Promise<BounceResult> {
    if (!isSesBounceNotification(parsed)) {
      return {
        rawBytes,
        modified: false,
        isBounce: false,
        fromAddr: parsed.from?.text ?? '',
      };
    }
    log('🔍 Detected SES MAILER-DAEMON bounce notification', 'INFO', workerName);
    // Extract the Message-ID of the bounce notification: strip the angle
    // brackets and keep only the part before '@' (the DynamoDB lookup key).
    const rawMessageId = getHeader(parsed, 'message-id')
      .replace(/^</, '')
      .replace(/>$/, '')
      .split('@')[0];
    if (!rawMessageId) {
      log('⚠ Could not extract Message-ID from bounce notification', 'WARNING', workerName);
      return {
        rawBytes,
        modified: false,
        isBounce: true,
        fromAddr: parsed.from?.text ?? '',
      };
    }
    log(` Looking up Message-ID: ${rawMessageId}`, 'INFO', workerName);
    const bounceInfo = await this.dynamodb.getBounceInfo(rawMessageId, workerName);
    if (!bounceInfo) {
      // No record within the retry window — deliver the bounce unmodified.
      return {
        rawBytes,
        modified: false,
        isBounce: true,
        fromAddr: parsed.from?.text ?? '',
      };
    }
    // Log bounce details
    log(`✓ Found bounce info:`, 'INFO', workerName);
    log(` Original sender: ${bounceInfo.original_source}`, 'INFO', workerName);
    log(` Bounce type: ${bounceInfo.bounceType}/${bounceInfo.bounceSubType}`, 'INFO', workerName);
    log(` Bounced recipients: ${bounceInfo.bouncedRecipients}`, 'INFO', workerName);
    if (!bounceInfo.bouncedRecipients.length) {
      log('⚠ No bounced recipients found in bounce info', 'WARNING', workerName);
      return {
        rawBytes,
        modified: false,
        isBounce: true,
        fromAddr: parsed.from?.text ?? '',
      };
    }
    // The first bounced recipient becomes the visible sender of the bounce.
    const newFrom = bounceInfo.bouncedRecipients[0];
    // Rewrite headers in raw bytes
    let modifiedBytes = rawBytes;
    const originalFrom = getHeader(parsed, 'from');
    // Replace From header
    modifiedBytes = replaceHeader(modifiedBytes, 'From', newFrom);
    // Add diagnostic headers (original SES sender + bounce classification)
    modifiedBytes = addHeader(modifiedBytes, 'X-Original-SES-From', originalFrom);
    modifiedBytes = addHeader(
      modifiedBytes,
      'X-Bounce-Type',
      `${bounceInfo.bounceType}/${bounceInfo.bounceSubType}`,
    );
    // Add Reply-To if not present
    if (!getHeader(parsed, 'reply-to')) {
      modifiedBytes = addHeader(modifiedBytes, 'Reply-To', newFrom);
    }
    // Adjust subject for generic delivery status notifications
    const subjectLower = subject.toLowerCase();
    if (
      subjectLower.includes('delivery status notification') ||
      subjectLower.includes('thanks for your submission')
    ) {
      modifiedBytes = replaceHeader(
        modifiedBytes,
        'Subject',
        `Delivery Status: ${newFrom}`,
      );
    }
    log(`✓ Rewritten FROM: ${newFrom}`, 'SUCCESS', workerName);
    return {
      rawBytes: modifiedBytes,
      modified: true,
      isBounce: true,
      fromAddr: newFrom,
    };
  }
}
// ---------------------------------------------------------------------------
// Raw header manipulation helpers
// ---------------------------------------------------------------------------
/**
* Replace a header value in raw MIME bytes.
* Handles multi-line (folded) headers.
*/
function replaceHeader(raw: Buffer, name: string, newValue: string): Buffer {
const str = raw.toString('utf-8');
// Match header including potential folded continuation lines
const regex = new RegExp(
`^(${escapeRegex(name)}:\\s*).*?(\\r?\\n(?=[^ \\t])|\\r?\\n$)`,
'im',
);
// Also need to consume folded lines
const foldedRegex = new RegExp(
`^${escapeRegex(name)}:[ \\t]*[^\\r\\n]*(?:\\r?\\n[ \\t]+[^\\r\\n]*)*`,
'im',
);
const match = foldedRegex.exec(str);
if (!match) return raw;
const before = str.slice(0, match.index);
const after = str.slice(match.index + match[0].length);
const replaced = `${before}${name}: ${newValue}${after}`;
return Buffer.from(replaced, 'utf-8');
}
/**
* Add a new header line right before the header/body separator.
*/
/**
 * Insert a new `Name: value` header line just before the blank line that
 * separates the header section from the body. When no header/body
 * separator exists, the buffer is returned untouched.
 */
function addHeader(raw: Buffer, name: string, value: string): Buffer {
  const text = raw.toString('utf-8');
  // The first blank line (CRLF CRLF or LF LF) marks the end of the headers
  const boundary = text.match(/\r?\n\r?\n/);
  if (!boundary || boundary.index === undefined) return raw;
  const headerPart = text.slice(0, boundary.index);
  const bodyPart = text.slice(boundary.index);
  const rebuilt = `${headerPart}\r\n${name}: ${value}${bodyPart}`;
  return Buffer.from(rebuilt, 'utf-8');
}
/** Escape regex metacharacters so a literal string can be embedded in a RegExp. */
function escapeRegex(s: string): string {
  const META = /[.*+?^${}()|[\]\\]/g;
  return s.replace(META, '\\$&');
}

View File

@@ -0,0 +1,120 @@
/**
* Email parsing utilities
*
* Wraps `mailparser` for parsing raw MIME bytes and provides
* header sanitization (e.g. Microsoft's malformed Message-IDs).
*/
import { simpleParser, type ParsedMail } from 'mailparser';
// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------
/** Plain-text and optional HTML body extracted from a parsed email. */
export interface BodyParts {
  text: string; // never empty — extractBodyParts falls back to '(No body content)'
  html: string | null; // null when the message has no HTML part
}
// ---------------------------------------------------------------------------
// Parser
// ---------------------------------------------------------------------------
/**
* Parse raw email bytes into a ParsedMail object.
* Applies pre-sanitization for known malformed headers before parsing.
*/
/**
 * Parse raw email bytes into a ParsedMail object.
 * Known malformed headers (e.g. Microsoft's bracketed `[uuid]@domain`
 * Message-IDs) are repaired first so strict parsing does not fail.
 */
export async function parseEmail(raw: Buffer): Promise<ParsedMail> {
  return simpleParser(sanitizeRawHeaders(raw));
}
/**
* Extract text and HTML body parts from a parsed email.
*/
/**
 * Pull the plain-text and HTML bodies out of a parsed email.
 * The text part falls back to a placeholder when empty/whitespace;
 * the HTML part falls back to null when absent.
 */
export function extractBodyParts(parsed: ParsedMail): BodyParts {
  const trimmed = parsed.text?.trim();
  return {
    text: trimmed || '(No body content)',
    html: parsed.html || null,
  };
}
/**
* Check if email was already processed by our worker (loop detection).
*/
/**
 * Loop detection: true when this email already carries our worker marker
 * header, either directly or on one of our own auto-replies.
 */
export function isProcessedByWorker(parsed: ParsedMail): boolean {
  const marker = parsed.headers.get('x-ses-worker-processed');
  const autoSubmitted = parsed.headers.get('auto-submitted');
  const hasMarker = Boolean(marker);
  // An auto-reply only counts as ours when it also carries the marker
  const ourAutoReply = autoSubmitted === 'auto-replied' && hasMarker;
  return hasMarker || ourAutoReply;
}
/**
* Check if email is a SES MAILER-DAEMON bounce notification.
*/
/** True when the From header identifies an Amazon SES MAILER-DAEMON bounce. */
export function isSesBounceNotification(parsed: ParsedMail): boolean {
  const sender = (parsed.from?.text ?? '').toLowerCase();
  return ['mailer-daemon@', 'amazonses.com'].every((part) => sender.includes(part));
}
/**
* Get a header value as string. Handles mailparser's headerlines Map.
*/
/**
 * Read a header value as a plain string.
 *
 * mailparser stores some headers as objects carrying a `.text` field;
 * those are unwrapped. Missing headers yield ''. Anything else is
 * stringified.
 */
export function getHeader(parsed: ParsedMail, name: string): string {
  const value = parsed.headers.get(name.toLowerCase());
  if (value == null) return '';
  if (typeof value === 'string') return value;
  if (typeof value === 'object' && 'text' in value) {
    return (value as any).text ?? '';
  }
  return String(value);
}
// ---------------------------------------------------------------------------
// Raw header sanitization
// ---------------------------------------------------------------------------
/**
* Fix known problematic patterns in raw MIME headers BEFORE parsing.
*
* Specifically targets Microsoft's `Message-ID: <[uuid]@domain>` which
* causes strict parsers to crash.
*/
/**
 * Fix known problematic patterns in raw MIME headers BEFORE parsing.
 *
 * Specifically targets Microsoft's `Message-ID: <[uuid]@domain>` which
 * causes strict parsers to crash. Only the header section is rewritten;
 * the body bytes are passed through untouched.
 *
 * NOTE(review): when no blank line is found, only the first 8 KiB are
 * scanned — a bracketed Message-ID past that point (or a multi-byte
 * UTF-8 char split at the 8 KiB boundary) would be left as-is; confirm
 * headers always fit in 8 KiB for this traffic.
 */
function sanitizeRawHeaders(raw: Buffer): Buffer {
  // We only need to check/fix the header section (before first blank line).
  // For efficiency we work on the first ~8KB where headers live.
  const headerEnd = findDoubleNewline(raw);
  const headerLen = headerEnd === -1 ? Math.min(raw.length, 8192) : headerEnd;
  const headerStr = raw.subarray(0, headerLen).toString('utf-8');
  // Fix: Message-ID with square brackets <[...]@...>
  // Cheap pre-check avoids running the regex on the common clean case
  if (headerStr.includes('[') || headerStr.includes(']')) {
    const fixed = headerStr.replace(
      /^(Message-ID:\s*<?)(\[.*?\])(@[^>]*>?\s*)$/im,
      (_match, prefix, bracketed, suffix) =>
        // Strip the brackets but keep the uuid and the @domain suffix
        prefix + bracketed.replace(/\[/g, '').replace(/\]/g, '') + suffix,
    );
    if (fixed !== headerStr) {
      // Re-assemble: rewritten header bytes + untouched remainder (body)
      return Buffer.concat([
        Buffer.from(fixed, 'utf-8'),
        raw.subarray(headerLen),
      ]);
    }
  }
  return raw;
}
/**
 * Locate the header/body separator in raw MIME bytes.
 *
 * Returns the byte offset where the first blank line starts — i.e. the
 * index of `\r\n\r\n` or `\n\n` — or -1 when no separator exists.
 *
 * Fix: the previous loop bound `i < buf.length - 3` (needed only for the
 * 4-byte CRLFCRLF check) also gated the 2-byte LFLF check, so a bare
 * `\n\n` in the last three bytes — or in any buffer shorter than 4
 * bytes — was never found.
 */
function findDoubleNewline(buf: Buffer): number {
  const len = buf.length;
  for (let i = 0; i + 1 < len; i++) {
    // \r\n\r\n — only testable when 4 bytes remain
    if (
      i + 3 < len &&
      buf[i] === 0x0d && buf[i + 1] === 0x0a &&
      buf[i + 2] === 0x0d && buf[i + 3] === 0x0a
    ) {
      return i;
    }
    // \n\n
    if (buf[i] === 0x0a && buf[i + 1] === 0x0a) {
      return i;
    }
  }
  return -1;
}

View File

@@ -0,0 +1,309 @@
/**
* Email rules processing (Auto-Reply / OOO and Forwarding)
* * CLEANED UP & FIXED:
* - Uses MailComposer for ALL message generation (safer MIME handling)
* - Fixes broken attachment forwarding
* - Removed legacy SMTP forwarding
* - Removed manual string concatenation for MIME boundaries
*/
import { createTransport } from 'nodemailer';
import type { ParsedMail } from 'mailparser';
import type { SESHandler } from '../aws/ses.js';
import { extractBodyParts } from './parser.js';
import { log } from '../logger.js';
// We use MailComposer directly to build the raw message bytes
import MailComposer from 'nodemailer/lib/mail-composer/index.js';
import { DynamoDBHandler, EmailRule } from '../aws/dynamodb.js';
import { config, isInternalAddress } from '../config.js';
/** Hook fired once per executed rule action, used to bump Prometheus counters. */
export type MetricsCallback = (action: 'autoreply' | 'forward', domain: string) => void;
/**
 * Applies per-recipient email rules stored in DynamoDB:
 *  - Out-of-office auto-replies (suppressed for automatic/bulk/noreply mail)
 *  - Forwarding to one or more addresses (internal SMTP or external SES)
 *
 * Rule processing never suppresses local delivery: processRulesForRecipient
 * always returns false.
 */
export class RulesProcessor {
  constructor(
    private dynamodb: DynamoDBHandler,
    private ses: SESHandler,
  ) {}
  /**
   * Process OOO and Forward rules for a single recipient.
   *
   * @param recipient       address the rules are looked up for (lowercased for the lookup)
   * @param parsed          parsed original message
   * @param rawBytes        raw MIME bytes of the original (not read in this method)
   * @param domain          recipient domain, used as the metrics label
   * @param workerName      tag used in log lines
   * @param metricsCallback optional hook fired per action ('autoreply' / 'forward')
   * @returns always false — rules never skip local delivery
   */
  async processRulesForRecipient(
    recipient: string,
    parsed: ParsedMail,
    rawBytes: Buffer,
    domain: string,
    workerName: string,
    metricsCallback?: MetricsCallback,
  ): Promise<boolean> {
    // Standby nodes must not send anything
    if (config.standbyMode) {
      return false;
    }
    const rule = await this.dynamodb.getEmailRules(recipient.toLowerCase());
    if (!rule) return false;
    const originalFrom = parsed.from?.text ?? '';
    const senderAddr = extractSenderAddress(originalFrom);
    // OOO / Auto-Reply
    if (rule.ooo_active) {
      await this.handleOoo(
        recipient,
        parsed,
        senderAddr,
        rule,
        domain,
        workerName,
        metricsCallback,
      );
    }
    // Forwarding
    const forwards = rule.forwards ?? [];
    if (forwards.length > 0) {
      await this.handleForwards(
        recipient,
        parsed,
        originalFrom,
        forwards,
        domain,
        workerName,
        metricsCallback,
      );
    }
    return false; // never skip local delivery
  }
  // -----------------------------------------------------------------------
  // OOO
  // -----------------------------------------------------------------------
  /**
   * Send an out-of-office auto-reply, unless the incoming message is
   * automatic (Auto-Submitted), list/bulk traffic (Precedence), or from
   * a noreply/mailer-daemon address. Errors are logged, never thrown.
   */
  private async handleOoo(
    recipient: string,
    parsed: ParsedMail,
    senderAddr: string,
    rule: EmailRule,
    domain: string,
    workerName: string,
    metricsCallback?: MetricsCallback,
  ): Promise<void> {
    // Don't reply to automatic messages (RFC 3834 Auto-Submitted)
    const autoSubmitted = parsed.headers.get('auto-submitted');
    const precedence = String(parsed.headers.get('precedence') ?? '').toLowerCase();
    if (autoSubmitted && autoSubmitted !== 'no') {
      log('  ⏭  Skipping OOO for auto-submitted message', 'INFO', workerName);
      return;
    }
    if (['bulk', 'junk', 'list'].includes(precedence)) {
      log(`  ⏭  Skipping OOO for ${precedence} message`, 'INFO', workerName);
      return;
    }
    if (/noreply|no-reply|mailer-daemon/i.test(senderAddr)) {
      log('  ⏭  Skipping OOO for noreply address', 'INFO', workerName);
      return;
    }
    try {
      const oooMsg = (rule.ooo_message as string) ?? 'I am out of office.';
      const contentType = (rule.ooo_content_type as string) ?? 'text';
      // FIX: Use MailComposer via await
      const oooBuffer = await buildOooReply(parsed, recipient, oooMsg, contentType);
      // Internal recipients go over local SMTP; everyone else via SES
      if (isInternalAddress(senderAddr)) {
        const ok = await sendInternalEmail(recipient, senderAddr, oooBuffer, workerName);
        if (ok) log(`✓ Sent OOO reply internally to ${senderAddr}`, 'SUCCESS', workerName);
        else log(`⚠ Internal OOO reply failed to ${senderAddr}`, 'WARNING', workerName);
      } else {
        const ok = await this.ses.sendRawEmail(recipient, senderAddr, oooBuffer, workerName);
        if (ok) log(`✓ Sent OOO reply externally to ${senderAddr} via SES`, 'SUCCESS', workerName);
      }
      metricsCallback?.('autoreply', domain);
    } catch (err: any) {
      log(`⚠ OOO reply failed to ${senderAddr}: ${err.message ?? err}`, 'ERROR', workerName);
    }
  }
  // -----------------------------------------------------------------------
  // Forwarding
  // -----------------------------------------------------------------------
  /**
   * Forward the message to each configured address. Each forward is
   * attempted independently; a failure for one target does not stop the
   * others. Errors are logged, never thrown.
   */
  private async handleForwards(
    recipient: string,
    parsed: ParsedMail,
    originalFrom: string,
    forwards: string[],
    domain: string,
    workerName: string,
    metricsCallback?: MetricsCallback,
  ): Promise<void> {
    for (const forwardTo of forwards) {
      try {
        // FIX: Correctly await the composer result
        const fwdBuffer = await buildForwardMessage(parsed, recipient, forwardTo, originalFrom);
        // Internal targets go over local SMTP; external ones via SES
        if (isInternalAddress(forwardTo)) {
          const ok = await sendInternalEmail(recipient, forwardTo, fwdBuffer, workerName);
          if (ok) log(`✓ Forwarded internally to ${forwardTo}`, 'SUCCESS', workerName);
          else log(`⚠ Internal forward failed to ${forwardTo}`, 'WARNING', workerName);
        } else {
          const ok = await this.ses.sendRawEmail(recipient, forwardTo, fwdBuffer, workerName);
          if (ok) log(`✓ Forwarded externally to ${forwardTo} via SES`, 'SUCCESS', workerName);
        }
        metricsCallback?.('forward', domain);
      } catch (err: any) {
        log(`⚠ Forward failed to ${forwardTo}: ${err.message ?? err}`, 'ERROR', workerName);
      }
    }
  }
}
// ---------------------------------------------------------------------------
// Message building (Using Nodemailer MailComposer for Safety)
// ---------------------------------------------------------------------------
/**
 * Build the raw MIME bytes of an out-of-office auto-reply.
 *
 * The reply quotes the original From/Subject and body, threads onto the
 * original via In-Reply-To/References, and carries our loop-detection
 * headers (Auto-Submitted, X-SES-Worker-Processed).
 */
async function buildOooReply(
  original: ParsedMail,
  recipient: string,
  oooMsg: string,
  contentType: string,
): Promise<Buffer> {
  const { text: textBody, html: htmlBody } = extractBodyParts(original);
  const quotedSubject = original.subject ?? '(no subject)';
  const quotedFrom = original.from?.text ?? 'unknown';
  const quotedMsgId = original.messageId ?? '';
  const replyDomain = recipient.split('@')[1];

  // Plain-text rendition: OOO message followed by a quoted original
  const textContent = [
    `${oooMsg}\n\n--- Original Message ---\n`,
    `From: ${quotedFrom}\n`,
    `Subject: ${quotedSubject}\n\n`,
    textBody,
  ].join('');

  // HTML rendition; falls back to <br>-converted text when no HTML part exists
  const htmlContent = [
    `<div>${oooMsg}</div><br><hr><br>`,
    '<strong>Original Message</strong><br>',
    `<strong>From:</strong> ${quotedFrom}<br>`,
    `<strong>Subject:</strong> ${quotedSubject}<br><br>`,
    htmlBody ? htmlBody : textBody.replace(/\n/g, '<br>'),
  ].join('');

  const wantHtml = contentType === 'html' || !!htmlBody;
  const composer = new MailComposer({
    from: recipient,
    to: quotedFrom,
    subject: `Out of Office: ${quotedSubject}`,
    inReplyTo: quotedMsgId,
    references: [quotedMsgId], // Nodemailer wants array
    text: textContent,
    html: wantHtml ? htmlContent : undefined,
    headers: {
      'Auto-Submitted': 'auto-replied',
      'X-SES-Worker-Processed': 'ooo-reply',
    },
    messageId: `<${Date.now()}.${Math.random().toString(36).slice(2)}@${replyDomain}>`
  });
  return composer.compile().build();
}
async function buildForwardMessage(
original: ParsedMail,
recipient: string,
forwardTo: string,
originalFrom: string,
): Promise<Buffer> {
const { text: textBody, html: htmlBody } = extractBodyParts(original);
const originalSubject = original.subject ?? '(no subject)';
const originalDate = original.date?.toUTCString() ?? 'unknown';
// Text version
let fwdText = '---------- Forwarded message ---------\n';
fwdText += `From: ${originalFrom}\n`;
fwdText += `Date: ${originalDate}\n`;
fwdText += `Subject: ${originalSubject}\n`;
fwdText += `To: ${recipient}\n\n`;
fwdText += textBody;
// HTML version
let fwdHtml: string | undefined;
if (htmlBody) {
fwdHtml = "<div style='border-left:3px solid #ccc;padding-left:10px;'>";
fwdHtml += '<strong>---------- Forwarded message ---------</strong><br>';
fwdHtml += `<strong>From:</strong> ${originalFrom}<br>`;
fwdHtml += `<strong>Date:</strong> ${originalDate}<br>`;
fwdHtml += `<strong>Subject:</strong> ${originalSubject}<br>`;
fwdHtml += `<strong>To:</strong> ${recipient}<br><br>`;
fwdHtml += htmlBody;
fwdHtml += '</div>';
}
// Config object for MailComposer
const mailOptions: any = {
from: recipient,
to: forwardTo,
subject: `FWD: ${originalSubject}`,
replyTo: originalFrom,
text: fwdText,
html: fwdHtml,
headers: {
'X-SES-Worker-Processed': 'forwarded',
},
};
// Attachments
if (original.attachments && original.attachments.length > 0) {
mailOptions.attachments = original.attachments.map((att) => ({
filename: att.filename ?? 'attachment',
content: att.content,
contentType: att.contentType,
cid: att.cid ?? undefined,
contentDisposition: att.contentDisposition || 'attachment'
}));
}
const composer = new MailComposer(mailOptions);
return composer.compile().build();
}
// ---------------------------------------------------------------------------
// Internal SMTP delivery (port 25, bypasses transport_maps)
// ---------------------------------------------------------------------------
/**
 * Deliver a raw message over the internal SMTP port (bypasses
 * transport_maps).
 *
 * A one-shot (non-pooled) transport is created per call.
 *
 * Fix: the original only closed the transport on the success path — when
 * sendMail threw, the connection was leaked. The close now happens in a
 * finally block on every path.
 *
 * @returns true on success, false on failure (failure is logged)
 */
async function sendInternalEmail(
  from: string,
  to: string,
  rawMessage: Buffer,
  workerName: string,
): Promise<boolean> {
  let transport: ReturnType<typeof createTransport> | null = null;
  try {
    transport = createTransport({
      host: config.smtpHost,
      port: config.internalSmtpPort,
      secure: false,
      tls: { rejectUnauthorized: false },
    });
    await transport.sendMail({
      envelope: { from, to: [to] },
      raw: rawMessage,
    });
    return true;
  } catch (err: any) {
    log(`  ✗ Internal delivery failed to ${to}: ${err.message ?? err}`, 'ERROR', workerName);
    return false;
  } finally {
    // Always release the connection, even when sendMail failed
    transport?.close();
  }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
 * Pull the bare address out of a From header.
 * "Name <a@b.c>" yields "a@b.c"; a header without angle brackets is
 * returned unchanged.
 */
function extractSenderAddress(fromHeader: string): string {
  const angled = /<([^>]+)>/.exec(fromHeader);
  return angled ? angled[1] : fromHeader;
}

View File

@@ -0,0 +1,48 @@
/**
* Health check HTTP server
*
* Provides a simple /health endpoint for Docker healthcheck
* and monitoring. Returns domain list and feature flags.
*/
import { createServer, type Server } from 'node:http';
import { log } from './logger.js';
/**
 * Start the /health HTTP endpoint used by the Docker healthcheck and
 * monitoring. Every request (any path, any method) receives a 200 JSON
 * payload describing the configured domains, static feature flags,
 * optional runtime stats, and process uptime.
 */
export function startHealthServer(
  port: number,
  domains: string[],
  getStats?: () => any,
): Server {
  // Payload is rebuilt per request so stats/uptime/timestamp stay fresh
  const buildPayload = () => ({
    status: 'healthy',
    worker: 'unified-email-worker-ts',
    version: '2.0.0',
    domains,
    domainCount: domains.length,
    features: {
      bounce_handling: true,
      ooo_replies: true,
      forwarding: true,
      blocklist: true,
      prometheus_metrics: true,
      lmtp: false,
      legacy_smtp_forward: false,
    },
    stats: getStats?.() ?? {},
    uptime: process.uptime(),
    timestamp: new Date().toISOString(),
  });
  const server = createServer((_req, res) => {
    res.writeHead(200, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify(buildPayload(), null, 2));
  });
  server.listen(port, () => {
    log(`Health check on port ${port}`);
  });
  return server;
}

View File

@@ -0,0 +1,166 @@
/**
* Structured logging for email worker with daily rotation AND retention
*
* Uses pino for high-performance JSON logging.
* Includes logic to delete logs older than X days.
*/
import pino from 'pino';
import {
existsSync,
mkdirSync,
createWriteStream,
type WriteStream,
readdirSync,
statSync,
unlinkSync
} from 'node:fs';
import { join } from 'node:path';
// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------
// Directory and file-name prefix for the plain-text daily log files.
const LOG_DIR = '/var/log/email-worker';
const LOG_FILE_PREFIX = 'worker';
const RETENTION_DAYS = 14; // Delete log files older than 14 days
// ---------------------------------------------------------------------------
// File stream & Retention Logic
// ---------------------------------------------------------------------------
// Write stream for today's log file; null until first write or on open error.
let fileStream: WriteStream | null = null;
// Date (YYYY-MM-DD) the current stream was opened for — used to detect rollover.
let currentDateStr = '';
/** Current date as YYYY-MM-DD (UTC, taken from the ISO timestamp). */
function getDateStr(): string {
  return new Date().toISOString().slice(0, 10); // YYYY-MM-DD
}
/**
* Löscht alte Log-Dateien basierend auf RETENTION_DAYS
*/
/**
 * Delete log files older than RETENTION_DAYS.
 *
 * Only files matching our `<prefix>*.log` naming are considered; age is
 * judged by mtime. Per-file failures (already deleted, permission
 * denied) are ignored; directory-level failures go to stderr. This is
 * best-effort by design — the worker must keep running regardless.
 */
function cleanUpOldLogs(): void {
  try {
    if (!existsSync(LOG_DIR)) return;
    const files = readdirSync(LOG_DIR);
    const now = Date.now();
    const maxAgeMs = RETENTION_DAYS * 24 * 60 * 60 * 1000;
    for (const file of files) {
      // Skip anything that is not one of our log files
      if (!file.startsWith(LOG_FILE_PREFIX) || !file.endsWith('.log')) continue;
      const filePath = join(LOG_DIR, file);
      try {
        const stats = statSync(filePath);
        const ageMs = now - stats.mtimeMs;
        if (ageMs > maxAgeMs) {
          unlinkSync(filePath);
          // Log once to stdout so the cleanup is visible
          process.stdout.write(`[INFO] Deleted old log file: ${file}\n`);
        }
      } catch (err) {
        // Ignore: the file may have been removed concurrently, or access denied
      }
    }
  } catch (err) {
    process.stderr.write(`[WARN] Failed to clean up old logs: ${err}\n`);
  }
}
/**
 * Return the write stream for today's log file, rotating daily.
 *
 * Reuses the open stream while the date is unchanged. On rollover (or
 * on first call) it runs retention cleanup, closes the previous stream,
 * and opens `<prefix>.<YYYY-MM-DD>.log` in append mode. Returns null
 * when the log directory cannot be created or the file cannot be opened
 * (e.g. permissions), in which case file logging is silently skipped.
 */
function ensureFileStream(): WriteStream | null {
  const today = getDateStr();
  // Fast path: a stream for today is already open
  if (fileStream && currentDateStr === today) return fileStream;
  try {
    if (!existsSync(LOG_DIR)) mkdirSync(LOG_DIR, { recursive: true });
    // Date changed (or first start): clean up old files
    if (currentDateStr !== today) {
      cleanUpOldLogs();
    }
    // Close the previous day's stream, if any
    if (fileStream) {
      fileStream.end();
    }
    const filePath = join(LOG_DIR, `${LOG_FILE_PREFIX}.${today}.log`);
    fileStream = createWriteStream(filePath, { flags: 'a' });
    currentDateStr = today;
    return fileStream;
  } catch {
    // Silently continue without file logging (e.g. permission issue)
    return null;
  }
}
// ---------------------------------------------------------------------------
// Pino logger
// ---------------------------------------------------------------------------
// Pino logger with two parallel transports (console + file).
const logger = pino({
  level: 'info',
  transport: {
    targets: [
      {
        // 1. Pretty, colorized logs on the console (for `docker compose logs -f`)
        target: 'pino-pretty',
        options: {
          colorize: true,
          translateTime: 'SYS:yyyy-mm-dd HH:MM:ss',
          ignore: 'pid,hostname',
          singleLine: true
        }
      },
      {
        // 2. Simultaneously write the unformatted stream to a file
        target: 'pino/file',
        options: {
          destination: '/var/log/email-worker/worker.log',
          mkdir: true
        }
      }
    ]
  }
});
// ---------------------------------------------------------------------------
// Log level mapping
// ---------------------------------------------------------------------------
// Worker log levels (Python-logging style) and their pino method equivalents.
type LogLevel = 'DEBUG' | 'INFO' | 'WARNING' | 'ERROR' | 'CRITICAL' | 'SUCCESS';
// SUCCESS has no pino counterpart: it maps to info and gets a text prefix in log().
const LEVEL_MAP: Record<LogLevel, keyof pino.Logger> = {
  DEBUG: 'debug',
  INFO: 'info',
  WARNING: 'warn',
  ERROR: 'error',
  CRITICAL: 'fatal',
  SUCCESS: 'info',
};
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
 * Emit one log line to both sinks: pino (console/JSON transports) and
 * the daily plain-text file. SUCCESS is rendered as info level with a
 * "[SUCCESS] " text prefix.
 */
export function log(
  message: string,
  level: LogLevel = 'INFO',
  workerName = 'unified-worker',
): void {
  const successTag = level === 'SUCCESS' ? '[SUCCESS] ' : '';
  // Pino (stdout/json)
  const pinoMethod = LEVEL_MAP[level] ?? 'info';
  (logger as any)[pinoMethod](`[${workerName}] ${successTag}${message}`);
  // Plain-text file sink (skipped when the file cannot be opened)
  const sink = ensureFileStream();
  if (!sink) return;
  const stamp = new Date().toISOString().replace('T', ' ').slice(0, 19);
  sink.write(`[${stamp}] [${level}] [${workerName}] ${successTag}${message}\n`);
}

View File

@@ -0,0 +1,89 @@
/**
* Main entry point for unified email worker
*
* Startup sequence:
* 1. Load configuration and domains
* 2. Start Prometheus metrics server
* 3. Start health check server
* 4. Initialize UnifiedWorker
* 5. Register signal handlers for graceful shutdown
*/
import { config, loadDomains } from './config.js';
import { log } from './logger.js';
import { startMetricsServer, type MetricsCollector } from './metrics.js';
import { startHealthServer } from './health.js';
import { UnifiedWorker } from './worker/unified-worker.js';
// ---------------------------------------------------------------------------
// Banner
// ---------------------------------------------------------------------------
/** Print the startup banner: version box, configured domains, and ports. */
function printBanner(domains: string[]): void {
  const banner = [
    '╔══════════════════════════════════════════════════╗',
    '║ Unified Email Worker (TypeScript) ║',
    '║ Version 2.0.0 ║',
    '╚══════════════════════════════════════════════════╝',
  ];
  for (const line of banner) {
    log(line);
  }
  log('');
  log(`Domains (${domains.length}):`);
  domains.forEach((d) => log(`${d}`));
  log('');
  log(`SMTP: ${config.smtpHost}:${config.smtpPort}`);
  log(`Internal SMTP: port ${config.internalSmtpPort}`);
  log(`Poll interval: ${config.pollInterval}s`);
  log(`Metrics: port ${config.metricsPort}`);
  log(`Health: port ${config.healthPort}`);
  log('');
}
// ---------------------------------------------------------------------------
// Main
// ---------------------------------------------------------------------------
/**
 * Startup sequence: load domains, start the metrics and health servers,
 * create the UnifiedWorker, install SIGINT/SIGTERM handlers, then start
 * polling. Exits with code 1 when no domains are configured.
 */
async function main(): Promise<void> {
  // 1. Load domains
  const domains = loadDomains();
  if (domains.length === 0) {
    log('❌ No domains configured. Set DOMAINS env var or provide DOMAINS_FILE.', 'ERROR');
    process.exit(1);
  }
  printBanner(domains);
  // 2. Metrics server (null when prom-client is unavailable)
  const metrics: MetricsCollector | null = await startMetricsServer(config.metricsPort);
  // 3. Unified worker
  const worker = new UnifiedWorker(domains, metrics);
  // 4. Health server (stats callback pulls live counters from the worker)
  startHealthServer(config.healthPort, domains, () => worker.getStats());
  // 5. Signal handlers
  let shuttingDown = false;
  // Guard so a second signal during shutdown is ignored
  const shutdown = async (signal: string) => {
    if (shuttingDown) return;
    shuttingDown = true;
    log(`\n🛑 Received ${signal}. Shutting down gracefully...`);
    await worker.stop();
    log('👋 Goodbye.');
    process.exit(0);
  };
  process.on('SIGINT', () => shutdown('SIGINT'));
  process.on('SIGTERM', () => shutdown('SIGTERM'));
  // 6. Start
  await worker.start();
  // Keep alive (event loop stays open due to HTTP servers + SQS polling)
  log('✅ Worker is running. Press Ctrl+C to stop.');
}
// ---------------------------------------------------------------------------
// Entry point: any unhandled startup error is fatal — log message + stack, exit 1.
main().catch((err) => {
  log(`💥 Fatal startup error: ${err.message ?? err}`, 'CRITICAL');
  log(err.stack ?? '', 'CRITICAL');
  process.exit(1);
});

View File

@@ -0,0 +1,155 @@
/**
* Prometheus metrics collection
*
* Uses prom-client. Falls back gracefully if not available.
*/
import { log } from './logger.js';
import type * as PromClientTypes from 'prom-client';
// prom-client is optional — import dynamically
// prom-client is optional — import dynamically
// NOTE(review): `require` assumes a CommonJS runtime. The sibling files use
// ESM-style `.js` imports; under an ESM build `require` is undefined, the
// ReferenceError lands in the catch, and metrics are silently disabled even
// when prom-client IS installed — confirm the module system of this build.
let promClient: typeof PromClientTypes | null = null;
try {
  promClient = require('prom-client') as typeof PromClientTypes;
} catch {
  // not installed
}
// ---------------------------------------------------------------------------
// Metric instances (created lazily if prom-client is available)
// ---------------------------------------------------------------------------
// Typed `any` because prom-client may be absent. Each stays undefined until
// initMetrics() runs; call sites guard with optional chaining.
let emailsProcessed: any;
let emailsInFlight: any;
let processingTime: any;
let queueSize: any;
let bouncesProcessed: any;
let autorepliesSent: any;
let forwardsSent: any;
let blockedSenders: any;
/**
 * Create all Prometheus metric instances (lazy — only when prom-client
 * is available). When prom-client is missing this is a no-op and every
 * metric stays undefined, so call sites simply skip recording.
 */
function initMetrics(): void {
  if (!promClient) return;
  const { Counter, Gauge, Histogram } = promClient;

  // Small factories keep the registrations below to one line each
  const counter = (name: string, help: string, labelNames: string[]) =>
    new Counter({ name, help, labelNames });
  const gauge = (name: string, help: string, labelNames?: string[]) =>
    labelNames ? new Gauge({ name, help, labelNames }) : new Gauge({ name, help });

  emailsProcessed = counter('emails_processed_total', 'Total emails processed', ['domain', 'status']);
  emailsInFlight = gauge('emails_in_flight', 'Emails currently being processed');
  processingTime = new Histogram({
    name: 'email_processing_seconds',
    help: 'Time to process email',
    labelNames: ['domain'],
  });
  queueSize = gauge('queue_messages_available', 'Messages in queue', ['domain']);
  bouncesProcessed = counter('bounces_processed_total', 'Bounce notifications processed', ['domain', 'type']);
  autorepliesSent = counter('autoreplies_sent_total', 'Auto-replies sent', ['domain']);
  forwardsSent = counter('forwards_sent_total', 'Forwards sent', ['domain']);
  blockedSenders = counter('blocked_senders_total', 'Emails blocked by blacklist', ['domain']);
}
// ---------------------------------------------------------------------------
// MetricsCollector
// ---------------------------------------------------------------------------
/**
 * Thin, null-safe facade over the module-level Prometheus metrics.
 *
 * When prom-client is not installed, `enabled` is false and every method
 * is a silent no-op (the metric instances stay undefined and optional
 * chaining short-circuits), so callers never need to branch.
 */
export class MetricsCollector {
  // False when prom-client could not be loaded; metrics calls become no-ops.
  public readonly enabled: boolean;
  constructor() {
    this.enabled = !!promClient;
    if (this.enabled) initMetrics();
  }
  /** Count one processed email, labelled by domain and outcome status. */
  incrementProcessed(domain: string, status: string): void {
    emailsProcessed?.labels(domain, status).inc();
  }
  /** One more email currently being processed. */
  incrementInFlight(): void {
    emailsInFlight?.inc();
  }
  /** One email finished processing (success or failure). */
  decrementInFlight(): void {
    emailsInFlight?.dec();
  }
  /** Record per-email processing duration in seconds. */
  observeProcessingTime(domain: string, seconds: number): void {
    processingTime?.labels(domain).observe(seconds);
  }
  /** Set the (approximate) number of messages waiting in a domain queue. */
  setQueueSize(domain: string, size: number): void {
    queueSize?.labels(domain).set(size);
  }
  /** Count one processed bounce notification, labelled by bounce type. */
  incrementBounce(domain: string, bounceType: string): void {
    bouncesProcessed?.labels(domain, bounceType).inc();
  }
  /** Count one out-of-office auto-reply sent. */
  incrementAutoreply(domain: string): void {
    autorepliesSent?.labels(domain).inc();
  }
  /** Count one forwarded message sent. */
  incrementForward(domain: string): void {
    forwardsSent?.labels(domain).inc();
  }
  /** Count one email rejected by the sender blocklist. */
  incrementBlocked(domain: string): void {
    blockedSenders?.labels(domain).inc();
  }
}
// ---------------------------------------------------------------------------
// Start metrics HTTP server
// ---------------------------------------------------------------------------
/**
 * Start the Prometheus scrape endpoint and return a MetricsCollector.
 *
 * Returns null (metrics disabled) when prom-client is not installed or
 * the HTTP server fails to start; the rest of the worker treats a null
 * collector as "no metrics" and keeps running.
 */
export async function startMetricsServer(port: number): Promise<MetricsCollector | null> {
  if (!promClient) {
    log('⚠  Prometheus client not installed, metrics disabled', 'WARNING');
    return null;
  }
  try {
    const { createServer } = await import('node:http');
    const { register } = promClient;
    // Every request (any path) serves the full default-registry scrape output
    const server = createServer(async (_req, res) => {
      try {
        res.setHeader('Content-Type', register.contentType);
        res.end(await register.metrics());
      } catch {
        // Serialization failure: respond 500 so the scraper records the miss
        res.statusCode = 500;
        res.end();
      }
    });
    server.listen(port, () => {
      log(`Prometheus metrics on port ${port}`);
    });
    return new MetricsCollector();
  } catch (err: any) {
    log(`Failed to start metrics server: ${err.message ?? err}`, 'ERROR');
    return null;
  }
}

View File

@@ -0,0 +1,155 @@
/**
* SMTP / email delivery with nodemailer pooled transport
*
* Replaces both Python's SMTPPool and EmailDelivery classes.
* nodemailer handles connection pooling, keepalive, and reconnection natively.
*
* Removed: LMTP delivery path (never used in production).
*/
import { createTransport, type Transporter } from 'nodemailer';
import { log } from '../logger.js';
import { config } from '../config.js';
// ---------------------------------------------------------------------------
// Permanent error detection
// ---------------------------------------------------------------------------
// Substrings of an SMTP error message that indicate a permanent
// (non-retryable) recipient failure.
const PERMANENT_INDICATORS = [
  '550', '551', '553',
  'mailbox not found', 'user unknown', 'no such user',
  'recipient rejected', 'does not exist', 'invalid recipient',
  'unknown user',
];

/** True when the error text contains any known permanent-failure indicator. */
function isPermanentRecipientError(errorMsg: string): boolean {
  const normalized = errorMsg.toLowerCase();
  for (const indicator of PERMANENT_INDICATORS) {
    if (normalized.includes(indicator)) return true;
  }
  return false;
}
// ---------------------------------------------------------------------------
// Delivery class
// ---------------------------------------------------------------------------
/**
 * Pooled SMTP delivery to the local mail server.
 *
 * nodemailer's pooled transport handles connection reuse, keepalive and
 * reconnection. Failures are classified as permanent (5xx / known error
 * text — do not retry) vs. connection-level (retried with a short pause)
 * vs. other temporary errors (returned to the caller without retry).
 */
export class EmailDelivery {
  // Shared pooled transport; created once per process
  private transport: Transporter;
  constructor() {
    this.transport = createTransport({
      host: config.smtpHost,
      port: config.smtpPort,
      secure: config.smtpUseTls,
      pool: true,
      maxConnections: config.smtpPoolSize,
      maxMessages: Infinity, // reuse connections indefinitely
      tls: { rejectUnauthorized: false }, // local server; cert not validated
      // Auth only when both credentials are configured
      ...(config.smtpUser && config.smtpPass
        ? { auth: { user: config.smtpUser, pass: config.smtpPass } }
        : {}),
    });
    log(
      `📡 SMTP pool initialized → ${config.smtpHost}:${config.smtpPort} ` +
        `(max ${config.smtpPoolSize} connections)`,
    );
  }
  /**
   * Send raw email to ONE recipient via the local DMS.
   *
   * Connection-level failures are retried up to `maxRetries` extra times
   * with a 300 ms pause; permanent failures return immediately.
   *
   * Returns: [success, errorMessage?, isPermanent]
   */
  async sendToRecipient(
    fromAddr: string,
    recipient: string,
    rawMessage: Buffer,
    workerName: string,
    maxRetries = 2,
  ): Promise<[boolean, string | null, boolean]> {
    let lastError: string | null = null;
    for (let attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        await this.transport.sendMail({
          envelope: { from: fromAddr, to: [recipient] },
          raw: rawMessage,
        });
        log(`${recipient}: Delivered (SMTP)`, 'SUCCESS', workerName);
        return [true, null, false];
      } catch (err: any) {
        const errorMsg = err.message ?? String(err);
        const responseCode = err.responseCode ?? 0;
        // Check for permanent errors (5xx)
        if (
          responseCode >= 550 ||
          isPermanentRecipientError(errorMsg)
        ) {
          log(
            `${recipient}: ${errorMsg} (permanent)`,
            'ERROR',
            workerName,
          );
          return [false, errorMsg, true];
        }
        // Connection-level errors → retry
        if (
          err.code === 'ECONNRESET' ||
          err.code === 'ECONNREFUSED' ||
          err.code === 'ETIMEDOUT' ||
          errorMsg.toLowerCase().includes('disconnect') ||
          errorMsg.toLowerCase().includes('closed') ||
          errorMsg.toLowerCase().includes('connection')
        ) {
          log(
            `${recipient}: Connection error, retrying... ` +
              `(attempt ${attempt + 1}/${maxRetries + 1})`,
            'WARNING',
            workerName,
          );
          lastError = errorMsg;
          // Brief pause before the pooled transport reconnects
          await sleep(300);
          continue;
        }
        // Other SMTP errors: classify by message text, no retry
        const isPerm = isPermanentRecipientError(errorMsg);
        log(
          `${recipient}: ${errorMsg} (${isPerm ? 'permanent' : 'temporary'})`,
          'ERROR',
          workerName,
        );
        return [false, errorMsg, isPerm];
      }
    }
    // All retries exhausted
    log(
      `${recipient}: All retries failed - ${lastError}`,
      'ERROR',
      workerName,
    );
    return [false, lastError ?? 'Connection failed after retries', false];
  }
  /** Verify the transport is reachable (used during startup). */
  async verify(): Promise<boolean> {
    try {
      await this.transport.verify();
      return true;
    } catch {
      return false;
    }
  }
  /** Close all pooled connections. */
  close(): void {
    this.transport.close();
  }
}
// ---------------------------------------------------------------------------
/** Promise-based delay helper used for retry backoff. */
function sleep(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}

View File

@@ -0,0 +1,151 @@
/**
* Domain queue poller
*
* One poller per domain. Runs an async loop that long-polls SQS
* and dispatches messages to the MessageProcessor.
*
* Replaces Python's threading.Thread + threading.Event with
* a simple async loop + AbortController for cancellation.
*/
import type { SQSHandler } from '../aws/sqs.js';
import type { MetricsCollector } from '../metrics.js';
import { log } from '../logger.js';
import { MessageProcessor } from './message-processor.js';
/** Per-domain runtime counters for one poller (surfaced via getStats). */
export interface DomainPollerStats {
  domain: string; // domain this poller serves
  processed: number; // messages handled without throwing
  errors: number; // message-processing + polling failures
  lastActivity: Date | null; // time of the last successfully processed message
  running: boolean; // true while the poll loop is active
}
/**
 * One SQS poller per domain.
 *
 * Runs a single async long-poll loop and dispatches each message to the
 * MessageProcessor. Cancellation uses an AbortController instead of the
 * Python version's threading.Event: stop() aborts the signal and awaits
 * the in-flight loop iteration.
 */
export class DomainPoller {
  // Mutated in place by the loop; read externally via the health endpoint
  public stats: DomainPollerStats;
  // Abort signal checked between every await point in the loop
  private abort: AbortController;
  // Promise of the running loop so stop() can await completion
  private loopPromise: Promise<void> | null = null;
  constructor(
    private domain: string,
    private queueUrl: string,
    private sqs: SQSHandler,
    private processor: MessageProcessor,
    private metrics: MetricsCollector | null,
  ) {
    this.abort = new AbortController();
    this.stats = {
      domain,
      processed: 0,
      errors: 0,
      lastActivity: null,
      running: false,
    };
  }
  /** Start the polling loop. Returns immediately. */
  start(): void {
    // Idempotent: a second start() while running is a no-op
    if (this.stats.running) return;
    this.stats.running = true;
    log(`▶ Started poller for ${this.domain}`, 'INFO', `poller-${this.domain}`);
    this.loopPromise = this.pollLoop();
  }
  /** Signal the poller to stop and wait for it to finish. */
  async stop(): Promise<void> {
    if (!this.stats.running) return;
    this.abort.abort();
    // Wait for the current loop iteration to observe the abort and return
    if (this.loopPromise) {
      await this.loopPromise;
    }
    this.stats.running = false;
    log(`⏹ Stopped poller for ${this.domain}`, 'INFO', `poller-${this.domain}`);
  }
  // -----------------------------------------------------------------------
  // Poll loop
  // -----------------------------------------------------------------------
  /**
   * Long-poll loop: report queue depth, receive a batch, process each
   * message (deleting it on success), and back off 5 s on polling errors.
   * Exits as soon as the abort signal fires.
   */
  private async pollLoop(): Promise<void> {
    const workerName = `poller-${this.domain}`;
    while (!this.abort.signal.aborted) {
      try {
        // Report queue size
        const qSize = await this.sqs.getQueueSize(this.queueUrl);
        this.metrics?.setQueueSize(this.domain, qSize);
        if (qSize > 0) {
          log(`📊 Queue ${this.domain}: ~${qSize} messages`, 'INFO', workerName);
        }
        // Long-poll
        const messages = await this.sqs.receiveMessages(this.queueUrl);
        if (this.abort.signal.aborted) break;
        if (messages.length === 0) continue;
        log(
          `📬 Received ${messages.length} message(s) for ${this.domain}`,
          'INFO',
          workerName,
        );
        // Process each message
        for (const msg of messages) {
          if (this.abort.signal.aborted) break;
          // Redelivery count lets the processor apply poison-message handling
          const receiveCount = parseInt(
            msg.Attributes?.ApproximateReceiveCount ?? '1',
            10,
          );
          this.metrics?.incrementInFlight();
          const start = Date.now();
          try {
            const shouldDelete = await this.processor.processMessage(
              this.domain,
              msg,
              receiveCount,
            );
            // Delete only on explicit success; otherwise SQS redelivers
            if (shouldDelete && msg.ReceiptHandle) {
              await this.sqs.deleteMessage(this.queueUrl, msg.ReceiptHandle);
            }
            this.stats.processed++;
            this.stats.lastActivity = new Date();
            const elapsed = ((Date.now() - start) / 1000).toFixed(2);
            this.metrics?.observeProcessingTime(this.domain, parseFloat(elapsed));
          } catch (err: any) {
            this.stats.errors++;
            log(
              `❌ Error processing message: ${err.message ?? err}`,
              'ERROR',
              workerName,
            );
          } finally {
            this.metrics?.decrementInFlight();
          }
        }
      } catch (err: any) {
        // Abort during an await surfaces as an error — exit quietly
        if (this.abort.signal.aborted) break;
        this.stats.errors++;
        log(
          `❌ Polling error for ${this.domain}: ${err.message ?? err}`,
          'ERROR',
          workerName,
        );
        // Back off on repeated errors
        await sleep(5000);
      }
    }
  }
}
// ---------------------------------------------------------------------------
/** Resolve after `ms` milliseconds (promise-wrapped setTimeout). */
function sleep(ms: number): Promise<void> {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}

View File

@@ -0,0 +1,361 @@
/**
* Email message processing worker
*
* Processes a single SQS message:
* 1. Unpack SNS/SES envelope
* 2. Download raw email from S3
* 3. Loop detection
* 4. Parse & sanitize headers
* 5. Bounce detection & header rewrite
* 6. Blocklist check
* 7. Process recipients (rules, SMTP delivery)
* 8. Mark result in S3 metadata
*/
import type { Message } from '@aws-sdk/client-sqs';
import type { S3Handler } from '../aws/s3.js';
import type { SQSHandler } from '../aws/sqs.js';
import type { SESHandler } from '../aws/ses.js';
import type { DynamoDBHandler } from '../aws/dynamodb.js';
import type { EmailDelivery } from '../smtp/delivery.js';
import type { MetricsCollector } from '../metrics.js';
import type { ParsedMail } from 'mailparser';
import { log } from '../logger.js';
import { BlocklistChecker } from '../email/blocklist.js';
import { BounceHandler } from '../email/bounce-handler.js';
import { parseEmail, isProcessedByWorker } from '../email/parser.js';
import { RulesProcessor } from '../email/rules-processor.js';
import { config } from '../config.js';
// ---------------------------------------------------------------------------
// Processor
// ---------------------------------------------------------------------------
/**
 * Processes a single SQS email message end to end:
 *   1. Unpack the SNS/SES envelope
 *   2. Download the raw email from S3
 *   3. Loop detection
 *   4. Parse & sanitize headers
 *   5. Bounce detection & header rewrite
 *   6. Blocklist check
 *   7. Process recipients (rules, SMTP delivery)
 *   8. Mark the result in S3 metadata
 */
export class MessageProcessor {
  private bounceHandler: BounceHandler;
  private rulesProcessor: RulesProcessor;
  private blocklist: BlocklistChecker;
  // Optional metrics sink; assigned externally after construction.
  public metrics: MetricsCollector | null = null;

  constructor(
    private s3: S3Handler,
    private sqs: SQSHandler,
    private ses: SESHandler,
    private dynamodb: DynamoDBHandler,
    private delivery: EmailDelivery,
  ) {
    this.bounceHandler = new BounceHandler(dynamodb);
    this.rulesProcessor = new RulesProcessor(dynamodb, ses);
    this.blocklist = new BlocklistChecker(dynamodb);
  }

  /**
   * Process one email message from queue.
   * Returns true → delete from queue.
   * Returns false → leave in queue for retry.
   */
  async processMessage(
    domain: string,
    message: Message,
    receiveCount: number,
  ): Promise<boolean> {
    const workerName = `worker-${domain}`;
    try {
      // 1. UNPACK (SNS → SES)
      const body = JSON.parse(message.Body ?? '{}');
      let sesMsg: any;
      if (body.Message && body.Type) {
        // SNS Notification wrapper
        const snsContent = body.Message;
        sesMsg = typeof snsContent === 'string' ? JSON.parse(snsContent) : snsContent;
      } else {
        sesMsg = body;
      }
      // 2. EXTRACT DATA
      const mail = sesMsg.mail ?? {};
      const receipt = sesMsg.receipt ?? {};
      const messageId: string | undefined = mail.messageId;
      // Ignore SES setup notifications
      if (messageId === 'AMAZON_SES_SETUP_NOTIFICATION') {
        log(' Received Amazon SES Setup Notification. Ignoring.', 'INFO', workerName);
        return true;
      }
      const fromAddr: string = mail.source ?? '';
      const recipients: string[] = receipt.recipients ?? [];
      // Malformed events are deleted (true) — retrying cannot fix them.
      if (!messageId) {
        log('❌ Error: No messageId in event payload', 'ERROR', workerName);
        return true;
      }
      // Domain validation
      if (recipients.length === 0) {
        log('⚠ Warning: No recipients in event', 'WARNING', workerName);
        return true;
      }
      // Each worker only serves its own domain; foreign mail is dropped.
      const recipientDomain = recipients[0].split('@')[1];
      if (recipientDomain.toLowerCase() !== domain.toLowerCase()) {
        log(
          `⚠ Security: Ignored message for ${recipientDomain} ` +
            `(I am worker for ${domain})`,
          'WARNING',
          workerName,
        );
        return true;
      }
      // Compact log
      const recipientsStr =
        recipients.length === 1
          ? recipients[0]
          : `${recipients.length} recipients`;
      log(
        `📧 Processing: ${messageId.slice(0, 20)}... -> ${recipientsStr}`,
        'INFO',
        workerName,
      );
      // 3. DOWNLOAD FROM S3
      const rawBytes = await this.s3.getEmail(domain, messageId, receiveCount);
      if (rawBytes === null) return false; // retry later
      // 4. LOOP DETECTION
      const tempParsed = await parseEmail(rawBytes);
      const skipRules = isProcessedByWorker(tempParsed);
      if (skipRules) {
        log('🔄 Loop prevention: Already processed by worker', 'INFO', workerName);
      }
      // 5. PARSING & BOUNCE LOGIC
      let finalRawBytes = rawBytes;
      let fromAddrFinal = fromAddr;
      let isBounce = false;
      let parsedFinal: ParsedMail | null = null; // declared here so the catch block can reset it
      try {
        const parsed = await parseEmail(rawBytes);
        const subject = parsed.subject ?? '(no subject)';
        // Bounce header rewriting
        const bounceResult = await this.bounceHandler.applyBounceLogic(
          parsed,
          rawBytes,
          subject,
          workerName,
        );
        isBounce = bounceResult.isBounce;
        finalRawBytes = bounceResult.rawBytes;
        if (bounceResult.modified) {
          log(' ✨ Bounce detected & headers rewritten via DynamoDB', 'INFO', workerName);
          fromAddrFinal = bounceResult.fromAddr;
          this.metrics?.incrementBounce(domain, 'rewritten');
        } else {
          fromAddrFinal = fromAddr;
        }
        // Add processing marker for non-processed emails
        if (!skipRules) {
          finalRawBytes = addProcessedHeader(finalRawBytes);
        }
        // Re-parse after modifications for rules processing
        parsedFinal = await parseEmail(finalRawBytes);
      } catch (err: any) {
        // Parsing failed: fall back to delivering the unmodified RAW mail.
        log(
          `⚠ Parsing/Logic Error: ${err.message ?? err}. Sending original RAW mail without rules.`,
          'WARNING',
          workerName,
        );
        log(`Full error: ${err.stack ?? err}`, 'ERROR', workerName);
        fromAddrFinal = fromAddr;
        isBounce = false;
        parsedFinal = null; // VERY IMPORTANT: no second parse attempt!
      }
      // 6. BLOCKLIST CHECK — consider both envelope sender and header From.
      const sendersToCheck: string[] = [];
      if (fromAddrFinal) sendersToCheck.push(fromAddrFinal);
      const headerFrom = parsedFinal?.from?.text;
      if (headerFrom && !sendersToCheck.includes(headerFrom)) {
        sendersToCheck.push(headerFrom);
      }
      const blockedByRecipient = await this.blocklist.batchCheckBlockedSenders(
        recipients,
        sendersToCheck, // pass the full candidate array
        workerName,
      );
      // 7. PROCESS RECIPIENTS
      log(`📤 Sending to ${recipients.length} recipient(s)...`, 'INFO', workerName);
      const successful: string[] = [];
      const failedPermanent: string[] = [];
      const failedTemporary: string[] = [];
      const blockedRecipients: string[] = [];
      for (const recipient of recipients) {
        // Blocked?
        if (blockedByRecipient[recipient]) {
          log(
            `🗑 Silently dropping message for ${recipient} (Sender blocked)`,
            'INFO',
            workerName,
          );
          blockedRecipients.push(recipient);
          this.metrics?.incrementBlocked(domain);
          continue;
        }
        // Process rules (OOO, Forwarding) — not for bounces or already forwarded
        if (!isBounce && !skipRules && parsedFinal !== null) {
          const metricsCallback = (action: 'autoreply' | 'forward', dom: string) => {
            if (action === 'autoreply') this.metrics?.incrementAutoreply(dom);
            else if (action === 'forward') this.metrics?.incrementForward(dom);
          };
          await this.rulesProcessor.processRulesForRecipient(
            recipient,
            parsedFinal,
            finalRawBytes,
            domain,
            workerName,
            metricsCallback,
          );
        }
        // SMTP delivery
        const [success, error, isPerm] = await this.delivery.sendToRecipient(
          fromAddrFinal,
          recipient,
          finalRawBytes,
          workerName,
        );
        if (success) {
          successful.push(recipient);
          this.metrics?.incrementProcessed(domain, 'success');
        } else if (isPerm) {
          failedPermanent.push(recipient);
          this.metrics?.incrementProcessed(domain, 'permanent_failure');
        } else {
          failedTemporary.push(recipient);
          this.metrics?.incrementProcessed(domain, 'temporary_failure');
        }
      }
      // 8. RESULT & CLEANUP — temporary failures keep the message in the queue.
      const totalHandled =
        successful.length + failedPermanent.length + blockedRecipients.length;
      if (totalHandled === recipients.length) {
        if (blockedRecipients.length === recipients.length) {
          // All blocked — in standby mode, never touch S3
          if (!config.standbyMode) {
            try {
              await this.s3.markAsBlocked(
                domain,
                messageId,
                blockedRecipients,
                fromAddrFinal,
                workerName,
              );
              await this.s3.deleteBlockedEmail(domain, messageId, workerName);
            } catch (err: any) {
              log(`⚠ Failed to handle blocked email: ${err.message ?? err}`, 'ERROR', workerName);
              return false;
            }
          }
        } else if (successful.length > 0) {
          if (!config.standbyMode) {
            await this.s3.markAsProcessed(
              domain,
              messageId,
              workerName,
              failedPermanent.length > 0 ? failedPermanent : undefined,
            );
          }
        } else if (failedPermanent.length > 0) {
          if (!config.standbyMode) {
            await this.s3.markAsAllInvalid(
              domain,
              messageId,
              failedPermanent,
              workerName,
            );
          }
        }
        // Summary
        const parts: string[] = [];
        if (successful.length) parts.push(`${successful.length} OK`);
        if (failedPermanent.length) parts.push(`${failedPermanent.length} invalid`);
        if (blockedRecipients.length) parts.push(`${blockedRecipients.length} blocked`);
        log(`✅ Completed (${parts.join(', ')})`, 'SUCCESS', workerName);
        return true;
      } else {
        // Temporary failures remain
        log(
          `🔄 Temp failure (${failedTemporary.length} failed), will retry`,
          'WARNING',
          workerName,
        );
        return false;
      }
    } catch (err: any) {
      log(`❌ CRITICAL WORKER ERROR: ${err.message ?? err}`, 'ERROR', workerName);
      log(err.stack ?? '', 'ERROR', workerName);
      return false;
    }
  }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
/**
* Add X-SES-Worker-Processed header to raw email bytes using Buffer manipulation.
* More robust and memory efficient than toString().
*/
/**
 * Add an X-SES-Worker-Processed header to raw email bytes via Buffer
 * manipulation (more robust and memory efficient than toString()).
 *
 * The header is inserted just before the blank line that terminates the
 * header section (CRLFCRLF, or bare LFLF for Unix-style mails). If no
 * separator exists (malformed mail), the header is prepended instead.
 *
 * Fix over the previous scan: the old loop stopped at `raw.length - 4`,
 * so an LF-LF separator within the final three bytes (e.g. a headers-only
 * mail ending in "\n\n") was never found and the header was wrongly
 * prepended. Buffer.indexOf searches the whole buffer.
 */
function addProcessedHeader(raw: Buffer): Buffer {
  // Find the earliest header/body separator of either style.
  const crlfIdx = raw.indexOf('\r\n\r\n');
  const lfIdx = raw.indexOf('\n\n');
  let headerEndIndex: number;
  if (crlfIdx !== -1 && lfIdx !== -1) {
    headerEndIndex = Math.min(crlfIdx, lfIdx);
  } else {
    headerEndIndex = crlfIdx !== -1 ? crlfIdx : lfIdx;
  }
  // No separator found (broken mail?) — prepend the header instead.
  if (headerEndIndex === -1) {
    const headerLine = Buffer.from('X-SES-Worker-Processed: delivered\r\n', 'utf-8');
    return Buffer.concat([headerLine, raw]);
  }
  // Insert the header immediately BEFORE the blank line.
  const before = raw.subarray(0, headerEndIndex);
  const after = raw.subarray(headerEndIndex);
  const newHeader = Buffer.from('\r\nX-SES-Worker-Processed: delivered', 'utf-8');
  return Buffer.concat([before, newHeader, after]);
}

View File

@@ -0,0 +1,134 @@
/**
* Unified multi-domain worker coordinator
*
* Manages the lifecycle of all DomainPoller instances:
* - Resolves SQS queue URLs for each domain
* - Creates pollers for valid domains
* - Provides aggregate stats
* - Graceful shutdown
*/
import { DynamoDBHandler } from '../aws/dynamodb';
import { S3Handler} from '../aws/s3.js';
import { SQSHandler} from '../aws/sqs.js'
import { SESHandler } from '../aws/ses';
import { EmailDelivery } from '../smtp/delivery.js';
import { MessageProcessor } from './message-processor.js';
import { DomainPoller, type DomainPollerStats } from './domain-poller.js';
import type { MetricsCollector } from '../metrics.js';
import { log } from '../logger.js';
/**
 * Coordinates all DomainPoller instances for the configured domains:
 * resolves queue URLs, starts/stops pollers, aggregates their stats and
 * prints a periodic status report.
 */
export class UnifiedWorker {
  private pollers: DomainPoller[] = [];
  private processor: MessageProcessor;
  private sqs: SQSHandler;
  // Handle of the periodic status report; cleared in stop().
  private statusInterval: NodeJS.Timeout | null = null;

  constructor(
    private domains: string[],
    private metrics: MetricsCollector | null,
  ) {
    // One shared set of AWS/SMTP handlers for all domain pollers.
    const s3 = new S3Handler();
    this.sqs = new SQSHandler();
    const ses = new SESHandler();
    const dynamodb = new DynamoDBHandler();
    const delivery = new EmailDelivery();
    this.processor = new MessageProcessor(s3, this.sqs, ses, dynamodb, delivery);
    this.processor.metrics = metrics;
    // Best-effort table check; errors are deliberately swallowed so a
    // transient DynamoDB problem cannot prevent startup.
    dynamodb.verifyTables().catch(() => {});
  }

  /**
   * Resolve each domain's SQS queue, create and start a poller per domain,
   * then begin the periodic status report. Exits the process when no
   * domain has a queue.
   */
  async start(): Promise<void> {
    log(`🚀 Starting unified worker for ${this.domains.length} domain(s)...`);
    const resolvedPollers: DomainPoller[] = [];
    for (const domain of this.domains) {
      const queueUrl = await this.sqs.getQueueUrl(domain);
      if (!queueUrl) {
        log(`⚠ Skipping ${domain}: No SQS queue found`, 'WARNING');
        continue;
      }
      const poller = new DomainPoller(
        domain,
        queueUrl,
        this.sqs,
        this.processor,
        this.metrics,
      );
      resolvedPollers.push(poller);
    }
    if (resolvedPollers.length === 0) {
      log('❌ No valid domains with SQS queues found. Exiting.', 'ERROR');
      process.exit(1);
    }
    this.pollers = resolvedPollers;
    for (const poller of this.pollers) {
      poller.start();
    }
    log(
      `✅ All ${this.pollers.length} domain poller(s) running: ` +
        this.pollers.map((p) => p.stats.domain).join(', '),
      'SUCCESS',
    );
    // Start the 5-minute status report
    this.statusInterval = setInterval(() => {
      this.printStatus();
    }, 5 * 60 * 1000);
  }

  /** Stop the status report and all pollers; resolves once all have drained. */
  async stop(): Promise<void> {
    log('🛑 Stopping all domain pollers...');
    if (this.statusInterval) clearInterval(this.statusInterval); // stop periodic report
    await Promise.all(this.pollers.map((p) => p.stop()));
    log('✅ All pollers stopped.');
  }

  /** Aggregate per-domain stats into totals (stats objects are copied). */
  getStats(): {
    totalProcessed: number;
    totalErrors: number;
    domains: DomainPollerStats[];
  } {
    let totalProcessed = 0;
    let totalErrors = 0;
    const domains: DomainPollerStats[] = [];
    for (const p of this.pollers) {
      totalProcessed += p.stats.processed;
      totalErrors += p.stats.errors;
      domains.push({ ...p.stats });
    }
    return { totalProcessed, totalErrors, domains };
  }

  /** Log a one-line status summary for all pollers. */
  private printStatus(): void {
    const stats = this.getStats();
    // Count active pollers
    const activePollers = this.pollers.filter((p) => p.stats.running).length;
    const totalPollers = this.pollers.length;
    // Format per-domain stats (e.g. "hotshpotshga:1"), name truncated to 12 chars
    const domainStats = stats.domains
      .map((d) => {
        const shortName = d.domain.split('.')[0].substring(0, 12);
        return `${shortName}:${d.processed}`;
      })
      .join(' | ');
    log(
      `📊 Status: ${activePollers}/${totalPollers} active, total:${stats.totalProcessed} | ${domainStats}`,
      'INFO',
      'unified-worker'
    );
  }
}

View File

@@ -0,0 +1,22 @@
{
"compilerOptions": {
"target": "ES2022",
"module": "Node16",
"moduleResolution": "Node16",
"lib": ["ES2022"],
"outDir": "./dist",
"rootDir": "./src",
"strict": true,
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
"resolveJsonModule": true,
"declaration": true,
"declarationMap": true,
"sourceMap": true,
"noUnusedLocals": false,
"noUnusedParameters": false
},
"include": ["src/**/*"],
"exclude": ["node_modules", "dist"]
}

View File

@@ -0,0 +1,97 @@
import json
import boto3
import os
from datetime import datetime
# AWS Clients
s3 = boto3.client('s3')
sqs = boto3.client('sqs')
dynamodb = boto3.resource('dynamodb')
# DynamoDB Table
OUTBOUND_TABLE = os.environ.get('OUTBOUND_TABLE', 'ses-outbound-messages')
table = dynamodb.Table(OUTBOUND_TABLE)
def lambda_handler(event, context):
    """Dispatch incoming SES notification records.

    Bounce events are persisted via ``handle_bounce``; Send events are
    ignored (no longer needed); any other event type is logged and skipped.
    Always returns ``{'statusCode': 200}``.
    """
    print(f"Received event: {json.dumps(event)}")
    for record in event.get('Records', []):
        # Unwrap the SNS envelope when present.
        if 'Sns' in record:
            payload = json.loads(record['Sns']['Message'])
        else:
            payload = record
        kind = payload.get('eventType')
        if kind == 'Bounce':
            handle_bounce(payload)
        elif kind == 'Send':
            print("Ignoring Send event (no longer needed)")
        else:
            print(f"Unknown event type: {kind}")
    return {'statusCode': 200}
def handle_bounce(message):
    """Persist bounce details from an SES bounce event into DynamoDB.

    The bounce ``feedbackId`` (the message ID of the bounce mail itself)
    is used as the primary key; the original mail's messageId/source are
    stored alongside. Errors are logged but never raised so a malformed
    event cannot crash the Lambda (and trigger endless SNS redelivery).
    """
    from datetime import timezone  # local: module imports only `datetime`

    try:
        bounce = message.get('bounce', {})
        mail = message.get('mail', {})
        # Extract the relevant fields
        feedback_id = bounce.get('feedbackId')  # This IS the message ID!
        bounce_type = bounce.get('bounceType', 'Unknown')
        bounce_subtype = bounce.get('bounceSubType', 'Unknown')
        bounced_recipients = [r['emailAddress'] for r in bounce.get('bouncedRecipients', [])]
        timestamp = bounce.get('timestamp')
        # Original message data
        original_source = mail.get('source')
        original_message_id = mail.get('messageId')
        if not feedback_id:
            print("Warning: No feedbackId in bounce event")
            return
        print(f"Processing bounce: feedbackId={feedback_id}, type={bounce_type}/{bounce_subtype}")
        print(f"Bounced recipients: {bounced_recipients}")
        # Store in DynamoDB (feedback_id is the message ID of the bounce mail!)
        table.put_item(
            Item={
                'MessageId': feedback_id,  # Primary key
                'original_message_id': original_message_id,  # SES MessageId of the original mail
                'original_source': original_source,
                'bounceType': bounce_type,
                'bounceSubType': bounce_subtype,
                'bouncedRecipients': bounced_recipients,  # list of email addresses
                # Timezone-aware fallback; datetime.utcnow() is deprecated and naive.
                'timestamp': timestamp or datetime.now(timezone.utc).isoformat(),
                'event_type': 'bounce'
            }
        )
        print(f"✓ Stored bounce info for feedbackId {feedback_id}")
    except Exception as e:
        print(f"Error handling bounce: {e}")
        import traceback
        traceback.print_exc()
def handle_send(message):
    """DEPRECATED — retained only for backward compatibility.

    Send events are now ignored entirely; this is a no-op.
    """
    return None

View File

@@ -1,419 +0,0 @@
import os
import boto3
import json
import time
from email.parser import BytesParser
from email.policy import SMTP as SMTPPolicy
s3 = boto3.client('s3')
sqs = boto3.client('sqs', region_name='us-east-2')
# AWS Region
AWS_REGION = 'us-east-2'
# Dynamo Table
dynamo = boto3.resource('dynamodb', region_name=AWS_REGION)
msg_table = dynamo.Table('ses-outbound-messages')
# Metadata Keys
PROCESSED_KEY = 'processed'
PROCESSED_VALUE = 'true'
def is_ses_bounce_or_autoreply(parsed):
    """Detect SES bounces and auto-replies.

    Looks for the SES MAILER-DAEMON sender or an Auto-Submitted header
    marking the mail as auto-replied/auto-generated.
    """
    sender = (parsed.get('From') or '').lower()
    auto_submitted = (parsed.get('Auto-Submitted') or '').lower()
    if 'mailer-daemon@' in sender and 'amazonses.com' in sender:
        return True
    return any(marker in auto_submitted for marker in ('auto-replied', 'auto-generated'))
def extract_original_message_id(parsed):
    """Return the original Message-ID from In-Reply-To or References.

    Angle brackets are stripped, and any ``@domain`` suffix (e.g.
    ``@amazonses.com``) is removed. Returns ``None`` when neither
    header yields an ID. For References the FIRST ID is taken.
    """
    def _clean(value):
        if value.startswith('<') and '>' in value:
            value = value[1:value.find('>')]
        # IMPORTANT: drop the @domain suffix (e.g. @amazonses.com) if present
        if '@' in value:
            value = value.split('@')[0]
        return value

    in_reply_to = (parsed.get('In-Reply-To') or '').strip()
    if in_reply_to:
        return _clean(in_reply_to)
    # Fallback: References header (take the FIRST ID)
    references = (parsed.get('References') or '').strip()
    if references:
        return _clean(references.split()[0])
    return None
def domain_to_bucket(domain: str) -> str:
    """Map a mail domain to its S3 bucket name (lowercase, dots → dashes)."""
    return f"{domain.lower().replace('.', '-')}-emails"
def domain_to_queue_name(domain: str) -> str:
    """Map a mail domain to its SQS queue name (lowercase, dots → dashes)."""
    return f"{domain.lower().replace('.', '-')}-queue"
def get_queue_url_for_domain(domain: str) -> str:
    """Resolve the SQS queue URL for *domain*.

    Raises:
        Exception: when the queue does not exist or the lookup fails.
        The original boto3 error is chained via ``from`` so the real
        cause survives in the traceback (the old code discarded it).
    """
    queue_name = domain_to_queue_name(domain)
    try:
        response = sqs.get_queue_url(QueueName=queue_name)
        queue_url = response['QueueUrl']
        print(f"✓ Found queue: {queue_name}")
        return queue_url
    except sqs.exceptions.QueueDoesNotExist as e:
        raise Exception(
            f"Queue does not exist: {queue_name} "
            f"(for domain: {domain.lower()})"
        ) from e
    except Exception as e:
        raise Exception(f"Error getting queue URL for {domain.lower()}: {e}") from e
def is_already_processed(bucket: str, key: str) -> bool:
    """Return True when the S3 object is marked processed (or is gone)."""
    try:
        metadata = s3.head_object(Bucket=bucket, Key=key).get('Metadata', {}) or {}
    except s3.exceptions.NoSuchKey:
        # A missing object counts as handled — there is nothing left to do.
        print(f"⚠ Object {key} not found in {bucket}")
        return True
    except Exception as e:
        # On lookup errors assume NOT processed so the mail is not dropped.
        print(f"⚠ Error checking processed status: {e}")
        return False
    if metadata.get(PROCESSED_KEY) != PROCESSED_VALUE:
        return False
    print(f"✓ Already processed at {metadata.get('processed_at', 'unknown')}")
    return True
def set_processing_lock(bucket: str, key: str) -> bool:
    """Set a processing lock in S3 metadata to prevent duplicate processing.

    Returns:
        True when the lock was set (or locking itself failed — see below),
        False when another worker holds a lock younger than 5 minutes.

    NOTE(review): head_object + copy_object is not atomic, so two workers
    racing within the same instant can both acquire the lock.
    """
    try:
        head = s3.head_object(Bucket=bucket, Key=key)
        metadata = head.get('Metadata', {}) or {}
        # Check for an existing lock
        processing_started = metadata.get('processing_started')
        if processing_started:
            lock_age = time.time() - float(processing_started)
            if lock_age < 300:  # 5-minute lock
                print(f"⚠ Processing lock active (age: {lock_age:.0f}s)")
                return False
            else:
                # Lock older than 5 min is treated as a crashed worker's leftover.
                print(f"⚠ Stale lock detected ({lock_age:.0f}s old), overriding")
        # Set the new lock (S3 metadata can only be replaced via copy_object)
        new_meta = metadata.copy()
        new_meta['processing_started'] = str(int(time.time()))
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=new_meta,
            MetadataDirective='REPLACE'
        )
        print(f"✓ Processing lock set")
        return True
    except Exception as e:
        # On errors we deliberately proceed (return True): availability is
        # preferred over strict single-processing here.
        print(f"⚠ Error setting processing lock: {e}")
        return True
def mark_as_queued(bucket: str, key: str, queue_name: str):
    """Stamp the S3 object's metadata as queued (best effort, never raises)."""
    try:
        head = s3.head_object(Bucket=bucket, Key=key)
        new_meta = head.get('Metadata', {}) or {}
        new_meta.update({
            'queued_at': str(int(time.time())),
            'queued_to': queue_name,
            'status': 'queued',
        })
        # Release the processing lock now that the job is safely enqueued.
        new_meta.pop('processing_started', None)
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=new_meta,
            MetadataDirective='REPLACE',
        )
        print(f"✓ Marked as queued to {queue_name}")
    except Exception as e:
        print(f"⚠ Failed to mark as queued: {e}")
def send_to_queue(queue_url: str, bucket: str, key: str,
                  from_addr: str, recipients: list, domain: str,
                  subject: str, message_id: str):
    """Send an email job to the domain-specific SQS queue.

    ONE message carrying ALL recipients for this domain. Key fields are
    duplicated as SQS message attributes for filtering/observability.

    Returns:
        The SQS MessageId of the enqueued message.

    Raises:
        Exception: re-raised after logging when SQS rejects the message.
    """
    queue_name = queue_url.split('/')[-1]
    message = {
        'bucket': bucket,
        'key': key,
        'from': from_addr,
        'recipients': recipients,
        'domain': domain,
        'subject': subject,
        'message_id': message_id,
        'timestamp': int(time.time())
    }
    try:
        response = sqs.send_message(
            QueueUrl=queue_url,
            MessageBody=json.dumps(message, ensure_ascii=False),
            MessageAttributes={
                'domain': {
                    'StringValue': domain,
                    'DataType': 'String'
                },
                'bucket': {
                    'StringValue': bucket,
                    'DataType': 'String'
                },
                'recipient_count': {
                    'StringValue': str(len(recipients)),
                    'DataType': 'Number'
                },
                'message_id': {
                    'StringValue': message_id,
                    'DataType': 'String'
                }
            }
        )
        sqs_message_id = response['MessageId']
        print(f"✓ Queued to {queue_name}: SQS MessageId={sqs_message_id}")
        print(f" Recipients: {len(recipients)} - {', '.join(recipients)}")
        # Best-effort metadata stamp; mark_as_queued swallows its own errors.
        mark_as_queued(bucket, key, queue_name)
        return sqs_message_id
    except Exception as e:
        print(f"✗ Failed to queue message: {e}")
        raise
def lambda_handler(event, context):
    """Lambda handler for SES inbound events.

    Validates the event, locates the stored email in S3, rewrites
    auto-responses/bounces using the original send data from DynamoDB,
    and enqueues a single delivery job in the domain-specific SQS queue.
    """
    print(f"{'='*70}")
    print(f"Lambda invoked: {context.aws_request_id}")
    print(f"Region: {AWS_REGION}")
    print(f"{'='*70}")
    # Parse the SES event
    try:
        record = event['Records'][0]
        ses = record['ses']
    except (KeyError, IndexError) as e:
        print(f"✗ Invalid event structure: {e}")
        return {'statusCode': 400, 'body': json.dumps({'error': 'Invalid SES event'})}
    mail = ses['mail']
    receipt = ses['receipt']
    message_id = mail['messageId']
    source = mail['source']
    timestamp = mail.get('timestamp', '')
    recipients = receipt.get('recipients', [])
    print(f"\n🔑 S3 Key: {message_id}")
    print(f"👥 Recipients ({len(recipients)}): {', '.join(recipients)}")
    if not recipients:
        print(f"✗ No recipients found in event")
        return {'statusCode': 400, 'body': json.dumps({'error': 'No recipients'})}
    # Extract the domain (all recipients in one SES event share a domain here)
    domain = recipients[0].split('@')[1].lower()
    bucket = domain_to_bucket(domain)
    print(f"\n📧 Email Event:")
    print(f" MessageId: {message_id}")
    print(f" From: {source}")
    print(f" Domain: {domain}")
    print(f" Bucket: {bucket}")
    # Resolve the queue
    try:
        queue_url = get_queue_url_for_domain(domain)
        queue_name = queue_url.split('/')[-1]
    except Exception as e:
        print(f"\n✗ Queue ERROR: {e}")
        return {'statusCode': 500, 'body': json.dumps({'error': str(e)})}
    # Find the S3 object (SES stores it under the messageId prefix)
    try:
        response = s3.list_objects_v2(Bucket=bucket, Prefix=message_id, MaxKeys=1)
        if 'Contents' not in response:
            raise Exception(f"No S3 object found for {message_id}")
        key = response['Contents'][0]['Key']
        size = response['Contents'][0]['Size']
        print(f" Found: s3://{bucket}/{key} ({size/1024:.1f} KB)")
    except Exception as e:
        print(f"\n✗ S3 ERROR: {e}")
        return {'statusCode': 404, 'body': json.dumps({'error': str(e)})}
    # Duplicate check
    if is_already_processed(bucket, key):
        return {'statusCode': 200, 'body': json.dumps({'status': 'already_processed'})}
    # Processing lock
    if not set_processing_lock(bucket, key):
        return {'statusCode': 200, 'body': json.dumps({'status': 'already_processing'})}
    # Load the email and rewrite it if necessary
    subject = '(unknown)'
    modified = False
    try:
        print(f"\n📖 Reading email...")
        obj = s3.get_object(Bucket=bucket, Key=key)
        raw_bytes = obj['Body'].read()
        metadata = obj.get('Metadata', {}) or {}
        parsed = BytesParser(policy=SMTPPolicy).parsebytes(raw_bytes)
        subject = parsed.get('Subject', '(no subject)')
        print(f" Subject: {subject}")
        # 🔁 Auto-response / bounce detection
        if is_ses_bounce_or_autoreply(parsed):
            print(f" 🔍 Detected auto-response/bounce from SES")
            # Extract the original Message-ID
            original_msg_id = extract_original_message_id(parsed)
            if original_msg_id:
                print(f" 📋 Original MessageId: {original_msg_id}")
                try:
                    # Fetch the original send from DynamoDB
                    result = msg_table.get_item(Key={'MessageId': original_msg_id})
                    original_send = result.get('Item')
                    if original_send:
                        orig_source = original_send.get('source', '')
                        orig_destinations = original_send.get('destinations', [])
                        print(f" ✓ Found original send:")
                        print(f" Original From: {orig_source}")
                        print(f" Original To: {orig_destinations}")
                        # IMPORTANT: the first destination was the actual recipient
                        original_recipient = orig_destinations[0] if orig_destinations else ''
                        if original_recipient:
                            # Rewrite the sender to the original recipient
                            original_from = parsed.get('From', '')
                            parsed['X-Original-SES-From'] = original_from
                            parsed['X-Original-MessageId'] = original_msg_id
                            # Set From to the original recipient
                            parsed.replace_header('From', original_recipient)
                            # Keep Reply-To if already set, otherwise add it
                            if not parsed.get('Reply-To'):
                                parsed['Reply-To'] = original_recipient
                            # Adjust the subject when needed
                            if 'delivery status notification' in subject.lower():
                                parsed.replace_header('Subject', f"Delivery Status: {orig_destinations[0]}")
                            raw_bytes = parsed.as_bytes()
                            modified = True
                            print(f" ✅ Rewritten: From={original_recipient}")
                    else:
                        print(f" ⚠ No DynamoDB record found for {original_msg_id}")
                except Exception as e:
                    print(f" ⚠ DynamoDB lookup failed: {e}")
            else:
                print(f" ⚠ Could not extract original Message-ID")
        # Update S3 when the mail was modified
        if modified:
            s3.put_object(Bucket=bucket, Key=key, Body=raw_bytes, Metadata=metadata)
            print(f" 💾 Updated S3 object with rewritten email")
    except Exception as e:
        # Parsing problems are non-fatal: the mail is queued unmodified below.
        print(f" ⚠ Email parsing error: {e}")
    # Enqueue the job
    try:
        sqs_message_id = send_to_queue(
            queue_url=queue_url,
            bucket=bucket,
            key=key,
            from_addr=source,
            recipients=recipients,
            domain=domain,
            subject=subject,
            message_id=message_id
        )
        print(f"\n✅ SUCCESS - Queued for delivery\n")
        return {
            'statusCode': 200,
            'body': json.dumps({
                'status': 'queued',
                'message_id': message_id,
                'sqs_message_id': sqs_message_id,
                'modified': modified
            })
        }
    except Exception as e:
        print(f"\n✗ QUEUE FAILED: {e}")
        return {'statusCode': 500, 'body': json.dumps({'error': str(e)})}

View File

@@ -1,74 +0,0 @@
import boto3
import os
dynamo = boto3.resource('dynamodb', region_name='us-east-2')
table = dynamo.Table('ses-outbound-messages')
def lambda_handler(event, context):
    """Track SES outbound message events in DynamoDB.

    Send events create the base record keyed by MessageId; Bounce and
    Complaint events append the affected addresses to that record as
    DynamoDB string sets. Returns None in all cases.
    """
    print(f"Received event: {event}")
    detail = event.get('detail', {})
    mail = detail.get('mail', {})
    msg_id = mail.get('messageId')
    if not msg_id:
        print("No MessageId in event")
        return
    # Extract the event type from the event
    event_type = detail.get('eventType')
    if event_type == 'Send':
        source = mail.get('source')
        destinations = mail.get('destination', [])
        table.put_item(
            Item={
                'MessageId': msg_id,
                'source': source,
                'destinations': destinations,
                'timestamp': mail.get('timestamp')
            }
        )
        print(f"Stored SEND event for {msg_id}")
        return
    if event_type == 'Bounce':
        bounce = detail.get('bounce', {})
        bounced = [
            r.get('emailAddress')
            for r in bounce.get('bouncedRecipients', [])
            if r.get('emailAddress')
        ]
        if not bounced:
            print("No bouncedRecipients in bounce event")
            return
        # ADD with a string set appends without clobbering earlier bounces
        table.update_item(
            Key={'MessageId': msg_id},
            UpdateExpression="ADD bouncedRecipients :b",
            ExpressionAttributeValues={
                ':b': set(bounced)
            }
        )
        print(f"Updated {msg_id} with bouncedRecipients={bounced}")
        return
    if event_type == 'Complaint':
        complaint = detail.get('complaint', {})
        complained = [
            r.get('emailAddress')
            for r in complaint.get('complainedRecipients', [])
            if r.get('emailAddress')
        ]
        if not complained:
            return
        table.update_item(
            Key={'MessageId': msg_id},
            UpdateExpression="ADD complaintRecipients :c",
            ExpressionAttributeValues={
                ':c': set(complained)
            }
        )
        print(f"Updated {msg_id} with complaintRecipients={complained}")
        return

View File

@@ -1,23 +0,0 @@
#!/bin/bash
# manage-worker.sh
#
# Run a docker-compose action for one mail domain's worker stack.
# Usage: manage-worker.sh <domain> [compose action...]
# The action defaults to "up -d". All words after the domain are
# forwarded verbatim, so multi-word actions like "logs -f" work.
DOMAIN=$1
if [ -z "$DOMAIN" ]; then
  echo "Usage: $0 <domain> [action]"
  echo "Example: $0 andreasknuth.de"
  echo " $0 andreasknuth.de down"
  echo " $0 andreasknuth.de logs -f"
  exit 1
fi
shift
# Default action: up -d
if [ $# -eq 0 ]; then
  set -- up -d
fi
# Compose project names may not contain dots
PROJECT_NAME="${DOMAIN//./-}"
ENV_FILE=".env.${DOMAIN}"
if [ ! -f "$ENV_FILE" ]; then
  echo "Error: $ENV_FILE not found!"
  exit 1
fi
# "$@" forwards every action word intact; the previous ACTION=$2 approach
# silently dropped everything after the second argument (e.g. the -f of
# "logs -f"), contradicting the usage text above.
docker compose -p "$PROJECT_NAME" --env-file "$ENV_FILE" "$@"

View File

@@ -1,106 +0,0 @@
#!/usr/bin/env python3
# monitor-queues.py
"""
Überwacht alle Email-Queues und zeigt Statistiken
"""
import boto3
import json
from datetime import datetime
sqs = boto3.client('sqs', region_name='eu-central-1')
DOMAINS = ['andreasknuth.de', 'bizmatch.net']
def get_queue_stats(domain):
    """Collect main-queue and DLQ statistics for one domain.

    Returns a dict with 'queue' and 'dlq' sections, or
    {'domain': ..., 'error': ...} when any lookup fails.
    """
    queue_name = domain.replace('.', '-') + '-queue'
    dlq_name = queue_name + '-dlq'
    try:
        # Main queue
        main_url = sqs.get_queue_url(QueueName=queue_name)['QueueUrl']
        main_attrs = sqs.get_queue_attributes(
            QueueUrl=main_url,
            AttributeNames=['All'],
        )['Attributes']
        # Dead-letter queue
        dlq_url = sqs.get_queue_url(QueueName=dlq_name)['QueueUrl']
        dlq_attrs = sqs.get_queue_attributes(
            QueueUrl=dlq_url,
            AttributeNames=['ApproximateNumberOfMessages'],
        )['Attributes']
        return {
            'domain': domain,
            'queue': {
                'available': int(main_attrs.get('ApproximateNumberOfMessages', 0)),
                'in_flight': int(main_attrs.get('ApproximateNumberOfMessagesNotVisible', 0)),
                'oldest_age': int(main_attrs.get('ApproximateAgeOfOldestMessage', 0)),
            },
            'dlq': {
                'count': int(dlq_attrs.get('ApproximateNumberOfMessages', 0)),
            },
        }
    except Exception as e:
        return {'domain': domain, 'error': str(e)}
def main():
    """Print a formatted status report for all monitored email queues."""
    divider = '=' * 70
    print(f"\n{divider}")
    print(f"Email Queue Monitoring - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"{divider}\n")
    total_available = total_in_flight = total_dlq = 0
    for domain in DOMAINS:
        stats = get_queue_stats(domain)
        if 'error' in stats:
            print(f"{domain}: {stats['error']}")
            continue
        queue, dlq = stats['queue'], stats['dlq']
        total_available += queue['available']
        total_in_flight += queue['in_flight']
        total_dlq += dlq['count']
        # Flag domains whose DLQ is non-empty.
        status = "⚠️" if dlq['count'] else ""
        print(f"{status} {domain}")
        print(f" Available: {queue['available']:>5} messages")
        print(f" In Flight: {queue['in_flight']:>5} messages")
        print(f" Oldest Age: {queue['oldest_age']:>5}s")
        print(f" DLQ: {dlq['count']:>5} messages")
        if dlq['count'] > 0:
            print(f" ⚠️ WARNING: {dlq['count']} failed message(s) in DLQ!")
        print()
    print(f"{divider}")
    print("TOTALS:")
    print(f" Available: {total_available} messages")
    print(f" In Flight: {total_in_flight} messages")
    print(f" Failed: {total_dlq} messages")
    print(f"{divider}\n")
    if total_dlq > 0:
        print(f"⚠️ Action required: {total_dlq} message(s) in Dead Letter Queues!")
        print(f" Run: python check-dlq.py to investigate\n")
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,36 @@
# Monitoring stack: Prometheus scrapes the email worker, the blackbox
# exporter probes the mail-server ports, Grafana visualizes both.
services:
  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    restart: unless-stopped
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - prometheus_data:/prometheus
    ports:
      - "9091:9090"
    extra_hosts:
      - "host.docker.internal:host-gateway" # so Prometheus can reach the worker running on the host
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    restart: unless-stopped
    volumes:
      - grafana_data:/var/lib/grafana
    ports:
      - "4000:3000"
    depends_on:
      - prometheus
  blackbox_exporter:
    image: prom/blackbox-exporter:latest
    container_name: blackbox_exporter
    restart: unless-stopped
    ports:
      - "9115:9115"
    extra_hosts: # host-gateway mapping, same as for Prometheus
      - "host.docker.internal:host-gateway" # so probes can reach services on the host
volumes:
  prometheus_data:
  grafana_data:

25
monitoring/prometheus.yml Normal file
View File

@@ -0,0 +1,25 @@
global:
  scrape_interval: 15s # default scrape cadence for all jobs
scrape_configs:
  # 1. Scrape the Node.js email worker's metrics endpoint
  - job_name: 'email-worker'
    static_configs:
      - targets: ['host.docker.internal:9000']
  # 2. Port monitoring of the mail server (IMAPS 993 & POP3S 995)
  - job_name: 'mailserver_ports'
    metrics_path: /probe
    params:
      module: [tcp_connect] # only checks that the TCP port is open
    static_configs:
      - targets:
          - host.docker.internal:993 # IMAPS
          - host.docker.internal:995 # POP3S
    relabel_configs:
      # Standard blackbox pattern: the listed target becomes the probe
      # parameter and the instance label ...
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      # ... while Prometheus actually scrapes the exporter itself
      - target_label: __address__
        replacement: blackbox_exporter:9115 # the exporter performs the check

View File

@@ -0,0 +1,76 @@
#!/usr/bin/env python3
import boto3
from datetime import datetime
# Region pinned to us-east-2 (all queues live there)
sqs = boto3.client('sqs', region_name='us-east-2')
def get_all_queues():
    """Auto-discover every SQS queue whose name ends in '-queue' (DLQs excluded)."""
    discovered = []
    for page in sqs.get_paginator('list_queues').paginate():
        for queue_url in page.get('QueueUrls', []):
            # The queue name is the last path segment of the URL.
            queue_name = queue_url.rsplit('/', 1)[-1]
            if queue_name.endswith('-queue'):
                discovered.append((queue_name, queue_url))
    return discovered
def main():
    """Discover all '*-queue' SQS queues in us-east-2 and print backlog,
    in-flight and DLQ counts for each, with a per-queue status icon.

    Fix: the DLQ lookup previously used a bare ``except:``, which also
    swallows KeyboardInterrupt/SystemExit; it now catches Exception only,
    so Ctrl-C still aborts the scan.
    """
    print(f"\n{'='*70}")
    print(f"Email Queue Monitoring (us-east-2) - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"{'='*70}\n")
    queues = get_all_queues()
    if not queues:
        print("No queues found matching '*-queue'. Check your region or permissions.")
        return
    # Sort by queue name for stable, readable output
    queues.sort(key=lambda x: x[0])
    for name, url in queues:
        dlq_name = name + '-dlq'
        try:
            # Main queue stats - request only the attributes we actually use
            attrs = sqs.get_queue_attributes(
                QueueUrl=url,
                AttributeNames=['ApproximateNumberOfMessages', 'ApproximateNumberOfMessagesNotVisible']
            )['Attributes']
            # DLQ stats (the matching DLQ may not exist for every queue)
            try:
                dlq_url = sqs.get_queue_url(QueueName=dlq_name)['QueueUrl']
                dlq_attrs = sqs.get_queue_attributes(QueueUrl=dlq_url, AttributeNames=['ApproximateNumberOfMessages'])['Attributes']
                dlq_count = int(dlq_attrs.get('ApproximateNumberOfMessages', 0))
            except Exception:
                # Narrowed from a bare except so signals are not swallowed
                dlq_count = -1  # no DLQ found, or no access
            available = int(attrs.get('ApproximateNumberOfMessages', 0))
            flight = int(attrs.get('ApproximateNumberOfMessagesNotVisible', 0))
            # Status icon: a main-queue backlog outranks a DLQ warning
            status = ""
            if dlq_count > 0:
                status = "⚠️ "  # DLQ not empty
            if available > 50:
                status = "🔥"  # backlog building up in the main queue
            print(f"{status} Queue: {name}")
            print(f" Pending: {available:<5} (Waiting for worker)")
            print(f" Processing: {flight:<5} (Currently in worker)")
            if dlq_count >= 0:
                if dlq_count > 0:
                    print(f" DLQ Errors: \033[91m{dlq_count:<5}\033[0m (In {dlq_name})")  # highlighted in red
                else:
                    print(f" DLQ Errors: {dlq_count:<5} (In {dlq_name})")
            else:
                print(f" DLQ: Not found / No access")
            print("-" * 30)
        except Exception as e:
            print(f"❌ Error checking {name}: {e}")

View File

@@ -0,0 +1,65 @@
#!/usr/bin/env python3
import sys
import boto3
# IMPORTANT: region pinned to us-east-2
sqs = boto3.client('sqs', region_name='us-east-2')
def requeue_dlq(domain, max_messages=10):
    """
    Move up to ``max_messages`` messages from the domain's DLQ back into
    its main queue (queue names derive from the domain, dots -> dashes).

    Fix: the receive loop used to request a fixed batch of 10 and re-queue
    the whole batch, so it could overshoot ``max_messages`` by up to 9;
    the batch size is now capped at the remaining budget.
    """
    # Normalize the domain into queue names (dots to dashes)
    queue_name = domain.replace('.', '-') + '-queue'
    dlq_name = queue_name + '-dlq'
    print(f"Connecting to AWS in us-east-2 for domain: {domain}")
    try:
        # Resolve both queue URLs up front
        q_url = sqs.get_queue_url(QueueName=queue_name)['QueueUrl']
        dlq_url = sqs.get_queue_url(QueueName=dlq_name)['QueueUrl']
    except Exception as e:
        print(f"❌ Error finding queues: {e}")
        return
    # Check how much work there is before prompting the operator
    attrs = sqs.get_queue_attributes(QueueUrl=dlq_url, AttributeNames=['ApproximateNumberOfMessages'])
    count = int(attrs['Attributes']['ApproximateNumberOfMessages'])
    if count == 0:
        print(f"✅ No messages in DLQ ({dlq_name}).")
        return
    print(f"⚠️ Found {count} messages in {dlq_name}")
    print(f" Target: {queue_name}")
    if input(" Move messages now? (y/n): ").lower() != 'y':
        print("Cancelled.")
        return
    moved = 0
    while moved < max_messages:
        # Never request more than the remaining budget; AWS caps a single
        # receive at 10 messages anyway.
        resp = sqs.receive_message(
            QueueUrl=dlq_url,
            MaxNumberOfMessages=min(10, max_messages - moved),
            WaitTimeSeconds=1
        )
        msgs = resp.get('Messages', [])
        if not msgs:
            break
        for msg in msgs:
            # 1. Re-send the body to the main queue
            sqs.send_message(QueueUrl=q_url, MessageBody=msg['Body'])
            # 2. Delete from the DLQ only after the send succeeded
            sqs.delete_message(QueueUrl=dlq_url, ReceiptHandle=msg['ReceiptHandle'])
            moved += 1
            print(f" ✓ Moved message {msg['MessageId']}")
    print(f"✅ Successfully moved {moved} messages.")
if __name__ == '__main__':
    # A single positional argument (the mail domain) is required.
    args = sys.argv[1:]
    if not args:
        print("Usage: python3 requeue-dlq.py <domain>")
        sys.exit(1)
    requeue_dlq(args[0])

175
queue_helper/requeue-email.sh Executable file
View File

@@ -0,0 +1,175 @@
#!/bin/bash
# requeue-email.sh - Manually re-sends an email stored in S3 into the SQS queue.
# Simulates the SNS-like payload format the worker expects (as produced by Lambda).
# Extracts FROM and RECIPIENTS from the email itself when they are not supplied.
set -e

# Validate CLI parameters
if [ $# -lt 2 ]; then
  echo "Usage: $0 <DOMAIN> <S3_KEY> [FROM] [RECIPIENTS]"
  echo ""
  echo "RECIPIENTS kann kommagetrennt sein, z.B. user1@domain.com,user2@domain.com"
  echo "Falls FROM und RECIPIENTS nicht angegeben, werden sie aus der E-Mail extrahiert."
  echo "Example:"
  echo " $0 bayarea-cc.com rgskc3d59dqdm6lq1scenpuvdq7ikhi3cqk382g1"
  echo " oder mit manuellen Werten: $0 bayarea-cc.com rgskc3d59dqdm6lq1scenpuvdq7ikhi3cqk382g1 sender@example.com user@bayarea-cc.com"
  echo ""
  exit 1
fi

DOMAIN=$1
S3_KEY=$2
FROM=${3:-}
RECIPIENTS=${4:-}

# Configuration
AWS_REGION="us-east-2"
# Derive the bucket name from the domain (dots become dashes)
BUCKET="${DOMAIN//./-}-emails"
# Temporary file for the downloaded email
TEMP_FILE="/tmp/email-${S3_KEY}.eml"

echo "==================================="
echo "Requeue E-Mail zu SQS Queue"
echo "==================================="
echo "Domain: $DOMAIN"
echo "Bucket: $BUCKET"
echo "S3 Key: $S3_KEY"

# Verify the S3 object exists before doing anything else
echo "Prüfe S3 Object..."
if ! aws s3api head-object \
  --bucket "$BUCKET" \
  --key "$S3_KEY" \
  --region "$AWS_REGION" &>/dev/null; then
  echo "ERROR: S3 Object nicht gefunden: s3://$BUCKET/$S3_KEY"
  exit 1
fi
echo "✓ S3 Object existiert"

# If FROM or RECIPIENTS were not supplied, extract them from the email itself
if [ -z "$FROM" ] || [ -z "$RECIPIENTS" ]; then
  echo "Extrahiere Headers aus E-Mail..."
  aws s3 cp "s3://$BUCKET/$S3_KEY" "$TEMP_FILE" --region "$AWS_REGION" --quiet
  # Header parsing is delegated to the companion Python script
  if ! command -v python3 &> /dev/null; then
    echo "ERROR: python3 ist nicht installiert."
    rm -f "$TEMP_FILE"
    exit 1
  fi
  if [ ! -f "extract_email_headers.py" ]; then
    echo "ERROR: extract_email_headers.py nicht gefunden (muss im selben Verzeichnis liegen)."
    rm -f "$TEMP_FILE"
    exit 1
  fi
  PARSE_OUTPUT=$(python3 extract_email_headers.py "$TEMP_FILE")
  # Parse the FROM:/RECIPIENTS: lines from the script output
  EXTRACTED_FROM=$(echo "$PARSE_OUTPUT" | grep '^FROM:' | cut -d':' -f2-)
  EXTRACTED_RECIPIENTS=$(echo "$PARSE_OUTPUT" | grep '^RECIPIENTS:' | cut -d':' -f2-)
  # Fall back to the extracted values when none were supplied
  FROM=${FROM:-$EXTRACTED_FROM}
  RECIPIENTS=${RECIPIENTS:-$EXTRACTED_RECIPIENTS}
  # Clean up the temporary download
  rm -f "$TEMP_FILE"
fi

# Abort if either value is still empty
if [ -z "$FROM" ] || [ -z "$RECIPIENTS" ]; then
  echo "ERROR: Konnte FROM oder RECIPIENTS nicht extrahieren oder angeben."
  exit 1
fi

# Extract the domain from the first recipient and validate it
FIRST_RECIPIENT=$(echo "$RECIPIENTS" | cut -d',' -f1 | sed 's/.*<//' | sed 's/>.*//') # strip angle brackets if present
EXTRACTED_DOMAIN=$(echo "$FIRST_RECIPIENT" | cut -d'@' -f2)
# Lowercase both sides for a case-insensitive comparison
EXTRACTED_DOMAIN_LOWER=$(echo "$EXTRACTED_DOMAIN" | tr '[:upper:]' '[:lower:]')
DOMAIN_LOWER=$(echo "$DOMAIN" | tr '[:upper:]' '[:lower:]')
if [ "$EXTRACTED_DOMAIN_LOWER" != "$DOMAIN_LOWER" ]; then
  echo "ERROR: Extrahierte Domain ($EXTRACTED_DOMAIN) passt nicht zur angegebenen Domain ($DOMAIN)."
  exit 1
fi

# Derive the queue name from the domain
QUEUE_NAME="${DOMAIN//./-}-queue"
echo "From: $FROM"
echo "Recipients: $RECIPIENTS"
echo "Queue: $QUEUE_NAME"
echo ""

# Resolve the queue URL
echo "Ermittle Queue URL..."
QUEUE_URL=$(aws sqs get-queue-url \
  --queue-name "$QUEUE_NAME" \
  --region "$AWS_REGION" \
  --query 'QueueUrl' \
  --output text 2>/dev/null)
if [ -z "$QUEUE_URL" ]; then
  echo "ERROR: Queue nicht gefunden: $QUEUE_NAME"
  exit 1
fi
echo "✓ Queue URL: $QUEUE_URL"

# Recipients as a JSON array
RECIPIENTS_ARRAY=$(echo "$RECIPIENTS" | tr ',' '\n' | jq -R . | jq -s .)

# Build a fake SES event
SES_DATA=$(jq -n \
  --arg from "$FROM" \
  --arg msgid "$S3_KEY" \
  --argjson recipients "$RECIPIENTS_ARRAY" \
  '{
    mail: {
      source: $from,
      messageId: $msgid,
      destination: $recipients
    },
    receipt: {
      recipients: $recipients
    }
  }')

# Fake SNS payload (wrapper around the SES event)
FAKE_SNS_PAYLOAD=$(jq -n \
  --argjson message "$SES_DATA" \
  --arg msgid "$(uuidgen)" \
  --arg timestamp "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \
  '{
    Type: "Notification",
    MessageId: $msgid,
    TopicArn: "arn:aws:sns:ses-shim:global-topic",
    Subject: "Amazon SES Email Receipt Notification",
    Message: ($message | tostring),
    Timestamp: $timestamp
  }')

# Send the message into the queue
echo "Sende Message in Queue..."
RESPONSE=$(aws sqs send-message \
  --queue-url "$QUEUE_URL" \
  --region "$AWS_REGION" \
  --message-body "$FAKE_SNS_PAYLOAD")
MESSAGE_ID=$(echo "$RESPONSE" | jq -r '.MessageId')
echo ""
echo "==================================="
echo "✅ E-Mail erfolgreich in Queue"
echo "==================================="
echo "SQS Message ID: $MESSAGE_ID"
echo "Queue: $QUEUE_NAME"
echo ""
echo "Der Worker wird die E-Mail in Kürze verarbeiten."

View File

@@ -1,75 +0,0 @@
#!/usr/bin/env python3
# requeue-dlq.py <domain>
"""
Verschiebt Messages aus DLQ zurück in Main Queue
"""
import sys
import boto3
sqs = boto3.client('sqs', region_name='eu-central-1')
def requeue_dlq(domain, max_messages=10):
    """Move up to max_messages messages from the domain's DLQ back into its main queue."""
    base = domain.replace('.', '-')
    queue_name = f"{base}-queue"
    dlq_name = f"{queue_name}-dlq"
    try:
        queue_url = sqs.get_queue_url(QueueName=queue_name)['QueueUrl']
        dlq_url = sqs.get_queue_url(QueueName=dlq_name)['QueueUrl']
    except Exception as e:
        print(f"❌ Error: {e}")
        return
    print(f"Re-queuing up to {max_messages} messages from DLQ to main queue...")
    print(f"Domain: {domain}")
    print(f"From: {dlq_name}")
    print(f"To: {queue_name}\n")
    # Require explicit operator confirmation before touching the queues.
    if input("Continue? (yes/no): ").lower() != 'yes':
        print("Cancelled.")
        return
    # Single receive call against the DLQ.
    messages = sqs.receive_message(
        QueueUrl=dlq_url,
        MaxNumberOfMessages=max_messages,
        WaitTimeSeconds=0
    ).get('Messages', [])
    if not messages:
        print("No messages in DLQ.")
        return
    print(f"\nRe-queuing {len(messages)} message(s)...\n")
    for msg in messages:
        # Copy the body into the main queue, then drop it from the DLQ.
        sqs.send_message(
            QueueUrl=queue_url,
            MessageBody=msg['Body']
        )
        sqs.delete_message(
            QueueUrl=dlq_url,
            ReceiptHandle=msg['ReceiptHandle']
        )
        print(f" ✓ Re-queued message {msg['MessageId']}")
    print(f"\n✅ Done! {len(messages)} message(s) re-queued to {queue_name}")
if __name__ == '__main__':
    # Require a domain argument; show usage otherwise.
    if len(sys.argv) < 2:
        print("Usage: python requeue-dlq.py <domain>")
        print("Example: python requeue-dlq.py andreasknuth.de")
        sys.exit(1)
    requeue_dlq(sys.argv[1])

View File

@@ -1,120 +0,0 @@
#!/bin/bash
# requeue-email.sh - Manually re-sends an email stored in S3 into the SQS queue
set -e

# Validate CLI parameters
if [ $# -lt 3 ]; then
  echo "Usage: $0 <S3_KEY> <FROM> <RECIPIENT>"
  echo ""
  echo "Example:"
  echo " $0 rgskc3d59dqdm6lq1scenpuvdq7ikhi3cqk382g1 sender@example.com user@bayarea-cc.com"
  echo ""
  exit 1
fi

S3_KEY=$1
FROM=$2
RECIPIENT=$3

# Configuration
AWS_REGION="us-east-2"
# Extract the domain from the recipient address
DOMAIN=$(echo "$RECIPIENT" | cut -d'@' -f2)
# Derive the bucket name from the domain (dots become dashes)
BUCKET="${DOMAIN//./-}-emails"
# Derive the queue name from the domain
QUEUE_NAME="${DOMAIN//./-}-queue"

echo "==================================="
echo "Requeue E-Mail zu SQS Queue"
echo "==================================="
echo "S3 Key: $S3_KEY"
echo "From: $FROM"
echo "Recipient: $RECIPIENT"
echo "Domain: $DOMAIN"
echo "Bucket: $BUCKET"
echo "Queue: $QUEUE_NAME"
echo ""

# Verify the S3 object exists
echo "Prüfe S3 Object..."
if ! aws s3api head-object \
  --bucket "$BUCKET" \
  --key "$S3_KEY" \
  --region "$AWS_REGION" &>/dev/null; then
  echo "ERROR: S3 Object nicht gefunden: s3://$BUCKET/$S3_KEY"
  exit 1
fi
echo "✓ S3 Object existiert"

# Resolve the queue URL
echo "Ermittle Queue URL..."
QUEUE_URL=$(aws sqs get-queue-url \
  --queue-name "$QUEUE_NAME" \
  --region "$AWS_REGION" \
  --query 'QueueUrl' \
  --output text 2>/dev/null)
if [ -z "$QUEUE_URL" ]; then
  echo "ERROR: Queue nicht gefunden: $QUEUE_NAME"
  exit 1
fi
echo "✓ Queue URL: $QUEUE_URL"

# Optional: extract the Subject header from the email
echo "Versuche Subject zu extrahieren..."
TEMP_FILE="/tmp/email-${S3_KEY}.eml"
aws s3 cp "s3://$BUCKET/$S3_KEY" "$TEMP_FILE" --region "$AWS_REGION" --quiet
# Extract the subject and JSON-escape it (strip newlines, tabs, quotes)
SUBJECT=$(grep -m1 "^Subject:" "$TEMP_FILE" | sed 's/Subject: //' | tr -d '\n\r\t"' | head -c 200 || echo "(no subject)")
rm -f "$TEMP_FILE"
echo "Subject: $SUBJECT"
echo ""

# Send the message into the queue
echo "Sende Message in Queue..."
TIMESTAMP=$(date +%s)
# JSON-escape every interpolated field
FROM_ESCAPED=$(echo "$FROM" | sed 's/"/\\"/g' | tr -d '\n\r\t')
RECIPIENT_ESCAPED=$(echo "$RECIPIENT" | sed 's/"/\\"/g' | tr -d '\n\r\t')
SUBJECT_ESCAPED=$(echo "$SUBJECT" | sed 's/"/\\"/g' | tr -d '\n\r\t')
RESPONSE=$(aws sqs send-message \
  --queue-url "$QUEUE_URL" \
  --region "$AWS_REGION" \
  --message-body "{
\"bucket\": \"$BUCKET\",
\"key\": \"$S3_KEY\",
\"from\": \"$FROM_ESCAPED\",
\"recipient\": \"$RECIPIENT_ESCAPED\",
\"domain\": \"$DOMAIN\",
\"subject\": \"$SUBJECT_ESCAPED\",
\"message_id\": \"$S3_KEY\",
\"timestamp\": $TIMESTAMP
}" \
  --message-attributes "{
\"domain\": {\"StringValue\": \"$DOMAIN\", \"DataType\": \"String\"},
\"bucket\": {\"StringValue\": \"$BUCKET\", \"DataType\": \"String\"},
\"recipient\": {\"StringValue\": \"$RECIPIENT_ESCAPED\", \"DataType\": \"String\"},
\"message_id\": {\"StringValue\": \"$S3_KEY\", \"DataType\": \"String\"}
}")
MESSAGE_ID=$(echo "$RESPONSE" | jq -r '.MessageId')
echo ""
echo "==================================="
echo "✅ E-Mail erfolgreich in Queue"
echo "==================================="
echo "SQS Message ID: $MESSAGE_ID"
echo "Queue: $QUEUE_NAME"
echo ""
echo "Der Worker wird die E-Mail in Kürze verarbeiten."

520
worker.py
View File

@@ -1,520 +0,0 @@
import os
import sys
import boto3
import smtplib
import json
import time
import traceback
import signal
from email.parser import BytesParser
from email.policy import SMTP as SMTPPolicy
from datetime import datetime
# AWS Configuration
AWS_REGION = 'us-east-2'
s3 = boto3.client('s3', region_name=AWS_REGION)
sqs = boto3.client('sqs', region_name=AWS_REGION)
# ✨ Worker configuration (domain-specific: one worker serves one domain)
WORKER_DOMAIN = os.environ.get('WORKER_DOMAIN')  # e.g. 'andreasknuth.de'
WORKER_NAME = os.environ.get('WORKER_NAME', f'worker-{WORKER_DOMAIN}')
# Worker settings (all overridable via environment variables)
POLL_INTERVAL = int(os.environ.get('POLL_INTERVAL', '20'))  # SQS long-poll wait, seconds
MAX_MESSAGES = int(os.environ.get('MAX_MESSAGES', '10'))  # max messages per receive call
VISIBILITY_TIMEOUT = int(os.environ.get('VISIBILITY_TIMEOUT', '300'))  # seconds a message stays hidden while processed
# SMTP configuration (simple, since each worker handles a single domain)
SMTP_HOST = os.environ.get('SMTP_HOST', 'localhost')
SMTP_PORT = int(os.environ.get('SMTP_PORT', '25'))
SMTP_USE_TLS = os.environ.get('SMTP_USE_TLS', 'false').lower() == 'true'
SMTP_USER = os.environ.get('SMTP_USER')
SMTP_PASS = os.environ.get('SMTP_PASS')
# Graceful-shutdown flag, flipped by the signal handler below
shutdown_requested = False
def signal_handler(signum, frame):
    """Request a graceful shutdown of the main loop (SIGTERM/SIGINT)."""
    global shutdown_requested
    print(f"\n⚠ Shutdown signal received (signal {signum})")
    shutdown_requested = True
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
def log(message: str, level: str = 'INFO'):
    """Print a timestamped, worker-tagged log line (flushed immediately)."""
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    print(f"[{now}] [{level}] [{WORKER_NAME}] {message}", flush=True)
def domain_to_queue_name(domain: str) -> str:
    """Map a mail domain to its SQS queue name (dots become dashes)."""
    return f"{domain.replace('.', '-')}-queue"
def get_queue_url() -> str:
    """Resolve the SQS queue URL for this worker's configured domain."""
    name = domain_to_queue_name(WORKER_DOMAIN)
    try:
        return sqs.get_queue_url(QueueName=name)['QueueUrl']
    except Exception as e:
        # Surface a single, descriptive error to the caller.
        raise Exception(f"Failed to get queue URL for {WORKER_DOMAIN}: {e}")
def mark_as_processed(bucket: str, key: str, invalid_inboxes: list = None):
    """
    Tag the S3 object as successfully delivered.

    Only called when at least one recipient succeeded; permanently
    invalid recipients, if any, are recorded in the object metadata.
    """
    try:
        meta = s3.head_object(Bucket=bucket, Key=key).get('Metadata', {}) or {}
        meta.update({
            'processed': 'true',
            'processed_at': str(int(time.time())),
            'processed_by': WORKER_NAME,
            'status': 'delivered',
        })
        for stale in ('processing_started', 'queued_at'):
            meta.pop(stale, None)
        # Record invalid inboxes when present
        if invalid_inboxes:
            meta['invalid_inboxes'] = ','.join(invalid_inboxes)
            log(f"⚠ Invalid inboxes recorded: {', '.join(invalid_inboxes)}", 'WARNING')
        # Self-copy with REPLACE is how S3 object metadata gets rewritten
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=meta,
            MetadataDirective='REPLACE'
        )
        log(f"✓ Marked s3://{bucket}/{key} as processed", 'SUCCESS')
    except Exception as e:
        log(f"Failed to mark as processed: {e}", 'WARNING')
def mark_as_all_invalid(bucket: str, key: str, invalid_inboxes: list):
    """
    Tag the S3 object as failed because every recipient mailbox is invalid.
    """
    try:
        meta = s3.head_object(Bucket=bucket, Key=key).get('Metadata', {}) or {}
        meta.update({
            'processed': 'true',
            'processed_at': str(int(time.time())),
            'processed_by': WORKER_NAME,
            'status': 'failed',
            'error': 'All recipients are invalid (mailboxes do not exist)',
            'invalid_inboxes': ','.join(invalid_inboxes),
        })
        for stale in ('processing_started', 'queued_at'):
            meta.pop(stale, None)
        # Self-copy with REPLACE rewrites the S3 object metadata in place
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=meta,
            MetadataDirective='REPLACE'
        )
        log(f"✓ Marked s3://{bucket}/{key} as failed (all invalid)", 'SUCCESS')
    except Exception as e:
        log(f"Failed to mark as all invalid: {e}", 'WARNING')
def mark_as_failed(bucket: str, key: str, error: str, receive_count: int):
    """
    Tag the S3 object as completely failed.

    Only called when delivery failed for ALL recipients.
    """
    try:
        meta = s3.head_object(Bucket=bucket, Key=key).get('Metadata', {}) or {}
        meta.update({
            'status': 'failed',
            'failed_at': str(int(time.time())),
            'failed_by': WORKER_NAME,
            'error': error[:500],  # S3 metadata values are size-limited
            'retry_count': str(receive_count),
        })
        meta.pop('processing_started', None)
        # Self-copy with REPLACE rewrites the S3 object metadata in place
        s3.copy_object(
            Bucket=bucket,
            Key=key,
            CopySource={'Bucket': bucket, 'Key': key},
            Metadata=meta,
            MetadataDirective='REPLACE'
        )
        log(f"✗ Marked s3://{bucket}/{key} as failed: {error[:100]}", 'ERROR')
    except Exception as e:
        log(f"Failed to mark as failed: {e}", 'WARNING')
def is_temporary_smtp_error(error_msg: str) -> bool:
    """
    Return True when an SMTP failure looks transient and a retry makes sense.

    Per RFC 5321, 4xx reply codes are transient and 5xx are permanent.
    Fix: the previous generic '4' substring indicator classified ANY
    message containing the digit 4 — including permanent replies such as
    '554 Transaction failed' — as temporary. We now only accept a
    standalone 4xx status-code token or a class-4 enhanced status code
    (RFC 3463, e.g. '4.7.1'), plus a few network-level keywords.
    """
    import re  # local import: the file-level import block is outside this function

    error_lower = error_msg.lower()
    # Network-level conditions that are always worth retrying
    keyword_indicators = (
        'timeout',
        'connection refused',
        'connection reset',
        'network unreachable',
        'temporarily',
        'try again',
    )
    if any(indicator in error_lower for indicator in keyword_indicators):
        return True
    # A standalone basic 4xx code token (421, 450, 451, 452, ...)
    if re.search(r'\b4\d{2}\b', error_lower):
        return True
    # Enhanced status codes of class 4, e.g. '4.7.1'
    return re.search(r'\b4\.\d+\.\d+\b', error_lower) is not None
def is_permanent_recipient_error(error_msg: str) -> bool:
    """
    Decide whether a failure is permanent for this recipient, i.e. the
    mailbox does not exist.

    550 = mailbox not found, 551 = user not local, 553 = invalid mailbox name.
    """
    lowered = error_msg.lower()
    permanent_indicators = (
        '550',  # mailbox unavailable / not found
        '551',  # user not local
        '553',  # mailbox name not allowed / invalid
        'mailbox not found',
        'user unknown',
        'no such user',
        'recipient rejected',
        'does not exist',
        'invalid recipient',
        'unknown user',
    )
    return any(marker in lowered for marker in permanent_indicators)
def send_email(from_addr: str, recipient: str, raw_message: bytes) -> tuple:
    """
    Deliver the raw message to a single recipient over SMTP.

    Returns a (success, error, is_permanent) tuple: error is None on
    success, and is_permanent tells the caller whether a retry is futile.
    """
    try:
        with smtplib.SMTP(SMTP_HOST, SMTP_PORT, timeout=30) as smtp:
            smtp.ehlo()
            if SMTP_USE_TLS:
                # Opportunistic STARTTLS: log and continue on failure
                try:
                    smtp.starttls()
                    smtp.ehlo()
                except Exception as e:
                    log(f" STARTTLS failed: {e}", 'WARNING')
            if SMTP_USER and SMTP_PASS:
                # Best-effort authentication: a failed login is logged, not fatal
                try:
                    smtp.login(SMTP_USER, SMTP_PASS)
                except Exception as e:
                    log(f" SMTP auth failed: {e}", 'WARNING')
            # sendmail returns a dict of refused recipients (empty on success)
            refused = smtp.sendmail(from_addr, [recipient], raw_message)
            if isinstance(refused, dict) and refused:
                error = refused.get(recipient, 'Unknown refusal')
                is_permanent = is_permanent_recipient_error(str(error))
                log(f"{recipient}: {error} ({'permanent' if is_permanent else 'temporary'})", 'ERROR')
                return False, str(error), is_permanent
            log(f"{recipient}: Delivered", 'SUCCESS')
            return True, None, False
    except smtplib.SMTPException as e:
        error_msg = str(e)
        is_permanent = is_permanent_recipient_error(error_msg)
        log(f"{recipient}: SMTP error - {error_msg}", 'ERROR')
        return False, error_msg, is_permanent
    except Exception as e:
        # Connection-level problems are always treated as temporary
        log(f"{recipient}: Connection error - {e}", 'ERROR')
        return False, str(e), False
def process_message(message_body: dict, receive_count: int) -> bool:
    """
    Process one email taken from the queue; the message may list several
    recipients, and delivery is attempted to all of them.

    Returns True when the SQS message should be deleted (delivered, or a
    non-retryable failure), False to keep it queued for a retry.
    """
    bucket = message_body['bucket']
    key = message_body['key']
    from_addr = message_body['from']
    recipients = message_body['recipients']  # list of recipient addresses
    domain = message_body['domain']
    subject = message_body.get('subject', '(unknown)')
    message_id = message_body.get('message_id', '(unknown)')
    log(f"\n{'='*70}")
    log(f"Processing email (Attempt #{receive_count}):")
    log(f" MessageId: {message_id}")
    log(f" S3 Key: {key}")
    log(f" Domain: {domain}")
    log(f" From: {from_addr}")
    log(f" Recipients: {len(recipients)}")
    for recipient in recipients:
        log(f" - {recipient}")
    log(f" Subject: {subject}")
    log(f" S3: s3://{bucket}/{key}")
    log(f"{'='*70}")
    # ✨ VALIDATION: the message's domain must match this worker's domain
    if domain.lower() != WORKER_DOMAIN.lower():
        log(f"ERROR: Wrong domain! Expected {WORKER_DOMAIN}, got {domain}", 'ERROR')
        log("This message should not be in this queue! Deleting...", 'ERROR')
        return True  # delete the message (it does not belong here)
    # Load the raw email from S3
    try:
        response = s3.get_object(Bucket=bucket, Key=key)
        raw_bytes = response['Body'].read()
        log(f"✓ Loaded {len(raw_bytes):,} bytes ({len(raw_bytes)/1024:.1f} KB)")
    except s3.exceptions.NoSuchKey:
        log(f"✗ S3 object not found (may have been deleted)", 'ERROR')
        return True  # not retryable - delete the message
    except Exception as e:
        log(f"✗ Failed to load from S3: {e}", 'ERROR')
        return False  # could be temporary - retry
    # Deliver to every recipient
    log(f"\n📤 Sending to {len(recipients)} recipient(s)...")
    log(f"Connecting to {SMTP_HOST}:{SMTP_PORT} (TLS: {SMTP_USE_TLS})")
    successful = []
    failed_temporary = []
    failed_permanent = []
    for recipient in recipients:
        success, error, is_permanent = send_email(from_addr, recipient, raw_bytes)
        if success:
            successful.append(recipient)
        elif is_permanent:
            failed_permanent.append(recipient)
        else:
            failed_temporary.append(recipient)
    # Result summary
    log(f"\n📊 Delivery Results:")
    log(f" ✓ Successful: {len(successful)}/{len(recipients)}")
    log(f" ✗ Failed (temporary): {len(failed_temporary)}")
    log(f" ✗ Failed (permanent): {len(failed_permanent)}")
    # Decision logic
    if len(successful) > 0:
        # ✅ Case 1: at least one recipient succeeded
        # -> status=delivered, record the invalid inboxes
        invalid_inboxes = failed_permanent if failed_permanent else None
        mark_as_processed(bucket, key, invalid_inboxes)
        log(f"{'='*70}")
        log(f"✅ Email delivered to {len(successful)} recipient(s)", 'SUCCESS')
        if failed_permanent:
            log(f"{len(failed_permanent)} invalid inbox(es): {', '.join(failed_permanent)}", 'WARNING')
        if failed_temporary:
            log(f"{len(failed_temporary)} temporary failure(s) - NOT retrying (at least 1 success)", 'WARNING')
        log(f"{'='*70}\n")
        return True  # delete the message
    elif len(failed_permanent) == len(recipients):
        # ❌ Case 2: ALL recipients failed permanently (every inbox invalid)
        # -> status=failed, invalid_inboxes = all of them
        mark_as_all_invalid(bucket, key, failed_permanent)
        log(f"{'='*70}")
        log(f"✗ All recipients are invalid inboxes - NO delivery", 'ERROR')
        log(f" Invalid: {', '.join(failed_permanent)}", 'ERROR')
        log(f"{'='*70}\n")
        return True  # delete the message (not retryable)
    else:
        # ⏳ Case 3: only temporary failures, no successful delivery
        # -> retry while attempts remain
        if receive_count < 3:
            log(f"⚠ All failures are temporary, will retry", 'WARNING')
            log(f"{'='*70}\n")
            return False  # do NOT delete the message -> retry
        else:
            # Max retries reached -> mark as failed
            error_summary = f"Failed after {receive_count} attempts. Temporary errors for all recipients."
            mark_as_failed(bucket, key, error_summary, receive_count)
            log(f"{'='*70}")
            log(f"✗ Email delivery failed permanently after {receive_count} attempts", 'ERROR')
            log(f"{'='*70}\n")
            return False  # after 3 attempts SQS moves it to the DLQ automatically
def main_loop():
    """Main loop: long-polls the SQS queue and processes every message received."""
    # Resolve the queue URL once at startup; a failure here is fatal
    try:
        queue_url = get_queue_url()
    except Exception as e:
        log(f"FATAL: {e}", 'ERROR')
        sys.exit(1)
    log(f"\n{'='*70}")
    log(f"🚀 Email Worker started")
    log(f"{'='*70}")
    log(f" Worker Name: {WORKER_NAME}")
    log(f" Domain: {WORKER_DOMAIN}")
    log(f" Queue: {queue_url}")
    log(f" Region: {AWS_REGION}")
    log(f" SMTP: {SMTP_HOST}:{SMTP_PORT} (TLS: {SMTP_USE_TLS})")
    log(f" Poll interval: {POLL_INTERVAL}s")
    log(f" Max messages per poll: {MAX_MESSAGES}")
    log(f" Visibility timeout: {VISIBILITY_TIMEOUT}s")
    log(f"{'='*70}\n")
    consecutive_errors = 0
    max_consecutive_errors = 10
    messages_processed = 0
    last_activity = time.time()
    while not shutdown_requested:
        try:
            # Fetch messages from the queue (long polling)
            response = sqs.receive_message(
                QueueUrl=queue_url,
                MaxNumberOfMessages=MAX_MESSAGES,
                WaitTimeSeconds=POLL_INTERVAL,
                VisibilityTimeout=VISIBILITY_TIMEOUT,
                AttributeNames=['ApproximateReceiveCount', 'SentTimestamp'],
                MessageAttributeNames=['All']
            )
            # Reset the error counter after any successful poll
            consecutive_errors = 0
            if 'Messages' not in response:
                # No messages; emit a heartbeat at most once per minute
                if time.time() - last_activity > 60:
                    log(f"Waiting for messages... (processed: {messages_processed})")
                    last_activity = time.time()
                continue
            message_count = len(response['Messages'])
            log(f"\n✉ Received {message_count} message(s) from queue")
            last_activity = time.time()
            # Process the received messages
            for msg in response['Messages']:
                if shutdown_requested:
                    log("Shutdown requested, stopping processing")
                    break
                receipt_handle = msg['ReceiptHandle']
                # Read the receive count (delivery attempt number)
                receive_count = int(msg.get('Attributes', {}).get('ApproximateReceiveCount', 1))
                # Sent timestamp (to compute how long the message sat in the queue)
                sent_timestamp = int(msg.get('Attributes', {}).get('SentTimestamp', 0)) / 1000
                queue_time = int(time.time() - sent_timestamp) if sent_timestamp else 0
                if queue_time > 0:
                    log(f"Message was in queue for {queue_time}s")
                try:
                    message_body = json.loads(msg['Body'])
                    # Process the email
                    success = process_message(message_body, receive_count)
                    if success:
                        # Delete the message from the queue
                        sqs.delete_message(
                            QueueUrl=queue_url,
                            ReceiptHandle=receipt_handle
                        )
                        log("✓ Message deleted from queue")
                        messages_processed += 1
                    else:
                        # On failure the message stays queued for a retry
                        log(f"⚠ Message kept in queue for retry (attempt {receive_count}/3)")
                except json.JSONDecodeError as e:
                    log(f"✗ Invalid message format: {e}", 'ERROR')
                    # Delete malformed messages (not retryable)
                    sqs.delete_message(
                        QueueUrl=queue_url,
                        ReceiptHandle=receipt_handle
                    )
                except Exception as e:
                    log(f"✗ Error processing message: {e}", 'ERROR')
                    traceback.print_exc()
                    # The message stays in the queue for a retry
        except KeyboardInterrupt:
            log("\n⚠ Keyboard interrupt received")
            break
        except Exception as e:
            consecutive_errors += 1
            log(f"✗ Error in main loop ({consecutive_errors}/{max_consecutive_errors}): {e}", 'ERROR')
            traceback.print_exc()
            if consecutive_errors >= max_consecutive_errors:
                log("Too many consecutive errors, shutting down", 'ERROR')
                break
            # Brief pause after an error before polling again
            time.sleep(5)
    log(f"\n{'='*70}")
    log(f"👋 Worker shutting down")
    log(f" Messages processed: {messages_processed}")
    log(f"{'='*70}\n")
if __name__ == '__main__':
    # Validation: a worker is useless without its target domain
    if not WORKER_DOMAIN:
        log("ERROR: WORKER_DOMAIN not set!", 'ERROR')
        sys.exit(1)
    try:
        main_loop()
    except Exception as e:
        # Last-resort handler: log the failure and exit non-zero
        log(f"Fatal error: {e}", 'ERROR')
        traceback.print_exc()
        sys.exit(1)