Compare commits

...

23 Commits

Author SHA1 Message Date
1e1265ef1b batch imapsync 2026-04-15 13:49:31 -05:00
9862689c0c no markAsBlocked, 2026-04-12 20:43:37 -05:00
bed6c2a398 fix 2026-04-03 17:03:14 -05:00
27c2be664a standby mode, sns or sqs 2026-04-03 16:54:51 -05:00
7aed24bfff Merge branch 'contabo' 2026-04-03 16:16:01 -05:00
2ebe0484a4 create topic, subscription and queues per Domain 2026-04-03 16:15:20 -05:00
61fce745af moving certs 2026-03-24 20:23:34 -05:00
b732cebd94 updated spam corrections 2026-03-20 10:33:33 -05:00
36c122bf53 new spam config 2026-03-19 18:18:42 -05:00
6e2a061cf3 add ip 2026-03-15 15:07:01 -05:00
d331bd13b5 no buffer 2026-03-11 19:38:02 -05:00
610b01eee7 whitelist helper 2026-03-11 19:26:32 -05:00
c2d4903bc9 ENABLE_FAIL2BAN 0 2026-03-11 09:38:00 -05:00
613aa30493 logs 2026-03-08 16:15:41 -05:00
29f360ece8 logger console + file 2026-03-08 16:09:30 -05:00
62221e8121 fix 2026-03-08 14:54:33 -05:00
74c4f5801e Prometheus, Grafana, blackbox_exporter 2026-03-08 14:50:43 -05:00
90b120957d add missing import 2026-03-07 17:07:50 -06:00
99ab2a07d8 send mail even if if parsing fails 2026-03-07 17:06:03 -06:00
d9a91c13ed printstats 2026-03-07 16:41:51 -06:00
1d53f2d357 pino 2026-03-07 15:34:15 -06:00
9586869c0c neue Ports 2026-03-07 15:26:56 -06:00
d1426afec5 new structure 2026-03-07 15:16:14 -06:00
14 changed files with 411 additions and 154 deletions

58
DMS/batch_imapsync.sh Normal file
View File

@@ -0,0 +1,58 @@
#!/bin/bash
# batch_imapsync.sh - Runs the IMAP sync for all users in the background.
# CSV format: email@domain.com,SecretPassword123
HOST1=$1
HOST2=$2
CSV_FILE=$3

if [ -z "$HOST1" ] || [ -z "$HOST2" ] || [ -z "$CSV_FILE" ]; then
    echo "Usage: $0 <source-host> <target-host> <users.csv>"
    echo "Beispiel: $0 secure.emailsrvr.com 147.93.132.244 stxmaterials.csv"
    exit 1
fi

# Trim leading/trailing whitespace without spawning a subshell.
# Unlike 'xargs', this does NOT mangle values containing quotes or
# backslashes (important for passwords).
trim() {
    local s=$1
    s="${s#"${s%%[![:space:]]*}"}"
    s="${s%"${s##*[![:space:]]}"}"
    printf '%s' "$s"
}

# ======================================================================
# The actual sync function (sent to the background below)
# ======================================================================
run_sync_jobs() {
    TIMESTAMP=$(date +%Y%m%d_%H%M%S)
    LOG_DIR="sync_logs_$TIMESTAMP"
    mkdir -p "$LOG_DIR"

    echo "Beginne Sync-Lauf am $(date)" > "batch_master_${TIMESTAMP}.log"

    # '|| [ -n "$email" ]' keeps the last CSV row even when the file
    # has no trailing newline.
    while IFS=, read -r email password || [ -n "$email" ]; do
        # Strip CR (Windows line endings) and surrounding whitespace.
        email=$(trim "$(printf '%s' "$email" | tr -d '\r')")
        password=$(trim "$(printf '%s' "$password" | tr -d '\r')")

        # Skip blank lines and comment lines.
        [ -z "$email" ] && continue
        case "$email" in \#*) continue ;; esac

        LOGFILE="$LOG_DIR/imapsync_${email}.log"
        echo "[$(date)] Syncing $email -> $LOGFILE" >> "batch_master_${TIMESTAMP}.log"

        # Run docker imapsync for the current user.
        # No '-i' and stdin redirected from /dev/null: an interactive
        # stdin here would inherit the CSV redirection of the while-read
        # loop and could swallow the remaining user lines.
        docker run --rm gilleslamiral/imapsync imapsync \
            --host1 "$HOST1" --user1 "$email" --password1 "$password" --ssl1 \
            --host2 "$HOST2" --user2 "$email" --password2 "$password" --ssl2 \
            --automap < /dev/null > "$LOGFILE" 2>&1
    done < "$CSV_FILE"

    echo "Alle Sync-Jobs beendet am $(date)" >> "batch_master_${TIMESTAMP}.log"
}

# ======================================================================
# Script start: detach from the terminal
# ======================================================================
echo "🚀 Starte Batch-IMAP-Sync im Hintergrund..."

# Run the function in the background, silencing all remaining output.
# 'disown' removes the job from the shell's job table so it survives
# the SSH session being closed (no SIGHUP).
run_sync_jobs </dev/null >/dev/null 2>&1 &
disown

echo "✅ Der Job läuft jetzt autark im Hintergrund (sequenziell)."
echo "Du kannst das SSH-Terminal jetzt bedenkenlos schließen!"
echo "Überwache den Gesamtfortschritt mit:"
echo " tail -f batch_master_*.log"
echo "Oder die Details eines einzelnen Postfachs mit:"
echo " tail -f sync_logs_*/imapsync_<email>.log"

View File

@@ -45,7 +45,8 @@ services:
# setup-dms-tls.sh referenziert per:
# /etc/mail/certs/*.domain/*.domain.crt|.key
# -------------------------------------------------------
- /var/lib/docker/volumes/caddy_data/_data/caddy/certificates/acme-v02.api.letsencrypt.org-directory:/etc/mail/certs:ro
# - /var/lib/docker/volumes/caddy_data/_data/caddy/certificates/acme-v02.api.letsencrypt.org-directory:/etc/mail/certs:ro
- /home/aknuth/git/email-amazon/caddy/caddy-data/caddy/certificates/acme-v02.api.letsencrypt.org-directory:/etc/mail/certs:ro
# -------------------------------------------------------
# Dovecot SNI Konfiguration (generiert von setup-dms-tls.sh)
# DMS lädt /tmp/docker-mailserver/dovecot-sni.cf automatisch.

View File

@@ -1,6 +1,6 @@
[DEFAULT]
# Whitelist: Localhost, private Docker-Netze und die Budd Electric Office-IP
ignoreip = 127.0.0.1/8 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 24.155.193.233 69.223.70.143
ignoreip = 127.0.0.1/8 ::1 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 24.155.193.233 69.223.70.143
[dovecot]
# Erhöht die Anzahl der erlaubten Fehlversuche auf 20

View File

@@ -1,14 +1,8 @@
DOCKER_WL {
# ÄNDERUNG: Wir prüfen jetzt den Absender (Envelope From)
type = "from";
filter = "email:domain";
# Pfad bleibt gleich
map = "/etc/rspamd/override.d/docker_whitelist.map";
symbol = "DOCKER_WHITELIST";
score = -50.0;
description = "Whitelist fuer eigene Domains";
prefilter = true;
action = "accept";
score = -50.0;
}

View File

@@ -1,9 +0,0 @@
bayarea-cc.com
ruehrgedoens.de
annavillesda.org
bizmatch.net
biz-match.com
qrmaster.net
nqsltd.com
iitwelders.com
# Weitere Domains hier eintragen

View File

@@ -0,0 +1,6 @@
# Rspamd force_actions rule: whenever the DOCKER_WHITELIST symbol has
# fired on a message, force the final verdict to "no action" (deliver),
# overriding whatever the accumulated spam score would have decided.
rules {
DOCKER_WHITELIST_FORCE {
action = "no action";
expression = "DOCKER_WHITELIST";
}
}

View File

@@ -1,21 +1,47 @@
#!/bin/bash
# user-patches.sh läuft bei jedem Start von DMS automatisch
# user-patches.sh laeuft bei jedem Start von DMS automatisch
ACCOUNTS_FILE="/tmp/docker-mailserver/postfix-accounts.cf"
WHITELIST_FILE="/etc/rspamd/override.d/docker_whitelist.map"
echo "Patching: Generiere Rspamd Whitelist aus Accounts..."
# --- Rspamd Whitelist generieren ---
STATIC_DOMAINS=(
"bayarea-cc.com"
"ruehrgedoens.de"
"annavillesda.org"
"bizmatch.net"
"biz-match.com"
"qrmaster.net"
"nqsltd.com"
"iitwelders.com"
)
if [ -f "$ACCOUNTS_FILE" ]; then
# Whitelist generieren
awk -F'|' '{print $1}' "$ACCOUNTS_FILE" | cut -d'@' -f2 | sort | uniq > "$WHITELIST_FILE"
# Berechtigungen korrigieren
chmod 644 "$WHITELIST_FILE"
chown _rspamd:_rspamd "$WHITELIST_FILE" 2>/dev/null || true
echo "Whitelist erfolgreich erstellt:"
cat "$WHITELIST_FILE"
else
echo "FEHLER: $ACCOUNTS_FILE wurde nicht gefunden!"
echo "Patching: Generiere Rspamd Whitelist aus Accounts + statischen Domains..."
{
for domain in "${STATIC_DOMAINS[@]}"; do
echo "$domain"
done
if [ -f "$ACCOUNTS_FILE" ]; then
awk -F'|' '{print $1}' "$ACCOUNTS_FILE" | cut -d'@' -f2
fi
} | sort | uniq > "$WHITELIST_FILE"
chmod 644 "$WHITELIST_FILE"
chown _rspamd:_rspamd "$WHITELIST_FILE" 2>/dev/null || true
echo "Whitelist erstellt:"
cat "$WHITELIST_FILE"
# --- local.d configs manuell kopieren (DMS kopiert local.d nicht automatisch) ---
echo "Patching: Kopiere custom rspamd local.d configs..."
SRC="/tmp/docker-mailserver/rspamd/local.d"
DST="/etc/rspamd/local.d"
if [ -d "$SRC" ]; then
for f in "$SRC"/*; do
[ -f "$f" ] || continue
cp "$f" "$DST/$(basename "$f")"
chown root:root "$DST/$(basename "$f")"
chmod 644 "$DST/$(basename "$f")"
echo " Kopiert: $(basename "$f") -> $DST/"
done
fi

View File

@@ -1,55 +1,58 @@
#!/bin/bash
# create-queue.sh
# create-queue.sh (v2 — mit SNS Fan-Out + Standby Queue)
# Usage: DOMAIN=andreasknuth.de ./create-queue.sh
#
# Erstellt pro Domain:
# - Primary Queue + DLQ (wie bisher, für Contabo)
# - Standby Queue + DLQ (NEU, für Office-VM)
# - SNS Topic (NEU, Fan-Out)
# - 2 SNS Subscriptions (NEU, Topic → Primary + Standby)
set -e
AWS_REGION="us-east-2"
# Domain aus Environment Variable
if [ -z "$DOMAIN" ]; then
echo "Error: DOMAIN environment variable not set"
echo "Usage: DOMAIN=andreasknuth.de $0"
exit 1
fi
QUEUE_NAME="${DOMAIN//./-}-queue"
DOMAIN_SLUG="${DOMAIN//./-}"
QUEUE_NAME="${DOMAIN_SLUG}-queue"
DLQ_NAME="${QUEUE_NAME}-dlq"
STANDBY_QUEUE_NAME="${DOMAIN_SLUG}-standby-queue"
STANDBY_DLQ_NAME="${STANDBY_QUEUE_NAME}-dlq"
TOPIC_NAME="${DOMAIN_SLUG}-topic"
ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
echo "========================================"
echo "Creating SQS Queue for Email Delivery"
echo "Creating SQS + SNS for Email Delivery"
echo "========================================"
echo ""
echo "📧 Domain: $DOMAIN"
echo " Region: $AWS_REGION"
echo "📧 Domain: $DOMAIN"
echo " Region: $AWS_REGION"
echo " Account: $ACCOUNT_ID"
echo ""
# Dead Letter Queue erstellen
# ============================================================
# 1. Primary DLQ + Queue (wie bisher)
# ============================================================
echo "━━━ Primary Queue (Contabo) ━━━"
echo "Creating DLQ: $DLQ_NAME"
DLQ_URL=$(aws sqs create-queue \
--queue-name "${DLQ_NAME}" \
--region "${AWS_REGION}" \
--attributes '{
"MessageRetentionPeriod": "1209600"
}' \
--query 'QueueUrl' \
--output text 2>/dev/null || aws sqs get-queue-url --queue-name "${DLQ_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
--attributes '{"MessageRetentionPeriod": "1209600"}' \
--query 'QueueUrl' --output text 2>/dev/null \
|| aws sqs get-queue-url --queue-name "${DLQ_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
DLQ_ARN=$(aws sqs get-queue-attributes --queue-url "${DLQ_URL}" --region "${AWS_REGION}" \
--attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ DLQ: ${DLQ_ARN}"
echo " ✓ DLQ URL: ${DLQ_URL}"
# DLQ ARN ermitteln
DLQ_ARN=$(aws sqs get-queue-attributes \
--queue-url "${DLQ_URL}" \
--region "${AWS_REGION}" \
--attribute-names QueueArn \
--query 'Attributes.QueueArn' \
--output text)
echo " ✓ DLQ ARN: ${DLQ_ARN}"
echo ""
# Haupt-Queue erstellen mit Redrive Policy
echo "Creating Main Queue: $QUEUE_NAME"
echo "Creating Queue: $QUEUE_NAME"
QUEUE_URL=$(aws sqs create-queue \
--queue-name "${QUEUE_NAME}" \
--region "${AWS_REGION}" \
@@ -59,18 +62,146 @@ QUEUE_URL=$(aws sqs create-queue \
\"ReceiveMessageWaitTimeSeconds\": \"20\",
\"RedrivePolicy\": \"{\\\"deadLetterTargetArn\\\":\\\"${DLQ_ARN}\\\",\\\"maxReceiveCount\\\":\\\"3\\\"}\"
}" \
--query 'QueueUrl' \
--output text 2>/dev/null || aws sqs get-queue-url --queue-name "${QUEUE_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
--query 'QueueUrl' --output text 2>/dev/null \
|| aws sqs get-queue-url --queue-name "${QUEUE_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
QUEUE_ARN=$(aws sqs get-queue-attributes --queue-url "${QUEUE_URL}" --region "${AWS_REGION}" \
--attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ Queue: ${QUEUE_ARN}"
echo ""
echo " ✓ Queue URL: ${QUEUE_URL}"
# ============================================================
# 2. Standby DLQ + Queue (NEU)
# ============================================================
echo "━━━ Standby Queue (Office-VM) ━━━"
echo "Creating Standby DLQ: $STANDBY_DLQ_NAME"
STANDBY_DLQ_URL=$(aws sqs create-queue \
--queue-name "${STANDBY_DLQ_NAME}" \
--region "${AWS_REGION}" \
--attributes '{"MessageRetentionPeriod": "1209600"}' \
--query 'QueueUrl' --output text 2>/dev/null \
|| aws sqs get-queue-url --queue-name "${STANDBY_DLQ_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
STANDBY_DLQ_ARN=$(aws sqs get-queue-attributes --queue-url "${STANDBY_DLQ_URL}" --region "${AWS_REGION}" \
--attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ Standby DLQ: ${STANDBY_DLQ_ARN}"
echo "Creating Standby Queue: $STANDBY_QUEUE_NAME"
STANDBY_QUEUE_URL=$(aws sqs create-queue \
--queue-name "${STANDBY_QUEUE_NAME}" \
--region "${AWS_REGION}" \
--attributes "{
\"VisibilityTimeout\": \"300\",
\"MessageRetentionPeriod\": \"86400\",
\"ReceiveMessageWaitTimeSeconds\": \"20\",
\"RedrivePolicy\": \"{\\\"deadLetterTargetArn\\\":\\\"${STANDBY_DLQ_ARN}\\\",\\\"maxReceiveCount\\\":\\\"3\\\"}\"
}" \
--query 'QueueUrl' --output text 2>/dev/null \
|| aws sqs get-queue-url --queue-name "${STANDBY_QUEUE_NAME}" --region "${AWS_REGION}" --query 'QueueUrl' --output text)
STANDBY_QUEUE_ARN=$(aws sqs get-queue-attributes --queue-url "${STANDBY_QUEUE_URL}" --region "${AWS_REGION}" \
--attribute-names QueueArn --query 'Attributes.QueueArn' --output text)
echo " ✓ Standby Queue: ${STANDBY_QUEUE_ARN}"
echo ""
# ============================================================
# 3. SNS Topic (NEU)
# ============================================================
echo "━━━ SNS Topic (Fan-Out) ━━━"
echo "Creating Topic: $TOPIC_NAME"
TOPIC_ARN=$(aws sns create-topic \
--name "${TOPIC_NAME}" \
--region "${AWS_REGION}" \
--query 'TopicArn' --output text)
echo " ✓ Topic: ${TOPIC_ARN}"
echo ""
# ============================================================
# 4. SNS → SQS Subscriptions (NEU)
# ============================================================
echo "━━━ Subscriptions ━━━"
# SNS braucht Berechtigung, in die SQS Queues zu schreiben
# Policy für Primary Queue
POLICY_PRIMARY="{
\"Version\": \"2012-10-17\",
\"Statement\": [{
\"Effect\": \"Allow\",
\"Principal\": {\"Service\": \"sns.amazonaws.com\"},
\"Action\": \"sqs:SendMessage\",
\"Resource\": \"${QUEUE_ARN}\",
\"Condition\": {\"ArnEquals\": {\"aws:SourceArn\": \"${TOPIC_ARN}\"}}
}]
}"
aws sqs set-queue-attributes \
--queue-url "${QUEUE_URL}" \
--region "${AWS_REGION}" \
--attributes "{\"Policy\": $(echo "$POLICY_PRIMARY" | jq -c '.' | jq -Rs '.')}" \
> /dev/null
echo " ✓ Primary Queue Policy gesetzt"
# Policy für Standby Queue
POLICY_STANDBY="{
\"Version\": \"2012-10-17\",
\"Statement\": [{
\"Effect\": \"Allow\",
\"Principal\": {\"Service\": \"sns.amazonaws.com\"},
\"Action\": \"sqs:SendMessage\",
\"Resource\": \"${STANDBY_QUEUE_ARN}\",
\"Condition\": {\"ArnEquals\": {\"aws:SourceArn\": \"${TOPIC_ARN}\"}}
}]
}"
aws sqs set-queue-attributes \
--queue-url "${STANDBY_QUEUE_URL}" \
--region "${AWS_REGION}" \
--attributes "{\"Policy\": $(echo "$POLICY_STANDBY" | jq -c '.' | jq -Rs '.')}" \
> /dev/null
echo " ✓ Standby Queue Policy gesetzt"
# Subscription: Topic → Primary Queue
SUB_PRIMARY=$(aws sns subscribe \
--topic-arn "${TOPIC_ARN}" \
--protocol sqs \
--notification-endpoint "${QUEUE_ARN}" \
--region "${AWS_REGION}" \
--attributes '{"RawMessageDelivery": "true"}' \
--query 'SubscriptionArn' --output text)
echo " ✓ Subscription Primary: ${SUB_PRIMARY}"
# Subscription: Topic → Standby Queue
SUB_STANDBY=$(aws sns subscribe \
--topic-arn "${TOPIC_ARN}" \
--protocol sqs \
--notification-endpoint "${STANDBY_QUEUE_ARN}" \
--region "${AWS_REGION}" \
--attributes '{"RawMessageDelivery": "true"}' \
--query 'SubscriptionArn' --output text)
echo " ✓ Subscription Standby: ${SUB_STANDBY}"
echo ""
# ============================================================
# Zusammenfassung
# ============================================================
echo "========================================"
echo "✅ Queue created successfully!"
echo "✅ Setup complete for $DOMAIN"
echo "========================================"
echo ""
echo "Configuration:"
echo " Domain: $DOMAIN"
echo " Queue: $QUEUE_NAME"
echo " Queue URL: $QUEUE_URL"
echo " DLQ: $DLQ_NAME"
echo " Region: $AWS_REGION"
echo "Primary (Contabo):"
echo " Queue: $QUEUE_URL"
echo " DLQ: $DLQ_URL"
echo ""
echo "Standby (Office-VM):"
echo " Queue: $STANDBY_QUEUE_URL"
echo " DLQ: $STANDBY_DLQ_URL"
echo ""
echo "SNS Fan-Out:"
echo " Topic: $TOPIC_ARN"
echo " → Primary: $SUB_PRIMARY"
echo " → Standby: $SUB_STANDBY"
echo ""
echo "⚠️ Nächste Schritte:"
echo " 1. Lambda-Funktion updaten: sns.publish() statt sqs.send_message()"
echo " 2. Lambda IAM Role: sns:Publish Berechtigung hinzufügen"
echo " 3. Worker auf Office-VM: QUEUE_SUFFIX=-standby-queue konfigurieren"
echo " 4. Worker auf Office-VM: STANDBY_MODE=true setzen"

View File

@@ -8,24 +8,38 @@ from botocore.exceptions import ClientError
import time
import random
# Logging konfigurieren
logger = logging.getLogger()
logger.setLevel(logging.INFO)
sqs = boto3.client('sqs')
sns = boto3.client('sns')
sts_account_id = None
# Retry configuration
MAX_RETRIES = 3
BASE_BACKOFF = 1  # seconds


def exponential_backoff(attempt):
    """
    Return the sleep duration (seconds) before retry number `attempt`.

    Exponential backoff with jitter: BASE_BACKOFF * 2**attempt plus a
    random 0-1s offset so concurrent retries don't stampede in lockstep.
    """
    return BASE_BACKOFF * (2 ** attempt) + random.uniform(0, 1)
def get_account_id():
    """
    Return the AWS account ID of the current credentials.

    Cached in the module-global `sts_account_id` so the STS call is
    made at most once per Lambda container lifetime.
    """
    global sts_account_id
    if sts_account_id is None:
        sts_account_id = boto3.client('sts').get_caller_identity()['Account']
    return sts_account_id
def get_topic_arn(domain):
    """
    Build the SNS topic ARN for a recipient domain.

    Naming convention (must match create-queue.sh): domain.tld -> domain-tld-topic.
    The ARN is constructed locally — no API call — so a topic that does
    not actually exist is only detected when publishing to it fails.
    """
    topic_name = domain.replace('.', '-') + '-topic'
    region = os.environ.get('AWS_REGION', 'us-east-2')
    account_id = get_account_id()
    return f"arn:aws:sns:{region}:{account_id}:{topic_name}"
def get_queue_url(domain):
"""
Generiert Queue-Namen aus Domain und holt URL.
Konvention: domain.tld -> domain-tld-queue
Fallback: Direkter SQS-Send für Domains ohne SNS-Topic.
"""
queue_name = domain.replace('.', '-') + '-queue'
try:
@@ -38,54 +52,91 @@ def get_queue_url(domain):
else:
raise
def publish_to_sns(topic_arn, message_body, msg_id):
    """
    Publish `message_body` to SNS with retry/backoff.

    Returns True on success. Returns False when the topic is missing or
    the caller lacks permission — the caller is expected to fall back to
    a direct SQS send. Re-raises the last ClientError after MAX_RETRIES
    failed attempts on other (transient) error codes.
    """
    attempt = 0
    while attempt < MAX_RETRIES:
        try:
            sns.publish(
                TopicArn=topic_arn,
                Message=message_body
            )
            logger.info(f"✅ Published {msg_id} to SNS: {topic_arn}")
            return True
        except ClientError as e:
            error_code = e.response['Error']['Code']
            # Fall back to SQS when the topic is not found OR permission is missing
            if error_code in ('NotFound', 'NotFoundException', 'AuthorizationError'):
                logger.info(f" SNS unavailable for {topic_arn} ({error_code}) — falling back to SQS")
                return False
            attempt += 1
            logger.warning(f"Retry {attempt}/{MAX_RETRIES} SNS: {error_code}")
            if attempt == MAX_RETRIES:
                raise
            time.sleep(exponential_backoff(attempt))
    # Defensive: the loop only exits via return/raise above.
    return False
def send_to_sqs(queue_url, message_body, msg_id):
    """
    Fallback path: send `message_body` directly to SQS (pre-SNS behavior).

    Retries up to MAX_RETRIES times with exponential backoff; re-raises
    the last ClientError once the retry budget is exhausted.
    """
    attempt = 0
    while attempt < MAX_RETRIES:
        try:
            sqs.send_message(
                QueueUrl=queue_url,
                MessageBody=message_body
            )
            logger.info(f"✅ Sent {msg_id} to SQS: {queue_url}")
            return
        except ClientError as e:
            attempt += 1
            error_code = e.response['Error']['Code']
            logger.warning(f"Retry {attempt}/{MAX_RETRIES} SQS: {error_code}")
            if attempt == MAX_RETRIES:
                raise
            time.sleep(exponential_backoff(attempt))
def lambda_handler(event, context):
"""
Nimmt SES Event entgegen, extrahiert Domain dynamisch,
verpackt Metadaten als 'Fake SNS' und sendet an die domain-spezifische SQS.
Mit integrierter Retry-Logik für SQS-Send.
Nimmt SES Event entgegen, extrahiert Domain dynamisch.
Strategie: SNS Publish (Fan-Out an Primary + Standby Queue).
Fallback: Direkter SQS-Send falls kein SNS-Topic existiert.
"""
try:
records = event.get('Records', [])
logger.info(f"Received event with {len(records)} records.")
for record in records:
ses_data = record.get('ses', {})
if not ses_data:
logger.warning(f"Invalid SES event: Missing 'ses' in record: {record}")
logger.warning(f"Invalid SES event: Missing 'ses' in record")
continue
mail = ses_data.get('mail', {})
receipt = ses_data.get('receipt', {})
# Domain extrahieren (aus erstem Recipient)
recipients = receipt.get('recipients', []) or mail.get('destination', [])
if not recipients:
logger.warning("No recipients in event - skipping")
continue
first_recipient = recipients[0]
domain = first_recipient.split('@')[-1].lower()
if not domain:
logger.error("Could not extract domain from recipient")
continue
# Wichtige Metadaten loggen
msg_id = mail.get('messageId', 'unknown')
source = mail.get('source', 'unknown')
logger.info(f"Processing Message-ID: {msg_id} for domain: {domain}")
logger.info(f" From: {source}")
logger.info(f" To: {recipients}")
# SES JSON als String serialisieren
logger.info(f" From: {source}")
logger.info(f" To: {recipients}")
ses_json_string = json.dumps(ses_data)
# Payload Größe loggen und checken (Safeguard)
payload_size = len(ses_json_string.encode('utf-8'))
logger.info(f" Metadata Payload Size: {payload_size} bytes")
if payload_size > 200000: # Arbitrary Limit < SQS 256KB
raise ValueError("Payload too large for SQS")
# Fake SNS Payload
logger.info(f" Metadata Payload Size: {payload_size} bytes")
if payload_size > 200000:
raise ValueError("Payload too large")
fake_sns_payload = {
"Type": "Notification",
"MessageId": str(uuid.uuid4()),
@@ -94,30 +145,20 @@ def lambda_handler(event, context):
"Message": ses_json_string,
"Timestamp": datetime.utcnow().isoformat() + "Z"
}
# Queue URL dynamisch holen
queue_url = get_queue_url(domain)
# SQS Send mit Retries
attempt = 0
while attempt < MAX_RETRIES:
try:
sqs.send_message(
QueueUrl=queue_url,
MessageBody=json.dumps(fake_sns_payload)
)
logger.info(f"✅ Successfully forwarded {msg_id} to SQS: {queue_url}")
break
except ClientError as e:
attempt += 1
error_code = e.response['Error']['Code']
logger.warning(f"Retry {attempt}/{MAX_RETRIES} for SQS send: {error_code} - {str(e)}")
if attempt == MAX_RETRIES:
raise
time.sleep(exponential_backoff(attempt))
message_body = json.dumps(fake_sns_payload)
# Strategie: SNS zuerst, SQS als Fallback
topic_arn = get_topic_arn(domain)
sns_success = publish_to_sns(topic_arn, message_body, msg_id)
if not sns_success:
# Kein SNS-Topic für diese Domain → direkt in SQS (wie bisher)
queue_url = get_queue_url(domain)
send_to_sqs(queue_url, message_body, msg_id)
return {'status': 'ok'}
except Exception as e:
logger.error(f"❌ Critical Error in Lambda Shim: {str(e)}", exc_info=True)
raise e

2
caddy/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
caddy-data/
caddy-config/

View File

@@ -19,8 +19,8 @@ services:
# email_autodiscover entfernt - Snippet ist jetzt in mail_certs eingebettet
# email.mobileconfig.html entfernt - Inhalt ist jetzt inline in mail_certs
- $PWD/email-setup:/var/www/email-setup
- caddy_data:/data
- caddy_config:/config
- ./caddy-data:/data
- ./caddy-config:/config
- /home/aknuth/log/caddy:/var/log/caddy
environment:
- CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN}
@@ -29,8 +29,3 @@ services:
networks:
mail_network:
external: true
volumes:
caddy_data:
external: true
caddy_config:

View File

@@ -48,6 +48,9 @@ export const config = {
// Monitoring
metricsPort: parseInt(process.env.METRICS_PORT ?? '8000', 10),
healthPort: parseInt(process.env.HEALTH_PORT ?? '8080', 10),
queueSuffix: process.env.QUEUE_SUFFIX ?? '-queue',
standbyMode: (process.env.STANDBY_MODE ?? 'false').toLowerCase() === 'true',
} as const;
export type Config = typeof config;
@@ -106,7 +109,7 @@ export function isInternalAddress(email: string): boolean {
/**
 * Convert a domain to its SQS queue name, e.g. bizmatch.net → bizmatch-net-queue.
 * The suffix comes from config.queueSuffix (QUEUE_SUFFIX env, default '-queue'),
 * so a standby worker can target '-standby-queue' instead.
 */
export function domainToQueueName(domain: string): string {
  return domain.replace(/\./g, '-') + config.queueSuffix;
}
/** Convert domain to S3 bucket name: bizmatch.net → bizmatch-net-emails */

View File

@@ -36,6 +36,9 @@ export class RulesProcessor {
workerName: string,
metricsCallback?: MetricsCallback,
): Promise<boolean> {
if (config.standbyMode) {
return false;
}
const rule = await this.dynamodb.getEmailRules(recipient.toLowerCase());
if (!rule) return false;

View File

@@ -26,7 +26,7 @@ import { BlocklistChecker } from '../email/blocklist.js';
import { BounceHandler } from '../email/bounce-handler.js';
import { parseEmail, isProcessedByWorker } from '../email/parser.js';
import { RulesProcessor } from '../email/rules-processor.js';
import { config } from '../config.js';
// ---------------------------------------------------------------------------
// Processor
// ---------------------------------------------------------------------------
@@ -258,34 +258,40 @@ export class MessageProcessor {
if (totalHandled === recipients.length) {
if (blockedRecipients.length === recipients.length) {
// All blocked
try {
await this.s3.markAsBlocked(
domain,
messageId,
blockedRecipients,
fromAddrFinal,
workerName,
);
await this.s3.deleteBlockedEmail(domain, messageId, workerName);
} catch (err: any) {
log(`⚠ Failed to handle blocked email: ${err.message ?? err}`, 'ERROR', workerName);
return false;
// All blocked — im Standby kein S3 anfassen
if (!config.standbyMode) {
try {
await this.s3.markAsBlocked(
domain,
messageId,
blockedRecipients,
fromAddrFinal,
workerName,
);
await this.s3.deleteBlockedEmail(domain, messageId, workerName);
} catch (err: any) {
log(`⚠ Failed to handle blocked email: ${err.message ?? err}`, 'ERROR', workerName);
return false;
}
}
} else if (successful.length > 0) {
await this.s3.markAsProcessed(
domain,
messageId,
workerName,
failedPermanent.length > 0 ? failedPermanent : undefined,
);
if (!config.standbyMode) {
await this.s3.markAsProcessed(
domain,
messageId,
workerName,
failedPermanent.length > 0 ? failedPermanent : undefined,
);
}
} else if (failedPermanent.length > 0) {
await this.s3.markAsAllInvalid(
domain,
messageId,
failedPermanent,
workerName,
);
if (!config.standbyMode) {
await this.s3.markAsAllInvalid(
domain,
messageId,
failedPermanent,
workerName,
);
}
}
// Summary