# ===================================================================
# nginx.conf (or a file under /etc/nginx/conf.d/aiostreams.conf)
# ===================================================================
#
# This config listens on port 443 for aiostreams.bolabaden.org (HTTPS),
# and proxies all requests to one of three upstreams:
#   - aiostreams-cf.bolabaden.org
#   - aiostreams-koyeb.bolabaden.org
#   - aiostreams.bolabaden.duckdns.org
#
# If any one of the three backends becomes unreachable (TCP error, 5xx, etc.),
# NGINX will mark it “down” for a short period and continue sending new requests
# to the remaining healthy backends.  When it recovers, NGINX will automatically
# send traffic to it again.
#
# We use “ip_hash” so that each client IP tends to hit the same backend (session affinity).
# If that backend dies, NGINX will transparently route the next request to another healthy
# backend.  No DNS‐level trickery is used here—NGINX does all proxying at L7.
#
# Replace the SSL certificate/key paths below with your actual .crt/.key files.

################################################################################
# 1) GLOBAL CONTEXT / EVENTS
################################################################################

user   nginx;                                # worker processes run as this unprivileged user
worker_processes  auto;                      # spawn one worker per available CPU core
error_log  /var/log/nginx/error.log  notice; # global error log at "notice" verbosity
pid        /var/run/nginx.pid;               # master-process PID file location

events {
    # Maximum simultaneous connections handled by EACH worker process.
    worker_connections 1024;
}


################################################################################
# 2) HTTP CONTEXT
################################################################################

http {
    # mime types, log format, etc. can be inherited from defaults.
    include       /etc/nginx/mime.types;
    default_type  application/octet-stream;   # fallback Content-Type for unknown extensions

    # Tuning timeouts for proxy connections:
    send_timeout        30s;   # abort if the client stops reading the response for 30s
    keepalive_timeout   65s;   # keep idle client connections open for up to 65s
    client_max_body_size  10m; # reject request bodies (uploads) larger than 10 MB with 413

    # ------------------------------------------------------------------------
    # 2.a) Define the upstream block with all three backends
    #     “ip_hash” ensures session‐affinity by client IP.  If that server fails,
    #     NGINX (passively) marks it down (max_fails + fail_timeout) and sends
    #     to the next available IP.
    #
    #     Note: We are proxying to each backend over HTTPS (port 443),
    #     so we append “:443 ssl” to each server line.  NGINX by default will
    #     initiate an SSL hand‐shake to the upstream.
    # ------------------------------------------------------------------------

    upstream aiostreams_pool {
        # Use ip_hash for basic session affinity (same client IP → same backend):
        ip_hash;

        # Primary backends.  max_fails=3 fail_timeout=30s means: if 3 attempts
        # to connect to (or receive a valid response from) a backend fail
        # within a 30-second window — they need not be strictly consecutive —
        # that backend is considered unavailable for the next 30 seconds,
        # after which NGINX probes it again with live traffic.
        server aiostreams-cf.bolabaden.org:443   max_fails=3 fail_timeout=30s;
        server aiostreams-koyeb.bolabaden.org:443 max_fails=3 fail_timeout=30s;
        server aiostreams.bolabaden.duckdns.org:443 max_fails=3 fail_timeout=30s;
    }


    # ------------------------------------------------------------------------
    # 2.b) Redirect all HTTP (port 80) → HTTPS (port 443).
    #     This ensures clients typing “http://…” get automatically sent
    #     to the secure endpoint.
    # ------------------------------------------------------------------------
    server {
        listen       80;
        listen       [::]:80;
        server_name  aiostreams.bolabaden.org;

        # BUGFIX: ACME (Let's Encrypt) HTTP-01 challenges always arrive on
        # port 80, so they must be served here, BEFORE the blanket redirect —
        # otherwise the "certbot certonly --webroot" renewal flow described in
        # the notes at the bottom of this file gets 301-redirected and fails.
        location /.well-known/acme-challenge/ {
            root /var/www/letsencrypt;
            allow all;
        }

        # Redirect every other request to https://... preserving URI:
        location / {
            return 301 https://$host$request_uri;
        }
    }


    # ------------------------------------------------------------------------
    # 2.c) Main HTTPS server block for aiostreams.bolabaden.org
    # ------------------------------------------------------------------------
    server {
        listen              443 ssl http2;
        listen              [::]:443 ssl http2;
        server_name         aiostreams.bolabaden.org;

        # --------------------------------------------------------------------
        # SSL Certificate / Key
        # --------------------------------------------------------------------
        #
        # You must replace these with the actual paths to your certificate
        # and private key (e.g. from Let's Encrypt or another CA).
        #
        ssl_certificate     /etc/letsencrypt/live/aiostreams.bolabaden.org/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/aiostreams.bolabaden.org/privkey.pem;

        # (Optional but recommended)
        # Tune SSL protocols/ciphers for security.  Example:
        ssl_protocols       TLSv1.2 TLSv1.3;
        ssl_ciphers         HIGH:!aNULL:!MD5;
        ssl_prefer_server_ciphers on;

        # --------------------------------------------------------------------
        # Access and Error Logs (optional, but recommended for debugging)
        # --------------------------------------------------------------------
        access_log  /var/log/nginx/aiostreams.access.log  combined;
        error_log   /var/log/nginx/aiostreams.error.log   warn;

        # --------------------------------------------------------------------
        # Proxy Buffering / Timeouts (adjust to suit your app's needs)
        # --------------------------------------------------------------------
        proxy_buffer_size          16k;
        proxy_buffers              4 32k;
        proxy_busy_buffers_size    64k;
        proxy_connect_timeout      5s;   # fail fast on unreachable backends
        proxy_send_timeout         30s;
        proxy_read_timeout         30s;

        # BUGFIX: this directive previously appeared twice ("on" and then
        # "off"), which is a fatal duplicate-directive error — nginx refuses
        # to load such a configuration.  Exactly one occurrence is kept.
        # OFF is chosen because a streaming app benefits from unbuffered
        # pass-through; change to "on" if responses are small and bufferable.
        proxy_buffering            off;

        # --------------------------------------------------------------------
        # Main Location: proxy everything (/) to the upstream pool
        # --------------------------------------------------------------------
        location / {
            # Pass requests to our upstream group:
            proxy_pass https://aiostreams_pool;

            # Preserve the original Host header so the backends see
            # "aiostreams.bolabaden.org":
            proxy_set_header Host               $host;
            proxy_set_header X-Real-IP          $remote_addr;
            proxy_set_header X-Forwarded-For    $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto  $scheme;

            # On a connection error, a timeout, or HTTP 502/503/504 from a
            # backend, retry the request on the next available upstream.
            # NOTE(review): non-idempotent requests (e.g. POST) are not
            # retried after a failed status unless "non_idempotent" is added
            # here — confirm whether that is desired for this app.
            proxy_next_upstream error timeout http_502 http_503 http_504;

            # Number of retries before giving up:
            proxy_next_upstream_tries 3;

            # Send SNI during the TLS handshake with the upstream:
            proxy_ssl_server_name on;

            # BUGFIX: with an upstream{} group, the default TLS name is taken
            # from the proxy_pass URL — i.e. the literal group name
            # "aiostreams_pool" — which no backend certificate can ever match,
            # so "proxy_ssl_verify on" below would fail every request.  Use a
            # real hostname instead.
            # NOTE(review): assumes the backend certificates cover
            # aiostreams.bolabaden.org (e.g. a *.bolabaden.org wildcard);
            # otherwise set a name matching your backends' certs — confirm.
            proxy_ssl_name $host;

            # (Optional) If your backends use self-signed certs, disable verification:
            # proxy_ssl_verify off;
            #
            # Otherwise (recommended), keep verification ON and trust the CA bundle:
            proxy_ssl_verify on;
            # NOTE(review): this bundle path is distro-specific —
            # /etc/ssl/certs/ca-bundle.crt exists on RHEL/Fedora, while
            # Debian/Ubuntu ship /etc/ssl/certs/ca-certificates.crt.  Confirm
            # for your system.
            proxy_ssl_trusted_certificate /etc/ssl/certs/ca-bundle.crt;
        }

        # --------------------------------------------------------------------
        # ACME note: HTTP-01 challenges always arrive on PORT 80, so a webroot
        # location for /.well-known/acme-challenge/ belongs in the port-80
        # server block, before its redirect — a location here (behind TLS) is
        # never consulted for HTTP-01 validation.
        # --------------------------------------------------------------------

        # --------------------------------------------------------------------
        # Custom 404 page served from the local docroot:
        # --------------------------------------------------------------------
        error_page 404 /404.html;
        location = /404.html {
            internal;   # only reachable via internal error_page redirect
            root /usr/share/nginx/html;
        }
    }

    # ------------------------------------------------------------------------
    # 2.d) (Optional) Upstream health check—passive only (built-in):
    #     NGINX will automatically mark a backend “down” after max_fails,
    #     and retry only after fail_timeout.  If you need active health checks,
    #     you’d install the nginx‐upstream‐healthcheck module (third-party).
    # ------------------------------------------------------------------------
}


################################################################################
# 3) NOTES ON HOW THIS WORKS
################################################################################

# 1) ip_hash
#    - Guarantees “session affinity” by hashing the client’s IP and sending it
#      to the same backend each time (so long as that backend is up).
#    - If the chosen backend becomes unavailable (because of max_fails/fail_timeout),
#      NGINX transparently sends the next request to a healthy backend.
#    - No DNS changes are ever made; clients always talk to https://aiostreams.bolabaden.org,
#      and NGINX does the upstream routing.

# 2) max_fails / fail_timeout
#    - “max_fails=3 fail_timeout=30s” means:
#         • If 3 attempts to connect to (or receive a valid response from) that
#           backend fail within a 30-second window — they need not be strictly
#           consecutive — the backend is marked “unhealthy” and is skipped for
#           the next 30 seconds.
#         • After those 30s pass, NGINX will retry the backend on a new request.
#    - This is a passive health check—based on traffic failures.

# 3) proxy_next_upstream
#    - If NGINX sees a connection error, a timeout, or one of the configured status
#      codes (502, 503, or 504), it retries the request on the next healthy backend.
#    - Use “proxy_next_upstream_tries 3;” to limit retries.

# 4) proxy_ssl_server_name
#    - By default NGINX sends no SNI in the TLS handshake to an HTTPS upstream, and
#      the name it uses for certificate checks comes from the “proxy_pass” URL —
#      which here is the upstream GROUP name (“aiostreams_pool”), not a real
#      hostname.  “proxy_ssl_server_name on;” enables SNI; use “proxy_ssl_name” to
#      pick a hostname the backend certificates actually cover.

# 5) If you require faster health checks (active probes), you could compile or install
#    the “nginx_upstream_check_module” (third party).  But the above passive checks
#    are sufficient for most “any one of three must be up” scenarios.

# 6) SSL Certificates
#    - This example assumes you store Let’s Encrypt certificates under /etc/letsencrypt/…
#    - If you haven’t yet obtained a certificate for aiostreams.bolabaden.org, do:
#         certbot certonly --webroot -w /var/www/html -d aiostreams.bolabaden.org
#      then update the “ssl_certificate” paths above accordingly.

################################################################################
# END OF CONFIG
################################################################################