smart_ide/deploy/nginx/sites/ia.enso.4nkweb.com.conf
Nicolas Cantu 4d35944de7 Document full HTTPS URLs for ia.enso Ollama and AnythingLLM
**Motivations:**
- Operators need explicit public URLs in one place.

**Root causes:**
- Some documented paths lacked the full https:// host prefix.

**Fixes:**
- N/A.

**Evolutions:**
- Site conf header, README table, services.md, feature doc, deploy script success output.

**Affected pages:**
- deploy/nginx/sites/ia.enso.4nkweb.com.conf
- deploy/nginx/README-ia-enso.md
- deploy/nginx/deploy-ia-enso-to-proxy.sh
- docs/services.md
- docs/features/ia-enso-nginx-proxy-ollama-anythingllm.md
2026-03-23 01:30:16 +01:00

# ia.enso.4nkweb.com — reverse proxy to LAN host (Ollama + AnythingLLM).
#
# Public HTTPS URLs (after TLS + nginx reload):
# AnythingLLM UI: https://ia.enso.4nkweb.com/anythingllm/
# Ollama OpenAI API: https://ia.enso.4nkweb.com/ollama/v1/ (e.g. .../v1/models, .../v1/chat/completions)
# Ollama native API: https://ia.enso.4nkweb.com/ollama/api/tags (and other /api/* paths)
# /ollama/* requires Authorization: Bearer <secret> at nginx (see map); Cursor base URL: .../ollama/v1
#
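# Quick sanity check once TLS, the map and this site are live (illustrative curl calls;
# <secret> stands for the shared Bearer value configured in the map, not a literal):
#   curl -H "Authorization: Bearer <secret>" https://ia.enso.4nkweb.com/ollama/v1/models
#   curl -I https://ia.enso.4nkweb.com/anythingllm/
#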
# Prerequisites on the proxy host:
# - TLS certificate for ia.enso.4nkweb.com (e.g. certbot).
# - In the main nginx `http { }` block, include the Bearer map (see http-maps/ia-enso-ollama-bearer.map.conf.example).
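# - Also in `http { }`, a standard WebSocket upgrade map (`map $http_upgrade $connection_upgrade { ... }`),
#   since the /anythingllm/ location below references $connection_upgrade.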
#
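# Minimal sketch of the Bearer map (the shipped .example file is authoritative; "changeme" below
# is only a placeholder value):
#   map $http_authorization $ia_enso_ollama_authorized {
#       default 0;
#       "Bearer changeme" 1;
#   }
#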
# Upstream backend: replaced at deploy time (default 192.168.1.164). Manual install: replace __IA_ENSO_BACKEND_IP__.
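#   (one way to do the manual substitution: sed -i 's/__IA_ENSO_BACKEND_IP__/192.168.1.164/g' ia.enso.4nkweb.com.conf)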
upstream ia_enso_ollama {
    server __IA_ENSO_BACKEND_IP__:11434;
    keepalive 8;
}

upstream ia_enso_anythingllm {
    server __IA_ENSO_BACKEND_IP__:3001;
    keepalive 8;
}
server {
    listen 80;
    server_name ia.enso.4nkweb.com;

    location /.well-known/acme-challenge/ {
        root /var/www/certbot;
    }

    location / {
        return 301 https://$host$request_uri;
    }
}
server {
    listen 443 ssl http2;
    server_name ia.enso.4nkweb.com;

    ssl_certificate /etc/letsencrypt/live/ia.enso.4nkweb.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/ia.enso.4nkweb.com/privkey.pem;
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    ssl_session_timeout 10m;

    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-Content-Type-Options "nosniff" always;

    client_max_body_size 100M;
    # Ollama OpenAI-compatible API: require Authorization: Bearer <shared secret> (see map file).
    # The trailing slash on proxy_pass strips the /ollama/ prefix (e.g. /ollama/v1/models -> /v1/models upstream).
    location /ollama/ {
        if ($ia_enso_ollama_authorized = 0) {
            return 401;
        }
        proxy_pass http://ia_enso_ollama/;
        proxy_http_version 1.1;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Connection "";
        # Ollama does not need the client Bearer; clear it so the gate secret is not forwarded downstream.
        proxy_set_header Authorization "";
        proxy_buffering off;
        proxy_read_timeout 3600s;
        proxy_send_timeout 3600s;
    }
    # AnythingLLM UI + API (application login). Subpath stripped when forwarding.
    location /anythingllm/ {
        proxy_pass http://ia_enso_anythingllm/;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Prefix /anythingllm;
        proxy_read_timeout 3600s;
        proxy_send_timeout 3600s;
    }

    location = /anythingllm {
        return 301 https://$host/anythingllm/;
    }
}