ia_dev/deploy/_lib/git-flow.sh
Nicolas Cantu 61cec6f430 Sync ia_dev: token resolution via .secrets/<env>/ia_token, doc updates
**Motivations:**
- Align master with current codebase (token from projects/<id>/.secrets/<env>/ia_token)
- Id resolution by mail To or by API token; no slug

**Root causes:**
- Token moved from conf.json to .secrets/<env>/ia_token; env from directory name

**Fixes:**
- Server and scripts resolve project+env by scanning all projects and envs

**Evolutions:**
- tickets-fetch-inbox routes by To address; notary-ai agents and API doc updated

**Affected files:**
- ai_working_help/server.js, docs, project_config.py, lib/project_config.sh
- projects/README.md, lecoffreio/docs/API.md, gitea-issues/tickets-fetch-inbox.py
2026-03-16 15:00:23 +01:00

294 lines
12 KiB
Bash

#!/usr/bin/env bash
# Git flow functions for automatic branch promotion and verification
#
# Prerequisites: This file must be sourced after env-map.sh and ssh.sh
# Functions used: get_env_target_ip, get_env_service_port, get_env_backend_internal_port, ssh_run
# Variables used: DEPLOY_SSH_KEY, DEPLOY_SSH_USER
#######################################
# Verify that a deployment succeeded.
# Runs three checks against the remote host over SSH:
#   1. HTTP health endpoint (via the router, falling back to the backend)
#   2. systemd units active (backend, frontend, router)
#   3. recent backend logs scanned for critical errors (warn-only)
# Globals:
#   DEPLOY_SSH_KEY  - SSH key path (default: ~/.ssh/id_ed25519)
#   DEPLOY_SSH_USER - SSH user (default: ncantu)
# Arguments:
#   $1 - environment name (resolved via get_env_* helpers from env-map.sh)
#   $2 - domain, used to address the per-domain systemd template units
# Returns:
#   0 when the deployment looks healthy, 1 otherwise
#######################################
verify_deployment_success() {
  local env="$1"
  local domain="$2"
  local ssh_key="${DEPLOY_SSH_KEY:-$HOME/.ssh/id_ed25519}"
  local ssh_user="${DEPLOY_SSH_USER:-ncantu}"
  local target_ip
  local service_port
  local backend_internal_port
  # These functions should be available from env-map.sh (sourced before this file)
  target_ip="$(get_env_target_ip "$env")"
  service_port="$(get_env_service_port "$env")"
  backend_internal_port="$(get_env_backend_internal_port "$env")"

  # 1. Give the services a few seconds to start
  info "[verify] Waiting for services to start (15 seconds)..."
  sleep 15

  # 2. HTTP health check with retries.
  #    The check is run on the remote host itself (via SSH); the router
  #    routes /api/ to the backend.
  info "[verify] Checking health endpoint via router (port ${service_port})..."
  local health_status
  local max_retries=3
  local retry_count=0
  while [[ $retry_count -lt $max_retries ]]; do
    health_status=$(ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
      "curl -s -o /dev/null -w '%{http_code}' --max-time 10 --connect-timeout 5 'http://localhost:${service_port}/api/v1/public/health' 2>/dev/null || echo '000'")
    if [[ "$health_status" == "200" ]]; then
      info "[verify] Health check passed via router (HTTP $health_status)"
      break
    fi
    retry_count=$((retry_count + 1))
    if [[ $retry_count -lt $max_retries ]]; then
      info "[verify] Health check attempt $retry_count failed (HTTP $health_status), retrying in 5 seconds..."
      sleep 5
    fi
  done
  if [[ "$health_status" != "200" ]]; then
    # Fall back to hitting the backend directly, bypassing the router
    info "[verify] Router check failed (HTTP $health_status), trying backend directly (port ${backend_internal_port})..."
    health_status=$(ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
      "curl -s -o /dev/null -w '%{http_code}' --max-time 10 --connect-timeout 5 'http://localhost:${backend_internal_port}/api/v1/public/health' 2>/dev/null || echo '000'")
    # If 404, backend may mount API at root (API_ROOT_URL=/); try path without /api prefix
    if [[ "$health_status" == "404" ]]; then
      info "[verify] Backend returned 404 for /api/v1/public/health, trying /v1/public/health..."
      health_status=$(ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
        "curl -s -o /dev/null -w '%{http_code}' --max-time 10 --connect-timeout 5 'http://localhost:${backend_internal_port}/v1/public/health' 2>/dev/null || echo '000'")
    fi
    if [[ "$health_status" != "200" ]]; then
      error "[verify] Health check failed: HTTP $health_status"
      # Dump backend logs for diagnosis
      info "[verify] Backend logs (last 50 lines):"
      ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
        "journalctl -u lecoffreio-backend@${domain}.service --no-pager -n 50 2>/dev/null || true" | sed 's/^/ /'
      # Show the state of the services
      info "[verify] Service status:"
      ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
        "systemctl status lecoffreio-backend@${domain}.service lecoffreio-router@${domain}.service --no-pager -l 2>/dev/null || true" | sed 's/^/ /'
      # Check whether the backend port is listening at all
      info "[verify] Checking if backend port ${backend_internal_port} is listening:"
      ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
        "ss -tlnp | grep ':${backend_internal_port}' || echo ' Port ${backend_internal_port} is not listening'" | sed 's/^/ /'
      error "[verify] Backend may not be fully started yet. Check logs: journalctl -u lecoffreio-backend@${domain}.service -n 50"
      error "[verify] Router status: systemctl status lecoffreio-router@${domain}.service"
      return 1
    fi
    info "[verify] Health check passed via direct backend (HTTP $health_status)"
  fi

  # 3. systemd unit check with retries (the frontend can take longer to start)
  info "[verify] Checking systemd services..."
  local services_status
  local max_service_retries=10
  local service_retry_count=0
  local all_active=false
  while [[ $service_retry_count -lt $max_service_retries && "$all_active" != "true" ]]; do
    # Lines remaining after the grep -v are units that are neither active nor activating
    services_status=$(ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
      "systemctl is-active lecoffreio-backend@${domain}.service lecoffreio-frontend@${domain}.service lecoffreio-router@${domain}.service 2>/dev/null | grep -vE '^(active|activating)$' || true")
    if [[ -z "$services_status" ]]; then
      # All units are at least activating; verify they are truly "active"
      local all_status
      all_status=$(ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
        "systemctl is-active lecoffreio-backend@${domain}.service lecoffreio-frontend@${domain}.service lecoffreio-router@${domain}.service 2>/dev/null")
      if echo "$all_status" | grep -q "activating"; then
        # While activating, scan the frontend logs for hard failures so we
        # can bail out early instead of waiting for all retries
        local frontend_errors
        frontend_errors=$(ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
          "journalctl -u lecoffreio-frontend@${domain}.service --since '2 minutes ago' --no-pager 2>/dev/null | { grep -iE '(error|fatal|failed)' || true; } | tail -5")
        if [[ -n "$frontend_errors" ]]; then
          error "[verify] Frontend errors detected while activating:"
          echo "$frontend_errors" | sed 's/^/ /'
          error "[verify] Check frontend logs: journalctl -u lecoffreio-frontend@${domain}.service -n 50"
          return 1
        fi
        service_retry_count=$((service_retry_count + 1))
        if [[ $service_retry_count -lt $max_service_retries ]]; then
          info "[verify] Some services still activating, waiting 10 seconds (attempt $service_retry_count/$max_service_retries)..."
          sleep 10
        fi
      else
        all_active=true
      fi
    else
      service_retry_count=$((service_retry_count + 1))
      if [[ $service_retry_count -lt $max_service_retries ]]; then
        info "[verify] Some services not active, waiting 10 seconds (attempt $service_retry_count/$max_service_retries)..."
        echo "$services_status" | sed 's/^/ /'
        sleep 10
      fi
    fi
  done
  if [[ "$all_active" != "true" ]]; then
    # Final pass to report the end state and relevant logs
    services_status=$(ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
      "systemctl is-active lecoffreio-backend@${domain}.service lecoffreio-frontend@${domain}.service lecoffreio-router@${domain}.service 2>/dev/null || echo 'unknown'")
    error "[verify] Some services are not active after $max_service_retries attempts:"
    echo "$services_status" | sed 's/^/ /'
    # Show frontend logs if the frontend unit is still activating.
    # Bug fix: `systemctl is-active` prints only the state per line (no unit
    # name), so the previous `grep -q "activating.*frontend"` on its combined
    # output could never match; query the frontend unit directly instead.
    local frontend_state
    frontend_state=$(ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
      "systemctl is-active lecoffreio-frontend@${domain}.service 2>/dev/null || true")
    if [[ "$frontend_state" == "activating" ]]; then
      info "[verify] Frontend logs (last 30 lines):"
      ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
        "journalctl -u lecoffreio-frontend@${domain}.service --no-pager -n 30 2>/dev/null || true" | sed 's/^/ /'
    fi
    error "[verify] Check service status: systemctl status lecoffreio-backend@${domain}.service lecoffreio-frontend@${domain}.service lecoffreio-router@${domain}.service"
    return 1
  fi
  info "[verify] All systemd services are active"

  # 4. Scan recent backend logs for critical errors (was mis-numbered "3.")
  info "[verify] Checking for critical errors in logs..."
  local critical_errors
  critical_errors=$(ssh_run "$ssh_key" "$ssh_user" "$target_ip" \
    "journalctl -u lecoffreio-backend@${domain}.service --since '5 minutes ago' --no-pager 2>/dev/null | { grep -iE '(error|fatal|critical)' || true; } | tail -10")
  if [[ -n "$critical_errors" ]]; then
    warning "[verify] Critical errors found in recent logs:"
    echo "$critical_errors" | sed 's/^/ /'
    # Do not fail the verification on log matches alone; only fatal
    # conditions above are blocking. Finer-grained logic could go here.
  fi
  info "[verify] Deployment verification passed"
  return 0
}
#######################################
# Return the next environment in the promotion chain
# dev → test → pprod → prod.
# Arguments:
#   $1 - current environment name
# Outputs:
#   Prints the successor environment, or an empty string when there is
#   none (prod, or an unrecognized input).
#######################################
get_next_env() {
  local env="$1"
  local next=""
  case "$env" in
    dev)   next="test" ;;
    test)  next="pprod" ;;
    pprod) next="prod" ;;
  esac
  echo "$next"
}
#######################################
# Automatically promote the dev branch to the next environment's branch
# (dev → test → pprod → prod). Only runs when the current branch is "dev".
# Arguments:
#   $1 - current environment name
#   $2 - current git branch
#   $3 - repository root path
#   $4 - git remote name (default: lecoffre_ng)
# Returns:
#   0 on success or when there is nothing to promote,
#   1 when the merge or the push fails
#######################################
auto_promote_to_next_env() {
  local current_env="$1"
  local current_branch="$2"
  local project_root="$3"
  local deploy_git_remote="${4:-lecoffre_ng}"
  local next_env
  local next_branch
  # Promotion only starts from the dev branch
  if [[ "$current_branch" != "dev" ]]; then
    return 0
  fi
  next_env=$(get_next_env "$current_env")
  if [[ -z "$next_env" ]]; then
    info "[promote] No next environment (already at prod)"
    return 0
  fi
  # The target branch carries the same name as the target environment
  case "$next_env" in
    test) next_branch="test" ;;
    pprod) next_branch="pprod" ;;
    prod) next_branch="prod" ;;
    *) return 0 ;;
  esac
  info "[promote] Auto-promoting dev → $next_branch for $next_env environment..."
  # 1. Fetch the target branch (non-fatal: it may not exist on the remote yet)
  git -C "$project_root" fetch "$deploy_git_remote" "$next_branch" || true
  # 2. Checkout the target branch, creating it if necessary
  git -C "$project_root" checkout "$next_branch" || {
    # Branch doesn't exist locally, create it from remote
    git -C "$project_root" checkout -b "$next_branch" "${deploy_git_remote}/${next_branch}" 2>/dev/null || {
      # Remote branch doesn't exist, create new branch
      git -C "$project_root" checkout -b "$next_branch"
    }
  }
  # 3. Merge dev into target branch
  if ! git -C "$project_root" merge dev --allow-unrelated-histories --no-edit; then
    error "[promote] Merge dev → $next_branch failed. Resolve conflicts manually."
    git -C "$project_root" checkout dev
    return 1
  fi
  # 4. Push. Bug fix: the push result was previously ignored, so a failed
  #    push still fell through to the success message below.
  info "[promote] Pushing $next_branch..."
  if ! git -C "$project_root" push "$deploy_git_remote" "$next_branch"; then
    error "[promote] Push of $next_branch to $deploy_git_remote failed"
    git -C "$project_root" checkout dev
    return 1
  fi
  # 5. Return to dev
  info "[promote] Returning to dev branch..."
  git -C "$project_root" checkout dev
  success "[promote] Successfully promoted dev → $next_branch"
  info "[promote] Next step: deploy to $next_env with: ./deploy/scripts_v2/deploy.sh $next_env"
  return 0
}
#######################################
# Stage all changes, commit with message, and push the current branch.
# Runs a best-effort `npm run lint:fix` pass over the three sub-projects
# before staging (failures are non-blocking warnings).
# Usage: git_add_commit_push <project_root> <commit_message> [remote]
# Example: git_add_commit_push /path/to/repo "fix: something"
# Arguments:
#   $1 - repository root path (default: .)
#   $2 - commit message (required)
#   $3 - git remote name (default: lecoffre_ng)
# Returns:
#   0 on success, 1 on missing message or any failed git step
#######################################
git_add_commit_push() {
  local project_root="${1:-.}"
  local commit_message="$2"
  local deploy_git_remote="${3:-lecoffre_ng}"
  local current_branch
  if [[ -z "$commit_message" ]]; then
    error "[git] Commit message required"
    return 1
  fi
  # Lint --fix on all projects before staging (resources, backend, frontend). Non-blocking.
  info "[lint] Running lint --fix on lecoffre-ressources-dev, lecoffre-back-main, lecoffre-front-main..."
  (cd "${project_root}/lecoffre-ressources-dev" && npm run lint:fix) || warning "[lint] lecoffre-ressources-dev lint:fix failed (non-blocking)"
  (cd "${project_root}/lecoffre-back-main" && npm run lint:fix) || warning "[lint] lecoffre-back-main lint:fix failed (non-blocking)"
  (cd "${project_root}/lecoffre-front-main" && npm run lint:fix) || warning "[lint] lecoffre-front-main lint:fix failed (non-blocking)"
  info "[lint] Lint:fix step done"
  info "[git] Staging all changes (add -A)..."
  git -C "$project_root" add -A || {
    error "[git] git add -A failed"
    return 1
  }
  info "[git] Committing..."
  git -C "$project_root" commit -m "$commit_message" || {
    error "[git] commit failed"
    return 1
  }
  current_branch=$(git -C "$project_root" branch --show-current)
  # Bug fix: on a detached HEAD `branch --show-current` prints nothing,
  # which previously produced a confusing `git push <remote> ''` failure.
  if [[ -z "$current_branch" ]]; then
    error "[git] Cannot push: HEAD is detached (no current branch)"
    return 1
  fi
  info "[git] Pushing to $deploy_git_remote $current_branch..."
  git -C "$project_root" push "$deploy_git_remote" "$current_branch" || {
    error "[git] push failed"
    return 1
  }
  success "[git] add -A, commit, push done"
  return 0
}