Fix Dockerfile & config

This commit is contained in:
gisele-igore 2023-04-03 17:18:56 +02:00
parent 36df570f8c
commit 71db439a08
5 changed files with 31 additions and 482 deletions

View File

@@ -5,6 +5,7 @@ orbs:
helm: circleci/helm@2.0.1
jobs:
build-push-docker-image:
docker:
- image: cimg/base:stable
@@ -12,6 +13,10 @@ jobs:
TAG: << pipeline.git.tag >>
steps:
- checkout
- add_ssh_keys:
fingerprints:
- "4c:8e:00:16:94:44:d9:ad:e9:e9:2c:8b:02:d4:8d:7a"
- run: cp $HOME/.ssh/id_rsa_4c8e00169444d9ade9e92c8b02d48d7a id_rsa
- setup_remote_docker:
version: 20.10.12
docker_layer_caching: true

View File

@@ -5,15 +5,14 @@ WORKDIR leCoffre
RUN npm install -D prisma@4.11.0
COPY package.json ./
COPY src/common/databases/schema.prisma ./src/common/databases/schema.prisma
RUN npx prisma generate
ARG FINGERPRINT
ENV FINGERPRINT=$FINGERPRINT
COPY id_rsa_${FINGERPRINT} /root/.ssh/id_ed25519
RUN chmod 600 ~/.ssh/id_ed25519
RUN eval "$(ssh-agent -s)" && ssh-add /root/.ssh/id_ed25519
RUN apk update && apk add openssh-client git
COPY .ssh/id_rsa /root/.ssh/id_rsa
RUN chmod 600 ~/.ssh/id_rsa
RUN eval "$(ssh-agent -s)" && ssh-add /root/.ssh/id_rsa
RUN ssh-keyscan github.com >> /root/.ssh/known_hosts
RUN npm install --frozen-lockfile
# Rebuild the source code only when needed
@@ -21,11 +20,12 @@ FROM node:19-alpine AS builder
WORKDIR leCoffre
COPY . .
COPY --from=deps leCoffre/node_modules ./node_modules
COPY --from=deps leCoffre/package.json package.json
COPY tsconfig.json tsconfig.json
COPY src src
RUN apk update && apk add openssh-client git
#COPY node_modules ./node_modules
COPY src/common/databases/schema.prisma ./src/common/databases/schema.prisma
RUN npx prisma generate
RUN npm run build
@@ -34,14 +34,10 @@ FROM node:19-alpine AS production
WORKDIR leCoffre
RUN unset FINGERPRINT
RUN adduser -D lecoffreuser --uid 10000 && chown -R lecoffreuser .
COPY --from=builder --chown=lecoffreuser leCoffre/node_modules ./node_modules
COPY --from=builder --chown=lecoffreuser leCoffre/dist/app/api ./dist/api
COPY --from=builder --chown=lecoffreuser leCoffre/dist/entries ./dist/entries
COPY --from=builder --chown=lecoffreuser leCoffre/dist/common ./dist/common
COPY --from=builder --chown=lecoffreuser leCoffre/src/common/databases/ ./src/common/databases/
COPY --from=builder --chown=lecoffreuser leCoffre/dist dist
COPY --from=builder --chown=lecoffreuser leCoffre/package.json ./package.json
USER lecoffreuser

View File

@@ -1,5 +1,5 @@
apiVersion: v2
name: tezos-link
name: leCoffre-back
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.

View File

@@ -1,61 +1,43 @@
global:
## @param nameOverride String to partially override `tezoslink.name` template with a string (will prepend the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override `tezoslink.fullname` template with a string
##
fullnameOverride: ""
## @param namespaceOverride String to fully override helpers.namespace
##
namespaceOverride: ""
api:
replicas: 1
image:
repository: rg.fr-par.scw.cloud/tezoslink/p1-api
repository: rg.fr-par.scw.cloud/lecoffre/api
tag: 0.1.0
pullPolicy: Always
service:
ports:
http: 8000
ingress:
host: p2.api.tezoslink.smart-chain.fr
host: api.lecoffre.smart-chain.fr
tls:
hosts:
- p2.api.tezoslink.smart-chain.fr
- api.lecoffre.smart-chain.fr
secretName: api-tls
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
env:
DATABASE_USERNAME: "tz-backend"
DATABASE_PASSWORD: "Titi123!"
DATABASE_URL: "tezoslink-postgresql.tezoslink-postgresql.svc.cluster.local:5432"
DATABASE_HOSTNAME: "localhost"
DATABASE_PORT: "59445"
DATABASE_USER: "postgres"
DATABASE_PASSWORD: "0000"
DATABASE_NAME: "postgres"
POSTGRES_PASSWORD: "0000"
APP_LABEL: "lecoffre"
APP_PORT: 3001
API_ROOT_URL: /api
resources:
limits: {}
requests:
cpu: 100m
memory: 256Mi
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.livenessProbe.enabled Turn on and off liveness probe
## @param operator.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param operator.livenessProbe.periodSeconds How often to perform the probe
## @param operator.livenessProbe.timeoutSeconds When the probe times out
## @param operator.livenessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.livenessProbe.successThreshold Minimum consecutive successes for the probe
##
livenessProbe:
enabled: true
path: /health
@@ -65,15 +47,6 @@ api:
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.readinessProbe.enabled Turn on and off readiness probe
## @param operator.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param operator.readinessProbe.periodSeconds How often to perform the probe
## @param operator.readinessProbe.timeoutSeconds When the probe times out
## @param operator.readinessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.readinessProbe.successThreshold Minimum consecutive successes for the probe
##
readinessProbe:
enabled: true
path: /health
@@ -83,428 +56,3 @@ api:
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
front:
replicas: 1
image:
repository: rg.fr-par.scw.cloud/tezoslink/p1-frontend
tag: 0.0.5
pullPolicy: Always
service:
ports:
http: 8080
ingress:
host: p2.front.tezoslink.smart-chain.fr
tls:
hosts:
- p2.front.tezoslink.smart-chain.fr
secretName: front-tls
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
env:
resources:
limits: {}
requests:
cpu: 100m
memory: 256Mi
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.livenessProbe.enabled Turn on and off liveness probe
## @param operator.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param operator.livenessProbe.periodSeconds How often to perform the probe
## @param operator.livenessProbe.timeoutSeconds When the probe times out
## @param operator.livenessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.livenessProbe.successThreshold Minimum consecutive successes for the probe
##
livenessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.readinessProbe.enabled Turn on and off readiness probe
## @param operator.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param operator.readinessProbe.periodSeconds How often to perform the probe
## @param operator.readinessProbe.timeoutSeconds When the probe times out
## @param operator.readinessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.readinessProbe.successThreshold Minimum consecutive successes for the probe
##
readinessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
proxy:
testnet:
enabled: true
replicaCount: 1
network: "mainnet"
image:
repository: rg.fr-par.scw.cloud/tezoslink/p1-proxy
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "0.1.0"
env:
DATABASE_USERNAME: "tz-backend"
DATABASE_PASSWORD: "Titi123!"
DATABASE_URL: "tezoslink-postgresql.tezoslink-postgresql.svc.cluster.local:5432"
ARCHIVE_NODES_URL: "archive-node.poc-tzk8s.svc.cluster.local"
TEZOS_ARCHIVE_PORT: "8732"
ROLLING_NODES_URL: "rolling-node.poc-tzk8s.svc.cluster.local"
TEZOS_ROLLING_PORT: "8732"
TEZOS_NETWORK: "mainnet"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
## @param Automount service account token for the server service account
##
automountServiceAccountToken: true
## Proxy Service
##
service:
## @param proxy.service.type Kubernetes service type
##
type: ClusterIP
## @param proxy.service.ports.http proxy service port
##
ports:
http: 8001
## @param proxy.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` for headless service
## e.g:
## clusterIP: None
##
clusterIP: ""
## @param proxy.service.nodePorts.http Kubernetes Service nodePort
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
## e.g:
## nodePort: 30080
##
nodePorts:
http: ""
## @param proxy.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param proxy.service.loadBalancerSourceRanges Address that are allowed when svc is `LoadBalancer`
## Set the LoadBalancer service type to internal only
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param proxy.service.externalTrafficPolicy Enable client source IP preservation
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
## There are two available options: Cluster (default) and Local.
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param proxy.service.healthCheckNodePort Specifies the health check node port (numeric port number) for the service if `externalTrafficPolicy` is set to Local.
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
healthCheckNodePort: ""
## @param proxy.service.annotations Additional annotations for proxy service
##
annotations: {}
## @param proxy.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
##
extraPorts: []
## @param proxy.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param proxy.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
ingress:
host: p2.proxy.tezoslink.smart-chain.fr
tls:
hosts:
- p2.proxy.tezoslink.smart-chain.fr
secretName: proxy-tls
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
## Proxy pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param proxy.podSecurityContext.enabled Enable pod security context
## @param proxy.podSecurityContext.runAsUser User ID for the container
## @param proxy.podSecurityContext.fsGroup Group ID for the container filesystem
##
podSecurityContext:
enabled: true
runAsUser: 1000
## Prometheus proxy containers' Security Context (only main container)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param proxy.containerSecurityContext.enabled Enable container security context
## @param proxy.containerSecurityContext.capabilities.drop [array] Linux Kernel capabilities which should be dropped
## @param proxy.containerSecurityContext.runAsNonRoot Force the container to run as a non root user
## @param proxy.containerSecurityContext.allowPrivilegeEscalation Switch privilegeEscalation possibility on or off
## @param proxy.containerSecurityContext.readOnlyRootFilesystem Mount / (root) as a readonly filesystem
##
containerSecurityContext:
enabled: true
capabilities:
drop:
- ALL
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.livenessProbe.enabled Turn on and off liveness probe
## @param operator.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param operator.livenessProbe.periodSeconds How often to perform the probe
## @param operator.livenessProbe.timeoutSeconds When the probe times out
## @param operator.livenessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.livenessProbe.successThreshold Minimum consecutive successes for the probe
##
livenessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.readinessProbe.enabled Turn on and off readiness probe
## @param operator.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param operator.readinessProbe.periodSeconds How often to perform the probe
## @param operator.readinessProbe.timeoutSeconds When the probe times out
## @param operator.readinessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.readinessProbe.successThreshold Minimum consecutive successes for the probe
##
readinessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param resources.limits The resources limits for the container
## @param resources.requests The requested resources for the container
##
resources:
limits: {}
requests:
cpu: 100m
memory: 256Mi
# mainnet:
# enabled : false
# replicaCount: 1
# network: "mainnet"
# image:
# repository: rg.fr-par.scw.cloud/tezoslink/p1-proxy
# pullPolicy: IfNotPresent
# # Overrides the image tag whose default is the chart appVersion.
# tag: "0.1.0"
# env:
# DATABASE_USERNAME: "tz-backend"
# DATABASE_PASSWORD: "Titi123!"
# DATABASE_URL: "tezoslink-postgresql.tezoslink-postgresql.svc.cluster.local:5432"
# ARCHIVE_NODES_URL: "archive-node.poc-tzk8s.svc.cluster.local"
# TEZOS_ARCHIVE_PORT: "8732"
# ROLLING_NODES_URL: "rolling-node.poc-tzk8s.svc.cluster.local"
# TEZOS_ROLLING_PORT: "8732"
# TEZOS_NETWORK: "mainnet"
# serviceAccount:
# # Specifies whether a service account should be created
# create: true
# # Annotations to add to the service account
# annotations: {}
# # The name of the service account to use.
# # If not set and create is true, a name is generated using the fullname template
# name: ""
# ## @param Automount service account token for the server service account
# ##
# automountServiceAccountToken: true
# ## Proxy Service
# ##
# service:
# ## @param proxy.service.type Kubernetes service type
# ##
# type: ClusterIP
# ## @param proxy.service.ports.http proxy service port
# ##
# ports:
# http: 8001
# ## @param proxy.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` for headless service
# ## e.g:
# ## clusterIP: None
# ##
# clusterIP: ""
# ## @param proxy.service.nodePorts.http Kubernetes Service nodePort
# ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
# ## e.g:
# ## nodePort: 30080
# ##
# nodePorts:
# http: ""
# ## @param proxy.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
# ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
# ##
# loadBalancerIP: ""
# ## @param proxy.service.loadBalancerSourceRanges Address that are allowed when svc is `LoadBalancer`
# ## Set the LoadBalancer service type to internal only
# ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
# ## e.g:
# ## loadBalancerSourceRanges:
# ## - 10.10.10.0/24
# ##
# loadBalancerSourceRanges: []
# ## @param proxy.service.externalTrafficPolicy Enable client source IP preservation
# ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
# ## There are two available options: Cluster (default) and Local.
# ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
# ##
# externalTrafficPolicy: Cluster
# ## @param proxy.service.healthCheckNodePort Specifies the health check node port (numeric port number) for the service if `externalTrafficPolicy` is set to Local.
# ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
# ##
# healthCheckNodePort: ""
# ## @param proxy.service.annotations Additional annotations for proxy service
# ##
# annotations: {}
# ## @param proxy.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
# ##
# extraPorts: []
# ## @param proxy.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
# ## If "ClientIP", consecutive client requests will be directed to the same Pod
# ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
# ##
# sessionAffinity: None
# ## @param proxy.service.sessionAffinityConfig Additional settings for the sessionAffinity
# ## sessionAffinityConfig:
# ## clientIP:
# ## timeoutSeconds: 300
# ##
# sessionAffinityConfig: {}
# ## Proxy pods' Security Context
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
# ## @param proxy.podSecurityContext.enabled Enable pod security context
# ## @param proxy.podSecurityContext.runAsUser User ID for the container
# ## @param proxy.podSecurityContext.fsGroup Group ID for the container filesystem
# ##
# podSecurityContext:
# enabled: true
# runAsUser: 1000
# ## Prometheus proxy containers' Security Context (only main container)
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
# ## @param proxy.containerSecurityContext.enabled Enable container security context
# ## @param proxy.containerSecurityContext.capabilities.drop [array] Linux Kernel capabilities which should be dropped
# ## @param proxy.containerSecurityContext.runAsNonRoot Force the container to run as a non root user
# ## @param proxy.containerSecurityContext.allowPrivilegeEscalation Switch privilegeEscalation possibility on or off
# ## @param proxy.containerSecurityContext.readOnlyRootFilesystem Mount / (root) as a readonly filesystem
# ##
# containerSecurityContext:
# enabled: true
# capabilities:
# drop:
# - ALL
# runAsNonRoot: true
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: false
# ## Configure extra options for liveness probe
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
# ## @param operator.livenessProbe.enabled Turn on and off liveness probe
# ## @param operator.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
# ## @param operator.livenessProbe.periodSeconds How often to perform the probe
# ## @param operator.livenessProbe.timeoutSeconds When the probe times out
# ## @param operator.livenessProbe.failureThreshold Minimum consecutive failures for the probe
# ## @param operator.livenessProbe.successThreshold Minimum consecutive successes for the probe
# ##
# livenessProbe:
# enabled: true
# path: /health
# port: 8001
# initialDelaySeconds: 5
# periodSeconds: 10
# timeoutSeconds: 5
# failureThreshold: 6
# successThreshold: 1
# ## Configure extra options for readiness probe
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
# ## @param operator.readinessProbe.enabled Turn on and off readiness probe
# ## @param operator.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
# ## @param operator.readinessProbe.periodSeconds How often to perform the probe
# ## @param operator.readinessProbe.timeoutSeconds When the probe times out
# ## @param operator.readinessProbe.failureThreshold Minimum consecutive failures for the probe
# ## @param operator.readinessProbe.successThreshold Minimum consecutive successes for the probe
# ##
# readinessProbe:
# enabled: true
# path: /health
# port: 8001
# initialDelaySeconds: 5
# periodSeconds: 10
# timeoutSeconds: 5
# failureThreshold: 6
# successThreshold: 1
# ## Resource requests and limits
# ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
# ## choice for the user. This also increases chances charts run on environments with little
# ## resources, such as Minikube. If you do want to specify resources, uncomment the following
# ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# ## @param resources.limits The resources limits for the container
# ## @param resources.requests The requested resources for the container
# ##
# resources:
# limits: {}
# requests:
# cpu: 100m
# memory: 256Mi

View File

@@ -1,5 +1,5 @@
{
"name": "lecoffre",
"name": "lecoffre-back",
"version": "1.0.0",
"description": "lecoffre project",
"_moduleAliases": {