Commit cfbdde2153 (parent 7d78721763)
Afi Elolo Gisèle Dekpe, 2023-04-04 12:00:12 +02:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
32 changed files with 368 additions and 1265 deletions


@@ -1,24 +0,0 @@
version: 2.1
orbs:
path-filtering: circleci/path-filtering@0.1.3
setup: true
workflows:
setup-workflow:
jobs:
- path-filtering/filter:
filters:
branches:
only:
- dev
- main
- preprod
base-revision: <<pipeline.git.branch>>
config-path: .circleci/workflows.yml
mapping: |
src/.* src true
devops/.* devops true
.gitleaksignore src true
.gitleaksignore devops true
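A note on how this fans out (my reading of the circleci/path-filtering orb, not stated in the commit): each mapping row is `<path regex> <pipeline parameter> <value>`, and when a changed file matches the regex, the orb triggers the continuation config at .circleci/workflows.yml with that parameter set. A commit touching only src/ would therefore run the continuation with roughly:

    # parameters handed to .circleci/workflows.yml (illustrative)
    src: true
    devops: false

which is what the `when: <<pipeline.parameters.src>>` guards in the next file key off.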


@@ -1,228 +0,0 @@
version: 2.1
orbs:
kubernetes: circleci/kubernetes@1.0.0
helm: circleci/helm@2.0.1
parameters:
project-name:
type: string
description: Name of the global project; used for the container registry. Example: tezoslink (has frontend and backend modules)
default: tezoslink
project-module:
type: string
description: Name of the app or module within the project; used for the container registry. Example: front (tezoslink has a front and a back module)
default: p1-api
dockerfile-location:
type: string
description: Location of the Dockerfile from the root of the repo
default: .
project-helm-values:
type: string
description: Key of the values block in the Helm values file. Example: testApp
default: tezosLink
########################################################
################## DO NOT EDIT #########################
########################################################
src:
type: boolean
description: This is the directory of the app, used to trigger app build & deploy
default: false
devops:
type: boolean
description: This is the directory of the devops, used to trigger devops only deploy
default: false
########################################################
###################################################################
################ COMMANDS #######################
###################################################################
commands:
install-yq:
parameters:
version:
type: string
default: v4.27.3
steps:
- run:
name: Install YQ
command: |
curl -L -o ${HOME}/yq https://github.com/mikefarah/yq/releases/download/<< parameters.version >>/yq_linux_amd64
chmod +x ${HOME}/yq
${HOME}/yq --version
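As a quick sanity check of what that yq invocation yields (a sketch, assuming yq v4 syntax as installed above and the Chart.yaml from this commit):

    $ ${HOME}/yq '.appVersion' devops/Chart.yaml
    0.4.4
    $ export TAG=$(${HOME}/yq '.appVersion' devops/Chart.yaml)   # what the jobs below do via $BASH_ENV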
jobs:
default:
docker:
- image: cimg/base:stable
steps:
- run:
name: Nothing to do
command: echo There is nothing to do
verify-tag:
docker:
- image: cimg/base:stable
steps:
- checkout
- install-yq
- run:
name: Get name of project from repo url
command: echo "export NAMESPACE=$(echo '<<pipeline.project.git_url>>' | sed 's:.*/::')">> $BASH_ENV
- run:
name: Slugify name from repo url
command: echo "export NAMESPACE=$(echo $NAMESPACE | awk '{gsub(/[^0-9a-zA-Z .-]/,""); gsub(/^[ \t\r\n]+/, ""); gsub(/[ \t\r\n]+$/, ""); gsub(/[ ]+/,"-"); print tolower($0);}')">> $BASH_ENV
- run:
name: Extract TAG from Chart.yaml
command: echo "export TAG=$(${HOME}/yq '.appVersion' devops/Chart.yaml )" >> $BASH_ENV
- run:
name: Verify if TAG is in Chart.yaml
command: |
if [ -z "1" ]
then
echo "Tag is either null or missformed (check documentation)"
exit 1
fi
- run:
name: Tag is 1
command: echo 1
- setup_remote_docker:
version: 20.10.12
docker_layer_caching: true
- run:
name: Login to SCW registry
command: docker login rg.fr-par.scw.cloud/$NAMESPACE -u nologin -p $SCW_SECRET_KEY
- run:
name: Check if TAG already exists in registry
command: docker manifest inspect rg.fr-par.scw.cloud/<<pipeline.parameters.project-name>>/<<pipeline.parameters.project-module>>:$TAG && exit 1 || echo 'TAG is available'
- run:
when: on_fail
command: echo 'Please review your tag'
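The availability check above works on inverted exit codes: `docker manifest inspect` succeeds only when the tag already exists in the registry, so success must abort the pipeline. The same pattern written out as a plain script (a sketch; the image path is hypothetical):

    if docker manifest inspect rg.fr-par.scw.cloud/myproject/mymodule:$TAG >/dev/null 2>&1; then
      echo "Tag $TAG already exists in the registry, bump appVersion in devops/Chart.yaml"
      exit 1
    else
      echo 'TAG is available'
    fi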
run-gitleaks:
docker:
- image: zricethezav/gitleaks:v8.15.0
steps:
- checkout
- run:
name: Create scan directory
command: mkdir /tmp/gitleaks
- run:
name: Starting scan
command: gitleaks detect --source . --log-opts "-p -n 1" -r /tmp/gitleaks/scan.json -v
- store_artifacts:
path: /tmp/gitleaks/scan.json
build-docker-image:
machine:
image: ubuntu-2004:202010-01
steps:
- checkout
- install-yq
- run:
name: Create persistent workspace
command: mkdir -p workspace
- run:
name: Extract TAG from Chart.yaml
command: echo "export TAG=$(${HOME}/yq '.appVersion' devops/Chart.yaml )" >> $BASH_ENV
- run:
name: Display Tag
command: echo "You are building version $TAG"
- run:
name: Build image
command: >
docker build
--tag rg.fr-par.scw.cloud/<<pipeline.parameters.project-name>>/<<pipeline.parameters.project-module>>:$TAG
<<pipeline.parameters.dockerfile-location>> | while read line; do echo "$(date +%T) > $line"; done;
- run:
name: Save image
command: >
docker save
-o workspace/tezoslink-back.tar
rg.fr-par.scw.cloud/<<pipeline.parameters.project-name>>/<<pipeline.parameters.project-module>>:$TAG
- persist_to_workspace:
root: workspace
paths:
- tezoslink-back.tar
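Because the image is built on a machine executor and pushed from a separate Docker executor (with its own remote engine), it is handed between jobs as a tarball via the workspace rather than through a shared daemon. The round trip reduces to (illustrative; the tezoslink-back.tar filename looks inherited from the tezosLink pipeline this config was adapted from):

    docker save -o workspace/tezoslink-back.tar "$IMAGE"   # build job, then persist_to_workspace
    docker load -i /workspace/tezoslink-back.tar           # push job, after attach_workspace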
push-docker-image:
docker:
- image: cimg/base:stable
steps:
- checkout
- install-yq
- attach_workspace:
# at: /tmp/workspace
at: /workspace
- run:
name: Extract TAG from Chart.yaml
command: echo "export TAG=$(${HOME}/yq '.appVersion' devops/Chart.yaml )" >> $BASH_ENV
- run:
name: Display Tag
command: echo "You are pushing version $TAG"
- setup_remote_docker:
version: 20.10.12
docker_layer_caching: true
- run:
name: Login to SCW registry
command: docker login rg.fr-par.scw.cloud -u nologin -p $SCW_SECRET_KEY
- run:
name: Load docker image from workspace
# command: docker load -i /tmp/workspace/tezoslink-back.tar
command: docker load -i /workspace/tezoslink-back.tar
- run:
name: Push image
command: docker push rg.fr-par.scw.cloud/<<pipeline.parameters.project-name>>/<<pipeline.parameters.project-module>>:$TAG
workflows:
version: 2
build-scan-push-deploy-stg:
when: <<pipeline.parameters.src>>
jobs:
- run-gitleaks:
filters: &filters-stg
branches:
only: application-segmentation-2
- verify-tag:
filters:
<<: *filters-stg
requires:
- run-gitleaks
- build-docker-image:
# requires:
# - verify-tag
context:
- application-segmentation-2
filters:
<<: *filters-stg
- push-docker-image:
context:
- application-segmentation-2
filters:
<<: *filters-stg
default:
when:
and:
# - not: <<pipeline.parameters.devops>>
- not: <<pipeline.parameters.src>>
jobs:
- default:
filters:
branches:
only:
- dev
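As written, this fallback workflow's guard only checks `src`: with the `devops` condition commented out, a devops-only change (devops=true, src=false) also lands in this no-op workflow. If the commented line reflects the original intent, the stricter guard would read:

    when:
      and:
        - not: <<pipeline.parameters.devops>>
        - not: <<pipeline.parameters.src>>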


@@ -4,8 +4,8 @@ orbs:
kubernetes: circleci/kubernetes@1.0.0
helm: circleci/helm@2.0.1
jobs:
build-push-docker-image:
docker:
- image: cimg/base:stable
@@ -13,18 +13,17 @@ jobs:
TAG: << pipeline.git.tag >>
steps:
- checkout
- add_ssh_keys:
fingerprints:
- "4c:8e:00:16:94:44:d9:ad:e9:e9:2c:8b:02:d4:8d:7a"
- run: cp $HOME/.ssh/id_rsa_4c8e00169444d9ade9e92c8b02d48d7a id_rsa
- setup_remote_docker:
version: 20.10.12
docker_layer_caching: true
- run: docker login rg.fr-par.scw.cloud/tezoslink -u nologin -p $SCW_SECRET_KEY
- run: docker build --tag rg.fr-par.scw.cloud/tezoslink/p1-api:$TAG -f Dockerfiles/Dockerfile.api .
- run: docker push rg.fr-par.scw.cloud/tezoslink/p1-api:$TAG
- run: docker login rg.fr-par.scw.cloud/tezoslink -u nologin -p $SCW_SECRET_KEY
- run: docker build --tag rg.fr-par.scw.cloud/tezoslink/p1-frontend:$TAG -f Dockerfiles/Dockerfile.front .
- run: docker push rg.fr-par.scw.cloud/tezoslink/p1-frontend:$TAG
- run: docker login rg.fr-par.scw.cloud/tezoslink -u nologin -p $SCW_SECRET_KEY
- run: docker build --tag rg.fr-par.scw.cloud/tezoslink/p1-proxy:$TAG -f Dockerfiles/Dockerfile.proxy .
- run: docker push rg.fr-par.scw.cloud/tezoslink/p1-proxy:$TAG
- run: docker login rg.fr-par.scw.cloud/lecoffre -u nologin -p $SCW_SECRET_KEY
- run: docker build --tag rg.fr-par.scw.cloud/lecoffre/back:$TAG .
- run: docker push rg.fr-par.scw.cloud/lecoffre/back:$TAG
deploy-docker-image:
docker:
@@ -40,15 +39,11 @@ jobs:
name: Deploy
command: >
helm upgrade
tezos-link helm-charts-p2 -i -f helm-charts-p2/values.yaml
-n tezos-link
lecoffre-back devops/ -i -f devops/values.yaml
-n lecoffre
--create-namespace
--set api.image.tag=$TAG
--set api.image.repository='rg.fr-par.scw.cloud/tezoslink/p1-api'
--set front.image.tag=$TAG
--set front.image.repository='rg.fr-par.scw.cloud/tezoslink/p1-frontend'
--set proxy.image.tag=$TAG
--set proxy.image.repository='rg.fr-par.scw.cloud/tezoslink/p1-proxy'
--set lecoffreBack.image.repository='rg.fr-par.scw.cloud/lecoffre/back'
--set lecoffreBack.image.tag=$TAG
workflows:
@@ -64,6 +59,8 @@ workflows:
- deploy-docker-image:
requires:
- build-push-docker-image
context:
- staging
filters:
tags:
only: /^v.*/
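One coupling worth spelling out: this workflow fires only on git tags matching /^v.*/, TAG is the raw git tag (<< pipeline.git.tag >>), and the deployment template below pins the image to v{{ .Chart.AppVersion }}. For a release to be pullable, the pushed tag and the chart's appVersion therefore have to agree (illustrative):

    git tag v0.4.4 && git push origin v0.4.4    # builds and pushes rg.fr-par.scw.cloud/lecoffre/back:v0.4.4
    # devops/Chart.yaml must then carry appVersion: 0.4.4 so the pod pulls back:v0.4.4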


@@ -1,62 +0,0 @@
version: 2.1
orbs:
helm: circleci/helm@2.0.1
parameters:
########################################################
################## DO NOT EDIT #########################
########################################################
charts:
type: boolean
description: This is the directory of the charts
default: false
########################################################
###################################################################
################ COMMANDS #######################
###################################################################
jobs:
package-helm-chart:
docker:
- image: cimg/base:stable
steps:
- checkout
- helm/install-helm-client
- run:
name: Helm dependency build
command: helm dependency build helm-charts
- run:
name: Helm lint
command: helm lint helm-charts
- run:
name: Install Chart Releaser
command: |
cd /tmp
curl -sSL https://github.com/helm/chart-releaser/releases/download/v1.2.1/chart-releaser_1.2.1_linux_amd64.tar.gz | tar xzf -
mv cr ~/bin/cr
- add_ssh_keys:
fingerprints:
- "e6:6b:41:5b:4f:27:66:8e:06:be:f3:70:5a:73:db:3b"
- run:
name: Package and publish chart
command: |
git config user.email "devops@smart-chain.fr"
git config user.name "CircleCI Job"
helm package helm-charts --destination .deploy
cr upload -o smart-chain-fr -r tezoslink -p .deploy
git checkout gh-pages
cr index -i ./index.yaml -p .deploy -o smart-chain-fr -r tezoslink -c https://smart-chain-fr.github.io/tezosLink/
git add index.yaml
git commit -m "Automatic commit from CircleCI [skip ci]"
git push origin gh-pages
workflows:
version: 2
deploy-helm-chart:
when: <<pipeline.parameters.charts>>
jobs:
- package-helm-chart
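For readers unfamiliar with chart-releaser: cr upload turns each packaged chart into a GitHub release, and cr index rebuilds the repository index that is committed to gh-pages. The net effect for a chart consumer is roughly (a sketch, using the old tezos-link chart name from this repo):

    helm repo add tezoslink https://smart-chain-fr.github.io/tezosLink/
    helm repo update
    helm install my-release tezoslink/tezos-link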

.ssh/id_rsa (new file, 39 lines)

@@ -0,0 +1,39 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
NhAAAAAwEAAQAAAYEAznzhT70DcOV4Lhbl9WDbxA/LpT/rSAP+ccOyx3ANIEY94cCCwgBA
I7g7Pndd8wrJxG9MnfpWk9z1PYkSCmtvE8p7vrE4xg8n8fDF/x4ybWxM8neoy5/O7k78P+
pc4NsuWQxGujxKqzyL9ChIbquh7lPKgANJwabEyAFEyt10fKptyOifUbNScgY7yD+VHiAg
Kb6C5oB7s+pinb6EPKig1mprC+KUnOB6+x5UPCEzYZ8kvFzIECJ1PyNk80/yOA8ceSHhVE
OSjnySTYCtXfjwci0llK5/nkV5IvJtYYzIsBiHoVQK4hVtQt9PUhB8Sx40ClD1x0r+JB+8
6ozvqd5GOYMKf5b7AUUp4oY/LNZQA7CZTKo50i3iEPrGNgjZJl1yPAAZM4biSkxk5fvKcx
hoSB83lO3u5aUp94C5+Kdte5CKaRzQK+nWR/VGmG4J8FaJ8K+XC22rUTxciNTX1yg8mbUm
JRkMS0FLfSg6zb1U2c2k7d2MDP2ArESAAwGoCqLDAAAFoN/WlNbf1pTWAAAAB3NzaC1yc2
EAAAGBAM584U+9A3DleC4W5fVg28QPy6U/60gD/nHDssdwDSBGPeHAgsIAQCO4Oz53XfMK
ycRvTJ36VpPc9T2JEgprbxPKe76xOMYPJ/Hwxf8eMm1sTPJ3qMufzu5O/D/qXODbLlkMRr
o8Sqs8i/QoSG6roe5TyoADScGmxMgBRMrddHyqbcjon1GzUnIGO8g/lR4gICm+guaAe7Pq
Yp2+hDyooNZqawvilJzgevseVDwhM2GfJLxcyBAidT8jZPNP8jgPHHkh4VRDko58kk2ArV
348HItJZSuf55FeSLybWGMyLAYh6FUCuIVbULfT1IQfEseNApQ9cdK/iQfvOqM76neRjmD
Cn+W+wFFKeKGPyzWUAOwmUyqOdIt4hD6xjYI2SZdcjwAGTOG4kpMZOX7ynMYaEgfN5Tt7u
WlKfeAufinbXuQimkc0Cvp1kf1RphuCfBWifCvlwttq1E8XIjU19coPJm1JiUZDEtBS30o
Os29VNnNpO3djAz9gKxEgAMBqAqiwwAAAAMBAAEAAAGADtL2VgQq2BMRUrXAIqKmhGfh5e
Y13QC0ZrR3BExuFNAWJzvr50OfVzElx7FIJGe03o5jAMB1ML5fOOC7U9Ysrk4OHj2Y5tc1
te/kNxGS6zooOAwRKTU65O70ux33NwllRW1VMo+biLmbDpdJw/A8euC3HAOgau+vFvDIYI
wPjdeqdA8HSRVHqPn7lr/B09zYkkakHSpCN0NvewWvdq9ghKQ1NTA6HR0clySxRXPYvXxw
Zbl/f3EJ29ONrs4hhJbJAn5chercEGHpCQyXlL+9M595XHPox7bAPCOu7JEZLdRsXfjhgL
5Hk1zK68f10K/P8HyMJGscP+wq7hyBWEFnqWmXbp86pNXZqSuFuyc9MCUGF1wzX0yKIMmD
98/EY1OZnPhZ9cvCKB8DhNMHeK/h1Sf0P4ON28N5KYKD0McG+8XOptX5zbkGXT4AI6BnW/
8uon3JP4WVmUNR8S6vy+/+E6qSEvGmljHnjYwS/+84o7pG9S6pR5xBgwdfv+UNVHKpAAAA
wD4SYRQYWb594TFdGS3lAM3j7TIV1jEWy99QPbOSh477lIJE+ySXXEKp02M1OoWR7QFRNi
TwwIh/s7nRaLq9m1tXZF73Qm0FeX9vfiHXPL9u2dJMqjGSXVvjT8UhnEUWykZHV2VO6pk2
q311zGc5M3vDsHzvTZV0XqfANIPFNqGMGCrPx/22rBkGJ8Ll/Y0v+7i3adqxfTOAl3/MWI
enh4wrCiM2gqBrYfhyGSH3oU1TIKqWt5F9ruCTgsnYgfhy3QAAAMEA7Nsrjrr0kJXP09BV
WI6B5TTU/w4ViWVxqrblt0sQwU3ntm/awdKOGHepvgvuKNN2CC5KlfXfnwt5uJEMJuTbo9
hokYpgZFUw69VrBSQK3J/ged7Cp1sWOhCotvy0xuUaURKqsaJF7nlMI1BqnoTZrjoUhYP2
B20dQ9uZnAGfT1Bv63tK9OMVwIVp4wz7yApxpgI5By2ryoL0KsEVXuIkkOnxUKlDZLGf/W
+Qreq8HCrYkodZiCZATaAZeGfbsZePAAAAwQDfLVqejh0Us1+rKwEp9kTMYCFcQ3gV4UiM
28O4ZvLq11iQcreE4KTbfXMpwtmC4+1iJJfhAmEdu0FicJLjgvhHW0Gqp0cXAcrErYPc/i
dejVznWJDSW3ORgak0MYLS7d9sJ1Pye6f0YavJHUsPpYndid56VzwaDVd2V6Dq1ICYRBtT
iadmpQbQABU04xiw8jqyXDOPVOKWnc+AfBCtf5rM9jRd1l+gEsIACne9b92EnlqPX6O7VK
kKGE5sThn2x40AAAAlZ2lzZWxlLXNtYXJ0Y2hhaW5ATWFjQm9vay1Qcm8tNi5sb2NhbAEC
AwQFBg==
-----END OPENSSH PRIVATE KEY-----

.ssh/id_rsa.pub (new file, 1 line)

@@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDOfOFPvQNw5XguFuX1YNvED8ulP+tIA/5xw7LHcA0gRj3hwILCAEAjuDs+d13zCsnEb0yd+laT3PU9iRIKa28Tynu+sTjGDyfx8MX/HjJtbEzyd6jLn87uTvw/6lzg2y5ZDEa6PEqrPIv0KEhuq6HuU8qAA0nBpsTIAUTK3XR8qm3I6J9Rs1JyBjvIP5UeICApvoLmgHuz6mKdvoQ8qKDWamsL4pSc4Hr7HlQ8ITNhnyS8XMgQInU/I2TzT/I4Dxx5IeFUQ5KOfJJNgK1d+PByLSWUrn+eRXki8m1hjMiwGIehVAriFW1C309SEHxLHjQKUPXHSv4kH7zqjO+p3kY5gwp/lvsBRSnihj8s1lADsJlMqjnSLeIQ+sY2CNkmXXI8ABkzhuJKTGTl+8pzGGhIHzeU7e7lpSn3gLn4p217kIppHNAr6dZH9UaYbgnwVonwr5cLbatRPFyI1NfXKDyZtSYlGQxLQUt9KDrNvVTZzaTt3YwM/YCsRIADAagKosM= gisele-smartchain@MacBook-Pro-6.local


@@ -8,7 +8,7 @@ COPY package.json ./
RUN apk update && apk add openssh-client git
COPY id_rsa /root/.ssh/id_rsa
COPY .ssh/id_rsa /root/.ssh/id_rsa
RUN chmod 600 ~/.ssh/id_rsa
RUN eval "$(ssh-agent -s)" && ssh-add /root/.ssh/id_rsa
RUN ssh-keyscan github.com >> /root/.ssh/known_hosts
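Two caveats on this Dockerfile pattern: each RUN starts a fresh shell, so the ssh-agent started on one line is gone by the next instruction, and the private key itself ends up baked into an image layer. A sketch of the BuildKit alternative (assumes Docker 18.09+ with BuildKit enabled; not what this commit does):

    # syntax=docker/dockerfile:1
    RUN --mount=type=ssh npm install   # key is forwarded for this step only, never stored in a layer
    # build with: docker build --ssh default .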


@@ -1,5 +1,5 @@
apiVersion: v2
name: tezos-link
name: leCoffre-back
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
@@ -21,4 +21,5 @@ version: 0.0.1
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.0.1"
appVersion: 0.4.4
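The appVersion bump is load-bearing here: the deployment template renders the image as {{ .Values.lecoffreBack.image.repository }}:v{{ .Chart.AppVersion }}, so this field, not values.yaml, selects the tag that runs. With this chart it renders to:

    image: "rg.fr-par.scw.cloud/lecoffre/back:v0.4.4"

Note the asymmetry with the first workflow file, which tags images with the bare appVersion ($TAG=0.4.4, no v prefix); only the tag-triggered workflow, where TAG is the git tag v…, produces names this template can pull.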


@@ -0,0 +1,19 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: docker-pull-secret
spec:
refreshInterval: 1h
secretStoreRef:
name: dockerpullsecret-vault-cluster-secret-store
kind: ClusterSecretStore
target:
template:
type: kubernetes.io/dockerconfigjson
name: docker-pull-secret
creationPolicy: Owner
data:
- secretKey: .dockerconfigjson
remoteRef:
key: {{ .Values.dockerPullSecret }}
property: .dockerconfigjson


@@ -0,0 +1,71 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: lecoffre-back
namespace: {{ .Values.namespace }}
{{if .Values.lecoffreBack.ingress.annotations}}
annotations:
{{toYaml .Values.lecoffreBack.ingress.annotations | indent 4 }}
{{end}}
spec:
tls:
- hosts: {{ .Values.lecoffreBack.ingress.tls.hosts }}
secretName: {{ .Values.lecoffreBack.ingress.tls.secretName }}
rules:
- host: {{ .Values.lecoffreBack.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: lecoffre-back-svc
port:
number: 80
---
apiVersion: v1
kind: Service
metadata:
name: lecoffre-back-svc
namespace: {{ .Values.namespace }}
labels:
spec:
ports:
- port: 80
name: http
targetPort: 3001
selector:
app: lecoffre-back
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: lecoffre-back
namespace: {{ .Values.namespace }}
labels:
app: lecoffre-back
spec:
replicas: 1
selector:
matchLabels:
app: lecoffre-back
template:
metadata:
annotations:
{{toYaml .Values.lecoffreBack.vault.annotations | indent 8 }}
labels:
app: lecoffre-back
spec:
serviceAccountName: {{ .Values.lecoffreBack.serviceAccountName }}
imagePullSecrets:
- name: docker-pull-secret
containers:
- name: lecoffre-back
image: "{{ .Values.lecoffreBack.image.repository }}:v{{ .Chart.AppVersion }}"
{{if .Values.lecoffreBack.resources}}
resources:
{{toYaml .Values.lecoffreBack.resources | indent 10}}
{{end}}
imagePullPolicy: {{ .Values.lecoffreBack.image.pullPolicy }}
command: [{{ .Values.lecoffreBack.command }}]
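The command line works by plain string substitution: values.yaml supplies the comma-separated quoted words and the template wraps them in brackets, so YAML parses the result as a flow-style array. With the values in this commit it renders as:

    command: ['sh', '-c', '. /vault/secrets/envs-api && npm run api:start']

(temp.yaml further down shows an older render with npm start and image tag v0.3.2.)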


@@ -0,0 +1,14 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ .Values.lecoffreBack.serviceAccountName }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .Values.lecoffreBack.serviceAccountName }}-token
annotations:
kubernetes.io/service-account.name: {{ .Values.lecoffreBack.serviceAccountName }}
type: kubernetes.io/service-account-token

devops/values.yaml (new file, 44 lines)

@@ -0,0 +1,44 @@
dockerPullSecret: secret/data/minteed-stg/config/dockerpullsecret
namespace: lecoffre
lecoffreBack:
serviceAccountName: lecoffre-back-sa
command: "'sh', '-c', '. /vault/secrets/envs-api && npm run api:start'"
vault:
role: custom_lecoffre-back_injector_rol
server: https://vault-stg.smart-chain.fr
annotations:
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-inject-secret-envs-api: secret/data/lecoffre-back-stg/config/envs-api
vault.hashicorp.com/role: custom_lecoffre-back_injector_rol
vault.hashicorp.com/agent-inject-template-envs-api: |
{{ with secret "secret/data/lecoffre-back-stg/config/envs-api" }}
{{ range $k, $v := .Data.data }}
export {{ $k }}="{{ $v }}"
{{ end }}
{{ end }}
imagePullSecrets:
- name: docker-pull-secret
image:
pullPolicy: Always
repository: "rg.fr-par.scw.cloud/lecoffre/back"
resources:
requests:
cpu: 200m
memory: 1Gi
limits:
memory: 2Gi
ingress:
host: api.stg.lecoffre.smart-chain.fr
tls:
hosts:
- api.stg.lecoffre.smart-chain.fr
secretName: api-tls
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
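To make the Vault annotations concrete: the agent renders the agent-inject-template into /vault/secrets/envs-api inside the pod, emitting one export line per key of the secret, and the container command sources that file before starting the API. The rendered file looks roughly like this (hypothetical keys, for illustration only):

    export DATABASE_URL="postgres://..."   # illustrative key, not from the commit
    export API_PORT="3001"                 # illustrative key, not from the commit
    # consumed by: sh -c '. /vault/secrets/envs-api && npm run api:start'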


@@ -1 +0,0 @@
Post Helm installation instruction


@@ -1,119 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "helpers.name" -}}
{{- default .Chart.Name .Values.global.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "helpers.fullname" -}}
{{- if .Values.global.fullnameOverride }}
{{- .Values.global.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- .Release.Name | trunc 63 }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "helpers.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "helpers.labels.common" -}}
helm.sh/chart: {{ include "helpers.chart" . }}
{{ include "helpers.selectorLabels" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Proxy labels
*/}}
{{- define "helpers.labels.proxy" -}}
{{- include "helpers.labels.common" . }}
app.kubernetes.io/component : {{ printf "%s-%s" "proxy" (lower .Values.proxy.network) }}
{{- end }}
{{/*
Proxy testnet labels
*/}}
{{- define "helpers.labels.testnet.proxy" -}}
{{- include "helpers.labels.common" . }}
app.kubernetes.io/component : {{ printf "%s-%s" "proxy" (lower .Values.proxy.testnet.network) }}
{{- end }}
{{/*
front labels
*/}}
{{- define "helpers.labels.front" -}}
{{- include "helpers.labels.common" . }}
app.kubernetes.io/component : front
{{- end }}
{{/*
api labels
*/}}
{{- define "helpers.labels.api" -}}
{{- include "helpers.labels.common" . }}
app.kubernetes.io/component : api
{{- end }}
{{/*
Selector labels
*/}}
{{- define "helpers.selectorLabels" -}}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "helpers.proxy.selectorLabels" -}}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "helpers.front.selectorLabels" -}}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component : front
{{- end }}
{{/*
Selector labels
*/}}
{{- define "helpers.api.selectorLabels" -}}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component : api
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "helpers.serviceAccountName" -}}
{{- default (include "helpers.fullname" .) .serviceAccount.name }}
{{- end }}
{{/*
Allow the release namespace to be overridden for multi-namespace deployments in combined charts.
*/}}
{{- define "helpers.namespace" -}}
{{- default .Release.Namespace .Values.global.namespaceOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- define "common.tplvalues.render" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}


@@ -1,47 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "helpers.fullname" . }}-api
namespace: {{ include "helpers.namespace" . | quote }}
labels: {{ include "helpers.labels.api" . | nindent 4 }}
spec:
replicas: {{ .Values.api.replicas }}
selector:
matchLabels:
{{- include "helpers.api.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "helpers.api.selectorLabels" . | nindent 8 }}
spec:
serviceAccountName: api-sa
imagePullSecrets:
- name: docker-pull-secret
containers:
- name: {{ include "helpers.fullname" . }}-api
imagePullPolicy: {{ .Values.api.image.pullPolicy }}
image: "{{ .Values.api.image.repository }}:{{ .Values.api.image.tag }}"
resources:
{{toYaml .Values.api.resources | indent 12}}
env:
- name: API_URL
valueFrom:
configMapKeyRef:
name: {{ include "helpers.fullname" . }}-cm
key: API_URL
- name: PROXY_TESTNET_URL
valueFrom:
configMapKeyRef:
name: {{ include "helpers.fullname" . }}-cm
key: PROXY_TESTNET_URL
- name: FRONT_URL
valueFrom:
configMapKeyRef:
name: {{ include "helpers.fullname" . }}-cm
key: FRONT_URL
{{- range $key, $val := .Values.api.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}


@@ -1,22 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "helpers.fullname" . }}-api
namespace: {{ include "helpers.namespace" . | quote }}
annotations:
{{toYaml .Values.api.ingress.annotations | indent 4 }}
spec:
tls:
- hosts: {{ .Values.api.ingress.tls.hosts }}
secretName: {{ .Values.api.ingress.tls.secretName }}
rules:
- host: {{ .Values.api.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ include "helpers.fullname" . }}-api
port:
number: {{ .Values.api.service.ports.http }}


@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "helpers.fullname" . }}-api
namespace: {{ include "helpers.namespace" . | quote }}
labels:
spec:
ports:
- port: {{ .Values.api.service.ports.http }}
name: http
targetPort: {{ .Values.api.service.ports.http }}
selector:
{{- include "helpers.api.selectorLabels" . | nindent 8 }}


@@ -1,5 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: api-sa


@@ -1,8 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "helpers.fullname" . }}-cm
data:
API_URL: {{ .Values.api.ingress.host | quote}}
FRONT_URL: {{ .Values.front.ingress.host | quote }}
PROXY_TESTNET_URL: {{ .Values.proxy.testnet.ingress.host | quote }}


@@ -1,45 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "helpers.fullname" . }}-front
namespace: {{ include "helpers.namespace" . | quote }}
labels: {{ include "helpers.labels.front" . | nindent 4 }}
spec:
replicas: {{ .Values.front.replicas }}
selector:
matchLabels:
{{- include "helpers.front.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "helpers.front.selectorLabels" . | nindent 8 }}
spec:
serviceAccountName: front-sa
imagePullSecrets:
- name: docker-pull-secret
containers:
- name: {{ include "helpers.fullname" . }}-front
imagePullPolicy: {{ .Values.front.image.pullPolicy }}
image: "{{ .Values.front.image.repository }}:{{ .Values.front.image.tag }}"
resources:
{{toYaml .Values.front.resources | indent 12}}
env:
- name: API_URL
valueFrom:
configMapKeyRef:
name: {{ include "helpers.fullname" . }}-cm
key: API_URL
- name: PROXY_TESTNET_URL
valueFrom:
configMapKeyRef:
name: {{ include "helpers.fullname" . }}-cm
key: PROXY_TESTNET_URL
- name: FRONT_URL
valueFrom:
configMapKeyRef:
name: {{ include "helpers.fullname" . }}-cm
key: FRONT_URL
{{- range $key, $val := .Values.front.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}


@@ -1,22 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "helpers.fullname" . }}-front
namespace: {{ include "helpers.namespace" . | quote }}
annotations:
{{toYaml .Values.front.ingress.annotations | indent 4 }}
spec:
tls:
- hosts: {{ .Values.front.ingress.tls.hosts }}
secretName: {{ .Values.front.ingress.tls.secretName }}
rules:
- host: {{ .Values.front.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ include "helpers.fullname" . }}-front
port:
number: {{ .Values.front.service.ports.http }}


@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "helpers.fullname" . }}-front
namespace: {{ include "helpers.namespace" . | quote }}
labels:
spec:
ports:
- port: {{ .Values.front.service.ports.http }}
name: http
targetPort: {{ .Values.front.service.ports.http }}
selector:
{{- include "helpers.front.selectorLabels" . | nindent 8 }}


@@ -1,4 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: front-sa


@@ -1,79 +0,0 @@
{{- if .Values.proxy.testnet.enabled}}
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "helpers.fullname" . }}-proxy
namespace: {{ include "helpers.namespace" . | quote }}
labels: {{ include "helpers.labels.testnet.proxy" . | nindent 4 }}
spec:
replicas: {{ .Values.proxy.testnet.replicaCount }}
selector:
matchLabels:
{{- include "helpers.proxy.selectorLabels" . | nindent 6 }}
template:
metadata:
labels:
{{- include "helpers.proxy.selectorLabels" . | nindent 8 }}
spec:
serviceAccountName: proxy-sa
containers:
- name: proxy-mainnet
image: "{{ .Values.proxy.testnet.image.repository }}:{{ .Values.proxy.testnet.image.tag }}"
imagePullPolicy: {{ .Values.proxy.testnet.image.pullPolicy }}
resources:
{{- toYaml .Values.proxy.testnet.resources | nindent 12 }}
env:
- name: API_URL
valueFrom:
configMapKeyRef:
name: {{ include "helpers.fullname" . }}-cm
key: API_URL
- name: PROXY_TESTNET_URL
valueFrom:
configMapKeyRef:
name: {{ include "helpers.fullname" . }}-cm
key: PROXY_TESTNET_URL
- name: FRONT_URL
valueFrom:
configMapKeyRef:
name: {{ include "helpers.fullname" . }}-cm
key: FRONT_URL
{{- range $key, $val := .Values.proxy.testnet.env }}
- name: {{ $key }}
value: {{ $val | quote }}
{{- end }}
ports:
- name: http
containerPort: 8001
protocol: TCP
{{- if .Values.proxy.testnet.livenessProbe.enabled }}
livenessProbe:
httpGet:
path: {{ .Values.proxy.testnet.livenessProbe.path }}
port: {{ .Values.proxy.testnet.livenessProbe.port }}
scheme: HTTP
initialDelaySeconds: {{ .Values.proxy.testnet.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.proxy.testnet.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.proxy.testnet.livenessProbe.timeoutSeconds }}
failureThreshold: {{ .Values.proxy.testnet.livenessProbe.failureThreshold }}
successThreshold: {{ .Values.proxy.testnet.livenessProbe.successThreshold }}
{{- end }}
{{- if .Values.proxy.testnet.readinessProbe.enabled }}
readinessProbe:
httpGet:
path: {{ .Values.proxy.testnet.readinessProbe.path }}
port: {{ .Values.proxy.testnet.readinessProbe.port }}
scheme: HTTP
initialDelaySeconds: {{ .Values.proxy.testnet.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.proxy.testnet.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.proxy.testnet.readinessProbe.timeoutSeconds }}
failureThreshold: {{ .Values.proxy.testnet.readinessProbe.failureThreshold }}
successThreshold: {{ .Values.proxy.testnet.readinessProbe.successThreshold }}
{{- end }}
{{- if .Values.proxy.testnet.containerSecurityContext.enabled }}
securityContext: {{- omit .Values.proxy.testnet.containerSecurityContext "enabled" | toYaml | nindent 12 }}
{{- end }}
{{- if .Values.proxy.testnet.podSecurityContext.enabled }}
securityContext: {{- omit .Values.proxy.testnet.podSecurityContext "enabled" | toYaml | nindent 8 }}
{{- end }}
{{- end }}


@@ -1,22 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ include "helpers.fullname" . }}-proxy
namespace: {{ include "helpers.namespace" . | quote }}
annotations:
{{toYaml .Values.proxy.testnet.ingress.annotations | indent 4 }}
spec:
tls:
- hosts: {{ .Values.proxy.testnet.ingress.tls.hosts }}
secretName: {{ .Values.proxy.testnet.ingress.tls.secretName }}
rules:
- host: {{ .Values.proxy.testnet.ingress.host }}
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: {{ include "helpers.fullname" . }}-proxy
port:
number: {{ .Values.proxy.testnet.service.ports.http }}


@@ -1,13 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "helpers.fullname" . }}-proxy
namespace: {{ include "helpers.namespace" . | quote }}
labels:
spec:
ports:
- port: {{ .Values.proxy.testnet.service.ports.http }}
name: http
targetPort: {{ .Values.proxy.testnet.service.ports.http }}
selector:
{{- include "helpers.proxy.selectorLabels" . | nindent 8 }}


@@ -1,4 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: proxy-sa


@@ -1,510 +0,0 @@
global:
## @param nameOverride String to partially override `tezoslink.name` template with a string (will prepend the release name)
##
nameOverride: ""
## @param fullnameOverride String to fully override `tezoslink.fullname` template with a string
##
fullnameOverride: ""
## @param namespaceOverride String to fully override helpers.namespace
##
namespaceOverride: ""
api:
replicas: 1
image:
repository: rg.fr-par.scw.cloud/tezoslink/p1-api
tag: 0.1.0
pullPolicy: Always
service:
ports:
http: 8000
ingress:
host: p2.api.tezoslink.smart-chain.fr
tls:
hosts:
- p2.api.tezoslink.smart-chain.fr
secretName: api-tls
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
env:
DATABASE_USERNAME: "tz-backend"
DATABASE_PASSWORD: "Titi123!"
DATABASE_URL: "tezoslink-postgresql.tezoslink-postgresql.svc.cluster.local:5432"
resources:
limits: {}
requests:
cpu: 100m
memory: 256Mi
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.livenessProbe.enabled Turn on and off liveness probe
## @param operator.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param operator.livenessProbe.periodSeconds How often to perform the probe
## @param operator.livenessProbe.timeoutSeconds When the probe times out
## @param operator.livenessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.livenessProbe.successThreshold Minimum consecutive successes for the probe
##
livenessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.readinessProbe.enabled Turn on and off readiness probe
## @param operator.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param operator.readinessProbe.periodSeconds How often to perform the probe
## @param operator.readinessProbe.timeoutSeconds When the probe times out
## @param operator.readinessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.readinessProbe.successThreshold Minimum consecutive successes for the probe
##
readinessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
front:
replicas: 1
image:
repository: rg.fr-par.scw.cloud/tezoslink/p1-frontend
tag: 0.0.5
pullPolicy: Always
service:
ports:
http: 8080
ingress:
host: p2.front.tezoslink.smart-chain.fr
tls:
hosts:
- p2.front.tezoslink.smart-chain.fr
secretName: front-tls
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
env:
resources:
limits: {}
requests:
cpu: 100m
memory: 256Mi
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.livenessProbe.enabled Turn on and off liveness probe
## @param operator.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param operator.livenessProbe.periodSeconds How often to perform the probe
## @param operator.livenessProbe.timeoutSeconds When the probe times out
## @param operator.livenessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.livenessProbe.successThreshold Minimum consecutive successes for the probe
##
livenessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.readinessProbe.enabled Turn on and off readiness probe
## @param operator.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param operator.readinessProbe.periodSeconds How often to perform the probe
## @param operator.readinessProbe.timeoutSeconds When the probe times out
## @param operator.readinessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.readinessProbe.successThreshold Minimum consecutive successes for the probe
##
readinessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
proxy:
testnet:
enabled : true
replicaCount: 1
network: "mainnet"
image:
repository: rg.fr-par.scw.cloud/tezoslink/p1-proxy
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "0.1.0"
env:
DATABASE_USERNAME: "tz-backend"
DATABASE_PASSWORD: "Titi123!"
DATABASE_URL: "tezoslink-postgresql.tezoslink-postgresql.svc.cluster.local:5432"
ARCHIVE_NODES_URL: "archive-node.poc-tzk8s.svc.cluster.local"
TEZOS_ARCHIVE_PORT: "8732"
ROLLING_NODES_URL: "rolling-node.poc-tzk8s.svc.cluster.local"
TEZOS_ROLLING_PORT: "8732"
TEZOS_NETWORK: "mainnet"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
## @param Automount service account token for the server service account
##
automountServiceAccountToken: true
## Proxy Service
##
service:
## @param proxy.service.type Kubernetes service type
##
type: ClusterIP
## @param proxy.service.ports.http proxy service port
##
ports:
http: 8001
## @param proxy.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` for headless service
## e.g:
## clusterIP: None
##
clusterIP: ""
## @param proxy.service.nodePorts.http Kubernetes Service nodePort
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
## e.g:
## nodePort: 30080
##
nodePorts:
http: ""
## @param proxy.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
##
loadBalancerIP: ""
## @param proxy.service.loadBalancerSourceRanges Address that are allowed when svc is `LoadBalancer`
## Set the LoadBalancer service type to internal only
## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
## e.g:
## loadBalancerSourceRanges:
## - 10.10.10.0/24
##
loadBalancerSourceRanges: []
## @param proxy.service.externalTrafficPolicy Enable client source IP preservation
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
## There are two available options: Cluster (default) and Local.
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
externalTrafficPolicy: Cluster
## @param proxy.service.healthCheckNodePort Specifies the health check node port (numeric port number) for the service if `externalTrafficPolicy` is set to Local.
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
##
healthCheckNodePort: ""
## @param proxy.service.annotations Additional annotations for proxy service
##
annotations: {}
## @param proxy.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
##
extraPorts: []
## @param proxy.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
## If "ClientIP", consecutive client requests will be directed to the same Pod
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
##
sessionAffinity: None
## @param proxy.service.sessionAffinityConfig Additional settings for the sessionAffinity
## sessionAffinityConfig:
## clientIP:
## timeoutSeconds: 300
##
sessionAffinityConfig: {}
ingress:
host: p2.proxy.tezoslink.smart-chain.fr
tls:
hosts:
- p2.proxy.tezoslink.smart-chain.fr
secretName: proxy-tls
annotations:
kubernetes.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
## Proxy pods' Security Context
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
## @param proxy.podSecurityContext.enabled Enable pod security context
## @param proxy.podSecurityContext.runAsUser User ID for the container
## @param proxy.podSecurityContext.fsGroup Group ID for the container filesystem
##
podSecurityContext:
enabled: true
runAsUser: 1000
## Prometheus proxy containers' Security Context (only main container)
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
## @param proxy.containerSecurityContext.enabled Enable container security context
## @param proxy.containerSecurityContext.capabilities.drop [array] Linux Kernel capabilities which should be dropped
## @param proxy.containerSecurityContext.runAsNonRoot Force the container to run as a non root user
## @param proxy.containerSecurityContext.allowPrivilegeEscalation Switch privilegeEscalation possibility on or off
## @param proxy.containerSecurityContext.readOnlyRootFilesystem Mount / (root) as a readonly filesystem
##
containerSecurityContext:
enabled: true
capabilities:
drop:
- ALL
runAsNonRoot: true
allowPrivilegeEscalation: false
readOnlyRootFilesystem: false
## Configure extra options for liveness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.livenessProbe.enabled Turn on and off liveness probe
## @param operator.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
## @param operator.livenessProbe.periodSeconds How often to perform the probe
## @param operator.livenessProbe.timeoutSeconds When the probe times out
## @param operator.livenessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.livenessProbe.successThreshold Minimum consecutive successes for the probe
##
livenessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Configure extra options for readiness probe
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
## @param operator.readinessProbe.enabled Turn on and off readiness probe
## @param operator.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
## @param operator.readinessProbe.periodSeconds How often to perform the probe
## @param operator.readinessProbe.timeoutSeconds When the probe times out
## @param operator.readinessProbe.failureThreshold Minimum consecutive failures for the probe
## @param operator.readinessProbe.successThreshold Minimum consecutive successes for the probe
##
readinessProbe:
enabled: true
path: /health
port: 8001
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 6
successThreshold: 1
## Resource requests and limits
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
## choice for the user. This also increases chances charts run on environments with little
## resources, such as Minikube. If you do want to specify resources, uncomment the following
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
## @param resources.limits The resources limits for the container
## @param resources.requests The requested resources for the container
##
resources:
limits: {}
requests:
cpu: 100m
memory: 256Mi
# mainnet:
# enabled : false
# replicaCount: 1
# network: "mainnet"
# image:
# repository: rg.fr-par.scw.cloud/tezoslink/p1-proxy
# pullPolicy: IfNotPresent
# # Overrides the image tag whose default is the chart appVersion.
# tag: "0.1.0"
# env:
# DATABASE_USERNAME: "tz-backend"
# DATABASE_PASSWORD: "Titi123!"
# DATABASE_URL: "tezoslink-postgresql.tezoslink-postgresql.svc.cluster.local:5432"
# ARCHIVE_NODES_URL: "archive-node.poc-tzk8s.svc.cluster.local"
# TEZOS_ARCHIVE_PORT: "8732"
# ROLLING_NODES_URL: "rolling-node.poc-tzk8s.svc.cluster.local"
# TEZOS_ROLLING_PORT: "8732"
# TEZOS_NETWORK: "mainnet"
# serviceAccount:
# # Specifies whether a service account should be created
# create: true
# # Annotations to add to the service account
# annotations: {}
# # The name of the service account to use.
# # If not set and create is true, a name is generated using the fullname template
# name: ""
# ## @param Automount service account token for the server service account
# ##
# automountServiceAccountToken: true
# ## Proxy Service
# ##
# service:
# ## @param proxy.service.type Kubernetes service type
# ##
# type: ClusterIP
# ## @param proxy.service.ports.http proxy service port
# ##
# ports:
# http: 8001
# ## @param proxy.service.clusterIP Specific cluster IP when service type is cluster IP. Use `None` for headless service
# ## e.g:
# ## clusterIP: None
# ##
# clusterIP: ""
# ## @param proxy.service.nodePorts.http Kubernetes Service nodePort
# ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
# ## e.g:
# ## nodePort: 30080
# ##
# nodePorts:
# http: ""
# ## @param proxy.service.loadBalancerIP `loadBalancerIP` if service type is `LoadBalancer`
# ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
# ##
# loadBalancerIP: ""
# ## @param proxy.service.loadBalancerSourceRanges Address that are allowed when svc is `LoadBalancer`
# ## Set the LoadBalancer service type to internal only
# ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
# ## e.g:
# ## loadBalancerSourceRanges:
# ## - 10.10.10.0/24
# ##
# loadBalancerSourceRanges: []
# ## @param proxy.service.externalTrafficPolicy Enable client source IP preservation
# ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
# ## There are two available options: Cluster (default) and Local.
# ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
# ##
# externalTrafficPolicy: Cluster
# ## @param proxy.service.healthCheckNodePort Specifies the health check node port (numeric port number) for the service if `externalTrafficPolicy` is set to Local.
# ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
# ##
# healthCheckNodePort: ""
# ## @param proxy.service.annotations Additional annotations for proxy service
# ##
# annotations: {}
# ## @param proxy.service.extraPorts Extra ports to expose (normally used with the `sidecar` value)
# ##
# extraPorts: []
# ## @param proxy.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP"
# ## If "ClientIP", consecutive client requests will be directed to the same Pod
# ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
# ##
# sessionAffinity: None
# ## @param proxy.service.sessionAffinityConfig Additional settings for the sessionAffinity
# ## sessionAffinityConfig:
# ## clientIP:
# ## timeoutSeconds: 300
# ##
# sessionAffinityConfig: {}
# ## Proxy pods' Security Context
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
# ## @param proxy.podSecurityContext.enabled Enable pod security context
# ## @param proxy.podSecurityContext.runAsUser User ID for the container
# ## @param proxy.podSecurityContext.fsGroup Group ID for the container filesystem
# ##
# podSecurityContext:
# enabled: true
# runAsUser: 1000
# ## Prometheus proxy containers' Security Context (only main container)
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
# ## @param proxy.containerSecurityContext.enabled Enable container security context
# ## @param proxy.containerSecurityContext.capabilities.drop [array] Linux Kernel capabilities which should be dropped
# ## @param proxy.containerSecurityContext.runAsNonRoot Force the container to run as a non root user
# ## @param proxy.containerSecurityContext.allowPrivilegeEscalation Switch privilegeEscalation possibility on or off
# ## @param proxy.containerSecurityContext.readOnlyRootFilesystem Mount / (root) as a readonly filesystem
# ##
# containerSecurityContext:
# enabled: true
# capabilities:
# drop:
# - ALL
# runAsNonRoot: true
# allowPrivilegeEscalation: false
# readOnlyRootFilesystem: false
# ## Configure extra options for liveness probe
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
# ## @param operator.livenessProbe.enabled Turn on and off liveness probe
# ## @param operator.livenessProbe.initialDelaySeconds Delay before liveness probe is initiated
# ## @param operator.livenessProbe.periodSeconds How often to perform the probe
# ## @param operator.livenessProbe.timeoutSeconds When the probe times out
# ## @param operator.livenessProbe.failureThreshold Minimum consecutive failures for the probe
# ## @param operator.livenessProbe.successThreshold Minimum consecutive successes for the probe
# ##
# livenessProbe:
# enabled: true
# path: /health
# port: 8001
# initialDelaySeconds: 5
# periodSeconds: 10
# timeoutSeconds: 5
# failureThreshold: 6
# successThreshold: 1
# ## Configure extra options for readiness probe
# ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
# ## @param operator.readinessProbe.enabled Turn on and off readiness probe
# ## @param operator.readinessProbe.initialDelaySeconds Delay before readiness probe is initiated
# ## @param operator.readinessProbe.periodSeconds How often to perform the probe
# ## @param operator.readinessProbe.timeoutSeconds When the probe times out
# ## @param operator.readinessProbe.failureThreshold Minimum consecutive failures for the probe
# ## @param operator.readinessProbe.successThreshold Minimum consecutive successes for the probe
# ##
# readinessProbe:
# enabled: true
# path: /health
# port: 8001
# initialDelaySeconds: 5
# periodSeconds: 10
# timeoutSeconds: 5
# failureThreshold: 6
# successThreshold: 1
# ## Resource requests and limits
# ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
# ## choice for the user. This also increases chances charts run on environments with little
# ## resources, such as Minikube. If you do want to specify resources, uncomment the following
# ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# ## @param resources.limits The resources limits for the container
# ## @param resources.requests The requested resources for the container
# ##
# resources:
# limits: {}
# requests:
# cpu: 100m
# memory: 256Mi

package-lock.json (generated, 37 lines changed)

@@ -1,11 +1,11 @@
{
"name": "lecoffre",
"name": "lecoffre-back",
"version": "1.0.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "lecoffre",
"name": "lecoffre-back",
"version": "1.0.0",
"license": "ISC",
"dependencies": {
@@ -20,6 +20,7 @@
"le-coffre-ressources": "github.com:smart-chain-fr/leCoffre-resources.git",
"module-alias": "^2.2.2",
"next": "^13.1.5",
"node-cache": "^5.1.2",
"node-schedule": "^2.1.1",
"prisma-query": "^2.0.0",
"reflect-metadata": "^0.1.13",
@@ -431,6 +432,14 @@
"version": "0.0.1",
"license": "MIT"
},
"node_modules/clone": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz",
"integrity": "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==",
"engines": {
"node": ">=0.8"
}
},
"node_modules/combined-stream": {
"version": "1.0.8",
"license": "MIT",
@@ -994,6 +1003,17 @@
}
}
},
"node_modules/node-cache": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/node-cache/-/node-cache-5.1.2.tgz",
"integrity": "sha512-t1QzWwnk4sjLWaQAS8CHgOJ+RAfmHpxFWmc36IWTiWHQfs0w5JDMBS1b1ZxQteo0vVVuWJvIUKHDkkeK7vIGCg==",
"dependencies": {
"clone": "2.x"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/node-schedule": {
"version": "2.1.1",
"license": "MIT",
@@ -1838,6 +1858,11 @@
"client-only": {
"version": "0.0.1"
},
"clone": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/clone/-/clone-2.1.2.tgz",
"integrity": "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w=="
},
"combined-stream": {
"version": "1.0.8",
"requires": {
@@ -2157,6 +2182,14 @@
"styled-jsx": "5.1.1"
}
},
"node-cache": {
"version": "5.1.2",
"resolved": "https://registry.npmjs.org/node-cache/-/node-cache-5.1.2.tgz",
"integrity": "sha512-t1QzWwnk4sjLWaQAS8CHgOJ+RAfmHpxFWmc36IWTiWHQfs0w5JDMBS1b1ZxQteo0vVVuWJvIUKHDkkeK7vIGCg==",
"requires": {
"clone": "2.x"
}
},
"node-schedule": {
"version": "2.1.1",
"requires": {


@@ -1,5 +1,5 @@
{
"name": "lecoffre",
"name": "lecoffre-back",
"version": "1.0.0",
"description": "lecoffre project",
"_moduleAliases": {
@@ -47,6 +47,7 @@
"le-coffre-ressources": "github.com:smart-chain-fr/leCoffre-resources.git",
"module-alias": "^2.2.2",
"next": "^13.1.5",
"node-cache": "^5.1.2",
"node-schedule": "^2.1.1",
"prisma-query": "^2.0.0",
"reflect-metadata": "^0.1.13",

temp.yaml (new file, 124 lines)

@@ -0,0 +1,124 @@
---
# Source: leCoffre-back/templates/service-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: lecoffre-back-sa
---
# Source: leCoffre-back/templates/service-account.yaml
apiVersion: v1
kind: Secret
metadata:
name: lecoffre-back-sa-token
annotations:
kubernetes.io/service-account.name: lecoffre-back-sa
type: kubernetes.io/service-account-token
---
# Source: leCoffre-back/templates/lecoffre-back.yaml
apiVersion: v1
kind: Service
metadata:
name: lecoffre-back-svc
namespace: lecoffre
labels:
spec:
ports:
- port: 80
name: http
targetPort: 1337
selector:
app: lecoffre-back
---
# Source: leCoffre-back/templates/lecoffre-back.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: lecoffre-back
namespace: lecoffre
labels:
app: lecoffre-back
spec:
replicas: 1
selector:
matchLabels:
app: lecoffre-back
template:
metadata:
annotations:
vault.hashicorp.com/agent-inject: "true"
vault.hashicorp.com/agent-inject-secret-envs-api: secret/data/lecoffre-back-stg/config/envs-api
vault.hashicorp.com/agent-inject-template-envs-api: |
{{ with secret "secret/data/lecoffre-back-stg/config/envs-api" }}
{{ range $k, $v := .Data.data }}
export {{ $k }}="{{ $v }}"
{{ end }}
{{ end }}
vault.hashicorp.com/agent-pre-populate-only: "true"
vault.hashicorp.com/role: custom_lecoffre-back_injector_rol
labels:
app: lecoffre-back
spec:
serviceAccountName: lecoffre-back-sa
imagePullSecrets:
- name: docker-pull-secret
containers:
- name: lecoffre-back
image: "rg.fr-par.scw.cloud/lecoffre/back:v0.3.2"
resources:
limits:
memory: 2Gi
requests:
cpu: 200m
memory: 1Gi
imagePullPolicy: Always
command: ['sh', '-c', '. /vault/secrets/envs-api && npm start']
---
# Source: leCoffre-back/templates/lecoffre-back.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: lecoffre-back
namespace: lecoffre
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
nginx.ingress.kubernetes.io/from-to-www-redirect: "true"
spec:
rules:
- host: api.stg.lecoffre.smart-chain.fr
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: lecoffre-back-svc
port:
number: 80
---
# Source: leCoffre-back/templates/docker-pull-secret.yaml
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: docker-pull-secret
spec:
refreshInterval: 1h
secretStoreRef:
name: dockerpullsecret-vault-cluster-secret-store
kind: ClusterSecretStore
target:
template:
type: kubernetes.io/dockerconfigjson
name: docker-pull-secret
creationPolicy: Owner
data:
- secretKey: .dockerconfigjson
remoteRef:
key: secret/data/minteed-stg/config/dockerpullsecret
property: .dockerconfigjson
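temp.yaml is by all appearances a local chart render committed alongside the templates (the # Source: comments are the giveaway); it reflects older values than the rest of the commit (npm start, back:v0.3.2, targetPort 1337). My reconstruction of the command that produced it, not stated anywhere in the commit:

    helm template lecoffre-back devops/ -f devops/values.yaml > temp.yaml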