FirstProfile
Some checks failed
K8S Fission Deployment / Deployment fission functions (push) Failing after 20s
Some checks failed
K8S Fission Deployment / Deployment fission functions (push) Failing after 20s
This commit is contained in:
50
.devcontainer/devcontainer.json
Normal file
50
.devcontainer/devcontainer.json
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
|
||||||
|
// README at: https://github.com/devcontainers/templates/tree/main/src/rust
|
||||||
|
{
|
||||||
|
"name": "fission:ailbl-tag",
|
||||||
|
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
|
||||||
|
// "image": "mcr.microsoft.com/devcontainers/rust:0-1-bullseye",
|
||||||
|
// Use docker compose file
|
||||||
|
"dockerComposeFile": "docker-compose.yml",
|
||||||
|
"service": "devcontainer",
|
||||||
|
"workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
|
||||||
|
// Features to add to the dev container. More info: https://containers.dev/features.
|
||||||
|
// "features": {},
|
||||||
|
// Configure tool-specific properties.
|
||||||
|
"customizations": {
|
||||||
|
// Configure properties specific to VS Code.
|
||||||
|
"vscode": {
|
||||||
|
"settings": {
|
||||||
|
"terminal.integrated.defaultProfile.linux": "bash",
|
||||||
|
"python.formatting.provider": "black",
|
||||||
|
"python.formatting.blackPath": "/usr/local/py-utils/bin/black"
|
||||||
|
},
|
||||||
|
"extensions": [
|
||||||
|
// VS Code specific
|
||||||
|
"ms-azuretools.vscode-docker" ,
|
||||||
|
"dbaeumer.vscode-eslint" ,
|
||||||
|
"EditorConfig.EditorConfig" ,
|
||||||
|
// Python specific
|
||||||
|
"ms-python.python" ,
|
||||||
|
"ms-python.black-formatter" ,
|
||||||
|
// C++ specific
|
||||||
|
"ms-vscode.cpptools" ,
|
||||||
|
"twxs.cmake" ,
|
||||||
|
// Markdown specific
|
||||||
|
"yzhang.markdown-all-in-one" ,
|
||||||
|
// YAML formatter
|
||||||
|
"kennylong.kubernetes-yaml-formatter",
|
||||||
|
// hightlight and format `pyproject.toml`
|
||||||
|
"tamasfe.even-better-toml"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mounts": [ ],
|
||||||
|
// "runArgs": [
|
||||||
|
// "--env-file",
|
||||||
|
// ".devcontainer/.env"
|
||||||
|
// ],
|
||||||
|
"postStartCommand": "/workspaces/${localWorkspaceFolderBasename}/.devcontainer/initscript.sh",
|
||||||
|
// Use 'forwardPorts' to make a list of ports inside the container available locally.
|
||||||
|
"forwardPorts": []
|
||||||
|
}
|
||||||
58
.devcontainer/docker-compose.yml
Normal file
58
.devcontainer/docker-compose.yml
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
services:
|
||||||
|
devcontainer:
|
||||||
|
# All tags avaiable at: https://mcr.microsoft.com/v2/devcontainers/rust/tags/list
|
||||||
|
# image: mcr.microsoft.com/vscode/devcontainers/python:3.10-bullseye
|
||||||
|
image: registry.vegastar.vn/vegacloud/fission-python:3.10-bullseye
|
||||||
|
volumes:
|
||||||
|
- ../..:/workspaces:cached
|
||||||
|
command: sleep infinity
|
||||||
|
env_file:
|
||||||
|
- .env
|
||||||
|
|
||||||
|
k3s-server:
|
||||||
|
image: "rancher/k3s:${K3S_VERSION:-latest}"
|
||||||
|
# command: server --disable traefik --disable servicelb
|
||||||
|
command: server --disable traefik
|
||||||
|
hostname: k3s-server
|
||||||
|
tmpfs: [ "/run", "/var/run" ]
|
||||||
|
ulimits:
|
||||||
|
nproc: 65535
|
||||||
|
nofile:
|
||||||
|
soft: 65535
|
||||||
|
hard: 65535
|
||||||
|
privileged: true
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
- K3S_TOKEN=${K3S_TOKEN:-secret}
|
||||||
|
- K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml
|
||||||
|
- K3S_KUBECONFIG_MODE=666
|
||||||
|
volumes:
|
||||||
|
- k3s-server:/var/lib/rancher/k3s
|
||||||
|
# This is just so that we get the kubeconfig file out
|
||||||
|
- .:/output
|
||||||
|
ports:
|
||||||
|
- 6443 # Kubernetes API Server
|
||||||
|
- 80 # Ingress controller port 80
|
||||||
|
- 443 # Ingress controller port 443
|
||||||
|
|
||||||
|
k3s-agent:
|
||||||
|
image: "rancher/k3s:${K3S_VERSION:-latest}"
|
||||||
|
hostname: k3s-agent
|
||||||
|
tmpfs: [ "/run", "/var/run" ]
|
||||||
|
ulimits:
|
||||||
|
nproc: 65535
|
||||||
|
nofile:
|
||||||
|
soft: 65535
|
||||||
|
hard: 65535
|
||||||
|
privileged: true
|
||||||
|
restart: always
|
||||||
|
environment:
|
||||||
|
- K3S_URL=https://k3s-server:6443
|
||||||
|
- K3S_TOKEN=${K3S_TOKEN:-secret}
|
||||||
|
volumes:
|
||||||
|
- k3s-agent:/var/lib/rancher/k3s
|
||||||
|
profiles: [ "cluster" ] # only start agent if run with profile `cluster`
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
k3s-server: {}
|
||||||
|
k3s-agent: {}
|
||||||
81
.devcontainer/helm/fission-values.yaml
Normal file
81
.devcontainer/helm/fission-values.yaml
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
### helm show values fission-charts/fission-all > .devcontainer/fission-values.yaml
|
||||||
|
|
||||||
|
serviceMonitor:
|
||||||
|
enabled: true
|
||||||
|
##namespace in which you want to deploy servicemonitor
|
||||||
|
##
|
||||||
|
namespace: "monitoring"
|
||||||
|
## Map of additional labels to add to the ServiceMonitor resources
|
||||||
|
# to allow selecting specific ServiceMonitors
|
||||||
|
# in case of multiple prometheus deployments
|
||||||
|
additionalServiceMonitorLabels:
|
||||||
|
release: "prometheus"
|
||||||
|
# key: "value"
|
||||||
|
|
||||||
|
##The following components expose Prometheus metrics and have podmonitors in this chart (disabled by default)
|
||||||
|
##
|
||||||
|
podMonitor:
|
||||||
|
enabled: true
|
||||||
|
##namespace in which you want to deploy podmonitor
|
||||||
|
##
|
||||||
|
namespace: "monitoring"
|
||||||
|
## Map of additional labels to add to the PodMonitor resources
|
||||||
|
# to allow selecting specific PodMonitor
|
||||||
|
# in case of multiple prometheus deployments
|
||||||
|
additionalPodMonitorLabels:
|
||||||
|
release: "monitoring"
|
||||||
|
# key: "value"
|
||||||
|
|
||||||
|
## Enable Grafana Dashboard configmaps for auto dashboard provisioning
|
||||||
|
## If you use kube-prometheus stack for monitoring, these will get imported into grafana
|
||||||
|
grafana:
|
||||||
|
## The namespace in which grafana pod is present
|
||||||
|
namespace: monitoring
|
||||||
|
dashboards:
|
||||||
|
## Disabled by default. switch to true to deploy them
|
||||||
|
enable: true
|
||||||
|
|
||||||
|
# OpenTelemetry is a set of tools for collecting, analyzing, and visualizing
|
||||||
|
# distributed tracing data across function calls.
|
||||||
|
openTelemetry:
|
||||||
|
## Use this flag to set the collector endpoint for OpenTelemetry.
|
||||||
|
## The variable is endpoint of the collector in the format shown below.
|
||||||
|
## otlpCollectorEndpoint: "otel-collector.observability.svc:4317"
|
||||||
|
##
|
||||||
|
otlpCollectorEndpoint: "otel-collector.opentelemetry-operator-system.svc.cluster.local:4317"
|
||||||
|
## Set this flag to false if you are using secure endpoint for the collector.
|
||||||
|
##
|
||||||
|
otlpInsecure: true
|
||||||
|
## Key-value pairs to be used as headers associated with gRPC or HTTP requests to the collector.
|
||||||
|
## Eg. otlpHeaders: "key1=value1,key2=value2"
|
||||||
|
##
|
||||||
|
# otlpHeaders: ""
|
||||||
|
## Supported samplers:
|
||||||
|
## always_on - Sampler that always samples spans, regardless of the parent span's sampling decision.
|
||||||
|
## always_off - Sampler that never samples spans, regardless of the parent span's sampling decision.
|
||||||
|
## traceidratio - Sampler that samples probabalistically based on rate.
|
||||||
|
## parentbased_always_on - (default if empty) Sampler that respects its parent span's sampling decision, but otherwise always samples.
|
||||||
|
## parentbased_always_off - Sampler that respects its parent span's sampling decision, but otherwise never samples.
|
||||||
|
## parentbased_traceidratio - Sampler that respects its parent span's sampling decision, but otherwise samples probabalistically based on rate.
|
||||||
|
## See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration
|
||||||
|
##
|
||||||
|
tracesSampler: "parentbased_traceidratio"
|
||||||
|
## Each Sampler type defines its own expected input, if any.
|
||||||
|
## Currently we get trace ratio for the case of,
|
||||||
|
## 1. traceidratio
|
||||||
|
## 2. parentbased_traceidratio
|
||||||
|
## Sampling probability, a number in the [0..1] range, e.g. "0.1". Default is 0.1.
|
||||||
|
##
|
||||||
|
tracesSamplingRate: "1"
|
||||||
|
## Supported providers:
|
||||||
|
## tracecontext - W3C Trace Context
|
||||||
|
## baggage - W3C Baggage
|
||||||
|
## b3 - B3 Single
|
||||||
|
## b3multi - B3 Multi
|
||||||
|
## jaeger - Jaeger uber-trace-id header
|
||||||
|
## xray - AWS X-Ray (third party)
|
||||||
|
## ottrace - OpenTracing Trace (third party)
|
||||||
|
## none - No tracing
|
||||||
|
## See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration
|
||||||
|
##
|
||||||
|
# propagators: "tracecontext,baggage"
|
||||||
132
.devcontainer/helm/opentelemetry-collector.yml
Normal file
132
.devcontainer/helm/opentelemetry-collector.yml
Normal file
@@ -0,0 +1,132 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: ConfigMap
|
||||||
|
metadata:
|
||||||
|
name: otel-collector-conf
|
||||||
|
namespace: opentelemetry-operator-system
|
||||||
|
labels:
|
||||||
|
app: opentelemetry
|
||||||
|
component: otel-collector-conf
|
||||||
|
data:
|
||||||
|
otel-collector-config: |
|
||||||
|
receivers:
|
||||||
|
# Make sure to add the otlp receiver.
|
||||||
|
# This will open up the receiver on port 4317
|
||||||
|
otlp:
|
||||||
|
protocols:
|
||||||
|
grpc:
|
||||||
|
endpoint: "0.0.0.0:4317"
|
||||||
|
processors:
|
||||||
|
extensions:
|
||||||
|
health_check: {}
|
||||||
|
exporters:
|
||||||
|
jaeger:
|
||||||
|
# <service-name>.<namespace>.svc.cluster.local:<service-port>
|
||||||
|
endpoint: "jaeger-collector.jaeger.svc.cluster.local:14250"
|
||||||
|
insecure: true
|
||||||
|
prometheus:
|
||||||
|
endpoint: 0.0.0.0:8889
|
||||||
|
namespace: "testapp"
|
||||||
|
logging:
|
||||||
|
|
||||||
|
service:
|
||||||
|
extensions: [ health_check ]
|
||||||
|
pipelines:
|
||||||
|
traces:
|
||||||
|
receivers: [ otlp ]
|
||||||
|
processors: []
|
||||||
|
exporters: [ jaeger ]
|
||||||
|
metrics:
|
||||||
|
receivers: [ otlp ]
|
||||||
|
processors: []
|
||||||
|
exporters: [ prometheus, logging ]
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: otel-collector
|
||||||
|
namespace: opentelemetry-operator-system
|
||||||
|
labels:
|
||||||
|
app: opentelemetry
|
||||||
|
component: otel-collector
|
||||||
|
spec:
|
||||||
|
ports:
|
||||||
|
- name: otlp # Default endpoint for otlp receiver.
|
||||||
|
port: 4317
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 4317
|
||||||
|
nodePort: 30080
|
||||||
|
- name: metrics # Default endpoint for metrics.
|
||||||
|
port: 8889
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 8889
|
||||||
|
selector:
|
||||||
|
component: otel-collector
|
||||||
|
type: NodePort
|
||||||
|
---
|
||||||
|
apiVersion: apps/v1
|
||||||
|
kind: Deployment
|
||||||
|
metadata:
|
||||||
|
name: otel-collector
|
||||||
|
namespace: opentelemetry-operator-system
|
||||||
|
labels:
|
||||||
|
app: opentelemetry
|
||||||
|
component: otel-collector
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
matchLabels:
|
||||||
|
app: opentelemetry
|
||||||
|
component: otel-collector
|
||||||
|
minReadySeconds: 5
|
||||||
|
progressDeadlineSeconds: 120
|
||||||
|
replicas: 1 #TODO - adjust this to your own requirements
|
||||||
|
template:
|
||||||
|
metadata:
|
||||||
|
annotations:
|
||||||
|
prometheus.io/path: "/metrics"
|
||||||
|
prometheus.io/port: "8889"
|
||||||
|
prometheus.io/scrape: "true"
|
||||||
|
labels:
|
||||||
|
app: opentelemetry
|
||||||
|
component: otel-collector
|
||||||
|
spec:
|
||||||
|
containers:
|
||||||
|
- command:
|
||||||
|
- "/otelcol"
|
||||||
|
- "--config=/conf/otel-collector-config.yaml"
|
||||||
|
# Memory Ballast size should be max 1/3 to 1/2 of memory.
|
||||||
|
- "--mem-ballast-size-mib=683"
|
||||||
|
env:
|
||||||
|
- name: GOGC
|
||||||
|
value: "80"
|
||||||
|
image: otel/opentelemetry-collector:0.6.0
|
||||||
|
name: otel-collector
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
cpu: 1
|
||||||
|
memory: 2Gi
|
||||||
|
requests:
|
||||||
|
cpu: 200m
|
||||||
|
memory: 400Mi
|
||||||
|
ports:
|
||||||
|
- containerPort: 4317 # Default endpoint for otlp receiver.
|
||||||
|
- containerPort: 8889 # Default endpoint for querying metrics.
|
||||||
|
volumeMounts:
|
||||||
|
- name: otel-collector-config-vol
|
||||||
|
mountPath: /conf
|
||||||
|
# - name: otel-collector-secrets
|
||||||
|
# mountPath: /secrets
|
||||||
|
livenessProbe:
|
||||||
|
httpGet:
|
||||||
|
path: /
|
||||||
|
port: 13133 # Health Check extension default port.
|
||||||
|
readinessProbe:
|
||||||
|
httpGet:
|
||||||
|
path: /
|
||||||
|
port: 13133 # Health Check extension default port.
|
||||||
|
volumes:
|
||||||
|
- configMap:
|
||||||
|
name: otel-collector-conf
|
||||||
|
items:
|
||||||
|
- key: otel-collector-config
|
||||||
|
path: otel-collector-config.yaml
|
||||||
|
name: otel-collector-config-vol
|
||||||
170
.devcontainer/initscript.sh
Executable file
170
.devcontainer/initscript.sh
Executable file
@@ -0,0 +1,170 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
## For debugging
|
||||||
|
# set -eux
|
||||||
|
|
||||||
|
|
||||||
|
#############################
|
||||||
|
### DEV PACKAGES
|
||||||
|
#############################
|
||||||
|
export RAKE_VER=0.1.3
|
||||||
|
|
||||||
|
curl -L https://$PRIVATE_GIT_TOKEN@registry.vegastar.vn/vegacloud/make/releases/download/$RAKE_VER/rake-$RAKE_VER-x86_64-unknown-linux-musl.tar.gz | tar xzv -C /tmp/
|
||||||
|
sudo install -o root -g root -m 0755 /tmp/rake-$RAKE_VER-x86_64-unknown-linux-musl/rake /usr/local/bin/rake
|
||||||
|
|
||||||
|
#############################
|
||||||
|
### KUBECTL
|
||||||
|
#############################
|
||||||
|
|
||||||
|
## Config kubectl
|
||||||
|
mkdir -p ~/.kube
|
||||||
|
cp ${PWD}/.devcontainer/kubeconfig.yaml ~/.kube/config
|
||||||
|
sed -i 's/127.0.0.1/k3s-server/g' ~/.kube/config
|
||||||
|
|
||||||
|
## allow insecure connection
|
||||||
|
shopt -s expand_aliases
|
||||||
|
echo 'alias kubectl="kubectl --insecure-skip-tls-verify"' >> ~/.bashrc
|
||||||
|
echo 'alias k="kubectl --insecure-skip-tls-verify"' >> ~/.bashrc
|
||||||
|
|
||||||
|
#############################
|
||||||
|
### NGINX INGRESS
|
||||||
|
#############################
|
||||||
|
|
||||||
|
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-$NGINX_INGRESS_VER/deploy/static/provider/cloud/deploy.yaml
|
||||||
|
cat <<EOT >> /tmp/nginx-service.yaml
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
metadata:
|
||||||
|
name: ingress-nginx-controller-loadbalancer
|
||||||
|
namespace: ingress-nginx
|
||||||
|
spec:
|
||||||
|
selector:
|
||||||
|
app.kubernetes.io/component: controller
|
||||||
|
app.kubernetes.io/instance: ingress-nginx
|
||||||
|
app.kubernetes.io/name: ingress-nginx
|
||||||
|
ports:
|
||||||
|
- name: http
|
||||||
|
port: 80
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 80
|
||||||
|
- name: https
|
||||||
|
port: 443
|
||||||
|
protocol: TCP
|
||||||
|
targetPort: 443
|
||||||
|
type: LoadBalancer
|
||||||
|
EOT
|
||||||
|
kubectl apply -f /tmp/nginx-service.yaml
|
||||||
|
rm -f /tmp/nginx-service.yaml
|
||||||
|
|
||||||
|
#############################
|
||||||
|
### OPEN TELEMETRY
|
||||||
|
#############################
|
||||||
|
# kubectl create namespace $JAEGER_NAMESPACE
|
||||||
|
# kubectl create namespace $OPENTELEMETRY_NAMESPACE
|
||||||
|
|
||||||
|
# ## cert-manager
|
||||||
|
# kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml
|
||||||
|
|
||||||
|
# ## install jaeger
|
||||||
|
# helm repo add jaegertracing https://jaegertracing.github.io/helm-charts
|
||||||
|
# helm install jaeger jaegertracing/jaeger -n $JAEGER_NAMESPACE
|
||||||
|
# kubectl -n $JAEGER_NAMESPACE get po
|
||||||
|
|
||||||
|
# ## open telemetry operator
|
||||||
|
# kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml
|
||||||
|
|
||||||
|
# ## create an OpenTelemetry Collector instance
|
||||||
|
# kubectl -n $OPENTELEMETRY_NAMESPACE apply -f .devcontainer/helm/opentelemetry-collector.yaml
|
||||||
|
|
||||||
|
#############################
|
||||||
|
### FISSION PODs
|
||||||
|
#############################
|
||||||
|
kubectl create namespace $FISSION_NAMESPACE
|
||||||
|
|
||||||
|
## install with helm
|
||||||
|
kubectl create -k "github.com/fission/fission/crds/v1?ref=${FISSION_VER}"
|
||||||
|
helm repo add fission-charts https://fission.github.io/fission-charts/ && helm repo update
|
||||||
|
kubectl apply -f - <<EOF
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Namespace
|
||||||
|
metadata:
|
||||||
|
name: fission
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Namespace
|
||||||
|
metadata:
|
||||||
|
name: gh-eom
|
||||||
|
EOF
|
||||||
|
kubectl apply -f - <<EOF
|
||||||
|
type: kubernetes.io/dockerconfigjson
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: vega-container-registry
|
||||||
|
namespace: fission
|
||||||
|
data:
|
||||||
|
.dockerconfigjson: >-
|
||||||
|
eyJhdXRocyI6eyJyZWdpc3RyeS52ZWdhc3Rhci52biI6eyJ1c2VybmFtZSI6InRpZW5kZCIsInBhc3N3b3JkIjoiYTBjY2JjMDVjNzMyYzExMjU3OTg1NjMwNjY5ZTFjNjEyNDg0NzU1MyIsImF1dGgiOiJkR2xsYm1Sa09tRXdZMk5pWXpBMVl6Y3pNbU14TVRJMU56azROVFl6TURZMk9XVXhZell4TWpRNE5EYzFOVE09In19fQ==
|
||||||
|
EOF
|
||||||
|
helm upgrade --install fission fission-charts/fission-all --namespace $FISSION_NAMESPACE -f - <<EOF
|
||||||
|
imagePullSecrets:
|
||||||
|
- name: vega-container-registry
|
||||||
|
defaultNamespace: default
|
||||||
|
additionalFissionNamespaces:
|
||||||
|
- gh-eom
|
||||||
|
EOF
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
kubectl apply -f - <<EOF
|
||||||
|
apiVersion: networking.k8s.io/v1
|
||||||
|
kind: Ingress
|
||||||
|
metadata:
|
||||||
|
name: router
|
||||||
|
namespace: fission
|
||||||
|
spec:
|
||||||
|
ingressClassName: nginx
|
||||||
|
rules:
|
||||||
|
- http:
|
||||||
|
paths:
|
||||||
|
- path: /ailbl
|
||||||
|
pathType: Prefix
|
||||||
|
backend:
|
||||||
|
service:
|
||||||
|
name: router
|
||||||
|
port:
|
||||||
|
number: 80
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# ## install without helm
|
||||||
|
# kubectl create -k "github.com/fission/fission/crds/v1?ref=${FISSION_VER}"
|
||||||
|
# kubectl create namespace $FISSION_NAMESPACE
|
||||||
|
# kubectl config set-context --current --namespace=$FISSION_NAMESPACE
|
||||||
|
# kubectl apply -f https://github.com/fission/fission/releases/download/${FISSION_VER}/fission-all-${FISSION_VER}-minikube.yaml
|
||||||
|
# kubectl config set-context --current --namespace=default #to change context to default namespace after installation
|
||||||
|
|
||||||
|
|
||||||
|
#############################
|
||||||
|
### PROMETHEUS AND GRAFANA
|
||||||
|
#############################
|
||||||
|
# kubectl create namespace $METRICS_NAMESPACE
|
||||||
|
|
||||||
|
# helm repo add prometheus-community https://prometheus-community.github.io/helm-charts && helm repo update
|
||||||
|
# helm install prometheus prometheus-community/kube-prometheus-stack -n $METRICS_NAMESPACE
|
||||||
|
|
||||||
|
#############################
|
||||||
|
### UPDATE FISSION
|
||||||
|
#############################
|
||||||
|
|
||||||
|
# helm upgrade fission fission-charts/fission-all --namespace $FISSION_NAMESPACE -f .devcontainer/helm/fission-values.yaml
|
||||||
|
|
||||||
|
#############################
|
||||||
|
### PORT FORWARDING
|
||||||
|
#############################
|
||||||
|
|
||||||
|
## To access jaeger-query, you can use Kubernetes port forwarding
|
||||||
|
# kubectl -n jaeger port-forward svc/jaeger-query 8080:80 --address='0.0.0.0'
|
||||||
|
## To access kabana, you can use Kubernetes port forwarding
|
||||||
|
# kubectl --namespace monitoring port-forward svc/prometheus-grafana 3000:80
|
||||||
|
## For password, you'll need to run the following command:
|
||||||
|
# kubectl get secret --namespace monitoring prometheus-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
|
||||||
146
.fission/access-rules.yaml
Normal file
146
.fission/access-rules.yaml
Normal file
@@ -0,0 +1,146 @@
|
|||||||
|
- id: 'ailbl-tag-create'
|
||||||
|
description: 'This description'
|
||||||
|
upstream:
|
||||||
|
preserve_host: true
|
||||||
|
url: 'http://router.fission.svc.cluster.local:80'
|
||||||
|
strip_path: 'v1/'
|
||||||
|
match:
|
||||||
|
url: 'https://api.geohub.vn/v1/ailbl/admin/tags'
|
||||||
|
methods:
|
||||||
|
- POST
|
||||||
|
authenticators:
|
||||||
|
- handler: cookie_session
|
||||||
|
authorizer:
|
||||||
|
handler: remote_json
|
||||||
|
config:
|
||||||
|
remote: 'http://keto-service.ory-staging.svc.cluster.local:4466/relation-tuples/check'
|
||||||
|
payload: >-
|
||||||
|
{"namespace":"AilblAcp","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::create","relation":"access"}
|
||||||
|
mutators:
|
||||||
|
- handler: header
|
||||||
|
|
||||||
|
- id: 'ailbl-tag-view'
|
||||||
|
description: 'This description'
|
||||||
|
upstream:
|
||||||
|
preserve_host: true
|
||||||
|
url: 'http://router.fission.svc.cluster.local:80'
|
||||||
|
strip_path: 'v1/'
|
||||||
|
match:
|
||||||
|
url: 'https://api.geohub.vn/v1/ailbl/admin/tags'
|
||||||
|
methods:
|
||||||
|
- GET
|
||||||
|
authenticators:
|
||||||
|
- handler: cookie_session
|
||||||
|
authorizer:
|
||||||
|
handler: remote_json
|
||||||
|
config:
|
||||||
|
remote: 'http://keto-service.ory-staging.svc.cluster.local:4466/relation-tuples/check'
|
||||||
|
payload: >-
|
||||||
|
{"namespace":"AilblAcp","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::view","relation":"access"}
|
||||||
|
mutators:
|
||||||
|
- handler: header
|
||||||
|
|
||||||
|
- id: 'ailbl-tag-update'
|
||||||
|
description: 'This description'
|
||||||
|
upstream:
|
||||||
|
preserve_host: true
|
||||||
|
url: 'http://router.fission.svc.cluster.local:80'
|
||||||
|
strip_path: 'v1/'
|
||||||
|
match:
|
||||||
|
url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>'
|
||||||
|
methods:
|
||||||
|
- PUT
|
||||||
|
authenticators:
|
||||||
|
- handler: cookie_session
|
||||||
|
authorizer:
|
||||||
|
handler: remote_json
|
||||||
|
config:
|
||||||
|
remote: 'http://keto-service.ory-staging.svc.cluster.local:4466/relation-tuples/check'
|
||||||
|
payload: >-
|
||||||
|
{"namespace":"AilblAcp","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::update","relation":"access"}
|
||||||
|
mutators:
|
||||||
|
- handler: header
|
||||||
|
|
||||||
|
- id: 'ailbl-tag-delete'
|
||||||
|
description: 'This description'
|
||||||
|
upstream:
|
||||||
|
preserve_host: true
|
||||||
|
url: 'http://router.fission.svc.cluster.local:80'
|
||||||
|
strip_path: 'v1/'
|
||||||
|
match:
|
||||||
|
url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>'
|
||||||
|
methods:
|
||||||
|
- DELETE
|
||||||
|
authenticators:
|
||||||
|
- handler: cookie_session
|
||||||
|
authorizer:
|
||||||
|
handler: remote_json
|
||||||
|
config:
|
||||||
|
remote: 'http://keto-service.ory-staging.svc.cluster.local:4466/relation-tuples/check'
|
||||||
|
payload: >-
|
||||||
|
{"namespace":"AilblAcp","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::delete","relation":"access"}
|
||||||
|
mutators:
|
||||||
|
- handler: header
|
||||||
|
|
||||||
|
- id: 'ailbl-tag-ref-create'
|
||||||
|
description: 'This description'
|
||||||
|
upstream:
|
||||||
|
preserve_host: true
|
||||||
|
url: 'http://router.fission.svc.cluster.local:80'
|
||||||
|
strip_path: 'v1/'
|
||||||
|
match:
|
||||||
|
url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>/refs'
|
||||||
|
methods:
|
||||||
|
- POST
|
||||||
|
authenticators:
|
||||||
|
- handler: cookie_session
|
||||||
|
authorizer:
|
||||||
|
handler: remote_json
|
||||||
|
config:
|
||||||
|
remote: 'http://keto-service.ory-staging.svc.cluster.local:4466/relation-tuples/check'
|
||||||
|
payload: >-
|
||||||
|
{"namespace":"AilblAcp","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::create","relation":"access"}
|
||||||
|
mutators:
|
||||||
|
- handler: header
|
||||||
|
|
||||||
|
- id: 'ailbl-tag-ref-view'
|
||||||
|
description: 'This description'
|
||||||
|
upstream:
|
||||||
|
preserve_host: true
|
||||||
|
url: 'http://router.fission.svc.cluster.local:80'
|
||||||
|
strip_path: 'v1/'
|
||||||
|
match:
|
||||||
|
url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>/refs'
|
||||||
|
methods:
|
||||||
|
- GET
|
||||||
|
authenticators:
|
||||||
|
- handler: cookie_session
|
||||||
|
authorizer:
|
||||||
|
handler: remote_json
|
||||||
|
config:
|
||||||
|
remote: 'http://keto-service.ory-staging.svc.cluster.local:4466/relation-tuples/check'
|
||||||
|
payload: >-
|
||||||
|
{"namespace":"AilblAcp","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::view","relation":"access"}
|
||||||
|
mutators:
|
||||||
|
- handler: header
|
||||||
|
|
||||||
|
- id: 'ailbl-tag-ref-delete'
|
||||||
|
description: 'This description'
|
||||||
|
upstream:
|
||||||
|
preserve_host: true
|
||||||
|
url: 'http://router.fission.svc.cluster.local:80'
|
||||||
|
strip_path: 'v1/'
|
||||||
|
match:
|
||||||
|
url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>/refs/<[0-9a-fA-F\-]{36}>'
|
||||||
|
methods:
|
||||||
|
- DELETE
|
||||||
|
authenticators:
|
||||||
|
- handler: cookie_session
|
||||||
|
authorizer:
|
||||||
|
handler: remote_json
|
||||||
|
config:
|
||||||
|
remote: 'http://keto-service.ory-staging.svc.cluster.local:4466/relation-tuples/check'
|
||||||
|
payload: >-
|
||||||
|
{"namespace":"AilblAcp","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::delete","relation":"access"}
|
||||||
|
mutators:
|
||||||
|
- handler: header
|
||||||
59
.fission/deployment.json
Normal file
59
.fission/deployment.json
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
{
|
||||||
|
"namespace": "default",
|
||||||
|
"environments": {
|
||||||
|
"user-profile-py": {
|
||||||
|
"image": "ghcr.io/fission/python-env",
|
||||||
|
"builder": "ghcr.io/fission/python-builder",
|
||||||
|
"mincpu": 50,
|
||||||
|
"maxcpu": 100,
|
||||||
|
"minmemory": 50,
|
||||||
|
"maxmemory": 500,
|
||||||
|
"poolsize": 1
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"archives": {
|
||||||
|
"package.zip": {
|
||||||
|
"sourcepath": "apps"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"packages": {
|
||||||
|
"ailbl-user-profile": {
|
||||||
|
"buildcmd": "./build.sh",
|
||||||
|
"sourcearchive": "package.zip",
|
||||||
|
"env": "user-profile-py"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"function_common": {
|
||||||
|
"pkg": "ailbl-user-profile",
|
||||||
|
"secrets": [
|
||||||
|
"fission-ailbl-user-profile-env"
|
||||||
|
],
|
||||||
|
"executor": {
|
||||||
|
"select": "newdeploy",
|
||||||
|
"newdeploy": {
|
||||||
|
"minscale": 1,
|
||||||
|
"maxscale": 1
|
||||||
|
},
|
||||||
|
"poolmgr": {
|
||||||
|
"concurrency": 1,
|
||||||
|
"requestsperpod": 1,
|
||||||
|
"onceonly": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"mincpu": 50,
|
||||||
|
"maxcpu": 100,
|
||||||
|
"minmemory": 50,
|
||||||
|
"maxmemory": 500
|
||||||
|
},
|
||||||
|
"secrets": {
|
||||||
|
"fission-ailbl-user-profile-env": {
|
||||||
|
"literals": [
|
||||||
|
"PG_HOST=160.30.113.113",
|
||||||
|
"PG_PORT=45432",
|
||||||
|
"PG_DB=postgres",
|
||||||
|
"PG_USER=postgres",
|
||||||
|
"PG_PASS=q2q32RQx9R9qVAp3vkVrrASnSUUhzKvC"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
14
.fission/dev-deployment.json
Normal file
14
.fission/dev-deployment.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"namespace": "default",
|
||||||
|
"secrets": {
|
||||||
|
"fission-ailbl-user-profile-env": {
|
||||||
|
"literals": [
|
||||||
|
"PG_HOST=160.30.113.113",
|
||||||
|
"PG_PORT=45432",
|
||||||
|
"PG_DB=postgres",
|
||||||
|
"PG_USER=postgres",
|
||||||
|
"PG_PASS=q2q32RQx9R9qVAp3vkVrrASnSUUhzKvC"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
14
.fission/local-deployment.json
Normal file
14
.fission/local-deployment.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"namespace": "default",
|
||||||
|
"secrets": {
|
||||||
|
"fission-ailbl-user-profile-env": {
|
||||||
|
"literals": [
|
||||||
|
"S3_BUCKET=ailbl",
|
||||||
|
"S3_ENDPOINT_URL=http://160.30.113.113:9000",
|
||||||
|
"S3_ACCESS_KEY_ID=quyen",
|
||||||
|
"S3_SECRET_ACCESS_KEY=12345678",
|
||||||
|
"S3_PREFIX=user/avatar"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
14
.fission/staging-deployment.json
Normal file
14
.fission/staging-deployment.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"namespace": "default",
|
||||||
|
"secrets": {
|
||||||
|
"fission-ailbl-user-profile-env": {
|
||||||
|
"literals": [
|
||||||
|
"PG_HOST=160.30.113.113",
|
||||||
|
"PG_PORT=45432",
|
||||||
|
"PG_DB=postgres",
|
||||||
|
"PG_USER=postgres",
|
||||||
|
"PG_PASS=q2q32RQx9R9qVAp3vkVrrASnSUUhzKvC"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
14
.fission/test-deployment.json
Normal file
14
.fission/test-deployment.json
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
{
|
||||||
|
"namespace": "default",
|
||||||
|
"secrets": {
|
||||||
|
"fission-ailbl-user-profile-env": {
|
||||||
|
"literals": [
|
||||||
|
"PG_HOST=160.30.113.113",
|
||||||
|
"PG_PORT=45432",
|
||||||
|
"PG_DB=postgres",
|
||||||
|
"PG_USER=postgres",
|
||||||
|
"PG_PASS=q2q32RQx9R9qVAp3vkVrrASnSUUhzKvC"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
30
.gitea/workflows/analystic-dispatch.yaml
Normal file
30
.gitea/workflows/analystic-dispatch.yaml
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
name: "K8S Fission Code Analystics"
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
jobs:
|
||||||
|
sonarqube:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
- name: 🔍 SonarQube Scan
|
||||||
|
id: scan
|
||||||
|
uses: sonarsource/sonarqube-scan-action@master
|
||||||
|
env:
|
||||||
|
SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
|
||||||
|
SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }}
|
||||||
|
with:
|
||||||
|
args: >
|
||||||
|
-Dsonar.projectKey=${{ github.event.repository.name }} -Dsonar.sources=.
|
||||||
|
- name: 🔔 Send notification
|
||||||
|
uses: appleboy/telegram-action@master
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
to: ${{ secrets.TELEGRAM_TO }}
|
||||||
|
token: ${{ secrets.TELEGRAM_TOKEN }}
|
||||||
|
format: markdown
|
||||||
|
socks5: ${{ secrets.TELEGRAM_PROXY_URL != '' && secrets.TELEGRAM_PROXY_URL || '' }}
|
||||||
|
message: |
|
||||||
|
${{ steps.scan.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} Scanned ${{ github.event.repository.name }}
|
||||||
|
*Msg*: `${{ github.event.commits[0].message }}`
|
||||||
69
.gitea/workflows/dev-deployment.yaml
Normal file
69
.gitea/workflows/dev-deployment.yaml
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
name: "K8S Fission Deployment"
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ 'main' ]
|
||||||
|
jobs:
|
||||||
|
deployment-fission:
|
||||||
|
name: Deployment fission functions
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
RUNNER_TOOL_CACHE: /toolcache
|
||||||
|
FISSION_PROFILE: DEV
|
||||||
|
FISSION_VER: 1.21.0
|
||||||
|
RAKE_VER: 0.1.3
|
||||||
|
steps:
|
||||||
|
- name: ☸️ Setup kubectl
|
||||||
|
uses: azure/setup-kubectl@v4
|
||||||
|
- name: 🔄 Cache
|
||||||
|
id: cache
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
/usr/local/bin/rake
|
||||||
|
/usr/local/bin/fission
|
||||||
|
key: ${{ runner.os }}-${{ github.event.repository.name }}-${{ hashFiles('.fission/deployment.json') }}
|
||||||
|
- name: ☘️ Configure Kubeconfig
|
||||||
|
uses: azure/k8s-set-context@v4
|
||||||
|
with:
|
||||||
|
method: kubeconfig
|
||||||
|
kubeconfig: ${{ secrets[format('{0}_KUBECONFIG', env.FISSION_PROFILE)] }}
|
||||||
|
- name: 🔄 Install Dependencies
|
||||||
|
if: steps.cache.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
curl -L "https://${{ secrets.REGISTRY_PASSWORD }}@registry.vegastar.vn/vegacloud/make/releases/download/${RAKE_VER}/rake-${RAKE_VER}-x86_64-unknown-linux-musl.tar.gz" | tar xzv -C /tmp/
|
||||||
|
curl -L "https://github.com/fission/fission/releases/download/v${FISSION_VER}/fission-v${FISSION_VER}-linux-amd64" --output /tmp/fission
|
||||||
|
install -o root -g root -m 0755 /tmp/rake-${RAKE_VER}-x86_64-unknown-linux-musl/rake /usr/local/bin/rake
|
||||||
|
install -o root -g root -m 0755 /tmp/fission /usr/local/bin/fission
|
||||||
|
fission check
|
||||||
|
# rake cfg install fission -f
|
||||||
|
- name: 🕓 Checkout the previous codes
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.before }}
|
||||||
|
- name: ♻️ Remove the previous version
|
||||||
|
# continue-on-error: true
|
||||||
|
run: |
|
||||||
|
echo "use profile [$FISSION_PROFILE]"
|
||||||
|
mkdir -p manifests || true
|
||||||
|
rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
|
||||||
|
rake sp build -fi && rake sp down -i
|
||||||
|
- name: 🔎 Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: ✨ Deploy the new version
|
||||||
|
id: deploy
|
||||||
|
run: |
|
||||||
|
echo "use profile [$FISSION_PROFILE]"
|
||||||
|
mkdir -p manifests || true
|
||||||
|
rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
|
||||||
|
rake sp build -fi && rake sp up -i
|
||||||
|
- name: 🔔 Send notification
|
||||||
|
uses: appleboy/telegram-action@master
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
to: ${{ secrets.TELEGRAM_TO }}
|
||||||
|
token: ${{ secrets.TELEGRAM_TOKEN }}
|
||||||
|
format: markdown
|
||||||
|
socks5: ${{ secrets.TELEGRAM_PROXY_URL != '' && secrets.TELEGRAM_PROXY_URL || '' }}
|
||||||
|
message: |
|
||||||
|
${{ steps.deploy.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} Install fn ${{ github.event.repository.name }}
|
||||||
|
*Msg*: `${{ github.event.commits[0].message }}`
|
||||||
74
.gitea/workflows/install-dispatch.yaml
Normal file
74
.gitea/workflows/install-dispatch.yaml
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
name: "K8S Fission Deployment"
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
profile:
|
||||||
|
description: "Profile to execute to"
|
||||||
|
required: true
|
||||||
|
type: choice
|
||||||
|
options: [ DEV, TEST, STAGING ]
|
||||||
|
jobs:
|
||||||
|
deployment-fission:
|
||||||
|
name: Deployment fission functions
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
RUNNER_TOOL_CACHE: /toolcache
|
||||||
|
FISSION_PROFILE: ${{ github.event.inputs.profile }}
|
||||||
|
FISSION_VER: 1.21.0
|
||||||
|
RAKE_VER: 0.1.3
|
||||||
|
steps:
|
||||||
|
- name: ☸️ Setup kubectl
|
||||||
|
uses: azure/setup-kubectl@v4
|
||||||
|
- name: ☘️ Configure Kubeconfig
|
||||||
|
uses: azure/k8s-set-context@v4
|
||||||
|
with:
|
||||||
|
method: kubeconfig
|
||||||
|
kubeconfig: ${{ secrets[format('{0}_KUBECONFIG', github.event.inputs.profile)] }}
|
||||||
|
- name: 🔄 Cache
|
||||||
|
id: cache
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
/usr/local/bin/rake
|
||||||
|
/usr/local/bin/fission
|
||||||
|
key: ${{ runner.os }}-${{ github.event.repository.name }}-${{ hashFiles('.fission/deployment.json') }}
|
||||||
|
- name: 🔄 Install Dependencies
|
||||||
|
if: steps.cache.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
curl -L "https://${{ secrets.REGISTRY_PASSWORD }}@registry.vegastar.vn/vegacloud/make/releases/download/${RAKE_VER}/rake-${RAKE_VER}-x86_64-unknown-linux-musl.tar.gz" | tar xzv -C /tmp/
|
||||||
|
curl -L "https://github.com/fission/fission/releases/download/v${FISSION_VER}/fission-v${FISSION_VER}-linux-amd64" --output /tmp/fission
|
||||||
|
install -o root -g root -m 0755 /tmp/rake-${RAKE_VER}-x86_64-unknown-linux-musl/rake /usr/local/bin/rake
|
||||||
|
install -o root -g root -m 0755 /tmp/fission /usr/local/bin/fission
|
||||||
|
fission check
|
||||||
|
# rake cfg install fission -f
|
||||||
|
- name: 🕓 Checkout the previous codes
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.before }}
|
||||||
|
- name: ♻️ Remove the previous version
|
||||||
|
# continue-on-error: true
|
||||||
|
run: |
|
||||||
|
echo "use profile [$FISSION_PROFILE]"
|
||||||
|
mkdir -p manifests || true
|
||||||
|
rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
|
||||||
|
rake sp build -fi && rake sp down -i
|
||||||
|
- name: 🔎 Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: ✨ Deploy the new version
|
||||||
|
id: deploy
|
||||||
|
run: |
|
||||||
|
echo "use profile [$FISSION_PROFILE]"
|
||||||
|
mkdir -p manifests || true
|
||||||
|
rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
|
||||||
|
rake sp build -fi && rake sp up -i
|
||||||
|
- name: 🔔 Send notification
|
||||||
|
uses: appleboy/telegram-action@master
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
to: ${{ secrets.TELEGRAM_TO }}
|
||||||
|
token: ${{ secrets.TELEGRAM_TOKEN }}
|
||||||
|
format: markdown
|
||||||
|
socks5: ${{ secrets.TELEGRAM_PROXY_URL != '' && secrets.TELEGRAM_PROXY_URL || '' }}
|
||||||
|
message: |
|
||||||
|
${{ steps.deploy.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} Install fn ${{ github.event.repository.name }}
|
||||||
|
*Msg*: `${{ github.event.commits[0].message }}`
|
||||||
79
.gitea/workflows/staging-deployment.yaml
Normal file
79
.gitea/workflows/staging-deployment.yaml
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
name: "K8S Fission Deployment"
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
profile:
|
||||||
|
description: "Profile to execute to"
|
||||||
|
required: true
|
||||||
|
type: choice
|
||||||
|
options: [ DEV, TEST, STAGING ]
|
||||||
|
jobs:
|
||||||
|
deployment-fission:
|
||||||
|
name: Deployment fission functions
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
RUNNER_TOOL_CACHE: /toolcache
|
||||||
|
steps:
|
||||||
|
- name: 🍀 Extract branch name
|
||||||
|
run: echo "K8S_PROFILE=`echo ${GITHUB_REF_NAME:-${GITHUB_REF#refs/heads/}} | tr '[:lower:]' '[:upper:]'`" >> $GITHUB_ENV
|
||||||
|
- name: ☸️ Setup kubectl
|
||||||
|
uses: azure/setup-kubectl@v4
|
||||||
|
- name: 🛠️ Configure Kubeconfig
|
||||||
|
uses: azure/k8s-set-context@v4
|
||||||
|
with:
|
||||||
|
method: kubeconfig
|
||||||
|
kubeconfig: ${{ secrets[format('{0}_KUBECONFIG', env.K8S_PROFILE)] }}
|
||||||
|
- name: ⇩ Download PyMake CLI
|
||||||
|
uses: ethanjli/cached-download-action@v0.1.3
|
||||||
|
with:
|
||||||
|
url: https://bin.rhosted.com/RhdpPK.py
|
||||||
|
destination: /tmp/pymake
|
||||||
|
cache-key: pymake-cli
|
||||||
|
- name: ⇩ Download fission CLI
|
||||||
|
uses: ethanjli/cached-download-action@v0.1.3
|
||||||
|
with:
|
||||||
|
url: https://github.com/fission/fission/releases/download/v1.21.0/fission-v1.21.0-linux-amd64
|
||||||
|
destination: /tmp/fission
|
||||||
|
cache-key: fission-cli
|
||||||
|
- name: 🔨 Install Tools
|
||||||
|
run: |
|
||||||
|
# curl -L "https://github.com/fission/fission/releases/download/v1.21.0/fission-v1.21.0-linux-amd64" --output /tmp/fission
|
||||||
|
install -o root -g root -m 0755 /tmp/pymake /usr/local/bin/pymake
|
||||||
|
install -o root -g root -m 0755 /tmp/fission /usr/local/bin/fission
|
||||||
|
fission check
|
||||||
|
- name: cache fission cli
|
||||||
|
uses: actions/cache@v3
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
/usr/local/bin/fission
|
||||||
|
key: go_path-${{ steps.hash-go.outputs.hash }}
|
||||||
|
restore-keys: |-
|
||||||
|
go_cache-${{ steps.hash-go.outputs.hash }}
|
||||||
|
- name: 👀 Checkout the previous codes
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
ref: ${{ github.event.before }}
|
||||||
|
- name: 🧹 Remove the previous version
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
fission specs destroy --force
|
||||||
|
kubectl delete -R -f manifests
|
||||||
|
- name: 👀 Checkout repository
|
||||||
|
id: checkout-new-code
|
||||||
|
if: always()
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: ✨ Deploy the new version
|
||||||
|
id: deploy
|
||||||
|
run: |
|
||||||
|
kubectl apply -R -f manifests
|
||||||
|
fission specs apply --wait
|
||||||
|
- name: 🔔 Send notification
|
||||||
|
uses: appleboy/telegram-action@master
|
||||||
|
if: always() # This ensures the step runs even if previous steps fail
|
||||||
|
with:
|
||||||
|
to: ${{ secrets.TELEGRAM_TO }}
|
||||||
|
token: ${{ secrets.TELEGRAM_TOKEN }}
|
||||||
|
format: markdown
|
||||||
|
message: |
|
||||||
|
${{ steps.deploy.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} ${{ github.repository }}
|
||||||
|
*Commit*: ${{ github.event.commits[0].message }}
|
||||||
63
.gitea/workflows/uninstall-dispatch.yaml
Normal file
63
.gitea/workflows/uninstall-dispatch.yaml
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
name: "K8S Fission Deployment"
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
profile:
|
||||||
|
description: "Profile to execute to"
|
||||||
|
required: true
|
||||||
|
type: choice
|
||||||
|
options: [ DEV, TEST, STAGING ]
|
||||||
|
jobs:
|
||||||
|
destroy-fission:
|
||||||
|
name: Destroy fission functions
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
env:
|
||||||
|
RUNNER_TOOL_CACHE: /toolcache
|
||||||
|
FISSION_PROFILE: ${{ github.event.inputs.profile }}
|
||||||
|
FISSION_VER: 1.21.0
|
||||||
|
RAKE_VER: 0.1.3
|
||||||
|
steps:
|
||||||
|
- name: ☸️ Setup kubectl
|
||||||
|
uses: azure/setup-kubectl@v4
|
||||||
|
- name: ☘️ Configure Kubeconfig
|
||||||
|
uses: azure/k8s-set-context@v4
|
||||||
|
with:
|
||||||
|
method: kubeconfig
|
||||||
|
kubeconfig: ${{ secrets[format('{0}_KUBECONFIG', github.event.inputs.profile)] }}
|
||||||
|
- name: 🔄 Cache
|
||||||
|
id: cache
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
/usr/local/bin/rake
|
||||||
|
/usr/local/bin/fission
|
||||||
|
key: ${{ runner.os }}-${{ github.event.repository.name }}-${{ hashFiles('.fission/deployment.json') }}
|
||||||
|
- name: 🔄 Install Dependencies
|
||||||
|
if: steps.cache.outputs.cache-hit != 'true'
|
||||||
|
run: |
|
||||||
|
curl -L "https://${{ secrets.REGISTRY_PASSWORD }}@registry.vegastar.vn/vegacloud/make/releases/download/${RAKE_VER}/rake-${RAKE_VER}-x86_64-unknown-linux-musl.tar.gz" | tar xzv -C /tmp/
|
||||||
|
curl -L "https://github.com/fission/fission/releases/download/v${FISSION_VER}/fission-v${FISSION_VER}-linux-amd64" --output /tmp/fission
|
||||||
|
install -o root -g root -m 0755 /tmp/rake-${RAKE_VER}-x86_64-unknown-linux-musl/rake /usr/local/bin/rake
|
||||||
|
install -o root -g root -m 0755 /tmp/fission /usr/local/bin/fission
|
||||||
|
fission check
|
||||||
|
# rake cfg install fission -f
|
||||||
|
- name: 🔎 Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
- name: ♻️ Remove
|
||||||
|
id: deploy
|
||||||
|
run: |
|
||||||
|
echo "use profile [$FISSION_PROFILE]"
|
||||||
|
mkdir -p manifests || true
|
||||||
|
rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
|
||||||
|
rake sp build -fi && rake sp down -i
|
||||||
|
- name: 🔔 Send notification
|
||||||
|
uses: appleboy/telegram-action@master
|
||||||
|
if: always()
|
||||||
|
with:
|
||||||
|
to: ${{ secrets.TELEGRAM_TO }}
|
||||||
|
token: ${{ secrets.TELEGRAM_TOKEN }}
|
||||||
|
format: markdown
|
||||||
|
socks5: ${{ secrets.TELEGRAM_PROXY_URL != '' && secrets.TELEGRAM_PROXY_URL || '' }}
|
||||||
|
message: |
|
||||||
|
${{ steps.deploy.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} Uninstall fn ${{ github.event.repository.name }}
|
||||||
|
*Msg*: `${{ github.event.commits[0].message }}`
|
||||||
191
.gitignore
vendored
Normal file
191
.gitignore
vendored
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
# Byte-compiled / optimized / DLL files
|
||||||
|
__pycache__/
|
||||||
|
*.py[cod]
|
||||||
|
*$py.class
|
||||||
|
|
||||||
|
# C extensions
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Distribution / packaging
|
||||||
|
.Python
|
||||||
|
build/
|
||||||
|
develop-eggs/
|
||||||
|
dist/
|
||||||
|
downloads/
|
||||||
|
eggs/
|
||||||
|
.eggs/
|
||||||
|
lib/
|
||||||
|
lib64/
|
||||||
|
parts/
|
||||||
|
sdist/
|
||||||
|
var/
|
||||||
|
wheels/
|
||||||
|
share/python-wheels/
|
||||||
|
# *.egg-info/
|
||||||
|
.installed.cfg
|
||||||
|
*.egg
|
||||||
|
MANIFEST
|
||||||
|
|
||||||
|
# PyInstaller
|
||||||
|
# Usually these files are written by a python script from a template
|
||||||
|
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||||
|
*.manifest
|
||||||
|
*.spec
|
||||||
|
|
||||||
|
# Installer logs
|
||||||
|
pip-log.txt
|
||||||
|
pip-delete-this-directory.txt
|
||||||
|
|
||||||
|
# Unit test / coverage reports
|
||||||
|
htmlcov/
|
||||||
|
.tox/
|
||||||
|
.nox/
|
||||||
|
.coverage
|
||||||
|
.coverage.*
|
||||||
|
.cache
|
||||||
|
nosetests.xml
|
||||||
|
coverage.xml
|
||||||
|
*.cover
|
||||||
|
*.py,cover
|
||||||
|
.hypothesis/
|
||||||
|
.pytest_cache/
|
||||||
|
cover/
|
||||||
|
|
||||||
|
# Translations
|
||||||
|
*.mo
|
||||||
|
*.pot
|
||||||
|
|
||||||
|
# Django stuff:
|
||||||
|
*.log
|
||||||
|
local_settings.py
|
||||||
|
db.sqlite3
|
||||||
|
db.sqlite3-journal
|
||||||
|
|
||||||
|
# Flask stuff:
|
||||||
|
instance/
|
||||||
|
.webassets-cache
|
||||||
|
|
||||||
|
# Scrapy stuff:
|
||||||
|
.scrapy
|
||||||
|
|
||||||
|
# Sphinx documentation
|
||||||
|
docs/_build/
|
||||||
|
|
||||||
|
# PyBuilder
|
||||||
|
.pybuilder/
|
||||||
|
target/
|
||||||
|
|
||||||
|
# Jupyter Notebook
|
||||||
|
.ipynb_checkpoints
|
||||||
|
|
||||||
|
# IPython
|
||||||
|
profile_default/
|
||||||
|
ipython_config.py
|
||||||
|
|
||||||
|
# pyenv
|
||||||
|
# For a library or package, you might want to ignore these files since the code is
|
||||||
|
# intended to run in multiple environments; otherwise, check them in:
|
||||||
|
# .python-version
|
||||||
|
|
||||||
|
# pipenv
|
||||||
|
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||||
|
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||||
|
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||||
|
# install all needed dependencies.
|
||||||
|
#Pipfile.lock
|
||||||
|
|
||||||
|
# poetry
|
||||||
|
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||||
|
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||||
|
# commonly ignored for libraries.
|
||||||
|
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||||
|
#poetry.lock
|
||||||
|
|
||||||
|
# pdm
|
||||||
|
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||||
|
#pdm.lock
|
||||||
|
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||||
|
# in version control.
|
||||||
|
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
||||||
|
.pdm.toml
|
||||||
|
.pdm-python
|
||||||
|
.pdm-build/
|
||||||
|
|
||||||
|
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||||
|
__pypackages__/
|
||||||
|
|
||||||
|
# Celery stuff
|
||||||
|
celerybeat-schedule
|
||||||
|
celerybeat.pid
|
||||||
|
|
||||||
|
# SageMath parsed files
|
||||||
|
*.sage.py
|
||||||
|
|
||||||
|
# Environments
|
||||||
|
.env
|
||||||
|
.venv
|
||||||
|
env/
|
||||||
|
venv/
|
||||||
|
ENV/
|
||||||
|
env.bak/
|
||||||
|
venv.bak/
|
||||||
|
|
||||||
|
# Spyder project settings
|
||||||
|
.spyderproject
|
||||||
|
.spyproject
|
||||||
|
|
||||||
|
# Rope project settings
|
||||||
|
.ropeproject
|
||||||
|
|
||||||
|
# mkdocs documentation
|
||||||
|
/site
|
||||||
|
|
||||||
|
# mypy
|
||||||
|
.mypy_cache/
|
||||||
|
.dmypy.json
|
||||||
|
dmypy.json
|
||||||
|
|
||||||
|
# Pyre type checker
|
||||||
|
.pyre/
|
||||||
|
|
||||||
|
# pytype static type analyzer
|
||||||
|
.pytype/
|
||||||
|
|
||||||
|
# Cython debug symbols
|
||||||
|
cython_debug/
|
||||||
|
|
||||||
|
# PyCharm
|
||||||
|
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||||
|
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||||
|
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||||
|
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||||
|
#.idea/
|
||||||
|
|
||||||
|
## Ignore Temporary directory of Dagster
|
||||||
|
/tmp*
|
||||||
|
|
||||||
|
## Devcontainer cache files, that will make devcontainer start faster after first run
|
||||||
|
/.vscache/.vscode-server/*
|
||||||
|
!/.vscache/.vscode-server/.gitkeep
|
||||||
|
/.vscache/.devcontainer/*
|
||||||
|
!/.vscache/.devcontainer/.gitkeep
|
||||||
|
|
||||||
|
## Ignore K3S config file
|
||||||
|
/.devcontainer/kubeconfig.yaml
|
||||||
|
|
||||||
|
## Ignore packaged files
|
||||||
|
/*.zip
|
||||||
|
# !/package.zip
|
||||||
|
/*.bak
|
||||||
|
|
||||||
|
## Ignore Makefile, it will come with `pymake` package
|
||||||
|
Makefile
|
||||||
|
|
||||||
|
## Ignore fission's specs files
|
||||||
|
/specs/*
|
||||||
|
!/specs/fission-deployment-config.yaml
|
||||||
|
!/specs/README
|
||||||
|
|
||||||
|
/manifests/*
|
||||||
|
|
||||||
|
/fission-dumps
|
||||||
92
README.md
Normal file
92
README.md
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
# py-ailbl-tag
|
||||||
|
|
||||||
|
LabelAI: Tag or Keyword or Topic
|
||||||
|
|
||||||
|
## Getting started
|
||||||
|
|
||||||
|
To make it easy for you to get started with GitLab, here's a list of recommended next steps.
|
||||||
|
|
||||||
|
Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
|
||||||
|
|
||||||
|
## Add your files
|
||||||
|
|
||||||
|
- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
|
||||||
|
- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
|
||||||
|
|
||||||
|
```
|
||||||
|
cd existing_repo
|
||||||
|
git remote add origin https://git.vegastar.vn/cloud/srvless/py-ailbl-tag.git
|
||||||
|
git branch -M main
|
||||||
|
git push -uf origin main
|
||||||
|
```
|
||||||
|
|
||||||
|
## Integrate with your tools
|
||||||
|
|
||||||
|
- [ ] [Set up project integrations](https://git.vegastar.vn/cloud/srvless/py-ailbl-tag/-/settings/integrations)
|
||||||
|
|
||||||
|
## Collaborate with your team
|
||||||
|
|
||||||
|
- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
|
||||||
|
- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
|
||||||
|
- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
|
||||||
|
- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
|
||||||
|
- [ ] [Automatically merge when pipeline succeeds](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html)
|
||||||
|
|
||||||
|
## Test and Deploy
|
||||||
|
|
||||||
|
Use the built-in continuous integration in GitLab.
|
||||||
|
|
||||||
|
- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html)
|
||||||
|
- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing(SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
|
||||||
|
- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
|
||||||
|
- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
|
||||||
|
- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
|
||||||
|
|
||||||
|
***
|
||||||
|
|
||||||
|
# Editing this README
|
||||||
|
|
||||||
|
When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template.
|
||||||
|
|
||||||
|
## Suggestions for a good README
|
||||||
|
Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
|
||||||
|
|
||||||
|
## Name
|
||||||
|
Choose a self-explaining name for your project.
|
||||||
|
|
||||||
|
## Description
|
||||||
|
Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
|
||||||
|
|
||||||
|
## Badges
|
||||||
|
On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
|
||||||
|
|
||||||
|
## Visuals
|
||||||
|
Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
|
||||||
|
|
||||||
|
## Installation
|
||||||
|
Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people to using your project as quickly as possible. If it only runs in a specific context like a particular programming language version or operating system or has dependencies that have to be installed manually, also add a Requirements subsection.
|
||||||
|
|
||||||
|
## Usage
|
||||||
|
Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
|
||||||
|
|
||||||
|
## Support
|
||||||
|
Tell people where they can go to for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
|
||||||
|
|
||||||
|
## Roadmap
|
||||||
|
If you have ideas for releases in the future, it is a good idea to list them in the README.
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
State if you are open to contributions and what your requirements are for accepting them.
|
||||||
|
|
||||||
|
For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
|
||||||
|
|
||||||
|
You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
|
||||||
|
|
||||||
|
## Authors and acknowledgment
|
||||||
|
Show your appreciation to those who have contributed to the project.
|
||||||
|
|
||||||
|
## License
|
||||||
|
For open source projects, say how it is licensed.
|
||||||
|
|
||||||
|
## Project status
|
||||||
|
If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
|
||||||
BIN
apps/.DS_Store
vendored
Normal file
BIN
apps/.DS_Store
vendored
Normal file
Binary file not shown.
89
apps/ailbl-admin_avatar-insert-update-delete-get.py
Normal file
89
apps/ailbl-admin_avatar-insert-update-delete-get.py
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
import crud
|
||||||
|
from flask import jsonify, request
|
||||||
|
|
||||||
|
ALLOWED_IMAGE_TYPES = {"image/jpeg", "image/png", "image/gif", "image/webp"}
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
"""
|
||||||
|
```fission
|
||||||
|
{
|
||||||
|
"name": "avatar-admin-get-insert-delete-put",
|
||||||
|
"http_triggers": {
|
||||||
|
"avatar-admin-get-insert-delete-put-http": {
|
||||||
|
"url": "/ailbl/admin/avatars",
|
||||||
|
"methods": ["PUT", "POST", "DELETE", "GET"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if request.method == "PUT":
|
||||||
|
return make_update_avatar_request()
|
||||||
|
elif request.method == "DELETE":
|
||||||
|
return make_delete_avatar_request()
|
||||||
|
elif request.method == "POST":
|
||||||
|
return make_insert_request()
|
||||||
|
elif request.method == "GET":
|
||||||
|
return make_get_avatar_request()
|
||||||
|
else:
|
||||||
|
return {"error": "Method not allow"}, 405
|
||||||
|
except Exception as ex:
|
||||||
|
return jsonify({"error": str(ex)}), 500
|
||||||
|
|
||||||
|
|
||||||
|
def make_insert_request():
|
||||||
|
try:
|
||||||
|
user_id = request.headers.get("X-User")
|
||||||
|
file = request.files.get("avatar")
|
||||||
|
if not user_id or not file:
|
||||||
|
return jsonify({"error": "user_id or file is required"}), 400
|
||||||
|
if file.mimetype not in ALLOWED_IMAGE_TYPES:
|
||||||
|
return jsonify(
|
||||||
|
{"error": "Invalid file type. Only JPG, PNG, GIF, WEBP are allowed."}
|
||||||
|
), 400
|
||||||
|
response, status = crud.update_or_create_avatar(user_id, file)
|
||||||
|
return jsonify(response), status
|
||||||
|
except Exception as e:
|
||||||
|
return jsonify({"error": str(e)}), 500
|
||||||
|
|
||||||
|
|
||||||
|
def make_get_avatar_request():
|
||||||
|
try:
|
||||||
|
user_id = request.headers.get("X-User")
|
||||||
|
if not user_id:
|
||||||
|
return jsonify({"error": "user_id is required"}), 400
|
||||||
|
|
||||||
|
return crud.get_avatar_url(user_id)
|
||||||
|
except Exception as e:
|
||||||
|
return jsonify({"error": str(e)}), 500
|
||||||
|
|
||||||
|
|
||||||
|
def make_delete_avatar_request():
|
||||||
|
try:
|
||||||
|
user_id = request.headers.get("X-User")
|
||||||
|
if not user_id:
|
||||||
|
return jsonify({"error": "user_id is required"}), 400
|
||||||
|
|
||||||
|
response, status = crud.delete_avatar(user_id)
|
||||||
|
return jsonify(response), status
|
||||||
|
except Exception as e:
|
||||||
|
return jsonify({"error": str(e)}), 500
|
||||||
|
|
||||||
|
|
||||||
|
def make_update_avatar_request():
|
||||||
|
try:
|
||||||
|
user_id = request.headers.get("X-User")
|
||||||
|
file = request.files.get("avatar")
|
||||||
|
if not user_id or not file:
|
||||||
|
return jsonify({"error": "user_id or file is required"}), 400
|
||||||
|
if file.mimetype not in ALLOWED_IMAGE_TYPES:
|
||||||
|
return jsonify(
|
||||||
|
{"error": "Invalid file type. Only JPG, PNG, GIF, WEBP are allowed."}
|
||||||
|
), 400
|
||||||
|
|
||||||
|
response, status = crud.update_or_create_avatar(user_id, file)
|
||||||
|
return jsonify(response), status
|
||||||
|
except Exception as e:
|
||||||
|
return jsonify({"error": str(e)}), 500
|
||||||
95
apps/ailbl-user_avatar-insert-update-delete-get.py
Normal file
95
apps/ailbl-user_avatar-insert-update-delete-get.py
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
import crud
|
||||||
|
from flask import jsonify, request
|
||||||
|
|
||||||
|
# from storage.minio_client import get_minio_client, check_existing_avatar_on_minio, upload_to_minio
|
||||||
|
ALLOWED_IMAGE_TYPES = {"image/jpeg", "image/png", "image/gif", "image/webp"}
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
"""
|
||||||
|
```fission
|
||||||
|
{
|
||||||
|
"name": "avatar-users-get-insert-delete-put",
|
||||||
|
"http_triggers": {
|
||||||
|
"avatar-users-get-insert-delete-put-http": {
|
||||||
|
"url": "/ailbl/users/avatars",
|
||||||
|
"methods": ["PUT", "POST", "DELETE", "GET"]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if request.method == "PUT":
|
||||||
|
return make_update_avatar_request()
|
||||||
|
elif request.method == "DELETE":
|
||||||
|
return make_delete_avatar_request()
|
||||||
|
elif request.method == "POST":
|
||||||
|
return make_insert_request()
|
||||||
|
elif request.method == "GET":
|
||||||
|
return make_get_avatar_request()
|
||||||
|
else:
|
||||||
|
return {"error": "Method not allow"}, 405
|
||||||
|
except Exception as ex:
|
||||||
|
return jsonify({"error": str(ex)}), 500
|
||||||
|
|
||||||
|
|
||||||
|
def make_insert_request():
|
||||||
|
try:
|
||||||
|
user_id = request.headers.get("X-User") # Lay user_id tu header X-User
|
||||||
|
# Lay file tu form-data voi key la 'avatar'
|
||||||
|
file = request.files.get("avatar")
|
||||||
|
if not user_id or not file:
|
||||||
|
return jsonify({"error": "user_id or file is required"}), 400
|
||||||
|
# Check mimetype(kieu du lieu cua file anh)
|
||||||
|
if file.mimetype not in ALLOWED_IMAGE_TYPES:
|
||||||
|
return jsonify(
|
||||||
|
{"error": "Invalid file type. Only JPG, PNG, GIF, WEBP are allowed."}
|
||||||
|
), 400
|
||||||
|
response, status = crud.update_or_create_avatar(user_id, file)
|
||||||
|
return jsonify(response), status
|
||||||
|
except Exception as e:
|
||||||
|
return jsonify({"error": str(e)}), 500
|
||||||
|
|
||||||
|
|
||||||
|
def make_update_avatar_request():
    """Handle PUT: replace the avatar for the user identified by X-User.

    The gateway forwards the caller's identity in the X-User header
    (cookies are translated upstream); the image arrives as multipart
    form field 'avatar'.
    """
    try:
        uid = request.headers.get("X-User")
        upload = request.files.get("avatar")

        # Reject the request early when either required piece is missing.
        if not uid or not upload:
            return jsonify({"error": "user_id or file is required"}), 400

        # Happy path first: accepted mimetype -> delegate to the CRUD layer.
        if upload.mimetype in ALLOWED_IMAGE_TYPES:
            body, code = crud.update_or_create_avatar(uid, upload)
            return jsonify(body), code

        return jsonify(
            {"error": "Invalid file type. Only JPG, PNG, GIF, WEBP are allowed."}
        ), 400
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
|
||||||
|
|
||||||
|
|
||||||
|
def make_delete_avatar_request():
    """Handle DELETE: remove the stored avatar of the X-User caller."""
    try:
        # Identity comes from the gateway-supplied X-User header.
        uid = request.headers.get("X-User")
        if not uid:
            return jsonify({"error": "user_id is required"}), 400

        body, code = crud.delete_avatar(uid)
        return jsonify(body), code
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
|
||||||
|
|
||||||
|
|
||||||
|
def make_get_avatar_request():
    """Handle GET: stream the caller's avatar back as an image response."""
    try:
        uid = request.headers.get("X-User")
        if not uid:
            return jsonify({"error": "user_id is required"}), 400

        # crud.get_avatar_url already returns a (Response, status) pair,
        # so it is passed through without jsonify.
        return crud.get_avatar_url(uid)
    except Exception as exc:
        return jsonify({"error": str(exc)}), 500
|
||||||
15
apps/build.sh
Executable file
15
apps/build.sh
Executable file
@@ -0,0 +1,15 @@
|
|||||||
|
#!/bin/sh
# Fission build hook: install native build dependencies for the base image,
# then vendor the function's Python requirements into the source package.

# Detect the distro from /etc/os-release (e.g. ID=debian or ID=alpine).
ID=$( grep "^ID=" /etc/os-release | awk -F= '{print $2}' )

if [ "${ID}" = "debian" ]
then
    apt-get update && apt-get install -y gcc libpq-dev python3-dev
else
    # Anything non-Debian is assumed to be an apk-based (Alpine) image.
    apk update && apk add gcc postgresql-dev python3-dev
fi

# SRC_PKG / DEPLOY_PKG are provided by the Fission builder environment.
# Fix: expansions were unquoted, which breaks on paths containing spaces
# and lets an empty SRC_PKG silently change the command's meaning.
if [ -f "${SRC_PKG}/requirements.txt" ]
then
    pip3 install -r "${SRC_PKG}/requirements.txt" -t "${SRC_PKG}"
fi
cp -r "${SRC_PKG}" "${DEPLOY_PKG}"
|
||||||
65
apps/crud.py
Normal file
65
apps/crud.py
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
import io
|
||||||
|
|
||||||
|
from flask import Response
|
||||||
|
from helpers import S3_BUCKET, get_secret, s3_client
|
||||||
|
from PIL import Image
|
||||||
|
|
||||||
|
|
||||||
|
# Create&Update function to upload or update user avatar S3/Minio
|
||||||
|
def update_or_create_avatar(user_id: str, file):
    """Upload (or overwrite) the user's avatar object in S3/MinIO.

    The object key is "<S3_PREFIX>/<user_id>", so re-uploading for the
    same user replaces the previous avatar.

    Returns:
        (body, status): raw put_object response and 200 on success,
        or ({"error": ...}, 500) on failure.
    """
    try:
        payload = file.read()
        # The object key doubles as the storage path: prefix + user id.
        key = f"{get_secret('S3_PREFIX')}/{user_id}"
        upload_result = s3_client.put_object(
            Bucket=S3_BUCKET,
            Key=key,
            Body=io.BytesIO(payload),
            ContentLength=len(payload),
            ContentType=file.content_type,
        )
        # NOTE(review): this returns the full S3 client response (including
        # request metadata) to the HTTP caller — confirm that is intended.
        return upload_result, 200
    except Exception as exc:
        return {"error": str(exc)}, 500
|
||||||
|
|
||||||
|
|
||||||
|
def get_avatar_url(user_id: str):
    """Fetch the user's avatar from S3/MinIO and return it as an image response.

    Returns:
        (flask.Response, 200) streaming the raw image bytes on success, or
        ({"error": ...}, 500) on any failure (including a missing object).
    """
    try:
        obj = s3_client.get_object(
            Bucket=S3_BUCKET,
            Key=f"{get_secret('S3_PREFIX')}/{user_id}",
        )
        raw = obj['Body'].read()

        # Sniff the actual image format from the bytes so the Content-Type
        # matches the payload rather than trusting the upload metadata.
        with Image.open(io.BytesIO(raw)) as img:
            detected = img.format.lower()  # e.g. 'jpeg', 'png', 'webp'
        mime = f"image/{'jpeg' if detected == 'jpg' else detected}"

        return Response(
            raw,
            content_type=mime,
            direct_passthrough=True
        ), 200
    except Exception as exc:
        return {"error": str(exc)}, 500
|
||||||
|
|
||||||
|
|
||||||
|
# Delete Function to delete user avatar from S3/Minio
def delete_avatar(user_id: str) -> tuple:
    """Delete the user's avatar object from S3/MinIO.

    Returns:
        (body, status): the raw delete_object response and 200 on success,
        or ({"error": ...}, 500) on failure.
        (Annotation fixed: this returns a tuple, not a dict.)
    """
    try:
        result = s3_client.delete_object(
            Bucket=S3_BUCKET,
            # Same key scheme as upload: "<S3_PREFIX>/<user_id>".
            Key=f"{get_secret('S3_PREFIX')}/{user_id}"
        )
        # NOTE(review): S3 delete_object reports success even when the key
        # does not exist — a missing avatar is not distinguishable here.
        return result, 200
    except Exception as e:
        return {"error": str(e)}, 500
|
||||||
35
apps/helpers.py
Normal file
35
apps/helpers.py
Normal file
@@ -0,0 +1,35 @@
|
|||||||
|
import logging
|
||||||
|
import boto3
|
||||||
|
# Name of the Kubernetes Secret that holds this function's configuration.
SECRET_NAME = "fission-ailbl-user-avatar-env"
# Fallback namespace used when the in-cluster service-account file is absent.
K8S_NAMESPACE = "default"
|
||||||
|
|
||||||
|
|
||||||
|
def get_current_namespace() -> str:
    """Return the Kubernetes namespace this pod runs in.

    Reads the in-cluster service-account namespace file; falls back to
    K8S_NAMESPACE ("default") when the file is missing or unreadable
    (e.g. when running outside a cluster).
    """
    try:
        with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
            namespace = f.read()
    except OSError:
        # Fix: was a bare `except:`, which also swallows KeyboardInterrupt
        # and SystemExit; file-access failures are OSError.
        namespace = K8S_NAMESPACE
    # NOTE(review): the file contents are returned unstripped — confirm the
    # mounted namespace file never carries trailing whitespace before use.
    return str(namespace)
|
||||||
|
|
||||||
|
|
||||||
|
def get_secret(key: str, default=None) -> str:
    """Read one key of the mounted Kubernetes Secret.

    Fission mounts secrets at /secrets/<namespace>/<secret-name>/<key>;
    returns `default` when that key file is missing or unreadable.
    """
    namespace = get_current_namespace()
    path = f"/secrets/{namespace}/{SECRET_NAME}/{key}"
    try:
        with open(path, "r") as f:
            return f.read()
    except OSError:
        # Fix: narrowed from a bare `except:` so genuine programming errors
        # are no longer silently converted into the default value.
        return default
|
||||||
|
|
||||||
|
|
||||||
|
# Bucket and key prefix for avatar objects, supplied via the mounted Secret.
S3_BUCKET = get_secret("S3_BUCKET")
S3_PREFIX = get_secret("S3_PREFIX")

# Shared S3/MinIO client, built once at import time.
# NOTE(review): if any of these secrets is missing, get_secret returns None
# and client calls will fail later — confirm the Secret is always mounted.
s3_client = boto3.client(
    "s3",
    endpoint_url=get_secret("S3_ENDPOINT_URL"),
    aws_access_key_id=get_secret("S3_ACCESS_KEY_ID"),
    aws_secret_access_key=get_secret("S3_SECRET_ACCESS_KEY"),
    # s3v4 signatures — presumably required by the MinIO endpoint; confirm.
    config=boto3.session.Config(signature_version="s3v4"),
)
|
||||||
6
apps/requirements.txt
Normal file
6
apps/requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
# NOTE(review): every dependency below is commented out, so build.sh's
# `pip3 install -r requirements.txt` installs nothing, while the code imports
# Flask, Pillow, pydantic, and boto3. Either the Fission environment image
# must already provide them, or these pins should be uncommented — confirm.
# Flask==3.1.0
|
||||||
|
# psycopg2-binary==2.9.10
|
||||||
|
# pydantic==2.11.3
|
||||||
|
# minio==7.2.5
|
||||||
|
# Pillow==10.4.0
|
||||||
|
# boto3==1.35.70
|
||||||
33
apps/schemas.py
Normal file
33
apps/schemas.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
from pydantic import BaseModel, Field
|
||||||
|
from typing import Optional
|
||||||
|
from enum import IntEnum
|
||||||
|
|
||||||
|
|
||||||
|
# Resource categories a tag can attach to; values are stable DB codes (1..6)
# that match the SMALLINT `Kind` column in the ailbl_tag table.
TagKind = IntEnum(
    "TagKind",
    [
        "ProjectGroup",
        "ProjectData",
        "ProjectMember",
        "ProjectDiscussionTopic",
        "Project",
        "Ticket",
    ],
    start=1,
)
|
||||||
|
|
||||||
|
|
||||||
|
# Request body for creating a tag.
class TagRequest(BaseModel):
    # Display text of the tag (required).
    tag: str = Field(..., max_length=128)
    # Resource category the tag attaches to (see TagKind).
    kind: TagKind
    # Optional reference id; meaning depends on `kind` (36 chars fits a UUID).
    ref: Optional[str] = Field(default=None, max_length=36)
    # Optional colour codes — presumably hex strings like "#RRGGBB"; confirm format.
    primary_color: Optional[str] = Field(default=None, max_length=8)
    secondary_color: Optional[str] = Field(default=None, max_length=8)
|
||||||
|
|
||||||
|
|
||||||
|
# Request body for updating a tag.
# NOTE(review): field-for-field identical to TagRequest; for partial (PATCH)
# updates `tag`/`kind` would usually be optional — confirm intended semantics.
class TagRequestUpdate(BaseModel):
    # Display text of the tag (required).
    tag: str = Field(..., max_length=128)
    # Resource category the tag attaches to (see TagKind).
    kind: TagKind
    # Optional reference id; meaning depends on `kind`.
    ref: Optional[str] = Field(default=None, max_length=36)
    # Optional colour codes (max 8 chars each).
    primary_color: Optional[str] = Field(default=None, max_length=8)
    secondary_color: Optional[str] = Field(default=None, max_length=8)
|
||||||
|
|
||||||
|
|
||||||
|
# Request body for attaching a reference to an existing tag
# (maps onto the ailbl_tag_ref table: Ref / sub_ref columns).
class TagRefRequest(BaseModel):
    # Referenced entity id (required).
    ref: str = Field(..., max_length=64)
    # Optional secondary reference payload.
    sub_ref: Optional[str] = Field(default=None, max_length=1024)
|
||||||
20
migrates/schemas.sql
Normal file
20
migrates/schemas.sql
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
-- Tags attachable to several resource kinds (see TagKind in schemas.py).
CREATE TABLE ailbl_tag (
    ID VARCHAR(64) PRIMARY KEY,
    Tag VARCHAR(128) NOT NULL,
    Kind SMALLINT NOT NULL, -- values 1 to 6 as described (TagKind enum)
    Ref VARCHAR(36), -- dynamic reference, interpreted according to Kind
    primary_color VARCHAR(8), -- primary colour (hex code)
    secondary_color VARCHAR(8), -- secondary colour (hex code)
    Created timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, -- UTC
    Modified timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP, -- UTC; NOTE(review): no trigger refreshes this on UPDATE — confirm the app sets it
    -- NOTE(review): with Ref nullable, PostgreSQL treats NULLs as distinct,
    -- so duplicate (Tag, Kind, NULL) rows are allowed — confirm intended.
    CONSTRAINT unique_tag_kind_ref UNIQUE (Tag, Kind, Ref)
);
|
||||||
|
|
||||||
|
-- Join table attaching additional references to an existing tag.
CREATE TABLE ailbl_tag_ref (
    ID VARCHAR(64) PRIMARY KEY,
    tag_id VARCHAR(64) NOT NULL,
    Ref VARCHAR(64) NOT NULL,
    sub_ref VARCHAR(1024),
    -- Fix: constraints referenced "TagId" while the column is "tag_id".
    -- Unquoted identifiers fold to lowercase in PostgreSQL ("TagId" -> tagid),
    -- so this CREATE TABLE failed with "column tagid does not exist".
    CONSTRAINT unique_tagid_ref UNIQUE (tag_id, Ref),
    CONSTRAINT fk_tagref_tag FOREIGN KEY (tag_id) REFERENCES ailbl_tag(ID) ON DELETE CASCADE
);
|
||||||
19
py-ailbl-user_avatar.code-workspace
Normal file
19
py-ailbl-user_avatar.code-workspace
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
{
|
||||||
|
"folders": [
|
||||||
|
{
|
||||||
|
"path": "."
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "../py-gh-avatar"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "../py-ailbl-tag-main"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "../Upload_File(Project_Vega)"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"path": "../Upload_File(Project_Vega) copy"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
42
specs/README
Normal file
42
specs/README
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
|
||||||
|
Fission Specs
|
||||||
|
=============
|
||||||
|
|
||||||
|
This is a set of specifications for a Fission app. This includes functions,
|
||||||
|
environments, and triggers; we collectively call these things "resources".
|
||||||
|
|
||||||
|
How to use these specs
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
These specs are handled with the 'fission spec' command. See 'fission spec --help'.
|
||||||
|
|
||||||
|
'fission spec apply' will "apply" all resources specified in this directory to your
|
||||||
|
cluster. That means it checks what resources exist on your cluster, what resources are
|
||||||
|
specified in the specs directory, and reconciles the difference by creating, updating or
|
||||||
|
deleting resources on the cluster.
|
||||||
|
|
||||||
|
'fission spec apply' will also package up your source code (or compiled binaries) and
|
||||||
|
upload the archives to the cluster if needed. It uses 'ArchiveUploadSpec' resources in
|
||||||
|
this directory to figure out which files to archive.
|
||||||
|
|
||||||
|
You can use 'fission spec apply --watch' to watch for file changes and continuously keep
|
||||||
|
the cluster updated.
|
||||||
|
|
||||||
|
You can add YAMLs to this directory by writing them manually, but it's easier to generate
|
||||||
|
them. Use 'fission function create --spec' to generate a function spec,
|
||||||
|
'fission environment create --spec' to generate an environment spec, and so on.
|
||||||
|
|
||||||
|
You can edit any of the files in this directory, except 'fission-deployment-config.yaml',
|
||||||
|
which contains a UID that you should never change. To apply your changes simply use
|
||||||
|
'fission spec apply'.
|
||||||
|
|
||||||
|
fission-deployment-config.yaml
|
||||||
|
------------------------------
|
||||||
|
|
||||||
|
fission-deployment-config.yaml contains a UID. This UID is what fission uses to correlate
|
||||||
|
resources on the cluster to resources in this directory.
|
||||||
|
|
||||||
|
All resources created by 'fission spec apply' are annotated with this UID. Resources on
|
||||||
|
the cluster that are _not_ annotated with this UID are never modified or deleted by
|
||||||
|
fission.
|
||||||
|
|
||||||
7
specs/fission-deployment-config.yaml
Normal file
7
specs/fission-deployment-config.yaml
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
# This file is generated by the 'fission spec init' command.
|
||||||
|
# See the README in this directory for background and usage information.
|
||||||
|
# Do not edit the UID below: that will break 'fission spec apply'
|
||||||
|
apiVersion: fission.io/v1
|
||||||
|
kind: DeploymentConfig
|
||||||
|
name: py-ailbl-tag
|
||||||
|
uid: dfd4b9c6-7e2f-4f57-aad5-38b34209eeb1
|
||||||
Reference in New Issue
Block a user