update
Some checks failed
K8S Fission Deployment / Deployment fission functions (push) Failing after 22s
.devcontainer/.env.example (Normal file, 10 lines added)
@@ -0,0 +1,10 @@
K3S_VERSION=latest
K3S_TOKEN=
PRIVATE_GIT_TOKEN=

FISSION_VER=v1.21.0
FISSION_NAMESPACE=fission
NGINX_INGRESS_VER=v1.7.1
METRICS_NAMESPACE=monitoring
OPENTELEMETRY_NAMESPACE=opentelemetry-operator-system
JAEGER_NAMESPACE=jaeger
.devcontainer/devcontainer.json (Normal file, 50 lines added)
@@ -0,0 +1,50 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/rust
{
    "name": "fission:ai-work",
    // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
    // "image": "mcr.microsoft.com/devcontainers/rust:0-1-bullseye",
    // Use docker compose file
    "dockerComposeFile": "docker-compose.yml",
    "service": "devcontainer",
    "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
    // Features to add to the dev container. More info: https://containers.dev/features.
    // "features": {},
    // Configure tool-specific properties.
    "customizations": {
        // Configure properties specific to VS Code.
        "vscode": {
            "settings": {
                "terminal.integrated.defaultProfile.linux": "bash",
                "python.formatting.provider": "black",
                "python.formatting.blackPath": "/usr/local/py-utils/bin/black"
            },
            "extensions": [
                // VS Code specific
                "ms-azuretools.vscode-docker",
                "dbaeumer.vscode-eslint",
                "EditorConfig.EditorConfig",
                // Python specific
                "ms-python.python",
                "ms-python.black-formatter",
                // C++ specific
                "ms-vscode.cpptools",
                "twxs.cmake",
                // Markdown specific
                "yzhang.markdown-all-in-one",
                // YAML formatter
                "kennylong.kubernetes-yaml-formatter",
                // highlight and format `pyproject.toml`
                "tamasfe.even-better-toml"
            ]
        }
    },
    "mounts": [],
    // "runArgs": [
    //     "--env-file",
    //     ".devcontainer/.env"
    // ],
    "postStartCommand": "/workspaces/${localWorkspaceFolderBasename}/.devcontainer/initscript.sh",
    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    "forwardPorts": []
}
.devcontainer/docker-compose.yml (Normal file, 58 lines added)
@@ -0,0 +1,58 @@
services:
  devcontainer:
    # All tags available at: https://mcr.microsoft.com/v2/devcontainers/rust/tags/list
    # image: mcr.microsoft.com/vscode/devcontainers/python:3.10-bullseye
    image: registry.vegastar.vn/vegacloud/fission-python:3.10-bullseye
    volumes:
      - ../..:/workspaces:cached
    command: sleep infinity
    env_file:
      - .env

  k3s-server:
    image: "rancher/k3s:${K3S_VERSION:-latest}"
    # command: server --disable traefik --disable servicelb
    command: server --disable traefik
    hostname: k3s-server
    tmpfs: [ "/run", "/var/run" ]
    ulimits:
      nproc: 65535
      nofile:
        soft: 65535
        hard: 65535
    privileged: true
    restart: always
    environment:
      - K3S_TOKEN=${K3S_TOKEN:-secret}
      - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml
      - K3S_KUBECONFIG_MODE=666
    volumes:
      - k3s-server:/var/lib/rancher/k3s
      # This is just so that we get the kubeconfig file out
      - .:/output
    ports:
      - 6443 # Kubernetes API Server
      - 80   # Ingress controller port 80
      - 443  # Ingress controller port 443

  k3s-agent:
    image: "rancher/k3s:${K3S_VERSION:-latest}"
    hostname: k3s-agent
    tmpfs: [ "/run", "/var/run" ]
    ulimits:
      nproc: 65535
      nofile:
        soft: 65535
        hard: 65535
    privileged: true
    restart: always
    environment:
      - K3S_URL=https://k3s-server:6443
      - K3S_TOKEN=${K3S_TOKEN:-secret}
    volumes:
      - k3s-agent:/var/lib/rancher/k3s
    profiles: [ "cluster" ] # only start the agent when run with profile `cluster`

volumes:
  k3s-server: {}
  k3s-agent: {}
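Since the k3s agent is gated behind the `cluster` profile, a minimal sketch of driving this compose file from the host (standard Docker Compose flags; the service and file names are the ones defined above):

```bash
# Start the devcontainer plus the single-node k3s server (agent stays off)
docker compose -f .devcontainer/docker-compose.yml up -d

# Opt in to the extra worker node via the `cluster` profile
docker compose -f .devcontainer/docker-compose.yml --profile cluster up -d

# The kubeconfig lands in .devcontainer/ thanks to the `.:/output` mount
kubectl --kubeconfig .devcontainer/kubeconfig.yaml get nodes
```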
.devcontainer/helm/fission-values.yaml (Normal file, 81 lines added)
@@ -0,0 +1,81 @@
### helm show values fission-charts/fission-all > .devcontainer/fission-values.yaml

serviceMonitor:
  enabled: true
  ## namespace in which you want to deploy the servicemonitor
  ##
  namespace: "monitoring"
  ## Map of additional labels to add to the ServiceMonitor resources
  # to allow selecting specific ServiceMonitors
  # in case of multiple prometheus deployments
  additionalServiceMonitorLabels:
    release: "prometheus"
    # key: "value"

## The following components expose Prometheus metrics and have podmonitors in this chart (disabled by default)
##
podMonitor:
  enabled: true
  ## namespace in which you want to deploy the podmonitor
  ##
  namespace: "monitoring"
  ## Map of additional labels to add to the PodMonitor resources
  # to allow selecting specific PodMonitors
  # in case of multiple prometheus deployments
  additionalPodMonitorLabels:
    release: "monitoring"
    # key: "value"

## Enable Grafana Dashboard configmaps for auto dashboard provisioning
## If you use the kube-prometheus stack for monitoring, these will get imported into grafana
grafana:
  ## The namespace in which the grafana pod is present
  namespace: monitoring
  dashboards:
    ## Disabled by default. Switch to true to deploy them
    enable: true

# OpenTelemetry is a set of tools for collecting, analyzing, and visualizing
# distributed tracing data across function calls.
openTelemetry:
  ## Use this flag to set the collector endpoint for OpenTelemetry.
  ## The variable is the endpoint of the collector in the format shown below.
  ## otlpCollectorEndpoint: "otel-collector.observability.svc:4317"
  ##
  otlpCollectorEndpoint: "otel-collector.opentelemetry-operator-system.svc.cluster.local:4317"
  ## Set this flag to false if you are using a secure endpoint for the collector.
  ##
  otlpInsecure: true
  ## Key-value pairs to be used as headers associated with gRPC or HTTP requests to the collector.
  ## Eg. otlpHeaders: "key1=value1,key2=value2"
  ##
  # otlpHeaders: ""
  ## Supported samplers:
  ## always_on - Sampler that always samples spans, regardless of the parent span's sampling decision.
  ## always_off - Sampler that never samples spans, regardless of the parent span's sampling decision.
  ## traceidratio - Sampler that samples probabilistically based on rate.
  ## parentbased_always_on - (default if empty) Sampler that respects its parent span's sampling decision, but otherwise always samples.
  ## parentbased_always_off - Sampler that respects its parent span's sampling decision, but otherwise never samples.
  ## parentbased_traceidratio - Sampler that respects its parent span's sampling decision, but otherwise samples probabilistically based on rate.
  ## See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration
  ##
  tracesSampler: "parentbased_traceidratio"
  ## Each Sampler type defines its own expected input, if any.
  ## Currently we get the trace ratio for the case of,
  ## 1. traceidratio
  ## 2. parentbased_traceidratio
  ## Sampling probability, a number in the [0..1] range, e.g. "0.1". Default is 0.1.
  ##
  tracesSamplingRate: "1"
  ## Supported providers:
  ## tracecontext - W3C Trace Context
  ## baggage - W3C Baggage
  ## b3 - B3 Single
  ## b3multi - B3 Multi
  ## jaeger - Jaeger uber-trace-id header
  ## xray - AWS X-Ray (third party)
  ## ottrace - OpenTracing Trace (third party)
  ## none - No tracing
  ## See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration
  ##
  # propagators: "tracecontext,baggage"
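These values are applied on top of an existing Fission release; the same command appears, commented out, near the end of initscript.sh:

```bash
# Re-apply the monitoring/tracing values to the installed Fission release
helm upgrade fission fission-charts/fission-all \
  --namespace "$FISSION_NAMESPACE" \
  -f .devcontainer/helm/fission-values.yaml
```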
.devcontainer/helm/opentelemetry-collector.yml (Normal file, 132 lines added)
@@ -0,0 +1,132 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-collector-conf
  namespace: opentelemetry-operator-system
  labels:
    app: opentelemetry
    component: otel-collector-conf
data:
  otel-collector-config: |
    receivers:
      # Make sure to add the otlp receiver.
      # This will open up the receiver on port 4317
      otlp:
        protocols:
          grpc:
            endpoint: "0.0.0.0:4317"
    processors:
    extensions:
      health_check: {}
    exporters:
      jaeger:
        # <service-name>.<namespace>.svc.cluster.local:<service-port>
        endpoint: "jaeger-collector.jaeger.svc.cluster.local:14250"
        insecure: true
      prometheus:
        endpoint: 0.0.0.0:8889
        namespace: "testapp"
      logging:

    service:
      extensions: [ health_check ]
      pipelines:
        traces:
          receivers: [ otlp ]
          processors: []
          exporters: [ jaeger ]
        metrics:
          receivers: [ otlp ]
          processors: []
          exporters: [ prometheus, logging ]
---
apiVersion: v1
kind: Service
metadata:
  name: otel-collector
  namespace: opentelemetry-operator-system
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  ports:
    - name: otlp # Default endpoint for otlp receiver.
      port: 4317
      protocol: TCP
      targetPort: 4317
      nodePort: 30080
    - name: metrics # Default endpoint for metrics.
      port: 8889
      protocol: TCP
      targetPort: 8889
  selector:
    component: otel-collector
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: otel-collector
  namespace: opentelemetry-operator-system
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  selector:
    matchLabels:
      app: opentelemetry
      component: otel-collector
  minReadySeconds: 5
  progressDeadlineSeconds: 120
  replicas: 1 # TODO - adjust this to your own requirements
  template:
    metadata:
      annotations:
        prometheus.io/path: "/metrics"
        prometheus.io/port: "8889"
        prometheus.io/scrape: "true"
      labels:
        app: opentelemetry
        component: otel-collector
    spec:
      containers:
        - command:
            - "/otelcol"
            - "--config=/conf/otel-collector-config.yaml"
            # Memory Ballast size should be max 1/3 to 1/2 of memory.
            - "--mem-ballast-size-mib=683"
          env:
            - name: GOGC
              value: "80"
          image: otel/opentelemetry-collector:0.6.0
          name: otel-collector
          resources:
            limits:
              cpu: 1
              memory: 2Gi
            requests:
              cpu: 200m
              memory: 400Mi
          ports:
            - containerPort: 4317 # Default endpoint for otlp receiver.
            - containerPort: 8889 # Default endpoint for querying metrics.
          volumeMounts:
            - name: otel-collector-config-vol
              mountPath: /conf
            # - name: otel-collector-secrets
            #   mountPath: /secrets
          livenessProbe:
            httpGet:
              path: /
              port: 13133 # Health Check extension default port.
          readinessProbe:
            httpGet:
              path: /
              port: 13133 # Health Check extension default port.
      volumes:
        - configMap:
            name: otel-collector-conf
            items:
              - key: otel-collector-config
                path: otel-collector-config.yaml
          name: otel-collector-config-vol
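This is the manifest referenced (commented out) in the OPEN TELEMETRY section of initscript.sh; applying and smoke-testing it would look roughly like this, with the namespace taken from .env.example:

```bash
# Deploy the collector into the operator's namespace
kubectl -n "$OPENTELEMETRY_NAMESPACE" apply -f .devcontainer/helm/opentelemetry-collector.yml

# The Service and the Deployment pods share the component=otel-collector label
kubectl -n "$OPENTELEMETRY_NAMESPACE" get svc,pods -l component=otel-collector
```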
.devcontainer/initscript.sh (Executable file, 170 lines added)
@@ -0,0 +1,170 @@
#!/bin/bash

## For debugging
# set -eux


#############################
### DEV PACKAGES
#############################
export RAKE_VER=0.1.7

curl -L https://$PRIVATE_GIT_TOKEN@registry.vegastar.vn/vegacloud/make/releases/download/$RAKE_VER/rake-$RAKE_VER-x86_64-unknown-linux-musl.tar.gz | tar xzv -C /tmp/
sudo install -o root -g root -m 0755 /tmp/rake-$RAKE_VER-x86_64-unknown-linux-musl/rake /usr/local/bin/rake

#############################
### KUBECTL
#############################

## Configure kubectl
mkdir -p ~/.kube
cp ${PWD}/.devcontainer/kubeconfig.yaml ~/.kube/config
sed -i 's/127.0.0.1/k3s-server/g' ~/.kube/config

## Allow insecure connections
shopt -s expand_aliases
echo 'alias kubectl="kubectl --insecure-skip-tls-verify"' >> ~/.bashrc
echo 'alias k="kubectl --insecure-skip-tls-verify"' >> ~/.bashrc

#############################
### NGINX INGRESS
#############################

kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-$NGINX_INGRESS_VER/deploy/static/provider/cloud/deploy.yaml
cat <<EOT >> /tmp/nginx-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller-loadbalancer
  namespace: ingress-nginx
spec:
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 80
    - name: https
      port: 443
      protocol: TCP
      targetPort: 443
  type: LoadBalancer
EOT
kubectl apply -f /tmp/nginx-service.yaml
rm -f /tmp/nginx-service.yaml

#############################
### OPEN TELEMETRY
#############################
# kubectl create namespace $JAEGER_NAMESPACE
# kubectl create namespace $OPENTELEMETRY_NAMESPACE

# ## cert-manager
# kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml

# ## install jaeger
# helm repo add jaegertracing https://jaegertracing.github.io/helm-charts
# helm install jaeger jaegertracing/jaeger -n $JAEGER_NAMESPACE
# kubectl -n $JAEGER_NAMESPACE get po

# ## open telemetry operator
# kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml

# ## create an OpenTelemetry Collector instance
# kubectl -n $OPENTELEMETRY_NAMESPACE apply -f .devcontainer/helm/opentelemetry-collector.yml

#############################
### FISSION PODS
#############################
kubectl create namespace $FISSION_NAMESPACE

## install with helm
kubectl create -k "github.com/fission/fission/crds/v1?ref=${FISSION_VER}"
helm repo add fission-charts https://fission.github.io/fission-charts/ && helm repo update
kubectl apply -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: fission
---
apiVersion: v1
kind: Namespace
metadata:
  name: gh-eom
EOF
kubectl apply -f - <<EOF
type: kubernetes.io/dockerconfigjson
apiVersion: v1
kind: Secret
metadata:
  name: vega-container-registry
  namespace: fission
data:
  .dockerconfigjson: >-
    eyJhdXRocyI6eyJyZWdpc3RyeS52ZWdhc3Rhci52biI6eyJ1c2VybmFtZSI6InRpZW5kZCIsInBhc3N3b3JkIjoiYTBjY2JjMDVjNzMyYzExMjU3OTg1NjMwNjY5ZTFjNjEyNDg0NzU1MyIsImF1dGgiOiJkR2xsYm1Sa09tRXdZMk5pWXpBMVl6Y3pNbU14TVRJMU56azROVFl6TURZMk9XVXhZell4TWpRNE5EYzFOVE09In19fQ==
EOF
helm upgrade --install fission fission-charts/fission-all --namespace $FISSION_NAMESPACE -f - <<EOF
imagePullSecrets:
  - name: vega-container-registry
defaultNamespace: default
additionalFissionNamespaces:
  - gh-eom
EOF



kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: router
  namespace: fission
spec:
  ingressClassName: nginx
  rules:
    - http:
        paths:
          - path: /ai
            pathType: Prefix
            backend:
              service:
                name: router
                port:
                  number: 80
EOF

# ## install without helm
# kubectl create -k "github.com/fission/fission/crds/v1?ref=${FISSION_VER}"
# kubectl create namespace $FISSION_NAMESPACE
# kubectl config set-context --current --namespace=$FISSION_NAMESPACE
# kubectl apply -f https://github.com/fission/fission/releases/download/${FISSION_VER}/fission-all-${FISSION_VER}-minikube.yaml
# kubectl config set-context --current --namespace=default # change context back to the default namespace after installation


#############################
### PROMETHEUS AND GRAFANA
#############################
# kubectl create namespace $METRICS_NAMESPACE

# helm repo add prometheus-community https://prometheus-community.github.io/helm-charts && helm repo update
# helm install prometheus prometheus-community/kube-prometheus-stack -n $METRICS_NAMESPACE

#############################
### UPDATE FISSION
#############################

# helm upgrade fission fission-charts/fission-all --namespace $FISSION_NAMESPACE -f .devcontainer/helm/fission-values.yaml

#############################
### PORT FORWARDING
#############################

## To access jaeger-query, you can use Kubernetes port forwarding
# kubectl -n jaeger port-forward svc/jaeger-query 8080:80 --address='0.0.0.0'
## To access Grafana, you can use Kubernetes port forwarding
# kubectl --namespace monitoring port-forward svc/prometheus-grafana 3000:80
## For the password, you'll need to run the following command:
# kubectl get secret --namespace monitoring prometheus-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
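A quick way to confirm the bootstrap succeeded from inside the devcontainer; both commands are also used by this commit's workflows:

```bash
# Verify the Fission CLI can reach a healthy control plane
fission check

# All Fission components should be Running in the fission namespace
kubectl get pods -n fission
```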
.fission/access-rules.yaml (Normal file, 398 lines added)
@@ -0,0 +1,398 @@
# - id: 'ai-work-create'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/admin/tags'
#     methods:
#       - POST
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::create","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-view'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/admin/tags'
#     methods:
#       - GET
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::view","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-update'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>'
#     methods:
#       - PUT
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::update","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-delete'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>'
#     methods:
#       - DELETE
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::delete","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-ref-create'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>/refs'
#     methods:
#       - POST
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::create","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-ref-view'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>/refs'
#     methods:
#       - GET
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::view","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-ref-delete'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/admin/tags/<[0-9a-fA-F\-]{36}>/refs/<[0-9a-fA-F\-]{36}>'
#     methods:
#       - DELETE
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::delete","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-owner-create'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/users/tags'
#     methods:
#       - POST
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::owner::create","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-owner-view'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/users/tags'
#     methods:
#       - GET
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::owner::view","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-owner-update'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/users/tags/<[0-9a-fA-F\-]{36}>'
#     methods:
#       - PUT
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::owner::update","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-owner-delete'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/users/tags/<[0-9a-fA-F\-]{36}>'
#     methods:
#       - DELETE
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::owner::delete","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-ref-owner-create'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/users/tags/<[0-9a-fA-F\-]{36}>/refs'
#     methods:
#       - POST
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::owner::create","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-ref-owner-view'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/users/tags/<[0-9a-fA-F\-]{36}>/refs'
#     methods:
#       - GET
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::owner::view","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-ref-owner-delete'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/users/tags/<[0-9a-fA-F\-]{36}>/refs/<[0-9a-fA-F\-]{36}>'
#     methods:
#       - DELETE
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::owner::delete","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-ref-owner-create-bulk'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/users/tags/ref-bulk'
#     methods:
#       - POST
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::owner::create::bulk","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-ref-create-bulk'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/admin/tags/ref-bulk'
#     methods:
#       - POST
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::create::bulk","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-ref-owner-view-bulk'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/users/tags/refs'
#     methods:
#       - GET
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::ref::owner::view::bulk","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-owner-create-default'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/tags/users/defaults/instantiate'
#     methods:
#       - POST
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::owner::create::default","relation":"access"}
#   mutators:
#     - handler: header

# - id: 'ai-work-owner-view-kind-ticket'
#   description: 'This description'
#   upstream:
#     preserve_host: true
#     url: 'http://router.fission.svc.cluster.local:80'
#     strip_path: 'v1/'
#   match:
#     url: 'https://api.geohub.vn/v1/ailbl/tags/users/tags-ticket'
#     methods:
#       - GET
#   authenticators:
#     - handler: cookie_session
#   authorizer:
#     handler: remote_json
#     config:
#       remote: 'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check'
#       payload: >-
#         {"namespace":"user_group_access","subject_id":"{{print .Subject}}","object":"acp::ailbl::tag::owner::view::kind-ticket","relation":"access"}
#   mutators:
#     - handler: header
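Every rule above delegates authorization to the same Keto endpoint with a different `object`. The check a rule performs can be reproduced by hand; this is a sketch that assumes Keto's read API accepts the payload via POST the way Oathkeeper's remote_json authorizer sends it, and `<subject>` is a placeholder you would fill in:

```bash
# Ask Keto whether a subject may create tags (payload copied from 'ai-work-create')
curl -sS -X POST \
  'http://keto-service.ory-stack.svc.cluster.local:4466/relation-tuples/check' \
  -H 'Content-Type: application/json' \
  -d '{"namespace":"user_group_access","subject_id":"<subject>","object":"acp::ailbl::tag::create","relation":"access"}'
```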
.fission/deployment.json (Normal file, 55 lines added)
@@ -0,0 +1,55 @@
{
    "namespace": "default",
    "environments": {
        "work-py": {
            "image": "ghcr.io/fission/python-env",
            "builder": "ghcr.io/fission/python-builder",
            "imagepullsecret": "vega-container-registry",
            "runtime_envs": [
                "AUDITLOG_ES_ENDPOINT=http://opensearch.observability.svc.cluster.local:9200",
                "AUDITLOG_ES_INDEX=faas_auditlogs",
                "AUDITLOG_ES_BASICAUTH_USER=",
                "AUDITLOG_ES_BASICAUTH_PASSWORD="
            ],
            "mincpu": 50,
            "maxcpu": 100,
            "minmemory": 50,
            "maxmemory": 500,
            "poolsize": 1
        }
    },
    "archives": {
        "package.zip": {
            "sourcepath": "apps"
        }
    },
    "packages": {
        "ai-work": {
            "buildcmd": "./build.sh",
            "sourcearchive": "package.zip",
            "env": "work-py"
        }
    },
    "function_common": {
        "pkg": "ai-work",
        "secrets": [
            "fission-ai-work-env"
        ],
        "executor": {
            "select": "newdeploy",
            "newdeploy": {
                "minscale": 1,
                "maxscale": 1
            },
            "poolmgr": {
                "concurrency": 1,
                "requestsperpod": 1,
                "onceonly": false
            }
        },
        "mincpu": 50,
        "maxcpu": 100,
        "minmemory": 50,
        "maxmemory": 500
    }
}
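This file is consumed by the private `rake` tool used in the workflows, so the exact mapping is not visible here; as a rough sketch, the `work-py` environment corresponds to a Fission CLI call like the following (the flags exist in Fission v1.x, but treating this as the equivalent of the JSON above is an assumption):

```bash
# Hypothetical hand-rolled equivalent of the "work-py" environment (assumption)
fission env create --name work-py \
  --image ghcr.io/fission/python-env \
  --builder ghcr.io/fission/python-builder \
  --mincpu 50 --maxcpu 100 \
  --minmemory 50 --maxmemory 500 \
  --poolsize 1
```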
.fission/dev-deployment.json (Normal file, 20 lines added)
@@ -0,0 +1,20 @@
{
    "namespace": "labelhub",
    "secrets": {
        "fission-ai-work-env": {
            "literals": [
                "PG_HOST=160.30.113.113",
                "PG_PORT=45432",
                "PG_DB=postgres",
                "PG_USER=postgres",
                "PG_PASS=q2q32RQx9R9qVAp3vkVrrASnSUUhzKvC"
            ]
        },
        "vega-container-registry": {
            "kind": "docker-registry",
            "server": "registry.vegastar.vn",
            "username": "deployer",
            "password": "13814592a688195094d0ee38c995245053af26ca"
        }
    }
}
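The deployment tooling presumably materializes these entries as Kubernetes secrets; a hand-written equivalent using standard kubectl subcommands would look roughly like this (the tool's actual behavior is an assumption):

```bash
# Literal key/value secret for the function environment
kubectl -n labelhub create secret generic fission-ai-work-env \
  --from-literal=PG_HOST=160.30.113.113 \
  --from-literal=PG_PORT=45432 \
  --from-literal=PG_DB=postgres \
  --from-literal=PG_USER=postgres \
  --from-literal=PG_PASS=q2q32RQx9R9qVAp3vkVrrASnSUUhzKvC

# Image pull secret for the private registry
kubectl -n labelhub create secret docker-registry vega-container-registry \
  --docker-server=registry.vegastar.vn \
  --docker-username=deployer \
  --docker-password=13814592a688195094d0ee38c995245053af26ca
```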
.fission/local-deployment.json (Normal file, 20 lines added)
@@ -0,0 +1,20 @@
{
    "namespace": "default",
    "secrets": {
        "fission-ai-work-env": {
            "literals": [
                "PG_HOST=160.30.113.113",
                "PG_PORT=45432",
                "PG_DB=postgres",
                "PG_USER=postgres",
                "PG_PASS=q2q32RQx9R9qVAp3vkVrrASnSUUhzKvC"
            ]
        },
        "vega-container-registry": {
            "kind": "docker-registry",
            "server": "registry.vegastar.vn",
            "username": "deployer",
            "password": "13814592a688195094d0ee38c995245053af26ca"
        }
    }
}
.fission/staging-deployment.json (Normal file, 14 lines added)
@@ -0,0 +1,14 @@
{
    "namespace": "default",
    "secrets": {
        "fission-ai-work-env": {
            "literals": [
                "PG_HOST=160.30.113.113",
                "PG_PORT=45432",
                "PG_DB=postgres",
                "PG_USER=postgres",
                "PG_PASS=q2q32RQx9R9qVAp3vkVrrASnSUUhzKvC"
            ]
        }
    }
}
.fission/test-deployment.json (Normal file, 14 lines added)
@@ -0,0 +1,14 @@
{
    "namespace": "default",
    "secrets": {
        "fission-ai-work-env": {
            "literals": [
                "PG_HOST=160.30.113.113",
                "PG_PORT=45432",
                "PG_DB=postgres",
                "PG_USER=postgres",
                "PG_PASS=q2q32RQx9R9qVAp3vkVrrASnSUUhzKvC"
            ]
        }
    }
}
.gitea/workflows/analystic-dispatch.yaml (Normal file, 30 lines added)
@@ -0,0 +1,30 @@
name: "K8S Fission Code Analytics"
on:
  workflow_dispatch:
jobs:
  sonarqube:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: 🔍 SonarQube Scan
        id: scan
        uses: sonarsource/sonarqube-scan-action@master
        env:
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
          SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }}
        with:
          args: >
            -Dsonar.projectKey=${{ github.event.repository.name }} -Dsonar.sources=.
      - name: 🔔 Send notification
        uses: appleboy/telegram-action@master
        if: always()
        with:
          to: ${{ secrets.TELEGRAM_TO }}
          token: ${{ secrets.TELEGRAM_TOKEN }}
          format: markdown
          socks5: ${{ secrets.TELEGRAM_PROXY_URL != '' && secrets.TELEGRAM_PROXY_URL || '' }}
          message: |
            ${{ steps.scan.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} Scanned ${{ github.event.repository.name }}
            *Msg*: `${{ github.event.commits[0].message }}`
.gitea/workflows/dev-deployment.yaml (Normal file, 69 lines added)
@@ -0,0 +1,69 @@
name: "K8S Fission Deployment"
on:
  push:
    branches: [ 'main' ]
jobs:
  deployment-fission:
    name: Deployment fission functions
    runs-on: ubuntu-latest
    env:
      RUNNER_TOOL_CACHE: /toolcache
      FISSION_PROFILE: DEV
      FISSION_VER: 1.21.0
      RAKE_VER: 0.1.7
    steps:
      - name: ☸️ Setup kubectl
        uses: azure/setup-kubectl@v4
      - name: 🔄 Cache
        id: cache
        uses: actions/cache@v4
        with:
          path: |
            /usr/local/bin/rake
            /usr/local/bin/fission
          key: ${{ runner.os }}-${{ github.event.repository.name }}-${{ hashFiles('.fission/deployment.json') }}
      - name: ☘️ Configure Kubeconfig
        uses: azure/k8s-set-context@v4
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets[format('{0}_KUBECONFIG', env.FISSION_PROFILE)] }}
      - name: 🔄 Install Dependencies
        if: steps.cache.outputs.cache-hit != 'true'
        run: |
          curl -L "https://${{ secrets.REGISTRY_PASSWORD }}@registry.vegastar.vn/vegacloud/make/releases/download/${RAKE_VER}/rake-${RAKE_VER}-x86_64-unknown-linux-musl.tar.gz" | tar xzv -C /tmp/
          curl -L "https://github.com/fission/fission/releases/download/v${FISSION_VER}/fission-v${FISSION_VER}-linux-amd64" --output /tmp/fission
          install -o root -g root -m 0755 /tmp/rake-${RAKE_VER}-x86_64-unknown-linux-musl/rake /usr/local/bin/rake
          install -o root -g root -m 0755 /tmp/fission /usr/local/bin/fission
          fission check
          # rake cfg install fission -f
      - name: 🕓 Checkout the previous codes
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.before }}
      - name: ♻️ Remove the previous version
        # continue-on-error: true
        run: |
          echo "use profile [$FISSION_PROFILE]"
          mkdir -p manifests || true
          rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
          rake sp build -fi && rake sp down -i
      - name: 🔎 Checkout repository
        uses: actions/checkout@v4
      - name: ✨ Deploy the new version
        id: deploy
        run: |
          echo "use profile [$FISSION_PROFILE]"
          mkdir -p manifests || true
          rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
          rake sp build -fi && rake sp up -i
      - name: 🔔 Send notification
        uses: appleboy/telegram-action@master
        if: always()
        with:
          to: ${{ secrets.TELEGRAM_TO }}
          token: ${{ secrets.TELEGRAM_TOKEN }}
          format: markdown
          socks5: ${{ secrets.TELEGRAM_PROXY_URL != '' && secrets.TELEGRAM_PROXY_URL || '' }}
          message: |
            ${{ steps.deploy.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} Install fn ${{ github.event.repository.name }}
            *Msg*: `${{ github.event.commits[0].message }}`
.gitea/workflows/install-dispatch.yaml (Normal file, 74 lines added)
@@ -0,0 +1,74 @@
name: "K8S Fission Deployment"
on:
  workflow_dispatch:
    inputs:
      profile:
        description: "Profile to execute to"
        required: true
        type: choice
        options: [ DEV, TEST, STAGING ]
jobs:
  deployment-fission:
    name: Deployment fission functions
    runs-on: ubuntu-latest
    env:
      RUNNER_TOOL_CACHE: /toolcache
      FISSION_PROFILE: ${{ github.event.inputs.profile }}
      FISSION_VER: 1.21.0
      RAKE_VER: 0.1.7
    steps:
      - name: ☸️ Setup kubectl
        uses: azure/setup-kubectl@v4
      - name: ☘️ Configure Kubeconfig
        uses: azure/k8s-set-context@v4
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets[format('{0}_KUBECONFIG', github.event.inputs.profile)] }}
      - name: 🔄 Cache
        id: cache
        uses: actions/cache@v4
        with:
          path: |
            /usr/local/bin/rake
            /usr/local/bin/fission
          key: ${{ runner.os }}-${{ github.event.repository.name }}-${{ hashFiles('.fission/deployment.json') }}
      - name: 🔄 Install Dependencies
        if: steps.cache.outputs.cache-hit != 'true'
        run: |
          curl -L "https://${{ secrets.REGISTRY_PASSWORD }}@registry.vegastar.vn/vegacloud/make/releases/download/${RAKE_VER}/rake-${RAKE_VER}-x86_64-unknown-linux-musl.tar.gz" | tar xzv -C /tmp/
          curl -L "https://github.com/fission/fission/releases/download/v${FISSION_VER}/fission-v${FISSION_VER}-linux-amd64" --output /tmp/fission
          install -o root -g root -m 0755 /tmp/rake-${RAKE_VER}-x86_64-unknown-linux-musl/rake /usr/local/bin/rake
          install -o root -g root -m 0755 /tmp/fission /usr/local/bin/fission
          fission check
          # rake cfg install fission -f
      - name: 🕓 Checkout the previous codes
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.before }}
      - name: ♻️ Remove the previous version
        # continue-on-error: true
        run: |
          echo "use profile [$FISSION_PROFILE]"
          mkdir -p manifests || true
          rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
          rake sp build -fi && rake sp down -i
      - name: 🔎 Checkout repository
        uses: actions/checkout@v4
      - name: ✨ Deploy the new version
        id: deploy
        run: |
          echo "use profile [$FISSION_PROFILE]"
          mkdir -p manifests || true
          rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
          rake sp build -fi && rake sp up -i
      - name: 🔔 Send notification
        uses: appleboy/telegram-action@master
        if: always()
        with:
          to: ${{ secrets.TELEGRAM_TO }}
          token: ${{ secrets.TELEGRAM_TOKEN }}
          format: markdown
          socks5: ${{ secrets.TELEGRAM_PROXY_URL != '' && secrets.TELEGRAM_PROXY_URL || '' }}
          message: |
            ${{ steps.deploy.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} Install fn ${{ github.event.repository.name }}
            *Msg*: `${{ github.event.commits[0].message }}`
.gitea/workflows/staging-deployment.yaml (Normal file, 79 lines added)
@@ -0,0 +1,79 @@
name: "K8S Fission Deployment"
on:
  workflow_dispatch:
    inputs:
      profile:
        description: "Profile to execute to"
        required: true
        type: choice
        options: [ DEV, TEST, STAGING ]
jobs:
  deployment-fission:
    name: Deployment fission functions
    runs-on: ubuntu-latest
    env:
      RUNNER_TOOL_CACHE: /toolcache
    steps:
      - name: 🍀 Extract branch name
        run: echo "K8S_PROFILE=`echo ${GITHUB_REF_NAME:-${GITHUB_REF#refs/heads/}} | tr '[:lower:]' '[:upper:]'`" >> $GITHUB_ENV
      - name: ☸️ Setup kubectl
        uses: azure/setup-kubectl@v4
      - name: 🛠️ Configure Kubeconfig
        uses: azure/k8s-set-context@v4
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets[format('{0}_KUBECONFIG', env.K8S_PROFILE)] }}
      - name: ⇩ Download PyMake CLI
        uses: ethanjli/cached-download-action@v0.1.7
        with:
          url: https://bin.rhosted.com/RhdpPK.py
          destination: /tmp/pymake
          cache-key: pymake-cli
      - name: ⇩ Download fission CLI
        uses: ethanjli/cached-download-action@v0.1.7
        with:
          url: https://github.com/fission/fission/releases/download/v1.21.0/fission-v1.21.0-linux-amd64
          destination: /tmp/fission
          cache-key: fission-cli
      - name: 🔨 Install Tools
        run: |
          # curl -L "https://github.com/fission/fission/releases/download/v1.21.0/fission-v1.21.0-linux-amd64" --output /tmp/fission
          install -o root -g root -m 0755 /tmp/pymake /usr/local/bin/pymake
          install -o root -g root -m 0755 /tmp/fission /usr/local/bin/fission
          fission check
      - name: cache fission cli
        uses: actions/cache@v3
        with:
          path: |
            /usr/local/bin/fission
          key: go_path-${{ steps.hash-go.outputs.hash }}
          restore-keys: |-
            go_cache-${{ steps.hash-go.outputs.hash }}
      - name: 👀 Checkout the previous codes
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event.before }}
      - name: 🧹 Remove the previous version
        continue-on-error: true
        run: |
          fission specs destroy --force
          kubectl delete -R -f manifests
      - name: 👀 Checkout repository
        id: checkout-new-code
        if: always()
        uses: actions/checkout@v4
      - name: ✨ Deploy the new version
        id: deploy
        run: |
          kubectl apply -R -f manifests
          fission specs apply --wait
      - name: 🔔 Send notification
        uses: appleboy/telegram-action@master
        if: always() # This ensures the step runs even if previous steps fail
        with:
          to: ${{ secrets.TELEGRAM_TO }}
          token: ${{ secrets.TELEGRAM_TOKEN }}
          format: markdown
          message: |
            ${{ steps.deploy.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} ${{ github.repository }}
            *Commit*: ${{ github.event.commits[0].message }}
.gitea/workflows/uninstall-dispatch.yaml (Normal file, 63 lines added)
@@ -0,0 +1,63 @@
name: "K8S Fission Deployment"
on:
  workflow_dispatch:
    inputs:
      profile:
        description: "Profile to execute to"
        required: true
        type: choice
        options: [ DEV, TEST, STAGING ]
jobs:
  destroy-fission:
    name: Destroy fission functions
    runs-on: ubuntu-latest
    env:
      RUNNER_TOOL_CACHE: /toolcache
      FISSION_PROFILE: ${{ github.event.inputs.profile }}
      FISSION_VER: 1.21.0
      RAKE_VER: 0.1.7
    steps:
      - name: ☸️ Setup kubectl
        uses: azure/setup-kubectl@v4
      - name: ☘️ Configure Kubeconfig
        uses: azure/k8s-set-context@v4
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets[format('{0}_KUBECONFIG', github.event.inputs.profile)] }}
      - name: 🔄 Cache
        id: cache
        uses: actions/cache@v4
        with:
          path: |
            /usr/local/bin/rake
            /usr/local/bin/fission
          key: ${{ runner.os }}-${{ github.event.repository.name }}-${{ hashFiles('.fission/deployment.json') }}
      - name: 🔄 Install Dependencies
        if: steps.cache.outputs.cache-hit != 'true'
        run: |
          curl -L "https://${{ secrets.REGISTRY_PASSWORD }}@registry.vegastar.vn/vegacloud/make/releases/download/${RAKE_VER}/rake-${RAKE_VER}-x86_64-unknown-linux-musl.tar.gz" | tar xzv -C /tmp/
          curl -L "https://github.com/fission/fission/releases/download/v${FISSION_VER}/fission-v${FISSION_VER}-linux-amd64" --output /tmp/fission
          install -o root -g root -m 0755 /tmp/rake-${RAKE_VER}-x86_64-unknown-linux-musl/rake /usr/local/bin/rake
          install -o root -g root -m 0755 /tmp/fission /usr/local/bin/fission
          fission check
          # rake cfg install fission -f
      - name: 🔎 Checkout repository
        uses: actions/checkout@v4
      - name: ♻️ Remove
        id: deploy
        run: |
          echo "use profile [$FISSION_PROFILE]"
          mkdir -p manifests || true
          rake sec detail && rake cfm detail && rake env detail && rake pkg detail && rake fn detail && rake ht detail
          rake sp build -fi && rake sp down -i
      - name: 🔔 Send notification
        uses: appleboy/telegram-action@master
        if: always()
        with:
          to: ${{ secrets.TELEGRAM_TO }}
          token: ${{ secrets.TELEGRAM_TOKEN }}
          format: markdown
          socks5: ${{ secrets.TELEGRAM_PROXY_URL != '' && secrets.TELEGRAM_PROXY_URL || '' }}
          message: |
            ${{ steps.deploy.outcome == 'success' && '🟢 (=^ ◡ ^=)' || '🔴 (。•́︿•̀。)' }} Uninstall fn ${{ github.event.repository.name }}
            *Msg*: `${{ github.event.commits[0].message }}`
.gitignore (vendored Normal file, 191 lines added)
@@ -0,0 +1,191 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
# *.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

## Ignore Temporary directory of Dagster
/tmp*

## Devcontainer cache files, that will make the devcontainer start faster after the first run
/.vscache/.vscode-server/*
!/.vscache/.vscode-server/.gitkeep
/.vscache/.devcontainer/*
!/.vscache/.devcontainer/.gitkeep

## Ignore K3S config file
/.devcontainer/kubeconfig.yaml

## Ignore packaged files
/*.zip
# !/package.zip
/*.bak

## Ignore Makefile, it will come with the `pymake` package
Makefile

## Ignore fission's specs files
/specs/*
!/specs/fission-deployment-config.yaml
!/specs/README

/manifests/*

/fission-dumps
ai_test.openapi.yaml (Normal file, 112 lines added)
@@ -0,0 +1,112 @@
openapi: 3.0.1
info:
  title: ai_test
  description: ''
  version: 1.0.0
tags: []
paths:
  /ai/admin/users:
    get:
      summary: filter
      deprecated: false
      description: ''
      tags: []
      parameters: []
      responses:
        '200':
          description: ''
          content:
            application/json:
              schema:
                type: object
                properties: {}
      security: []
    post:
      summary: create
      deprecated: false
      description: ''
      tags: []
      parameters: []
      requestBody:
        content:
          application/json:
            schema:
              type: object
              properties:
                name:
                  type: string
                dob:
                  type: string
                email:
                  type: string
                gender:
                  type: string
              required:
                - name
                - dob
                - email
                - gender
            example:
              name: Duc Nguyen
              dob: '2002-09-25'
              email: ducit2509@gmail.com
              gender: MALE
      responses:
        '200':
          description: ''
          content:
            application/json:
              schema:
                type: object
                properties: {}
      security: []
  /:
    get:
      summary: delete
      deprecated: false
      description: ''
      tags: []
      parameters: []
      responses:
        '200':
          description: ''
          content:
            application/json:
              schema:
                type: object
                properties: {}
      security: []
components:
  schemas: {}
  responses:
    Record not found:
      description: ''
      content:
        application/json:
          schema:
            type: object
            properties:
              code:
                type: integer
              message:
                type: string
            required:
              - code
              - message
    Invalid input:
      description: ''
      content:
        application/json:
          schema:
            type: object
            properties:
              code:
                type: integer
              message:
                type: string
            required:
              - code
              - message
  securitySchemes: {}
servers: []
security: []
0
apps/__init__.py
Normal file
15
apps/build.sh
Executable file
@@ -0,0 +1,15 @@
#!/bin/sh
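# SRC_PKG and DEPLOY_PKG are environment variables provided by the Fission builder
# container: SRC_PKG is the unpacked source archive, DEPLOY_PKG the build output dir.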
ID=$( grep "^ID=" /etc/os-release | awk -F= '{print $2}' )

if [ "${ID}" = "debian" ]
then
    apt-get update && apt-get install -y gcc libpq-dev python3-dev
else
    apk update && apk add gcc postgresql-dev python3-dev
fi

if [ -f "${SRC_PKG}/requirements.txt" ]
then
    pip3 install -r "${SRC_PKG}/requirements.txt" -t "${SRC_PKG}"
fi
cp -r "${SRC_PKG}" "${DEPLOY_PKG}"
238
apps/filter_insert.py
Normal file
@@ -0,0 +1,238 @@
import dataclasses
import enum
import typing
import uuid

from flask import current_app, jsonify, request
from helpers import (
    CORS_HEADERS,
    db_row_to_dict,
    db_rows_to_array,
    init_db_connection,
    str_to_bool,
)
from psycopg2 import IntegrityError
from pydantic_core import ValidationError
from schemas import AiUserCreate


def main():
    """
    ```fission
    {
        "name": "ai-admin-filter-create-user",
        "fntimeout": 300,
        "http_triggers": {
            "ai-admin-filter-create-user-http": {
                "url": "/ai/admin/users",
                "methods": ["POST", "GET"]
            }
        }
    }
    ```
    """
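    # The ```fission metadata above declares one HTTP trigger that routes both GET
    # and POST on /ai/admin/users to this function, so main() dispatches on method.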
    try:
        if request.method == "GET":
            return make_filter_request()
        elif request.method == "POST":
            return make_insert_request()
        else:
            return {"error": "Method not allowed"}, 405, CORS_HEADERS
    except Exception as err:
        print(f"ErrorType={type(err)}")
        return {"error": str(err)}, 500, CORS_HEADERS


def make_insert_request():
    try:
        body = AiUserCreate(**(request.get_json(silent=True) or {}))
    except ValidationError as e:
        return jsonify({"errorCode": "VALIDATION_ERROR", "details": e.errors()}), 400, CORS_HEADERS

    sql = """
        INSERT INTO public.ai_user (id, name, dob, email, gender)
        VALUES (%s, %s, %s, %s, %s)
        RETURNING id, name, dob, email, gender, created, modified
    """
    conn = None
    try:
        conn = init_db_connection()
        with conn:
            with conn.cursor() as cur:
                cur.execute(sql, (str(uuid.uuid4()), body.name,
                                  body.dob, body.email, body.gender))
                row = cur.fetchone()
                return jsonify(db_row_to_dict(cur, row)), 201, CORS_HEADERS
    except IntegrityError as e:
        # unique constraint violated (duplicate record)
        return jsonify({"errorCode": "DUPLICATE_USER", "details": str(e)}), 409, CORS_HEADERS
    except Exception as err:
        return jsonify({"error": str(err)}), 500, CORS_HEADERS
    finally:
        if conn:
            conn.close()


def make_filter_request():
    paging = UserPage.from_request_queries()

    conn = None
    try:
        conn = init_db_connection()
        with conn.cursor() as cursor:
            records = __filter_users(cursor, paging)
            return jsonify(records), 200, CORS_HEADERS
    finally:
        if conn is not None:
            conn.close()
            current_app.logger.info("Close DB connection")


def __filter_users(cursor, paging: "UserPage"):
    conditions = []
    values = {}

    if paging.filter.ids:
        conditions.append("id = ANY(%(ids)s)")
        values["ids"] = paging.filter.ids

    if paging.filter.keyword:
        conditions.append("(name ILIKE %(keyword)s OR email ILIKE %(keyword)s)")
        values["keyword"] = f"%{paging.filter.keyword.lower()}%"

    if paging.filter.name:
        conditions.append("LOWER(name) LIKE %(name)s")
        values["name"] = f"%{paging.filter.name.lower()}%"

    if paging.filter.email:
        conditions.append("LOWER(email) LIKE %(email)s")
        values["email"] = f"%{paging.filter.email.lower()}%"

    if paging.filter.gender:
        conditions.append("gender = %(gender)s")
        values["gender"] = paging.filter.gender

    if paging.filter.created_from:
        conditions.append("created >= %(created_from)s")
        values["created_from"] = paging.filter.created_from

    if paging.filter.created_to:
        conditions.append("created <= %(created_to)s")
        values["created_to"] = paging.filter.created_to

    if paging.filter.modified_from:
        conditions.append("modified >= %(modified_from)s")
        values["modified_from"] = paging.filter.modified_from

    if paging.filter.modified_to:
        conditions.append("modified <= %(modified_to)s")
        values["modified_to"] = paging.filter.modified_to

    if paging.filter.dob_from:
        conditions.append("dob >= %(dob_from)s")
        values["dob_from"] = paging.filter.dob_from

    if paging.filter.dob_to:
        conditions.append("dob <= %(dob_to)s")
        values["dob_to"] = paging.filter.dob_to

    where_clause = " AND ".join(conditions)
    if where_clause:
        where_clause = "WHERE " + where_clause

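    # sortby is interpolated into the SQL string below, but it is constrained to the
    # UserSortField enum, so it cannot be used for SQL injection.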
order_clause = ""
|
||||
if paging.sortby:
|
||||
direction = "ASC" if paging.asc else "DESC"
|
||||
order_clause = f" ORDER BY {paging.sortby.value} {direction} "
|
||||
|
||||
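    # count(*) OVER () adds the total number of rows matching the WHERE clause
    # (computed before LIMIT/OFFSET) to every returned row.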
sql = f"""
|
||||
SELECT
|
||||
t.*,
|
||||
(
|
||||
SELECT COUNT(*)
|
||||
FROM ai_user r
|
||||
),
|
||||
count(*) OVER() AS total
|
||||
FROM ai_user t
|
||||
{where_clause}
|
||||
{order_clause}
|
||||
LIMIT %(limit)s OFFSET %(offset)s
|
||||
"""
|
||||
values["limit"] = paging.size
|
||||
values["offset"] = paging.page * paging.size
|
||||
|
||||
cursor.execute(sql, values)
|
||||
rows = cursor.fetchall()
|
||||
return db_rows_to_array(cursor, rows)
|
||||
|
||||


@dataclasses.dataclass
class Page:
    page: typing.Optional[int] = None
    size: typing.Optional[int] = None
    asc: typing.Optional[bool] = None

    @classmethod
    def from_request_queries(cls) -> "Page":
        paging = Page()
        paging.page = int(request.args.get("page", 0))
        paging.size = int(request.args.get("size", 8))
        paging.asc = request.args.get("asc", type=str_to_bool)
        return paging


@dataclasses.dataclass
class UserFilter:
    ids: typing.Optional[typing.List[str]] = None
    keyword: typing.Optional[str] = None
    name: typing.Optional[str] = None
    email: typing.Optional[str] = None
    gender: typing.Optional[str] = None
    created_from: typing.Optional[str] = None
    created_to: typing.Optional[str] = None
    modified_from: typing.Optional[str] = None
    modified_to: typing.Optional[str] = None
    dob_from: typing.Optional[str] = None
    dob_to: typing.Optional[str] = None

    @classmethod
    def from_request_queries(cls) -> "UserFilter":
        filter = UserFilter()
        filter.ids = request.args.getlist("filter[ids]")
        filter.keyword = request.args.get("filter[keyword]")
        filter.name = request.args.get("filter[name]")
        filter.email = request.args.get("filter[email]")
        filter.gender = request.args.get("filter[gender]")
        filter.created_to = request.args.get("filter[created_to]")
        filter.created_from = request.args.get("filter[created_from]")
        filter.modified_from = request.args.get("filter[modified_from]")
        filter.modified_to = request.args.get("filter[modified_to]")
        filter.dob_from = request.args.get("filter[dob_from]")
        filter.dob_to = request.args.get("filter[dob_to]")
        return filter


class UserSortField(str, enum.Enum):
    CREATED = "created"
    MODIFIED = "modified"


@dataclasses.dataclass
class UserPage(Page):
    sortby: typing.Optional[UserSortField] = None
    filter: typing.Optional[UserFilter] = dataclasses.field(
        default_factory=UserFilter.from_request_queries
    )

    @classmethod
    def from_request_queries(cls) -> "UserPage":
        base = super(UserPage, cls).from_request_queries()
        paging = UserPage(**dataclasses.asdict(base))

        sortby = request.args.get("sortby")
        if sortby:
            try:
                paging.sortby = UserSortField[sortby.upper()]
            except KeyError:
                try:
                    paging.sortby = UserSortField(sortby)
                except ValueError:
                    paging.sortby = None

        return paging
106
apps/helpers.py
Normal file
@@ -0,0 +1,106 @@
import datetime
import logging
import socket

import psycopg2
from flask import current_app
from psycopg2.extras import LoggingConnection


CORS_HEADERS = {
    "Content-Type": "application/json",
}
SECRET_NAME = "fission-ai-work-env"
CONFIG_NAME = "fission-ai-work-config"
K8S_NAMESPACE = "default"


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)


def init_db_connection():
    db_host = get_secret("PG_HOST", "127.0.0.1")
    db_port = int(get_secret("PG_PORT", 5432))

    if not check_port_open(ip=db_host, port=db_port):
        raise Exception(f"Cannot establish a database connection to `{db_host}:{db_port}`")

    # options = get_secret("PG_DBSCHEMA")
    # if options:
    #     options = f"-c search_path={options}"  # if specific db schema
    conn = psycopg2.connect(
        database=get_secret("PG_DB", "postgres"),
        user=get_secret("PG_USER", "postgres"),
        password=get_secret("PG_PASS", "secret"),
        host=db_host,
        port=db_port,
        # options=options,
        # cursor_factory=NamedTupleCursor,
        connection_factory=LoggingConnection,
    )
    conn.initialize(logger)
    return conn


def db_row_to_dict(cursor, row):
    record = {}
    for i, column in enumerate(cursor.description):
        data = row[i]
        if isinstance(data, datetime.datetime):
            data = data.isoformat()
        record[column.name] = data
    return record


def db_rows_to_array(cursor, rows):
    return [db_row_to_dict(cursor, row) for row in rows]


def get_current_namespace() -> str:
    try:
        with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
            namespace = f.read()
    except Exception as err:
        current_app.logger.error(err)
        namespace = K8S_NAMESPACE
    return str(namespace)


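# Fission mounts Kubernetes secrets referenced by a function under
# /secrets/<namespace>/<secret-name>/<key> (and configmaps under
# /configs/<namespace>/<configmap-name>/<key>), one plain-text file per key.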
def get_secret(key: str, default=None):
    namespace = get_current_namespace()
    path = f"/secrets/{namespace}/{SECRET_NAME}/{key}"
    try:
        with open(path, "r") as f:
            return f.read()
    except Exception as err:
        current_app.logger.error("%s: %s", path, err)
        return default


def get_config(key: str, default=None):
    namespace = get_current_namespace()
    path = f"/configs/{namespace}/{CONFIG_NAME}/{key}"
    try:
        with open(path, "r") as f:
            return f.read()
    except Exception as err:
        current_app.logger.error("%s: %s", path, err)
        return default


def str_to_bool(input: str | None) -> bool | None:
    input = input or ""
    # Dictionary to map string values to boolean
    BOOL_MAP = {"true": True, "false": False}
    return BOOL_MAP.get(input.strip().lower(), None)


def check_port_open(ip: str, port: int, timeout: int = 30):
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(timeout)
            result = s.connect_ex((ip, port))
            return result == 0
    except Exception as err:
        current_app.logger.error(f"Check port open error: {err}")
        return False
4
apps/requirements.txt
Normal file
@@ -0,0 +1,4 @@
psycopg2-binary==2.9.10
pydantic==2.11.7
PyNaCl==1.6.0
Flask==3.1.0
50
apps/schemas.py
Normal file
@@ -0,0 +1,50 @@
import re
from datetime import date
from typing import Optional

from pydantic import BaseModel, Field, field_validator


class AiUserCreate(BaseModel):
    id: Optional[str] = None
    name: str = Field(min_length=1, max_length=128)
    # max_length matches email varchar(128) in migrates/schema.sql
    email: str = Field(..., max_length=128)
    dob: Optional[date] = None
    gender: Optional[str] = Field(default=None, max_length=10)

    @field_validator("email")
    def validate_email(cls, v):
        if not re.match(r"^[^@\s]+@[^@\s]+\.[^@\s]+$", v):
            raise ValueError("invalid email")
        return v


class AiUserUpdate(BaseModel):
    name: Optional[str] = Field(default=None, min_length=1, max_length=128)
    email: Optional[str] = Field(default=None, max_length=128)
    dob: Optional[date] = None
    gender: Optional[str] = Field(default=None, max_length=10)

    @field_validator("email")
    def validate_email(cls, v):
        if not re.match(r"^[^@\s]+@[^@\s]+\.[^@\s]+$", v):
            raise ValueError("invalid email")
        return v


class AiUserFilter(BaseModel):
    q: Optional[str] = None
    name: Optional[str] = None
    email: Optional[str] = None
    gender: Optional[str] = None

    dob_from: Optional[date] = None
    dob_to: Optional[date] = None

    created_from: Optional[str] = None
    created_to: Optional[str] = None

    page: int = Field(default=0, ge=0)
    size: int = Field(default=20, ge=1, le=200)
    sortby: str = "modified"
    asc: bool = False
128
apps/update_delete.py
Normal file
@@ -0,0 +1,128 @@
from flask import current_app, jsonify, request
from helpers import CORS_HEADERS, db_row_to_dict, init_db_connection
from psycopg2 import IntegrityError
from pydantic_core import ValidationError
from schemas import AiUserUpdate


def main():
    """
    ```fission
    {
        "name": "ai-admin-update-delete-user",
        "fntimeout": 300,
        "http_triggers": {
            "ai-admin-update-delete-user-http": {
                "url": "/ai/admin/users/{UserID}",
                "methods": ["DELETE", "PUT"]
            }
        }
    }
    ```
    """
    try:
        if request.method == "DELETE":
            return make_delete_request()
        elif request.method == "PUT":
            return make_update_request()
        else:
            return {"error": "Method not allowed"}, 405, CORS_HEADERS
    except Exception as err:
        print(f"ErrorType={type(err)}")
        return {"error": str(err)}, 500, CORS_HEADERS


def make_update_request():
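    # Fission's router passes path parameters from the trigger URL (here {UserID})
    # to the function as headers named X-Fission-Params-<ParamName>.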
    user_id = request.headers.get("X-Fission-Params-UserID")
    if not user_id:
        return jsonify({"errorCode": "MISSING_USER_ID"}), 400, CORS_HEADERS

    try:
        body = AiUserUpdate(**(request.get_json(silent=True) or {}))
    except ValidationError as e:
        return (
            jsonify({"error": "Validation failed", "details": e.errors()}),
            400,
            CORS_HEADERS,
        )
    conn = None
    try:
        conn = init_db_connection()
        with conn:
            with conn.cursor() as cur:
                cur.execute(
                    "SELECT * FROM ai_user WHERE id=%s FOR UPDATE", (user_id,))
                row = cur.fetchone()
                if not row:
                    return jsonify({"errorCode": "USER_NOT_FOUND"}), 404, CORS_HEADERS

                sets, params = [], {"id": user_id}
                if body.name is not None:
                    sets.append("name=%(name)s")
                    params["name"] = body.name

                if body.email is not None:
                    sets.append("email=%(email)s")
                    params["email"] = body.email

                if body.dob is not None:
                    sets.append("dob=%(dob)s")
                    params["dob"] = body.dob

                if body.gender is not None:
                    sets.append("gender=%(gender)s")
                    params["gender"] = body.gender

                sets.append("modified=CURRENT_TIMESTAMP")
                cur.execute(
                    f"UPDATE ai_user SET {', '.join(sets)} WHERE id=%(id)s RETURNING *",
                    params,
                )
                updated = db_row_to_dict(cur, cur.fetchone())
                return jsonify(updated), 200, CORS_HEADERS
    except IntegrityError as e:
        return (
            jsonify({"errorCode": "DUPLICATE_USER", "details": str(e)}),
            409,
            CORS_HEADERS,
        )
    finally:
        if conn:
            conn.close()


def __delete_user(cursor, id: str):
    cursor.execute("SELECT 1 FROM ai_user WHERE id = %(id)s", {"id": id})
    if not cursor.fetchone():
        return "USER_NOT_FOUND"

    cursor.execute("DELETE FROM ai_user WHERE id = %(id)s RETURNING *", {"id": id})
    row = cursor.fetchone()
    return db_row_to_dict(cursor, row)


def make_delete_request():
    user_id = request.headers.get("X-Fission-Params-UserID")
    if not user_id:
        return jsonify({"errorCode": "MISSING_USER_ID"}), 400, CORS_HEADERS

    conn = None
    try:
        conn = init_db_connection()
        with conn.cursor() as cursor:
            result = __delete_user(cursor, id=user_id)
            if result == "USER_NOT_FOUND":
                return jsonify({"errorCode": "USER_NOT_FOUND"}), 404, CORS_HEADERS
            conn.commit()
            return jsonify(result), 200, CORS_HEADERS
    except Exception as ex:
        return jsonify({"error": str(ex)}), 500, CORS_HEADERS
    finally:
        if conn is not None:
            conn.close()
            current_app.logger.info("Close DB connection")
142
apps/vault.py
Normal file
@@ -0,0 +1,142 @@
import base64

import nacl.secret


def string_to_hex(text: str) -> str:
    """
    Convert a string to hexadecimal representation.

    Args:
        text: Input string to convert

    Returns:
        Hexadecimal string representation
    """
    return text.encode("utf-8").hex()


def hex_to_string(hex_string: str) -> str:
    """
    Convert a hexadecimal string back to a regular string.

    Args:
        hex_string: Hexadecimal string to convert

    Returns:
        Decoded string

    Raises:
        ValueError: If hex_string is not valid hexadecimal
    """
    return bytes.fromhex(hex_string).decode("utf-8")


def decrypt_vault(vault: str, key: str) -> str:
    """
    Decrypt a vault string encrypted with PyNaCl SecretBox.

    Vault format: "vault:v1:<base64_encrypted_data>"

    Args:
        vault: Vault-formatted string (e.g., "vault:v1:eW91cl9lbmNyeXB0ZWRfZGF0YQ==")
        key: Hex string representation of 32-byte encryption key

    Returns:
        Decrypted string

    Raises:
        ValueError: If vault format is invalid or key is not valid hex
        nacl.exceptions.CryptoError: If decryption fails (wrong key or corrupted data)
    """
    # Parse vault format
    parts = vault.split(":", 2)
    if len(parts) != 3 or parts[0] != "vault" or parts[1] != "v1":
        raise ValueError("Invalid vault format. Expected 'vault:v1:<encrypted_data>'")

    encrypted_string = parts[2]
    # Convert hex string key to bytes
    key_bytes = bytes.fromhex(key)

    # Create a SecretBox instance with the key
    box = nacl.secret.SecretBox(key_bytes)

    # Decode the base64-encoded encrypted string
    encrypted_data = base64.b64decode(encrypted_string)

    # Decrypt the data
    decrypted_bytes = box.decrypt(encrypted_data)

    # Convert bytes to string
    return decrypted_bytes.decode("utf-8")


def encrypt_vault(plaintext: str, key: str) -> str:
    """
    Encrypt a string and return it in vault format.

    Args:
        plaintext: String to encrypt
        key: Hex string representation of 32-byte encryption key

    Returns:
        Vault-formatted encrypted string (e.g., "vault:v1:<encrypted_data>")

    Raises:
        ValueError: If key is not a valid hex string
    """
    # Convert hex string key to bytes
    key_bytes = bytes.fromhex(key)

    # Create a SecretBox instance with the key
    box = nacl.secret.SecretBox(key_bytes)

    # Encrypt the data (PyNaCl prepends a random 24-byte nonce to the ciphertext)
    encrypted = box.encrypt(plaintext.encode("utf-8"))

    # Encode to base64
    encrypted_string = base64.b64encode(encrypted).decode("utf-8")

    # Return in vault format
    return f"vault:v1:{encrypted_string}"


def is_valid_vault_format(vault: str) -> bool:
    """
    Check if a string is in valid vault format.

    Vault format: "vault:v1:<base64_encrypted_data>"

    Args:
        vault: String to validate

    Returns:
        True if the string matches vault format structure, False otherwise

    Note:
        This only checks the format structure, not whether the data can be decrypted
    """
    # Parse vault format
    parts = vault.split(":", 2)

    # Check basic structure: vault:v1:<data>
    if len(parts) != 3 or parts[0] != "vault" or parts[1] != "v1":
        return False

    encrypted_data = parts[2]

    # Check if data part is not empty
    if not encrypted_data:
        return False

    # Check if data is valid base64
    try:
        decoded = base64.b64decode(encrypted_data)
    except Exception:
        return False

    # Check if decoded data has at least nonce bytes (24 bytes for NaCl)
    if len(decoded) < nacl.secret.SecretBox.NONCE_SIZE:
        return False

    return True
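

# Quick self-test sketch: round-trip a message through encrypt_vault/decrypt_vault
# with a freshly generated 32-byte key (expressed as 64 hex characters).
if __name__ == "__main__":
    import nacl.utils

    demo_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE).hex()
    token = encrypt_vault("hello fission", demo_key)
    assert is_valid_vault_format(token)
    assert decrypt_vault(token, demo_key) == "hello fission"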
9
migrates/schema.sql
Normal file
@@ -0,0 +1,9 @@
CREATE TABLE public.ai_user (
    id varchar(64) PRIMARY KEY NOT NULL,
    "name" varchar(128) NOT NULL,
    dob date NULL,
    email varchar(128) NOT NULL,
    gender varchar(10) NULL,
    created timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP,
    modified timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP
);
42
specs/README
Normal file
@@ -0,0 +1,42 @@

Fission Specs
=============

This is a set of specifications for a Fission app. This includes functions,
environments, and triggers; we collectively call these things "resources".

How to use these specs
----------------------

These specs are handled with the 'fission spec' command. See 'fission spec --help'.

'fission spec apply' will "apply" all resources specified in this directory to your
cluster. That means it checks what resources exist on your cluster, what resources are
specified in the specs directory, and reconciles the difference by creating, updating or
deleting resources on the cluster.

'fission spec apply' will also package up your source code (or compiled binaries) and
upload the archives to the cluster if needed. It uses 'ArchiveUploadSpec' resources in
this directory to figure out which files to archive.

You can use 'fission spec apply --watch' to watch for file changes and continuously keep
the cluster updated.

You can add YAMLs to this directory by writing them manually, but it's easier to generate
them. Use 'fission function create --spec' to generate a function spec,
'fission environment create --spec' to generate an environment spec, and so on.

You can edit any of the files in this directory, except 'fission-deployment-config.yaml',
which contains a UID that you should never change. To apply your changes simply use
'fission spec apply'. An example workflow is sketched below.
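For example, a typical session (illustrative: the names, image, and file path are
placeholders, and it assumes the fission CLI is installed and your kubeconfig points
at the target cluster) might look like:

    fission env create --spec --name python --image fission/python-env
    fission function create --spec --name hello --env python --code hello.py
    fission spec apply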

fission-deployment-config.yaml
------------------------------

fission-deployment-config.yaml contains a UID. This UID is what fission uses to correlate
resources on the cluster to resources in this directory.

All resources created by 'fission spec apply' are annotated with this UID. Resources on
the cluster that are _not_ annotated with this UID are never modified or deleted by
fission.
7
specs/fission-deployment-config.yaml
Normal file
@@ -0,0 +1,7 @@
# This file is generated by the 'fission spec init' command.
# See the README in this directory for background and usage information.
# Do not edit the UID below: that will break 'fission spec apply'
apiVersion: fission.io/v1
kind: DeploymentConfig
name: ai-work
uid: 35893faa-b880-461d-a48e-879039ab2de6