FirstPhone
Some checks failed
K8S Fission Deployment / Deployment fission functions (push) Failing after 21s

QuangMinh_123
2025-12-04 15:41:22 +07:00
commit da64947bae
31 changed files with 1776 additions and 0 deletions


@@ -0,0 +1,50 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/rust
{
  "name": "fission:ailbl-tag",
  // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
  // "image": "mcr.microsoft.com/devcontainers/rust:0-1-bullseye",
  // Use docker compose file
  "dockerComposeFile": "docker-compose.yml",
  "service": "devcontainer",
  "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}",
  // Features to add to the dev container. More info: https://containers.dev/features.
  // "features": {},
  // Configure tool-specific properties.
  "customizations": {
    // Configure properties specific to VS Code.
    "vscode": {
      "settings": {
        "terminal.integrated.defaultProfile.linux": "bash",
        "python.formatting.provider": "black",
        "python.formatting.blackPath": "/usr/local/py-utils/bin/black"
      },
      "extensions": [
        // VS Code specific
        "ms-azuretools.vscode-docker",
        "dbaeumer.vscode-eslint",
        "EditorConfig.EditorConfig",
        // Python specific
        "ms-python.python",
        "ms-python.black-formatter",
        // C++ specific
        "ms-vscode.cpptools",
        "twxs.cmake",
        // Markdown specific
        "yzhang.markdown-all-in-one",
        // YAML formatter
        "kennylong.kubernetes-yaml-formatter",
        // highlight and format `pyproject.toml`
        "tamasfe.even-better-toml"
      ]
    }
  },
  "mounts": [],
  // "runArgs": [
  //   "--env-file",
  //   ".devcontainer/.env"
  // ],
  "postStartCommand": "/workspaces/${localWorkspaceFolderBasename}/.devcontainer/initscript.sh",
  // Use 'forwardPorts' to make a list of ports inside the container available locally.
  "forwardPorts": []
}

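The commented runArgs above and the compose file's env_file below both point at .devcontainer/.env, which is not part of this diff. A minimal sketch of the variables the compose file and initscript.sh expect, with placeholder values that are assumptions rather than values taken from this repo:

PRIVATE_GIT_TOKEN=<token for registry.vegastar.vn>
K3S_VERSION=v1.27.4-k3s1
K3S_TOKEN=secret
NGINX_INGRESS_VER=v1.8.2
FISSION_VER=v1.20.0
FISSION_NAMESPACE=fission
JAEGER_NAMESPACE=jaeger
OPENTELEMETRY_NAMESPACE=opentelemetry-operator-system
METRICS_NAMESPACE=monitoring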

@@ -0,0 +1,58 @@
services:
  devcontainer:
    # All tags available at: https://mcr.microsoft.com/v2/devcontainers/rust/tags/list
    # image: mcr.microsoft.com/vscode/devcontainers/python:3.10-bullseye
    image: registry.vegastar.vn/vegacloud/fission-python:3.10-bullseye
    volumes:
      - ../..:/workspaces:cached
    command: sleep infinity
    env_file:
      - .env
  k3s-server:
    image: "rancher/k3s:${K3S_VERSION:-latest}"
    # command: server --disable traefik --disable servicelb
    command: server --disable traefik
    hostname: k3s-server
    tmpfs: ["/run", "/var/run"]
    ulimits:
      nproc: 65535
      nofile:
        soft: 65535
        hard: 65535
    privileged: true
    restart: always
    environment:
      - K3S_TOKEN=${K3S_TOKEN:-secret}
      - K3S_KUBECONFIG_OUTPUT=/output/kubeconfig.yaml
      - K3S_KUBECONFIG_MODE=666
    volumes:
      - k3s-server:/var/lib/rancher/k3s
      # This is just so that we get the kubeconfig file out
      - .:/output
    ports:
      - 6443 # Kubernetes API Server
      - 80 # Ingress controller port 80
      - 443 # Ingress controller port 443
  k3s-agent:
    image: "rancher/k3s:${K3S_VERSION:-latest}"
    hostname: k3s-agent
    tmpfs: ["/run", "/var/run"]
    ulimits:
      nproc: 65535
      nofile:
        soft: 65535
        hard: 65535
    privileged: true
    restart: always
    environment:
      - K3S_URL=https://k3s-server:6443
      - K3S_TOKEN=${K3S_TOKEN:-secret}
    volumes:
      - k3s-agent:/var/lib/rancher/k3s
    profiles: ["cluster"] # only start the agent when run with the `cluster` profile
volumes:
  k3s-server: {}
  k3s-agent: {}

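The Dev Containers extension brings up the non-profiled services (devcontainer and k3s-server) on its own; the k3s-agent only joins when the `cluster` profile is enabled. A sketch of starting the stack manually from the .devcontainer directory, assuming Docker Compose v2:

docker compose --profile cluster up -d
# kubeconfig.yaml is written next to docker-compose.yml through the `.:/output` mount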

@@ -0,0 +1,81 @@
### helm show values fission-charts/fission-all > .devcontainer/fission-values.yaml
serviceMonitor:
  enabled: true
  ## namespace in which you want to deploy servicemonitor
  ##
  namespace: "monitoring"
  ## Map of additional labels to add to the ServiceMonitor resources
  # to allow selecting specific ServiceMonitors
  # in case of multiple prometheus deployments
  additionalServiceMonitorLabels:
    release: "prometheus"
    # key: "value"
## The following components expose Prometheus metrics and have podmonitors in this chart (disabled by default)
##
podMonitor:
  enabled: true
  ## namespace in which you want to deploy podmonitor
  ##
  namespace: "monitoring"
  ## Map of additional labels to add to the PodMonitor resources
  # to allow selecting specific PodMonitors
  # in case of multiple prometheus deployments
  additionalPodMonitorLabels:
    release: "monitoring"
    # key: "value"
## Enable Grafana Dashboard configmaps for auto dashboard provisioning
## If you use the kube-prometheus stack for monitoring, these will get imported into Grafana
grafana:
  ## The namespace in which the grafana pod is present
  namespace: monitoring
  dashboards:
    ## Disabled by default. Switch to true to deploy them
    enable: true
# OpenTelemetry is a set of tools for collecting, analyzing, and visualizing
# distributed tracing data across function calls.
openTelemetry:
  ## Use this flag to set the collector endpoint for OpenTelemetry.
  ## The variable is the endpoint of the collector in the format shown below.
  ## otlpCollectorEndpoint: "otel-collector.observability.svc:4317"
  ##
  otlpCollectorEndpoint: "otel-collector.opentelemetry-operator-system.svc.cluster.local:4317"
  ## Set this flag to false if you are using a secure endpoint for the collector.
  ##
  otlpInsecure: true
  ## Key-value pairs to be used as headers associated with gRPC or HTTP requests to the collector.
  ## Eg. otlpHeaders: "key1=value1,key2=value2"
  ##
  # otlpHeaders: ""
  ## Supported samplers:
  ## always_on - Sampler that always samples spans, regardless of the parent span's sampling decision.
  ## always_off - Sampler that never samples spans, regardless of the parent span's sampling decision.
  ## traceidratio - Sampler that samples probabilistically based on rate.
  ## parentbased_always_on - (default if empty) Sampler that respects its parent span's sampling decision, but otherwise always samples.
  ## parentbased_always_off - Sampler that respects its parent span's sampling decision, but otherwise never samples.
  ## parentbased_traceidratio - Sampler that respects its parent span's sampling decision, but otherwise samples probabilistically based on rate.
  ## See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration
  ##
  tracesSampler: "parentbased_traceidratio"
  ## Each Sampler type defines its own expected input, if any.
  ## Currently we get trace ratio for the case of,
  ## 1. traceidratio
  ## 2. parentbased_traceidratio
  ## Sampling probability, a number in the [0..1] range, e.g. "0.1". Default is 0.1.
  ##
  tracesSamplingRate: "1"
  ## Supported propagators:
  ## tracecontext - W3C Trace Context
  ## baggage - W3C Baggage
  ## b3 - B3 Single
  ## b3multi - B3 Multi
  ## jaeger - Jaeger uber-trace-id header
  ## xray - AWS X-Ray (third party)
  ## ottrace - OpenTracing Trace (third party)
  ## none - No tracing
  ## See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#general-sdk-configuration
  ##
  # propagators: "tracecontext,baggage"

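A minimal sketch of feeding these values into the running release and checking that the monitoring objects were created; it assumes the kube-prometheus-stack CRDs already exist in the cluster:

helm upgrade fission fission-charts/fission-all --namespace fission -f .devcontainer/fission-values.yaml
kubectl -n monitoring get servicemonitors,podmonitors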

@@ -0,0 +1,132 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: otel-collector-conf
  namespace: opentelemetry-operator-system
  labels:
    app: opentelemetry
    component: otel-collector-conf
data:
  otel-collector-config: |
    receivers:
      # Make sure to add the otlp receiver.
      # This will open up the receiver on port 4317
      otlp:
        protocols:
          grpc:
            endpoint: "0.0.0.0:4317"
    processors:
    extensions:
      health_check: {}
    exporters:
      jaeger:
        # <service-name>.<namespace>.svc.cluster.local:<service-port>
        endpoint: "jaeger-collector.jaeger.svc.cluster.local:14250"
        insecure: true
      prometheus:
        endpoint: 0.0.0.0:8889
        namespace: "testapp"
      logging:
    service:
      extensions: [ health_check ]
      pipelines:
        traces:
          receivers: [ otlp ]
          processors: []
          exporters: [ jaeger ]
        metrics:
          receivers: [ otlp ]
          processors: []
          exporters: [ prometheus, logging ]
---
apiVersion: v1
kind: Service
metadata:
  name: otel-collector
  namespace: opentelemetry-operator-system
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  ports:
    - name: otlp # Default endpoint for otlp receiver.
      port: 4317
      protocol: TCP
      targetPort: 4317
      nodePort: 30080
    - name: metrics # Default endpoint for metrics.
      port: 8889
      protocol: TCP
      targetPort: 8889
  selector:
    component: otel-collector
  type: NodePort
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: otel-collector
  namespace: opentelemetry-operator-system
  labels:
    app: opentelemetry
    component: otel-collector
spec:
  selector:
    matchLabels:
      app: opentelemetry
      component: otel-collector
  minReadySeconds: 5
  progressDeadlineSeconds: 120
  replicas: 1 # TODO - adjust this to your own requirements
  template:
    metadata:
      annotations:
        prometheus.io/path: "/metrics"
        prometheus.io/port: "8889"
        prometheus.io/scrape: "true"
      labels:
        app: opentelemetry
        component: otel-collector
    spec:
      containers:
        - command:
            - "/otelcol"
            - "--config=/conf/otel-collector-config.yaml"
            # Memory Ballast size should be max 1/3 to 1/2 of memory.
            - "--mem-ballast-size-mib=683"
          env:
            - name: GOGC
              value: "80"
          image: otel/opentelemetry-collector:0.6.0
          name: otel-collector
          resources:
            limits:
              cpu: 1
              memory: 2Gi
            requests:
              cpu: 200m
              memory: 400Mi
          ports:
            - containerPort: 4317 # Default endpoint for otlp receiver.
            - containerPort: 8889 # Default endpoint for querying metrics.
          volumeMounts:
            - name: otel-collector-config-vol
              mountPath: /conf
            # - name: otel-collector-secrets
            #   mountPath: /secrets
          livenessProbe:
            httpGet:
              path: /
              port: 13133 # Health Check extension default port.
          readinessProbe:
            httpGet:
              path: /
              port: 13133 # Health Check extension default port.
      volumes:
        - configMap:
            name: otel-collector-conf
            items:
              - key: otel-collector-config
                path: otel-collector-config.yaml
          name: otel-collector-config-vol

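A quick, non-authoritative way to check that the collector defined above is receiving data: port-forward the Prometheus exporter port and watch the logging exporter's output.

kubectl -n opentelemetry-operator-system port-forward svc/otel-collector 8889:8889 &
curl -s http://localhost:8889/metrics | head
kubectl -n opentelemetry-operator-system logs deploy/otel-collector --tail=50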
.devcontainer/initscript.sh Executable file

@@ -0,0 +1,170 @@
#!/bin/bash
## For debugging
# set -eux
#############################
### DEV PACKAGES
#############################
export RAKE_VER=0.1.3
curl -L https://$PRIVATE_GIT_TOKEN@registry.vegastar.vn/vegacloud/make/releases/download/$RAKE_VER/rake-$RAKE_VER-x86_64-unknown-linux-musl.tar.gz | tar xzv -C /tmp/
sudo install -o root -g root -m 0755 /tmp/rake-$RAKE_VER-x86_64-unknown-linux-musl/rake /usr/local/bin/rake
#############################
### KUBECTL
#############################
## Config kubectl
mkdir -p ~/.kube
cp ${PWD}/.devcontainer/kubeconfig.yaml ~/.kube/config
sed -i 's/127.0.0.1/k3s-server/g' ~/.kube/config
## allow insecure connection
shopt -s expand_aliases
echo 'alias kubectl="kubectl --insecure-skip-tls-verify"' >> ~/.bashrc
echo 'alias k="kubectl --insecure-skip-tls-verify"' >> ~/.bashrc
#############################
### NGINX INGRESS
#############################
kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-$NGINX_INGRESS_VER/deploy/static/provider/cloud/deploy.yaml
cat <<EOT >> /tmp/nginx-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx-controller-loadbalancer
  namespace: ingress-nginx
spec:
  selector:
    app.kubernetes.io/component: controller
    app.kubernetes.io/instance: ingress-nginx
    app.kubernetes.io/name: ingress-nginx
  ports:
    - name: http
      port: 80
      protocol: TCP
      targetPort: 80
    - name: https
      port: 443
      protocol: TCP
      targetPort: 443
  type: LoadBalancer
EOT
kubectl apply -f /tmp/nginx-service.yaml
rm -f /tmp/nginx-service.yaml
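## (Suggested check, left commented out.) The ingress-nginx admission webhook can take a
## few seconds to become ready; waiting here avoids sporadic failures when the Ingress for
## the fission router is applied further down.
# kubectl -n ingress-nginx wait --for=condition=Available deployment/ingress-nginx-controller --timeout=180s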
#############################
### OPEN TELEMETRY
#############################
# kubectl create namespace $JAEGER_NAMESPACE
# kubectl create namespace $OPENTELEMETRY_NAMESPACE
# ## cert-manager
# kubectl apply -f https://github.com/jetstack/cert-manager/releases/latest/download/cert-manager.yaml
# ## install jaeger
# helm repo add jaegertracing https://jaegertracing.github.io/helm-charts
# helm install jaeger jaegertracing/jaeger -n $JAEGER_NAMESPACE
# kubectl -n $JAEGER_NAMESPACE get po
# ## open telemetry operator
# kubectl apply -f https://github.com/open-telemetry/opentelemetry-operator/releases/latest/download/opentelemetry-operator.yaml
# ## create an OpenTelemetry Collector instance
# kubectl -n $OPENTELEMETRY_NAMESPACE apply -f .devcontainer/helm/opentelemetry-collector.yaml
#############################
### FISSION PODs
#############################
kubectl create namespace $FISSION_NAMESPACE
## install with helm
kubectl create -k "github.com/fission/fission/crds/v1?ref=${FISSION_VER}"
helm repo add fission-charts https://fission.github.io/fission-charts/ && helm repo update
kubectl apply -f - <<EOF
apiVersion: v1
kind: Namespace
metadata:
  name: fission
---
apiVersion: v1
kind: Namespace
metadata:
  name: gh-eom
EOF
kubectl apply -f - <<EOF
type: kubernetes.io/dockerconfigjson
apiVersion: v1
kind: Secret
metadata:
  name: vega-container-registry
  namespace: fission
data:
  .dockerconfigjson: >-
    eyJhdXRocyI6eyJyZWdpc3RyeS52ZWdhc3Rhci52biI6eyJ1c2VybmFtZSI6InRpZW5kZCIsInBhc3N3b3JkIjoiYTBjY2JjMDVjNzMyYzExMjU3OTg1NjMwNjY5ZTFjNjEyNDg0NzU1MyIsImF1dGgiOiJkR2xsYm1Sa09tRXdZMk5pWXpBMVl6Y3pNbU14TVRJMU56azROVFl6TURZMk9XVXhZell4TWpRNE5EYzFOVE09In19fQ==
EOF
helm upgrade --install fission fission-charts/fission-all --namespace $FISSION_NAMESPACE -f - <<EOF
imagePullSecrets:
  - name: vega-container-registry
defaultNamespace: default
additionalFissionNamespaces:
  - gh-eom
EOF
kubectl apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: router
  namespace: fission
spec:
  ingressClassName: nginx
  rules:
    - http:
        paths:
          - path: /ailbl
            pathType: Prefix
            backend:
              service:
                name: router
                port:
                  number: 80
EOF
# ## install without helm
# kubectl create -k "github.com/fission/fission/crds/v1?ref=${FISSION_VER}"
# kubectl create namespace $FISSION_NAMESPACE
# kubectl config set-context --current --namespace=$FISSION_NAMESPACE
# kubectl apply -f https://github.com/fission/fission/releases/download/${FISSION_VER}/fission-all-${FISSION_VER}-minikube.yaml
# kubectl config set-context --current --namespace=default #to change context to default namespace after installation
#############################
### PROMETHEUS AND GRAFANA
#############################
# kubectl create namespace $METRICS_NAMESPACE
# helm repo add prometheus-community https://prometheus-community.github.io/helm-charts && helm repo update
# helm install prometheus prometheus-community/kube-prometheus-stack -n $METRICS_NAMESPACE
#############################
### UPDATE FISSION
#############################
# helm upgrade fission fission-charts/fission-all --namespace $FISSION_NAMESPACE -f .devcontainer/helm/fission-values.yaml
#############################
### PORT FORWARDING
#############################
## To access jaeger-query, you can use Kubernetes port forwarding
# kubectl -n jaeger port-forward svc/jaeger-query 8080:80 --address='0.0.0.0'
## To access Grafana, you can use Kubernetes port forwarding
# kubectl --namespace monitoring port-forward svc/prometheus-grafana 3000:80
## For the admin password, you'll need to run the following command:
# kubectl get secret --namespace monitoring prometheus-grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo
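#############################
### SMOKE TEST (suggested, commented out)
#############################
## Illustrative end-to-end check; it assumes the fission CLI is available in the devcontainer.
## The environment image, function name and route below are examples, not part of this repo.
## Note the /ailbl prefix: the Ingress above forwards /ailbl to the router without a rewrite,
## so routes must include it.
# fission env create --name python --image fission/python-env
# printf 'def main():\n    return "hello"\n' > /tmp/hello.py
# fission function create --name hello --env python --code /tmp/hello.py
# fission route create --name hello --method GET --url /ailbl/hello --function hello
# curl http://k3s-server/ailbl/hello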