From 6f8ffe1e49dd1d0499cfb46bedad9e573ef04116 Mon Sep 17 00:00:00 2001 From: andersonid Date: Mon, 6 Oct 2025 10:44:43 -0300 Subject: [PATCH] feat: implement Phase 2 - Sequential Pipeline with Celery Workers --- app/static/index.html | 77 +++++++++++++++---- k8s/celery-worker-deployment.yaml | 120 ++++++++++++++++++++++++++++++ k8s/kustomization.yaml | 1 + scripts/deploy-complete.sh | 4 + 4 files changed, 186 insertions(+), 16 deletions(-) create mode 100644 k8s/celery-worker-deployment.yaml diff --git a/app/static/index.html b/app/static/index.html index b96f4e6..cb22afd 100644 --- a/app/static/index.html +++ b/app/static/index.html @@ -1982,34 +1982,79 @@ // Show fullscreen loading modal showFullscreenLoading( 'Analyzing Cluster Resources', - 'Please wait while we analyze your cluster resources and generate insights... This may take up to 60 seconds for large clusters.' + 'Starting background analysis pipeline... This may take up to 2 minutes for large clusters.' ); // Start smart loading system startSmartLoading(); - // Step 1: Load cluster status - updateSmartProgress(0, 'Connecting to OpenShift API...'); + // Step 1: Start background cluster analysis + updateSmartProgress(0, 'Starting background cluster analysis...'); - const clusterResponse = await fetch('/api/v1/cluster/status'); - if (!clusterResponse.ok) { - throw new Error(`HTTP error! status: ${clusterResponse.status}`); + const analysisResponse = await fetch('/api/v1/tasks/cluster/analyze', { + method: 'POST', + headers: { + 'Content-Type': 'application/json' + } + }); + + if (!analysisResponse.ok) { + throw new Error(`HTTP error! 
status: ${analysisResponse.status}`); } - const clusterData = await clusterResponse.json(); - updateSmartProgress(1, 'Cluster data loaded successfully'); + const analysisData = await analysisResponse.json(); + const taskId = analysisData.task_id; - // Update metrics cards - updateMetricsCards(clusterData); + updateSmartProgress(1, 'Background analysis started, monitoring progress...'); - // Step 2: Load dashboard charts - updateSmartProgress(2, 'Loading dashboard charts...'); - await loadDashboardCharts(); + // Step 2: Monitor task progress + let taskCompleted = false; + let attempts = 0; + const maxAttempts = 120; // 2 minutes max - // Step 3: Complete - updateSmartProgress(3, 'Analysis complete'); + while (!taskCompleted && attempts < maxAttempts) { + await new Promise(resolve => setTimeout(resolve, 1000)); // Wait 1 second + attempts++; + + try { + const statusResponse = await fetch(`/api/v1/tasks/${taskId}/status`); + if (!statusResponse.ok) { + throw new Error(`HTTP error! status: ${statusResponse.status}`); + } + + const statusData = await statusResponse.json(); + + if (statusData.state === 'PROGRESS') { + const progress = Math.round((statusData.current / statusData.total) * 100); + updateSmartProgress(progress, statusData.status); + } else if (statusData.state === 'SUCCESS') { + updateSmartProgress(100, 'Analysis completed successfully!'); + + // Update metrics cards with results + updateMetricsCards(statusData.result); + + // Load dashboard charts + updateSmartProgress(100, 'Loading dashboard charts...'); + await loadDashboardCharts(); + + currentData = { cluster: statusData.result }; + taskCompleted = true; + + } else if (statusData.state === 'FAILURE') { + throw new Error(`Analysis failed: ${statusData.error}`); + } + + } catch (error) { + // Fatal pipeline failures must propagate: the FAILURE throw above lands in + // this same catch, so without this guard it would be logged as a transient + // poll error and retried until the generic timeout masks the real cause. + if (error.message && error.message.startsWith('Analysis failed:')) { + throw error; + } + console.warn('Error checking task status:', error); + if (attempts >= maxAttempts) { + throw new Error('Analysis timeout - please try again'); + } + } + } - currentData = { cluster: clusterData }; + if (!taskCompleted) 
{ + throw new Error('Analysis timeout - please try again'); + } // Stop smart loading and hide modal stopSmartLoading(); diff --git a/k8s/celery-worker-deployment.yaml b/k8s/celery-worker-deployment.yaml new file mode 100644 index 0000000..39881b9 --- /dev/null +++ b/k8s/celery-worker-deployment.yaml @@ -0,0 +1,120 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: celery-worker + namespace: resource-governance + labels: + app.kubernetes.io/name: celery-worker + app.kubernetes.io/component: worker +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: celery-worker + app.kubernetes.io/component: worker + template: + metadata: + labels: + app.kubernetes.io/name: celery-worker + app.kubernetes.io/component: worker + spec: + serviceAccountName: resource-governance-sa + securityContext: + runAsNonRoot: true + containers: + - name: celery-worker + image: quay.io/rh_ee_anobre/resource-governance:latest + imagePullPolicy: Always + command: ["python", "app/workers/celery_worker.py"] + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + env: + # NOTE(review): a service-account token is not a kubeconfig file; in-cluster + # Kubernetes clients use the mounted service account (load_incluster_config) + # and ignore KUBECONFIG — confirm this env var is actually read by the worker. + - name: KUBECONFIG + value: "/var/run/secrets/kubernetes.io/serviceaccount/token" + - name: REDIS_URL + valueFrom: + configMapKeyRef: + name: redis-config + key: REDIS_URL + - name: CELERY_BROKER_URL + valueFrom: + configMapKeyRef: + name: redis-config + key: CELERY_BROKER_URL + - name: CELERY_RESULT_BACKEND + valueFrom: + configMapKeyRef: + name: redis-config + key: CELERY_RESULT_BACKEND + - name: CPU_LIMIT_RATIO + valueFrom: + configMapKeyRef: + name: resource-governance-config + key: CPU_LIMIT_RATIO + - name: MEMORY_LIMIT_RATIO + valueFrom: + configMapKeyRef: + name: resource-governance-config + key: MEMORY_LIMIT_RATIO + - name: MIN_CPU_REQUEST + valueFrom: + configMapKeyRef: + name: resource-governance-config + key: MIN_CPU_REQUEST + - name: MIN_MEMORY_REQUEST + valueFrom: + configMapKeyRef: + name: resource-governance-config + 
key: MIN_MEMORY_REQUEST + - name: CRITICAL_NAMESPACES + valueFrom: + configMapKeyRef: + name: resource-governance-config + key: CRITICAL_NAMESPACES + - name: INCLUDE_SYSTEM_NAMESPACES + valueFrom: + configMapKeyRef: + name: resource-governance-config + key: INCLUDE_SYSTEM_NAMESPACES + - name: SYSTEM_NAMESPACE_PREFIXES + valueFrom: + configMapKeyRef: + name: resource-governance-config + key: SYSTEM_NAMESPACE_PREFIXES + - name: PROMETHEUS_URL + valueFrom: + configMapKeyRef: + name: resource-governance-config + key: PROMETHEUS_URL + - name: REPORT_EXPORT_PATH + valueFrom: + configMapKeyRef: + name: resource-governance-config + key: REPORT_EXPORT_PATH + - name: SERVICE_ACCOUNT_NAME + valueFrom: + configMapKeyRef: + name: resource-governance-config + key: SERVICE_ACCOUNT_NAME + resources: + requests: + cpu: 100m + memory: 128Mi + limits: + cpu: 500m + memory: 512Mi + volumeMounts: + - name: service-account-token + mountPath: /var/run/secrets/kubernetes.io/serviceaccount + readOnly: true + volumes: + - name: service-account-token + secret: + secretName: resource-governance-sa-token + optional: false + restartPolicy: Always diff --git a/k8s/kustomization.yaml b/k8s/kustomization.yaml index 9af14bb..644fbf7 100644 --- a/k8s/kustomization.yaml +++ b/k8s/kustomization.yaml @@ -8,6 +8,7 @@ resources: - redis-configmap.yaml - redis-deployment.yaml - deployment.yaml +- celery-worker-deployment.yaml - service.yaml - route.yaml diff --git a/scripts/deploy-complete.sh b/scripts/deploy-complete.sh index 7bb4090..12e59b1 100755 --- a/scripts/deploy-complete.sh +++ b/scripts/deploy-complete.sh @@ -57,6 +57,10 @@ fi echo -e "${YELLOW}Applying Deployment...${NC}" oc apply -f k8s/deployment.yaml +# Apply Celery Worker Deployment +echo -e "${YELLOW}Applying Celery Worker Deployment...${NC}" +oc apply -f k8s/celery-worker-deployment.yaml + # Apply Service echo -e "${YELLOW}Applying Service...${NC}" oc apply -f k8s/service.yaml