feat: implement Phase 2 - Sequential Pipeline with Celery Workers

This commit is contained in:
2025-10-06 10:44:43 -03:00
parent bf06ae190a
commit 6f8ffe1e49
4 changed files with 186 additions and 16 deletions

View File

@@ -1982,34 +1982,79 @@
// Show fullscreen loading modal
showFullscreenLoading(
'Analyzing Cluster Resources',
'Please wait while we analyze your cluster resources and generate insights... This may take up to 60 seconds for large clusters.'
'Starting background analysis pipeline... This may take up to 2 minutes for large clusters.'
);
// Start smart loading system
startSmartLoading();
// Step 1: Load cluster status
updateSmartProgress(0, 'Connecting to OpenShift API...');
// Step 1: Start background cluster analysis
updateSmartProgress(0, 'Starting background cluster analysis...');
const clusterResponse = await fetch('/api/v1/cluster/status');
if (!clusterResponse.ok) {
throw new Error(`HTTP error! status: ${clusterResponse.status}`);
const analysisResponse = await fetch('/api/v1/tasks/cluster/analyze', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
}
});
if (!analysisResponse.ok) {
throw new Error(`HTTP error! status: ${analysisResponse.status}`);
}
const clusterData = await clusterResponse.json();
updateSmartProgress(1, 'Cluster data loaded successfully');
const analysisData = await analysisResponse.json();
const taskId = analysisData.task_id;
// Update metrics cards
updateMetricsCards(clusterData);
updateSmartProgress(1, 'Background analysis started, monitoring progress...');
// Step 2: Load dashboard charts
updateSmartProgress(2, 'Loading dashboard charts...');
await loadDashboardCharts();
// Step 2: Monitor task progress
let taskCompleted = false;
let attempts = 0;
const maxAttempts = 120; // 2 minutes max
// Step 3: Complete
updateSmartProgress(3, 'Analysis complete');
while (!taskCompleted && attempts < maxAttempts) {
await new Promise(resolve => setTimeout(resolve, 1000)); // Wait 1 second
attempts++;
try {
const statusResponse = await fetch(`/api/v1/tasks/${taskId}/status`);
if (!statusResponse.ok) {
throw new Error(`HTTP error! status: ${statusResponse.status}`);
}
const statusData = await statusResponse.json();
if (statusData.state === 'PROGRESS') {
const progress = Math.round((statusData.current / statusData.total) * 100);
updateSmartProgress(progress, statusData.status);
} else if (statusData.state === 'SUCCESS') {
updateSmartProgress(100, 'Analysis completed successfully!');
// Update metrics cards with results
updateMetricsCards(statusData.result);
// Load dashboard charts
updateSmartProgress(100, 'Loading dashboard charts...');
await loadDashboardCharts();
currentData = { cluster: statusData.result };
taskCompleted = true;
} else if (statusData.state === 'FAILURE') {
throw new Error(`Analysis failed: ${statusData.error}`);
}
} catch (error) {
console.warn('Error checking task status:', error);
if (attempts >= maxAttempts) {
throw new Error('Analysis timeout - please try again');
}
}
}
currentData = { cluster: clusterData };
if (!taskCompleted) {
throw new Error('Analysis timeout - please try again');
}
// Stop smart loading and hide modal
stopSmartLoading();

View File

@@ -0,0 +1,120 @@
# Deployment for the Celery background-analysis workers (Phase 2 pipeline).
# NOTE: indentation reconstructed — the diff rendering had flattened it,
# which makes the manifest invalid YAML; structure follows apps/v1 Deployment schema.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: celery-worker
  namespace: resource-governance
  labels:
    app.kubernetes.io/name: celery-worker
    app.kubernetes.io/component: worker
spec:
  replicas: 2
  selector:
    matchLabels:
      app.kubernetes.io/name: celery-worker
      app.kubernetes.io/component: worker
  template:
    metadata:
      labels:
        app.kubernetes.io/name: celery-worker
        app.kubernetes.io/component: worker
    spec:
      serviceAccountName: resource-governance-sa
      securityContext:
        runAsNonRoot: true
      containers:
        - name: celery-worker
          image: quay.io/rh_ee_anobre/resource-governance:latest
          imagePullPolicy: Always
          command: ["python", "app/workers/celery_worker.py"]
          # Restricted-profile hardening: no escalation, no capabilities,
          # default seccomp — matches OpenShift "restricted" SCC expectations.
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            seccompProfile:
              type: RuntimeDefault
          env:
            # NOTE(review): KUBECONFIG normally points to a kubeconfig FILE,
            # not a bearer-token file. If the worker uses in-cluster config
            # this var is ignored; if it actually loads KUBECONFIG, this
            # value will fail to parse — confirm against the worker code.
            - name: KUBECONFIG
              value: "/var/run/secrets/kubernetes.io/serviceaccount/token"
            # Celery broker/result backend — shared redis-config ConfigMap.
            - name: REDIS_URL
              valueFrom:
                configMapKeyRef:
                  name: redis-config
                  key: REDIS_URL
            - name: CELERY_BROKER_URL
              valueFrom:
                configMapKeyRef:
                  name: redis-config
                  key: CELERY_BROKER_URL
            - name: CELERY_RESULT_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: redis-config
                  key: CELERY_RESULT_BACKEND
            # Governance tunables — mirrored from the main app's ConfigMap so
            # workers compute with the same ratios/thresholds as the API pods.
            - name: CPU_LIMIT_RATIO
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: CPU_LIMIT_RATIO
            - name: MEMORY_LIMIT_RATIO
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: MEMORY_LIMIT_RATIO
            - name: MIN_CPU_REQUEST
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: MIN_CPU_REQUEST
            - name: MIN_MEMORY_REQUEST
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: MIN_MEMORY_REQUEST
            - name: CRITICAL_NAMESPACES
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: CRITICAL_NAMESPACES
            - name: INCLUDE_SYSTEM_NAMESPACES
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: INCLUDE_SYSTEM_NAMESPACES
            - name: SYSTEM_NAMESPACE_PREFIXES
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: SYSTEM_NAMESPACE_PREFIXES
            - name: PROMETHEUS_URL
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: PROMETHEUS_URL
            - name: REPORT_EXPORT_PATH
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: REPORT_EXPORT_PATH
            - name: SERVICE_ACCOUNT_NAME
              valueFrom:
                configMapKeyRef:
                  name: resource-governance-config
                  key: SERVICE_ACCOUNT_NAME
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 500m
              memory: 512Mi
          # NOTE(review): mounting a secret over the default SA path replaces
          # the kubelet-projected token (and its automatic rotation) with the
          # long-lived resource-governance-sa-token secret — confirm intended.
          volumeMounts:
            - name: service-account-token
              mountPath: /var/run/secrets/kubernetes.io/serviceaccount
              readOnly: true
      volumes:
        - name: service-account-token
          secret:
            secretName: resource-governance-sa-token
            optional: false
      restartPolicy: Always

View File

@@ -8,6 +8,7 @@ resources:
- redis-configmap.yaml
- redis-deployment.yaml
- deployment.yaml
- celery-worker-deployment.yaml
- service.yaml
- route.yaml

View File

@@ -57,6 +57,10 @@ fi
echo -e "${YELLOW}Applying Deployment...${NC}"
oc apply -f k8s/deployment.yaml
# Apply Celery Worker Deployment
echo -e "${YELLOW}Applying Celery Worker Deployment...${NC}"
oc apply -f k8s/celery-worker-deployment.yaml
# Apply Service
echo -e "${YELLOW}Applying Service...${NC}"
oc apply -f k8s/service.yaml